source
stringlengths
3
92
c
stringlengths
26
2.25M
DRB004-antidep2-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* Two nested loops with loop-carried anti-dependence on the outer level. This is a variable-length array version in C99. Data race pair: a[i][j]@70:7 vs. a[i+1][j]@70:18 */ #include <stdlib.h> #include <omp.h> int main(int argc,char *argv[]) { int i; int j; int len = 20; if (argc > 1) len = atoi(argv[1]); double a[len][len]; #pragma omp parallel for private (i,j) for (i = 0; i <= len - 1; i += 1) { #pragma omp parallel for private (j) for (j = 0; j <= len - 1; j += 1) { a[i][j] = 0.5; } } for (i = 0; i <= len - 1 - 1; i += 1) { #pragma omp parallel for private (j) for (j = 0; j <= len - 1; j += 1) { a[i][j] += a[i + 1][j]; } } for (i = 0; i <= len - 1; i += 1) { for (j = 0; j <= len - 1; j += 1) { printf("%lf\n",a[i][j]); } } return 0; }
eur.c
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <time.h>

#define MYMAX(x,y) ((x > y) ? x : y)

/* European option pricer on an ns x nt finite-difference grid, solved
 * backwards in time; each implicit time step is solved with SOR
 * (successive over-relaxation, factor omega).
 * `values` is row-major: row i holds the ns space points at time level i.
 * Prints sample prices (presumably at t = 0.5 — TODO confirm the `ti`
 * formula against the grid convention) for a fixed list of spot values. */
void eur() {
  double sig2 = 0.04;            /* volatility squared */
  double E = 10;                 /* strike */
  double r = 0.05;               /* risk-free rate */
  double T = 1.0;                /* maturity */
  double b = 30.0;               /* upper bound of the space domain */
  int nt = 599;
  double dt = (T / (double)(nt+1));
  int ns = 599;
  double ds = (b / (double)(ns+1));
  double eps = 1E-8;             /* SOR convergence tolerance */
  int maxIter = 5000;            /* SOR iteration cap per time step */
  double omega = 1.35;           /* over-relaxation factor */

  double *values = malloc(sizeof(double) * ns*nt);
  double *Aj = malloc(sizeof(double) * ns*ns);   /* tridiagonal system, dense storage */
  double *bj = malloc(sizeof(double) * ns);      /* right-hand side */
  double *uj = malloc(sizeof(double) * ns);      /* SOR iterate */
  /* fix: the original never checked any of these allocations (CERT MEM32-C) */
  if (!values || !Aj || !bj || !uj) {
    fprintf(stderr, "eur: out of memory\n");
    free(values); free(Aj); free(bj); free(uj);
    return;
  }

  /* Terminal condition at the last time level: the payoff max(E - S, 0). */
  #pragma omp parallel for
  for(int i = 0; i < ns; i++)
    values[(nt-1)*ns+i] = MYMAX(E-i*ds, 0);

  double center, left, right;
  for(int i = nt-1; i >= 1; i--) {
    /* Assemble the tridiagonal system for this time step. */
    for(int j = 0; j < ns; j++) {
      double j2 = (j+1)*(j+1);
      double c = (sig2*j2-2/dt);
      center = -sig2*j2-2*r-2/dt;
      left = (sig2*j2-r*(j+1))/2;
      right = (sig2*j2+r*(j+1))/2;
      int main_diag = j*ns+j;
      Aj[main_diag] = center;
      if(j > 0) Aj[main_diag-1] = left;
      if(j < ns-1) Aj[main_diag+1] = right;
      int index = i*ns + j;
      /* Boundary rows fold the known boundary values into the RHS
       * (lower boundary uses the strike E, upper boundary value is 0). */
      if(j == 0) {
        bj[j] = c*values[index] - right*values[index+1] - 2*left*E;
      } else if(j == ns-1) {
        bj[j] = c*values[index] - left*values[index-1];
      } else {
        bj[j] = c*values[index] - right*values[index+1] - left*values[index-1];
      }
    }

    /* SOR sweep: uj[j-1] is the already-updated value (Gauss-Seidel part),
     * temp[j+1] is the previous iterate. */
    double error = 1.0;
    int nIter = 0;
    double temp[ns];
    memcpy(temp, &values[i*ns], sizeof(double)*ns);
    while(error > eps && nIter < maxIter) {
      nIter += 1;
      for(int j = 0; j < ns; j++) {
        int m_center = j*ns+j;
        uj[j] = bj[j];
        if(j > 0) uj[j] -= Aj[m_center-1] * uj[j-1];
        if(j < ns-1) uj[j] -= Aj[m_center+1] * temp[j+1];
        if(fabs(Aj[m_center]) > 1E-8) {
          uj[j] /= Aj[m_center];
          uj[j] = omega*uj[j] + (1-omega)*temp[j];
        }
      }
      /* Convergence: infinity norm of the update. */
      double norm = 0;
      for(int j = 0; j < ns; j++) {
        if(fabs(uj[j]-temp[j]) > norm) norm = fabs(uj[j]-temp[j]);
      }
      error = norm;
      for(int j = 0; j < ns; j++) temp[j] = uj[j];
    }
    /* Converged solution becomes the previous time level. */
    memcpy(&values[(i-1)*ns], uj, sizeof(double)*ns);
  }
  free(Aj);
  free(bj);
  free(uj);

  /* Sample the solution at a fixed list of spot prices. */
  int num_tests[13] = {2,4,6,7,8,9,10,11,12,13,14,15,16};
  for(int i = 0; i < 13; i++) {
    int si = (int) ((double) num_tests[i] / ds);
    int ti = (int) (0.5 / dt) + 1;
    printf("%d %12.10f \n", num_tests[i], values[ti*ns+si]);
  }
  free(values);
}

int main() {
  /* fix: clock() returns clock_t, not double; measures CPU time, not
   * wall time (the commented omp_get_wtime suggests wall time was wanted). */
  clock_t start = clock();
  eur();
  printf("%f seconds\n", (double)(clock()-start) / CLOCKS_PER_SEC);
  return 0;
}
gt.scorereads.c
/* * gt.scorereads.c * * Created on: 8 Jul 2013 * Author: heath */ #include <getopt.h> #include <ctype.h> #ifdef HAVE_OPENMP #include <omp.h> #endif #include <pthread.h> #include "gem_tools.h" #define DEFAULT_INS_CUTOFF 0.01 /* Insert sizes in the upper or lower cutoff percentiles will not be used */ #define MAP_THRESHOLD 3 #define AP_BUF_SIZE 16384 #define QUAL_FASTQ 33 #define QUAL_SOLEXA 64 #define SOLEXA_BAD_QUAL 2 #define DEFAULT_QUAL (QUAL_FASTQ) #define MISSING_QUAL 40 // Value to use in alignment score if qualities not available #define INDEL_QUAL 40 // Value to use in alignment score for indels #define MAX_GT_SCORE 0xFFFF #define MAX_QUAL 42 #define ALIGN_NORM 0 #define ALIGN_BS_POS 1 #define ALIGN_BS_NEG 2 #define ALIGN_TYPES 3 #define AL_FORWARD 4 #define AL_REVERSE 8 #define AL_DIRECTIONS ((AL_FORWARD)|(AL_REVERSE)) #define AL_USED 128 typedef struct { char *input_files[2]; char *output_file; char *dist_file; double ins_cutoff; bool mmap_input; bool verbose; gt_output_file_compression compress; gt_generic_parser_attributes *parser_attr; gt_generic_printer_attributes *printer_attr; gt_buffered_output_file *buf_output[2]; int64_t min_insert; int64_t max_insert; double *ins_dist; uint8_t *ins_phred; int num_threads; int mapping_cutoff; int indel_quality; int qual_offset; // quality offset (33 for FASTQ, 64 for Illumina) } sr_param; sr_param param = { .input_files={NULL,NULL}, .output_file=NULL, .dist_file=NULL, .ins_cutoff=DEFAULT_INS_CUTOFF, .mmap_input=false, .compress=NONE, .parser_attr=NULL, .verbose=false, .num_threads=1, .indel_quality=INDEL_QUAL, .qual_offset=DEFAULT_QUAL, .mapping_cutoff=0, .min_insert=0, .max_insert=0, .ins_dist=NULL, .ins_phred=NULL }; void usage(const gt_option* const options,char* groups[],const bool print_inactive) { fprintf(stderr, "USE: ./gt_scorereads [ARGS]...\n"); gt_options_fprint_menu(stderr,options,groups,false,print_inactive); } static void *sr_malloc(size_t s) { void *p; p=malloc(s); 
gt_cond_fatal_error(!p,MEM_HANDLER); return p; } static void *sr_calloc(size_t n,size_t s) { void *p; p=calloc(n,s); gt_cond_fatal_error(!p,MEM_HANDLER); return p; } static void *sr_realloc(void *ptr,size_t s) { void *p; p=realloc(ptr,s); gt_cond_fatal_error(!p,MEM_HANDLER); return p; } typedef struct { int64_t x; u_int64_t y; } hist_entry; void read_dist_file(sr_param *param,int iset[2]) { gt_input_file* file=gt_input_file_open(param->dist_file,false); gt_buffered_input_file* bfile=gt_buffered_input_file_new(file); gt_status nl=0; typedef enum {V1,V2,UNKNOWN} dist_file_type; dist_file_type ftype=UNKNOWN; bool first=true; size_t sz=1024; size_t ct=0; u_int64_t total=0; hist_entry *hist=sr_malloc(sizeof(hist_entry)*sz); do { nl=gt_buffered_input_file_get_block(bfile,0); int i; char *cur=(char *)gt_vector_get_mem(bfile->block_buffer,char); for(i=0;i<(int)nl;i++) { char *p=cur; cur=strchr(p,'\n'); assert(cur); while(*p!='\n' && isspace(*p)) p++; if(*p && *p!='n') { char *p1=p+1; while(!isspace(*p1)) p1++; if(*p1!='\n') { *(p1++)=0; while(*p1!='\n' && isspace(*p1)) p1++; if(*p1!='\n') { char *p2=p1+1; while(!isspace(*p2)) p2++; *p2=0; if(ftype==UNKNOWN) { if(!strcmp(p,"Size")) { if(!strcmp(p1,"DS_count")) ftype=V1; else if(!strcmp(p1,"Paired")) ftype=V2; } } else { if(p[1]=='=' && (p[0]=='<' || p[0]=='>')) p+=2; int ef=0; int64_t x=strtoul(p,&p2,10); if(*p2) ef=1; else { int64_t y=strtoul(p1,&p2,10); if(*p2 || y<0) ef=2; else { if(first==false && x<=param->max_insert) { ef=3; } else if(x>=0 && y>0) { param->max_insert=x; if(first==true) { first=false; param->min_insert=x; } if(ct==sz) { sz*=1.5; hist=sr_realloc(hist,sizeof(hist_entry)*sz); } hist[ct].x=x; hist[ct++].y=y; total+=y; } } } if(ef) { if(ef==3) fprintf(stderr,"Insert distribution file not sorted\n"); else fprintf(stderr,"Invalid entry in insert distribution file: %s\t%s\n",p,p1); break; } } } } } cur++; } if(i<nl) break; } while(nl); if(first==false && ct>2) { double z1=param->ins_cutoff*(double)total; 
double z2=(1.0-param->ins_cutoff)*(double)total; double z=0.0; int i; for(i=0;i<ct;i++) { z+=hist[i].y; if(z>=z1) break; } int i1=i; if(!iset[0] || hist[i].x>param->min_insert) param->min_insert=hist[i].x; for(i++;i<ct;i++) { z+=hist[i].y; if(z>=z2) break; } int i2=i-1; if(!iset[1] || hist[i2].x<param->max_insert) param->max_insert=hist[i2].x; fprintf(stderr,"Insert distribution %"PRId64" - %"PRId64"\n",param->min_insert,param->max_insert); int k=param->max_insert-param->min_insert+1; param->ins_dist=sr_malloc(sizeof(double)*k); param->ins_phred=sr_malloc((size_t)k); for(i=0;i<k;i++) param->ins_dist[i]=0.0; for(i=i1;i<=i2;i++) { double zt=(double)hist[i].y/(double)total; param->ins_dist[hist[i].x-param->min_insert]=zt; int phred=255; if(zt>0.0) { phred=(int)(log(zt)*-10.0/log(10.0)+.5); if(phred>255) phred=255; } param->ins_phred[hist[i].x-param->min_insert]=phred; } } else { if(ftype==UNKNOWN) fprintf(stderr,"Insert distribution file format not recognized\n"); else fprintf(stderr,"No valid lines read in from insert distribution file\n"); } free(hist); gt_buffered_input_file_close(bfile); gt_input_file_close(file); } static uint64_t calculate_dist_score(gt_alignment *al, gt_map *map, int qual_offset,int qual_penalty) { register gt_string* const quals = al->qualities; register const bool has_qualities = gt_alignment_has_qualities(al); uint64_t score=0; GT_MAP_ITERATE(map,map_block) { GT_MISMS_ITERATE(map_block,misms) { int quality_misms; if (has_qualities) { quality_misms = gt_string_get_string(quals)[misms->position]-qual_offset; if(quality_misms>MAX_QUAL) quality_misms=MAX_QUAL; else if(quality_misms<0) quality_misms=0; } else quality_misms=MISSING_QUAL; switch (misms->misms_type) { case MISMS: score+=quality_misms; break; case INS: case DEL: score+=qual_penalty; break; } } } if(score>MAX_GT_SCORE) score=MAX_GT_SCORE; return score; } static void pair_read(gt_template *template,gt_alignment *alignment1,gt_alignment *alignment2,sr_param *param) { 
gt_alignment_recalculate_counters(alignment1); gt_alignment_recalculate_counters(alignment2); gt_mmap_attributes attr; gt_map *mmap[2]; uint64_t nmap[2]; nmap[0]=gt_alignment_get_num_maps(alignment1); nmap[1]=gt_alignment_get_num_maps(alignment2); if(nmap[0]+nmap[1]) { char *map_flag[2]; map_flag[0]=sr_calloc((size_t)(nmap[0]+nmap[1]),sizeof(char)); map_flag[1]=map_flag[0]+nmap[0]; uint64_t i=0; GT_ALIGNMENT_ITERATE(alignment1,map1) { if(map1->gt_score==GT_MAP_NO_GT_SCORE) map1->gt_score=calculate_dist_score(alignment1,map1,param->qual_offset,param->indel_quality); mmap[0]=map1; uint64_t j=0; GT_ALIGNMENT_ITERATE(alignment2,map2) { if(map2->gt_score==GT_MAP_NO_GT_SCORE) map2->gt_score=calculate_dist_score(alignment2,map2,param->qual_offset,param->indel_quality); mmap[1]=map2; gt_status gt_err; int64_t x=gt_template_get_insert_size(mmap,&gt_err,0,0); if(gt_err==GT_TEMPLATE_INSERT_SIZE_OK && x>=param->min_insert && x<=param->max_insert) { attr.distance=gt_map_get_global_distance(map1)+gt_map_get_global_distance(map2); attr.gt_score=map1->gt_score|(map2->gt_score<<16); if(param->ins_phred) attr.gt_score|=((uint64_t)param->ins_phred[x-param->min_insert]<<32); attr.phred_score=255; gt_template_inc_counter(template,attr.distance); gt_template_add_mmap_ends(template,map1,map2,&attr); map_flag[0][i]=map_flag[1][j]=1; } j++; } i++; } for(i=0;i<nmap[0];i++) { if(!map_flag[0][i]) { gt_map *map=gt_alignment_get_map(alignment1,i); attr.distance=gt_map_get_global_distance(map); attr.gt_score=map->gt_score; attr.phred_score=255; gt_template_inc_counter(template,attr.distance); gt_template_add_mmap_ends(template,map,0,&attr); } } for(i=0;i<nmap[1];i++) { if(!map_flag[1][i]) { gt_map *map=gt_alignment_get_map(alignment2,i); attr.distance=gt_map_get_global_distance(map); attr.gt_score=(map->gt_score<<16); attr.phred_score=255; gt_template_inc_counter(template,attr.distance); gt_template_add_mmap_ends(template,0,map,&attr); } } free(map_flag[0]); } 
gt_attributes_remove(template->attributes,GT_ATTR_ID_TAG_PAIR); } int parse_arguments(int argc,char** argv) { int err=0; param.parser_attr=gt_input_generic_parser_attributes_new(false); struct option* gt_scorereads_getopt = gt_options_adaptor_getopt(gt_scorereads_options); gt_string* const gt_scorereads_short_getopt = gt_options_adaptor_getopt_short(gt_scorereads_options); int option, option_index; char *p; int insert_set[2]={0,0}; while (true) { // Get option & Select case if ((option=getopt_long(argc,argv, gt_string_get_string(gt_scorereads_short_getopt),gt_scorereads_getopt,&option_index))==-1) break; switch (option) { /* I/O */ case 'i': param.dist_file = optarg; break; case 'o': param.output_file = optarg; break; case 300: param.input_files[0] = optarg; break; case 301: param.input_files[1] = optarg; break; case 'p': gt_input_generic_parser_attributes_set_paired(param.parser_attr,true); break; case 'z': #ifdef HAVE_ZLIB param.compress=GZIP; #endif break; case 'j': #ifdef HAVE_BZLIB param.compress=BZIP2; #endif break; case 'Z': param.compress=NONE; break; /* Score function */ case 'q': if (gt_streq(optarg,"offset-64")) { param.qual_offset=64; } else if (gt_streq(optarg,"offset-33")) { param.qual_offset=33; } else { gt_fatal_error_msg("Quality format not recognized: '%s'",optarg); } break; case 401: param.min_insert=(int)strtol(optarg,&p,10); if(*p || param.min_insert<0) { fprintf(stderr,"Illegal minimum insert size: '%s'\n",optarg); err=-7; } else insert_set[0]=1; break; case 402: param.max_insert=(int)strtol(optarg,&p,10); if(*p || param.max_insert<0) { fprintf(stderr,"Illegal maximum insert size: '%s'\n",optarg); err=-7; } else insert_set[1]=1; break; case 403: param.indel_quality=(int)strtol(optarg,&p,10); if(*p || param.indel_quality<0) { fprintf(stderr,"Illegal indel score: '%s'\n",optarg); err=-7; } break; case 'x': param.ins_cutoff=strtod(optarg,&p); if(*p || param.ins_cutoff>0.5 || param.ins_cutoff<0.0) { fprintf(stderr,"Illegal insert distribution 
cutoff percentile: '%s'\n",optarg); err=-6; } break; case 'm': param.mapping_cutoff=(int)strtol(optarg,&p,10); if(*p || param.mapping_cutoff<0) { fprintf(stderr,"Illegal mapping cutoff: '%s'\n",optarg); err=-7; } break; /* Misc */ case 'v': param.verbose = true; break; case 't': #ifdef HAVE_OPENMP param.num_threads = atol(optarg); #endif break; case 'h': usage(gt_scorereads_options,gt_scorereads_groups,false); exit(1); break; case 'H': usage(gt_scorereads_options,gt_scorereads_groups,true); exit(1); case 'J': gt_options_fprint_json_menu(stderr,gt_map2sam_options,gt_map2sam_groups,true,false); exit(1); break; case '?': default: usage(gt_scorereads_options,gt_scorereads_groups,false); gt_fatal_error_msg("Option '%c' %d not recognized",option,option); } } /* * Parameters check */ if(param.input_files[1]) gt_input_generic_parser_attributes_set_paired(param.parser_attr,true); if(!err && insert_set[0] && insert_set[1] && param.min_insert>param.max_insert) { fputs("Minimum insert size > maximum insert size\n",stderr); usage(gt_scorereads_options,gt_scorereads_groups,false); err=-15; } if(!err) { if(param.output_file && param.compress!=NONE) { size_t l=strlen(param.output_file); switch(param.compress) { case GZIP: if(l<3 || strcmp(param.output_file+l-3,".gz")) { char *s; asprintf(&s,"%s.gz",param.output_file); param.output_file=s; } break; case BZIP2: if(l<4 || strcmp(param.output_file+l-4,".bz2")) { char *s; asprintf(&s,"%s.bz2",param.output_file); param.output_file=s; } break; default: break; } } if(gt_input_generic_parser_attributes_is_paired(param.parser_attr) && param.dist_file) read_dist_file(&param,insert_set); else if(!insert_set[1]) { if(param.min_insert<=1000) param.max_insert=1000; else param.max_insert=param.min_insert+1000; } } // Free gt_string_delete(gt_scorereads_short_getopt); return err; } int main(int argc,char *argv[]) { int err=0; // GT error handler gt_handle_error_signals(); // Parsing command-line options err=parse_arguments(argc,argv); if(!err) { 
// Open out file gt_output_file *output_file; if(param.output_file) { output_file=gt_output_file_new_compress(param.output_file,UNSORTED_FILE,param.compress); } else { output_file=gt_output_stream_new_compress(stdout,UNSORTED_FILE,param.compress); } gt_cond_fatal_error(!output_file,FILE_OPEN,param.output_file); param.printer_attr=gt_generic_printer_attributes_new(MAP); param.printer_attr->output_map_attributes->print_casava=true; param.printer_attr->output_map_attributes->print_extra=true; param.printer_attr->output_map_attributes->print_scores=true; param.printer_attr->output_map_attributes->hex_print_scores=true; // Do we have two map files as input (one for each read)? if(param.input_files[1]) { pthread_mutex_t mutex=PTHREAD_MUTEX_INITIALIZER; gt_input_file* input_file1=gt_input_file_open(param.input_files[0],param.mmap_input); gt_input_file* input_file2=gt_input_file_open(param.input_files[1],param.mmap_input); if(input_file1->file_format!=MAP || input_file2->file_format!=MAP) { gt_fatal_error_msg("Fatal error: paired files '%s','%s' are not in MAP format\n",param.input_files[0],param.input_files[1]); } #ifdef HAVE_OPENMP #pragma omp parallel num_threads(param.num_threads) #endif { gt_buffered_input_file* buffered_input1=gt_buffered_input_file_new(input_file1); gt_buffered_input_file* buffered_input2=gt_buffered_input_file_new(input_file2); gt_buffered_output_file *buffered_output=gt_buffered_output_file_new(output_file); gt_buffered_input_file_attach_buffered_output(buffered_input1,buffered_output); gt_status error_code; gt_template *template=gt_template_new(); while(gt_input_map_parser_synch_blocks(buffered_input1,buffered_input2,&mutex)) { error_code=gt_input_map_parser_get_template(buffered_input1,template,NULL); if(error_code!=GT_IMP_OK) { gt_input_map_parser_get_template(buffered_input2,template,NULL); gt_error_msg("Error parsing file '%s'\n",param.input_files[0]); continue; } if(gt_template_get_num_blocks(template)!=1) { gt_error_msg("Error parsing files 
'%s','%s': wrong number of blocks\n",param.input_files[0],param.input_files[1]); continue; } gt_alignment *alignment1=gt_template_get_block(template,0); gt_alignment *alignment2=gt_template_get_block_dyn(template,1); error_code=gt_input_map_parser_get_alignment(buffered_input2,alignment2,NULL); if (error_code!=GT_IMP_OK) { gt_error_msg("Error parsing file '%s'\n",param.input_files[1]); continue; } if(!(gt_string_nequals(template->tag,alignment2->tag,gt_string_get_length(template->tag)))) { gt_error_msg("Fatal ID mismatch ('%*s','%*s') parsing files '%s','%s'\n",PRIgts_content(template->tag),PRIgts_content(alignment2->tag),param.input_files[0],param.input_files[1]); break; } pair_read(template,alignment1,alignment2,&param); if (gt_output_generic_bofprint_template(buffered_output,template,param.printer_attr)) { gt_error_msg("Fatal error outputting read '"PRIgts"'\n",PRIgts_content(gt_template_get_string_tag(template))); } } gt_template_delete(template); gt_buffered_input_file_close(buffered_input1); gt_buffered_input_file_close(buffered_input2); gt_buffered_output_file_close(buffered_output); } gt_input_file_close(input_file1); gt_input_file_close(input_file2); } else { // Single input file (could be single end or interleaved paired end gt_input_file* input_file=param.input_files[0]?gt_input_file_open(param.input_files[0],param.mmap_input):gt_input_stream_open(stdin); #ifdef OPENMP #pragma omp parallel num_threads(param.num_threads) #endif { gt_buffered_input_file* buffered_input=gt_buffered_input_file_new(input_file); gt_buffered_output_file *buffered_output=gt_buffered_output_file_new(output_file); gt_buffered_input_file_attach_buffered_output(buffered_input,buffered_output); gt_status error_code; gt_template *template=gt_template_new(); if(gt_input_generic_parser_attributes_is_paired(param.parser_attr)) { while ((error_code=gt_input_generic_parser_get_template(buffered_input,template,param.parser_attr))) { if (error_code!=GT_IMP_OK) { gt_error_msg("Error parsing 
file '%s'\n",param.input_files[0]); continue; } gt_alignment *alignment1=gt_template_get_block(template,0); gt_alignment *alignment2=gt_template_get_block(template,1); pair_read(template,alignment1,alignment2,&param); if (gt_output_generic_bofprint_template(buffered_output,template,param.printer_attr)) { gt_error_msg("Fatal error outputting read '"PRIgts"'\n",PRIgts_content(gt_template_get_string_tag(template))); } } } else { while ((error_code=gt_input_generic_parser_get_template(buffered_input,template,param.parser_attr))) { if (error_code!=GT_IMP_OK) { gt_error_msg("Error parsing file '%s'\n",param.input_files[0]); continue; } gt_alignment *alignment=gt_template_get_block(template,0); gt_alignment_recalculate_counters(alignment); GT_ALIGNMENT_ITERATE(alignment,map) { if(map->gt_score==GT_MAP_NO_GT_SCORE) map->gt_score=calculate_dist_score(alignment,map,param.qual_offset,param.indel_quality); map->phred_score=255; } if (gt_output_generic_bofprint_alignment(buffered_output,alignment,param.printer_attr)) { gt_error_msg("Fatal error outputting read '"PRIgts"'\n",PRIgts_content(gt_template_get_string_tag(template))); } } } // Clean gt_template_delete(template); gt_buffered_input_file_close(buffered_input); gt_buffered_output_file_close(buffered_output); } gt_input_file_close(input_file); } gt_output_file_close(output_file); gt_generic_printer_attributes_delete(param.printer_attr); if(param.ins_dist) { free(param.ins_dist); free(param.ins_phred); } } return err; }
PoW.c
// Copyright (c) 2017-2018 The Popchain Core Developers #include "PoW.h" #include <stdio.h> #include <stdint.h> #include <string.h> #include <stdlib.h> #include <assert.h> #ifndef MAC_OSX #include <omp.h> #endif #include "my_time.h" #include "common.h" #include "my_rand48_r.h" #include "oneWayFunction.h" // #define SSE_VERSION /* * Step 1: Initialize working memory. */ void initWorkMemory(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, const uint32_t K) { uint32_t i, j; uint8_t a[OUTPUT_LEN], b[OUTPUT_LEN]; cryptoFunc[0].func(input, inputLen, a); uint64_t randSeed[4] = {0, 0, 0, 0}; #ifndef SSE_VERSION struct my_rand48_data randBuffer[4]; #else struct vrand48_data randBuffer[2]; #endif const uint32_t iterNum = WORK_MEMORY_SIZE >> 5; for (i = 0; i < iterNum; ++i) { if (i % K) { #ifndef SSE_VERSION uint64_t num = 0; for (j = 0; j < 4; ++j) { my_rand64_r(&randBuffer[j], &num); memcpy(b + (j << 3), (uint8_t *)&num, 8*sizeof(uint8_t)); } #else vrand64(b, randBuffer); #endif uint8_t shift_num; uint8_t result[OUTPUT_LEN]; reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8); rrs(b, OUTPUT_LEN, result, shift_num); memcpy(Maddr + (i << 5), result, OUTPUT_LEN*sizeof(uint8_t)); for (j = 0; j < 32; ++j) { a[j] ^= result[j]; } } else { uint8_t t = 0, shift_num = 0; reduce_bit(a, 32, (uint8_t *)&t, 8); t = (t & 0x0f) ^ (t >> 4); reduce_bit((uint8_t *)&i, 4, (uint8_t *)&shift_num, 8); uint8_t a_rrs[INPUT_LEN]; rrs(a, OUTPUT_LEN, a_rrs, shift_num); cryptoFunc[t].func(a_rrs, 32, a); reduce_bit(a, 8, (uint8_t *)&randSeed[0], 48); reduce_bit(a + 8, 8, (uint8_t *)&randSeed[1], 48); reduce_bit(a + 16, 8, (uint8_t *)&randSeed[2], 48); reduce_bit(a + 24, 8, (uint8_t *)&randSeed[3], 48); #ifndef SSE_VERSION my_seed48_r(randSeed[0], &randBuffer[0]); my_seed48_r(randSeed[1], &randBuffer[1]); my_seed48_r(randSeed[2], &randBuffer[2]); my_seed48_r(randSeed[3], &randBuffer[3]); #else vseed48(randSeed , &randBuffer[0]); vseed48(randSeed + 2, &randBuffer[1]); #endif memcpy(Maddr + (i << 5), 
a, 32*sizeof(uint8_t)); } } } /* * Step 2: Modify the working memory contents. */ void modifyWorkMemory(uint8_t *Maddr, const uint32_t L, const uint32_t C, uint8_t *result) { uint32_t i, j; uint8_t a[OUTPUT_LEN], b[64]; cryptoFunc[0].func(Maddr + WORK_MEMORY_SIZE - 32, 32, a); memcpy(result, a, OUTPUT_LEN*sizeof(uint8_t)); uint64_t r = 0; reduce_bit(a, 32, (uint8_t *)&r, 64); const uint32_t iterNum = L << 6; for (i = 0; i < C; ++i) { uint64_t randSeed = 0; reduce_bit(a, 32, (uint8_t *)&randSeed, 48); struct my_rand48_data randBuffer; my_seed48_r(randSeed, &randBuffer); uint8_t t1, t2, s; uint64_t randNum = 0, base = 0; for (j = 0; j < iterNum; ++j) { my_rand48_r(&randBuffer, &randNum); base = randNum + r; uint64_t offset = 0; reduce_bit((uint8_t *)&r, 8, (uint8_t *)&offset, 8); offset = (offset << 8) + 1; uint64_t addr1 = (base + WORK_MEMORY_SIZE - offset) % WORK_MEMORY_SIZE; uint64_t addr2 = (base + offset) % WORK_MEMORY_SIZE; t1 = Maddr[addr1]; t2 = Maddr[addr2]; s = a[j & 0x1f]; Maddr[addr1] = t2 ^ s; Maddr[addr2] = t1 ^ s; b[j & 0x3f] = t1 ^ t2; r = r + s + t1 + t2; } uint8_t t = 0; reduce_bit((uint8_t *)&r, 8, (uint8_t *)&t, 8); t = (t & 0x0f) ^ (t >> 4); reduce_bit(b, 64, a, 256); uint8_t shift_num = 0; uint64_t ir = r + i; reduce_bit((uint8_t *)&ir, 8, (uint8_t *)&shift_num, 8); uint8_t a_rrs[INPUT_LEN]; rrs(a, OUTPUT_LEN, a_rrs, shift_num); cryptoFunc[t].func(a_rrs, 32, a); for (j = 0; j < OUTPUT_LEN; ++j) { result[j] ^= a[j]; } } } /* * Step 3: Calculate the final result. 
*/ void calculateFinalResult(uint8_t *Maddr, uint8_t *c, const uint32_t D, uint8_t *result) { uint32_t i = 0, j = 0, k = 0; memcpy(result, c, OUTPUT_LEN*sizeof(uint8_t)); const uint32_t num = (WORK_MEMORY_SIZE >> 5) - 1; uint32_t it = 0; uint8_t result_rrs[OUTPUT_LEN]; while(1) { uint8_t t = 0, shift_num = 0; uint32_t d = 0; reduce_bit(result, 32, (uint8_t *)&t, 8); t = (t & 0x0f) ^ (t >> 4); reduce_bit(result, 32, (uint8_t *)&d, D); ++d; for (j = 0; j < d; ++j) { uint32_t index = i << 5; for (k = 0; k < 32; ++k) { result[k] ^= Maddr[index + k]; } ++i; if (i == num) { it = i + t; reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8); rrs(result, OUTPUT_LEN, result_rrs, shift_num); cryptoFunc[0].func(result_rrs, 32, result); return; } } it = t + i; reduce_bit((uint8_t *)&it, 4, (uint8_t *)&shift_num, 8); rrs(result, OUTPUT_LEN, result_rrs, shift_num); cryptoFunc[t].func(result_rrs, 32, result); } } /* * Correctness & Performance test for Proof of work */ void testPowFunction(uint8_t *mess, uint32_t messLen, const int64_t iterNum) { int64_t j; uint32_t inputLen = messLen; uint8_t input[INPUT_LEN], output[OUTPUT_LEN]; memset(input, 0, INPUT_LEN*sizeof(uint8_t)); memcpy(input, mess, messLen*sizeof(char)); // Init all one-way function initOneWayFunction(); uint8_t *Maddr = (uint8_t *)malloc(64 * WORK_MEMORY_SIZE*sizeof(uint8_t)); assert(NULL != Maddr); memset(Maddr, 0, 64 * WORK_MEMORY_SIZE*sizeof(uint8_t)); printf("****************************** Correctness test (PoW function) ******************************\n"); printf("Test message: %s\n", mess); powFunction(input, inputLen, Maddr, output); view_data_u8("PoW", output, OUTPUT_LEN); printf("*********************************************************************************************\n"); /* printf("*************************************************** Performance test (PoW function) ***************************************************\n"); uint8_t *result = (uint8_t *)malloc(iterNum * OUTPUT_LEN * sizeof(uint8_t)); 
assert(NULL != result); memset(result, 0, iterNum * OUTPUT_LEN * sizeof(uint8_t)); uint32_t threadNumArr[] = {1, 4, 8, 12, 16, 20, 24, 32, 48, 64}; uint32_t threadNumTypes = sizeof(threadNumArr) / sizeof(uint32_t); printf(" %-18s", "Algorithm"); for (uint32_t ix = 0; ix < threadNumTypes; ++ix) printf("%12d", threadNumArr[ix]); printf("\n"); printf("00 %-18s\t", "PoW"); for (uint32_t ix = 0; ix < threadNumTypes; ++ix) { omp_set_num_threads(threadNumArr[ix]); double startTime = get_wall_time(); if (threadNumArr[ix] == 1) { for (j = 0; j < iterNum; ++j) { powFunction(input, inputLen, Maddr, result + j * OUTPUT_LEN); } } else { #pragma omp parallel for firstprivate(input), private(j) shared(result) for (j = 0; j < iterNum; ++j) { powFunction(input, inputLen, Maddr + omp_get_thread_num() * WORK_MEMORY_SIZE, result + j * OUTPUT_LEN); } } double endTime = get_wall_time(); double costTime = endTime - startTime; printf("%5.0f bps ", iterNum / costTime); fflush(stdout); // Check result for (j = 0; j < iterNum; j += 1) { if (memcmp(output, result + j * OUTPUT_LEN, OUTPUT_LEN)) { printf("Thread num: %d, j: %ld\n", threadNumArr[ix], j); view_data_u8("output", output, OUTPUT_LEN); view_data_u8("result", result + j * OUTPUT_LEN, OUTPUT_LEN); abort(); } } } printf("\n"); printf("***************************************************************************************************************************************\n"); if (NULL != result) { free(result); result = NULL; } */ if (NULL != Maddr) { free(Maddr); Maddr = NULL; } } #define OUTPUT_BUFFER_SIZE (32 * 1024UL * 1024UL) /*popchain ghost*/ //140 to 228 #define MAX_TEST_INPUT_LEN 228 /*popchain ghost*/ #define MAX_OUT_FILE_NAME_LEN 25 const char testInputCase[][MAX_TEST_INPUT_LEN] = { "", "HelloWorld", "0123456789" }; void powNistTest(const char *outFileName) { const uint64_t iterNum = 1024UL * 1024UL; // const uint64_t iterNum = 1024UL; uint8_t *outputBuffer = (uint8_t *)malloc(OUTPUT_BUFFER_SIZE * sizeof(uint8_t)); assert(NULL 
!= outputBuffer); memset(outputBuffer, 0, OUTPUT_BUFFER_SIZE * sizeof(uint8_t)); uint8_t *Maddr = (uint8_t *)malloc(WORK_MEMORY_SIZE*sizeof(uint8_t)); assert(NULL != Maddr); memset(Maddr, 0, WORK_MEMORY_SIZE*sizeof(uint8_t)); initOneWayFunction(); uint32_t testInputCaseNum = sizeof(testInputCase) / sizeof(const char [MAX_TEST_INPUT_LEN]); for (uint32_t testCaseIx = 0; testCaseIx < testInputCaseNum; ++testCaseIx) { char curOutFileName[MAX_OUT_FILE_NAME_LEN] = ""; sprintf(curOutFileName, "%s-%u.txt", outFileName, testCaseIx); FILE *fp = NULL; if (NULL != (fp = fopen(curOutFileName, "wb"))) { const uint32_t testInputCaseLen = strlen((char *)testInputCase[testCaseIx]); uint8_t input[MAX_TEST_INPUT_LEN]; memset(input, 0, MAX_TEST_INPUT_LEN*sizeof(uint8_t)); memcpy(input, testInputCase[testCaseIx], testInputCaseLen*sizeof(uint8_t)); double startTime = get_wall_time(); powFunction(input, testInputCaseLen, Maddr, outputBuffer); for (uint64_t i = 1, j = 0; i < iterNum; ++i) { memcpy(input, outputBuffer + j, OUTPUT_LEN * sizeof(uint32_t)); j += OUTPUT_LEN; powFunction(input, OUTPUT_LEN, Maddr, outputBuffer + j); /* if (j == OUTPUT_BUFFER_SIZE) { fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp); j = 0; } */ } double endTime = get_wall_time(); double costTime = endTime - startTime; fprintf(stdout, "TestCaseIx: %d, Input: %s, IterNum: %llu, Time: %4.2f, Performance: %5.2f bps\n", testCaseIx, \ testInputCase[testCaseIx], iterNum, costTime, ((double)(iterNum * OUTPUT_LEN)) / costTime); fflush(stdout); fwrite(outputBuffer, sizeof(uint8_t), OUTPUT_BUFFER_SIZE / sizeof(uint8_t), fp); fclose(fp); } else { fprintf(stderr, "Error: Open %s failed!\n", curOutFileName); abort(); } } if (NULL != outputBuffer) { free(outputBuffer); outputBuffer = NULL; } if (NULL != Maddr) { free(Maddr); Maddr = NULL; } } void hashpop(const uint8_t *mess, uint32_t messLen, uint8_t output[OUTPUT_LEN]) { if(messLen != INPUT_LEN) { //won't get in printf("hashpop:Invalid message 
length %d\n", messLen); return; } int64_t j; uint32_t inputLen =messLen; uint8_t input[INPUT_LEN]; memset(input, 0, INPUT_LEN*sizeof(uint8_t)); memcpy(input, mess, inputLen*sizeof(char)); //operation: input uint8_t *Maddr = (uint8_t *)malloc(WORK_MEMORY_SIZE*sizeof(uint8_t)); //1024*1024*1 assert(NULL != Maddr); memset(Maddr, 0, WORK_MEMORY_SIZE*sizeof(uint8_t)); //printf("Test message: %s\n", mess); powFunction(input, inputLen,Maddr, output); //view_data_u8("PoW", output, OUTPUT_LEN); //output if (NULL != Maddr) { free(Maddr); Maddr = NULL; } } int my_rand64_r (struct my_rand48_data *buffer, uint64_t *result) { uint64_t X = buffer->__x; X = (X * buffer->__a + buffer->__c) & 0xffffffffffffULL; buffer->__x = X; buffer->__x = (X * buffer->__a + buffer->__c) & 0xffffffffffffULL; X ^= buffer->__x << 16; *result = X; return 0; } int my_seed48_r (uint64_t seedval, struct my_rand48_data *buffer) { buffer->__x = seedval & 0xffffffffffffULL; buffer->__a = 0x5deece66dULL; buffer->__c = 0xb; return 0; } void powFunction(uint8_t *input, uint32_t inputLen, uint8_t *Maddr, uint8_t *output) { uint8_t c[OUTPUT_LEN]; // Step 1: Initialize working memory. initWorkMemory(input, inputLen, Maddr, 64); // view_data_u8("Maddr", Maddr, OUTPUT_LEN); // Step 2: Modify the working memory contents. modifyWorkMemory(Maddr, 4, WORK_MEMORY_SIZE >> 11, c); // view_data_u8("c", c, OUTPUT_LEN); // Step 3: Calculate the final result. calculateFinalResult(Maddr, c, 8, output); // view_data_u8("output", output, OUTPUT_LEN); } int my_rand48_r (struct my_rand48_data *buffer, uint64_t *result) { *result = (buffer->__x * buffer->__a + buffer->__c) & 0xffffffffffffULL; buffer->__x = *result; return 0; }
apply_constant_scalarvalue_process.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // // #if !defined(KRATOS_APPLY_CONSTANT_VALUE_PROCESS_H_INCLUDED ) #define KRATOS_APPLY_CONSTANT_VALUE_PROCESS_H_INCLUDED // System includes #include <string> #include <iostream> // External includes // Project includes #include "includes/define.h" #include "includes/kratos_flags.h" #include "includes/kratos_parameters.h" #include "processes/process.h" namespace Kratos { ///@name Kratos Classes ///@{ /// The base class for all processes in Kratos. /** This function applies a constant value (and fixity) to all of the nodes in a given mesh */ class ApplyConstantScalarValueProcess : public Process { public: ///@name Type Definitions ///@{ KRATOS_DEFINE_LOCAL_FLAG(VARIABLE_IS_FIXED); /// Pointer definition of ApplyConstantScalarValueProcess KRATOS_CLASS_POINTER_DEFINITION(ApplyConstantScalarValueProcess); ///@} ///@name Life Cycle ///@{ ApplyConstantScalarValueProcess(ModelPart& model_part, Parameters rParameters ) : Process(Flags()) , mr_model_part(model_part) { KRATOS_TRY //only include validation with c++11 since raw_literals do not exist in c++03 // Some values need to be mandatorily prescribed since no meaningful default value exist. 
For this reason try accessing to them // So that an error is thrown if they don't exist rParameters["value"]; rParameters["variable_name"]; rParameters["model_part_name"]; // Now validate agains defaults -- this also ensures no type mismatch rParameters.ValidateAndAssignDefaults(GetDefaultParameters()); mmesh_id = rParameters["mesh_id"].GetInt(); mvariable_name = rParameters["variable_name"].GetString(); this->Set( VARIABLE_IS_FIXED, rParameters["is_fixed"].GetBool()); if( KratosComponents< Variable<double> >::Has( mvariable_name ) ) //case of double variable { mdouble_value = rParameters["value"].GetDouble(); if( model_part.GetNodalSolutionStepVariablesList().Has( KratosComponents< Variable<double> >::Get( mvariable_name ) ) == false ) { KRATOS_THROW_ERROR(std::runtime_error,"trying to fix a variable that is not in the model_part - variable name is ",mvariable_name); } } else if( KratosComponents< VariableComponent< VectorComponentAdaptor<array_1d<double, 3> > > >::Has(mvariable_name) ) //case of component variable { typedef VariableComponent< VectorComponentAdaptor<array_1d<double, 3> > > component_type; component_type var_component = KratosComponents< component_type >::Get(mvariable_name); if( model_part.GetNodalSolutionStepVariablesList().Has( var_component.GetSourceVariable() ) == false ) { KRATOS_THROW_ERROR(std::runtime_error,"trying to fix a variable that is not in the model_part - variable name is ",mvariable_name); } mdouble_value = rParameters["value"].GetDouble(); } else if( KratosComponents< Variable<int> >::Has( mvariable_name ) ) //case of int variable { mint_value = rParameters["value"].GetInt(); if( model_part.GetNodalSolutionStepVariablesList().Has( KratosComponents< Variable<int> >::Get( mvariable_name ) ) == false ) { KRATOS_THROW_ERROR(std::runtime_error,"trying to fix a variable that is not in the model_part - variable name is ",mvariable_name); } if(this->Is(VARIABLE_IS_FIXED)) { KRATOS_THROW_ERROR(std::runtime_error,"sorry it is not possible 
to fix variables of type Variable<int>. Only double variables or vector components can be fixed",""); } } else if( KratosComponents< Variable<bool> >::Has( mvariable_name ) ) //case of bool variable { mbool_value = rParameters["value"].GetBool(); if( model_part.GetNodalSolutionStepVariablesList().Has( KratosComponents< Variable<bool> >::Get( mvariable_name ) ) == false ) { KRATOS_THROW_ERROR(std::runtime_error,"trying to fix a variable that is not in the model_part - variable name is ",mvariable_name); } if(this->Is(VARIABLE_IS_FIXED)) { KRATOS_THROW_ERROR(std::runtime_error,"sorry it is not possible to fix variables of type Variable<bool>. Only double variables or vector components can be fixed",""); } } KRATOS_CATCH(""); } ApplyConstantScalarValueProcess(ModelPart& model_part, const Variable<double>& rVariable, const double double_value, std::size_t mesh_id, Flags options ) : Process(options) , mr_model_part(model_part),mdouble_value(double_value), mint_value(0), mbool_value(false),mmesh_id(mesh_id) { KRATOS_TRY; if(this->IsDefined(VARIABLE_IS_FIXED) == false ) { KRATOS_THROW_ERROR(std::runtime_error,"please specify if the variable is to be fixed or not (flag VARIABLE_IS_FIXED)",""); } if( model_part.GetNodalSolutionStepVariablesList().Has( rVariable ) == false ) { KRATOS_THROW_ERROR(std::runtime_error,"trying to fix a variable that is not in the model_part - variable name is ",rVariable); } mvariable_name = rVariable.Name(); KRATOS_CATCH(""); } ApplyConstantScalarValueProcess(ModelPart& model_part, const VariableComponent<VectorComponentAdaptor<array_1d<double, 3> > >& rVariable, const double double_value, std::size_t mesh_id, Flags options ) : Process(options) , mr_model_part(model_part),mdouble_value(double_value), mint_value(0), mbool_value(false),mmesh_id(mesh_id) { KRATOS_TRY; if(this->IsDefined(VARIABLE_IS_FIXED) == false ) { KRATOS_THROW_ERROR(std::runtime_error,"please specify if the variable is to be fixed or not (flag VARIABLE_IS_FIXED)","") } 
mvariable_name = rVariable.Name(); if( model_part.GetNodalSolutionStepVariablesList().Has( rVariable.GetSourceVariable() ) == false ) { KRATOS_THROW_ERROR(std::runtime_error,"trying to fix a variable that is not in the model_part - variable name is ",rVariable); } KRATOS_CATCH(""); } ApplyConstantScalarValueProcess(ModelPart& model_part, const Variable< int >& rVariable, const int int_value, std::size_t mesh_id, Flags options ) : Process(options) , mr_model_part(model_part),mdouble_value(0.0), mint_value(int_value), mbool_value(false),mmesh_id(mesh_id) { KRATOS_TRY; if(this->IsDefined(VARIABLE_IS_FIXED) == false ) { KRATOS_THROW_ERROR(std::runtime_error,"Please specify if the variable is to be fixed or not (flag VARIABLE_IS_FIXED)",""); } if(this->Is(VARIABLE_IS_FIXED)) { KRATOS_THROW_ERROR(std::runtime_error,"Sorry it is not possible to fix variables of type Variable<int>. Only double variables or vector components can be fixed",""); } if( model_part.GetNodalSolutionStepVariablesList().Has( rVariable ) == false ) { KRATOS_THROW_ERROR(std::runtime_error,"Trying to fix a variable that is not in the model_part - variable name is ",rVariable); } mvariable_name = rVariable.Name(); KRATOS_CATCH(""); } ApplyConstantScalarValueProcess(ModelPart& model_part, const Variable< bool >& rVariable, const bool bool_value, std::size_t mesh_id, Flags options ) : Process(options) , mr_model_part(model_part),mdouble_value(0.0), mint_value(0), mbool_value(bool_value),mmesh_id(mesh_id) { KRATOS_TRY; if(this->IsDefined(VARIABLE_IS_FIXED) == false ) { KRATOS_THROW_ERROR(std::runtime_error,"Please specify if the variable is to be fixed or not (flag VARIABLE_IS_FIXED)",""); } if(this->Is(VARIABLE_IS_FIXED)) { KRATOS_THROW_ERROR(std::runtime_error,"Sorry it is not possible to fix variables of type Variable<int>. 
Only double variables or vector components can be fixed",""); } if( model_part.GetNodalSolutionStepVariablesList().Has( rVariable ) == false ) { KRATOS_THROW_ERROR(std::runtime_error,"Trying to fix a variable that is not in the model_part - variable name is ",rVariable); } mvariable_name = rVariable.Name(); KRATOS_CATCH(""); } /// Destructor. ~ApplyConstantScalarValueProcess() override {} ///@} ///@name Operators ///@{ /// This operator is provided to call the process as a function and simply calls the Execute method. void operator()() { Execute(); } const Parameters GetDefaultParameters() const override { const Parameters default_parameters( R"( { "model_part_name":"PLEASE_CHOOSE_MODEL_PART_NAME", "mesh_id": 0, "variable_name": "PLEASE_PRESCRIBE_VARIABLE_NAME", "is_fixed": false, "value" : 1.0 } )" ); return default_parameters; } ///@} ///@name Operations ///@{ /// Execute method is used to execute the ApplyConstantScalarValueProcess algorithms. void Execute() override {} /// this function is designed for being called at the beginning of the computations /// right after reading the model and the groups void ExecuteInitialize() override { KRATOS_TRY; const bool is_fixed = this->Is(VARIABLE_IS_FIXED); if( KratosComponents< Variable<double> >::Has( mvariable_name ) ) //case of double variable { InternalApplyValue<>(KratosComponents< Variable<double> >::Get(mvariable_name) , is_fixed, mdouble_value); } else if( KratosComponents< VariableComponent< VectorComponentAdaptor<array_1d<double, 3> > > >::Has(mvariable_name) ) //case of component variable { typedef VariableComponent< VectorComponentAdaptor<array_1d<double, 3> > > component_type; component_type var_component = KratosComponents< component_type >::Get(mvariable_name); InternalApplyValue< component_type, double>(var_component , is_fixed, mdouble_value); } else if( KratosComponents< Variable<int> >::Has( mvariable_name ) ) //case of int variable { InternalApplyValueWithoutFixing<>(KratosComponents< Variable<int> 
>::Get(mvariable_name) , mint_value); } else if( KratosComponents< Variable<bool> >::Has( mvariable_name ) ) //case of bool variable { InternalApplyValueWithoutFixing<>(KratosComponents< Variable<bool> >::Get(mvariable_name), mbool_value); } else { KRATOS_THROW_ERROR(std::logic_error, "Not able to fix the variable. Attempting to fix variable:",mvariable_name); } KRATOS_CATCH(""); } /// this function is designed for being execute once before the solution loop but after all of the /// solvers where built void ExecuteBeforeSolutionLoop() override { } /// this function will be executed at every time step BEFORE performing the solve phase void ExecuteInitializeSolutionStep() override { } /// this function will be executed at every time step AFTER performing the solve phase void ExecuteFinalizeSolutionStep() override { } /// this function will be executed at every time step BEFORE writing the output void ExecuteBeforeOutputStep() override { } /// this function will be executed at every time step AFTER writing the output void ExecuteAfterOutputStep() override { } /// this function is designed for being called at the end of the computations /// right after reading the model and the groups void ExecuteFinalize() override { } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "ApplyConstantScalarValueProcess"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << "ApplyConstantScalarValueProcess"; } /// Print object's data. 
void PrintData(std::ostream& rOStream) const override { } ///@} ///@name Friends ///@{ ///@} protected: ModelPart& mr_model_part; std::string mvariable_name; double mdouble_value; int mint_value; bool mbool_value; std::size_t mmesh_id; private: ///@name Static Member Variables ///@{ template< class TVarType, class TDataType > void InternalApplyValue(TVarType& rVar, const bool to_be_fixed, const TDataType value) { const int nnodes = mr_model_part.GetMesh(mmesh_id).Nodes().size(); if(nnodes != 0) { ModelPart::NodesContainerType::iterator it_begin = mr_model_part.GetMesh(mmesh_id).NodesBegin(); // ModelPart::NodesContainerType::iterator it_end = mr_model_part.GetMesh(mmesh_id).NodesEnd(); #pragma omp parallel for for(int i = 0; i<nnodes; i++) { ModelPart::NodesContainerType::iterator it = it_begin + i; if(to_be_fixed) { it->Fix(rVar); } it->FastGetSolutionStepValue(rVar) = value; } } } template< class TVarType, class TDataType > void InternalApplyValueWithoutFixing(TVarType& rVar, const TDataType value) { const int nnodes = mr_model_part.GetMesh(mmesh_id).Nodes().size(); if(nnodes != 0) { ModelPart::NodesContainerType::iterator it_begin = mr_model_part.GetMesh(mmesh_id).NodesBegin(); // ModelPart::NodesContainerType::iterator it_end = mr_model_part.GetMesh(mmesh_id).NodesEnd(); #pragma omp parallel for for(int i = 0; i<nnodes; i++) { ModelPart::NodesContainerType::iterator it = it_begin + i; it->FastGetSolutionStepValue(rVar) = value; } } } ///@} ///@name Un accessible methods ///@{ /// Assignment operator. ApplyConstantScalarValueProcess& operator=(ApplyConstantScalarValueProcess const& rOther); /// Copy constructor. 
//ApplyConstantScalarValueProcess(ApplyConstantScalarValueProcess const& rOther); ///@} }; // Class ApplyConstantScalarValueProcess KRATOS_CREATE_LOCAL_FLAG(ApplyConstantScalarValueProcess,VARIABLE_IS_FIXED, 0); ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// input stream function inline std::istream& operator >> (std::istream& rIStream, ApplyConstantScalarValueProcess& rThis); /// output stream function inline std::ostream& operator << (std::ostream& rOStream, const ApplyConstantScalarValueProcess& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} } // namespace Kratos. #endif // KRATOS_APPLY_CONSTANT_VALUE_PROCESS_H_INCLUDED defined
main.c
/* Heat equation solver in 2D. */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <mpi.h>
#include <omp.h>

#include "heat.h"

/* Hybrid MPI + OpenMP driver: initializes the temperature field, then time-
 * steps the diffusion equation inside one long-lived parallel region where
 * every thread participates in halo exchange (requires MPI_THREAD_MULTIPLE).
 * Returns 0 on success; aborts if the MPI library cannot provide the needed
 * thread-support level. */
int main(int argc, char **argv)
{
    double a = 0.5;             //!< Diffusion constant
    field current, previous;    //!< Current and previous temperature fields
    double dt;                  //!< Time step
    int nsteps;                 //!< Number of time steps
    int image_interval = 500;   //!< Image output interval
    parallel_data parallelization; //!< Parallelization info
    int iter;                   //!< Iteration counter
    double dx2, dy2;            //!< delta x and y squared
    double start_clock;         //!< Time stamps
    int provided;               // thread-support level
    int thread_id;              // OpenMP thread id needed for multiple-thread communication

    /* MPI_THREAD_MULTIPLE is mandatory: each OpenMP thread issues its own
     * MPI calls in exchange(). */
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    if (provided < MPI_THREAD_MULTIPLE) {
        printf("MPI_THREAD_MULTIPLE thread support level required\n");
        MPI_Abort(MPI_COMM_WORLD,5);
    }

#pragma omp parallel private(iter, thread_id)
    {
        /* NOTE(review): initialize() is called once per thread here — confirm
         * it is written to be idempotent/thread-aware for this example. */
        initialize(argc, argv, &current, &previous, &nsteps,
                   &parallelization);
        thread_id = omp_get_thread_num();
#pragma omp single
        {
            /* Output the initial field */
            write_field(&current, 0, &parallelization);
            /* Largest stable time step: the shared dx2/dy2/dt are set by one
             * thread; the implicit barrier of 'single' publishes them. */
            dx2 = current.dx * current.dx;
            dy2 = current.dy * current.dy;
            dt = dx2 * dy2 / (2.0 * a * (dx2 + dy2));
        }

        /* Get the start time stamp */
        /* NOTE(review): start_clock is shared but written by every thread —
         * benign only if all writes land close together; consider 'single'. */
        start_clock = MPI_Wtime();

        /* Time evolve */
        for (iter = 1; iter <= nsteps; iter++) {
            /* Each thread exchanges its own share of the halo. */
            exchange(&previous, &parallelization, thread_id);
            evolve(&current, &previous, a, dt);
            if (iter % image_interval == 0) {
#pragma omp single
                write_field(&current, iter, &parallelization);
            }
            /* Swap current field so that it will be used
               as previous for next iteration step */
#pragma omp single
            swap_fields(&current, &previous);
        }
    } /* end of parallel region */

    /* Determine the CPU time used for the iteration */
    if (parallelization.rank == 0) {
        printf("Iteration took %.3f seconds.\n", (MPI_Wtime() -
                                                  start_clock));
        printf("Reference value at 5,5: %f\n", previous.data[5][5]);
    }

    finalize(&current, &previous);
    MPI_Finalize();
    return 0;
}
linebreak.c
/*
 * Test for line continuation within pragmas
 *
 * Liao 2/5/2010
 *
 * */
/* Writes 0 into a[0].
 * a - array with at least one element.
 * The multi-line pragma (with '\' continuations) is the point of this test
 * file; the body is deliberately trivial. */
void foo1(int a[])
{
  int i = 0;  /* fixed: was read uninitialized (undefined behavior) */
  int j;
#pragma omp parallel \
  private (j) \
  shared (a)
  {
    j = 0;    /* fixed: the private copy was read uninitialized */
    a[i] = j; /* every thread stores the same value 0 into a[0] */
  }
}
data.c
#include "data.h"
#include "utils.h"
#include "image.h"
#include "cuda.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Serializes access to rand() from the data-loading threads. */
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

/* Read one path per line from filename into a newly allocated list.
 * Calls file_error() (which does not return) if the file cannot be opened. */
list *get_paths(char *filename)
{
    char *path;
    FILE *file = fopen(filename, "r");
    if(!file) file_error(filename);
    list *lines = make_list();
    while((path=fgetl(file))){
        list_insert(lines, path);
    }
    fclose(file);
    return lines;
}

/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
    char **random_paths = calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    for(i = 0; i < n; ++i){
        int index = rand()%m;
        indexes[i] = index;
        random_paths[i] = paths[index];
        if(i == 0) printf("%s\n", paths[index]);
    }
    pthread_mutex_unlock(&mutex);
    return random_paths;
}
*/

/* Sample n paths uniformly (with replacement) from the first m entries of
 * paths. The returned array aliases the input strings (no copies).
 * Caller frees the returned array only. */
char **get_random_paths(char **paths, int n, int m)
{
    char **random_paths = calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    for(i = 0; i < n; ++i){
        int index = rand()%m;
        random_paths[i] = paths[index];
        //if(i == 0) printf("%s\n", paths[index]);
    }
    pthread_mutex_unlock(&mutex);
    return random_paths;
}

/* Return n new strings: each paths[i] with `find` replaced by `replace`.
 * Caller owns both the array and the strings. */
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    char **replace_paths = calloc(n, sizeof(char*));
    int i;
    for(i = 0; i < n; ++i){
        char replaced[4096];
        find_replace(paths[i], find, replace, replaced);
        replace_paths[i] = copy_string(replaced);
    }
    return replace_paths;
}

/* Load n images resized to w*h, converted to grayscale; rows of the returned
 * matrix own the pixel buffers. */
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;

    for(i = 0; i < n; ++i){
        image im = load_image(paths[i], w, h, 3);

        image gray = grayscale_image(im);
        free_image(im);
        im = gray;

        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}

/* Load n color images resized to w*h into the rows of a matrix. */
matrix load_image_paths(char **paths, int n, int w, int h)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;

    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);
        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}

/* Load n images with augmentation: either a deterministic center crop
 * (center != 0) or a random rotate/scale/crop, then a random horizontal flip
 * and HSV distortion. */
matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = calloc(X.rows, sizeof(float*));
    X.cols = 0;

    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);
        image crop;
        if(center){
            crop = center_crop_image(im, size, size);
        } else {
            crop = random_augment_image(im, angle, aspect, min, max, size, size);
        }
        int flip = rand()%2;
        if (flip) flip_image(crop);
        random_distort_image(crop, hue, saturation, exposure);

        /*
        show_image(im, "orig");
        show_image(crop, "crop");
        cvWaitKey(0);
        */
        free_image(im);
        X.vals[i] = crop.data;
        X.cols = crop.h*crop.w*crop.c;
    }
    return X;
}

/* Parse "<id> <x> <y> <w> <h>" lines from filename into a growable array of
 * box_label; derives left/right/top/bottom from the center/size form.
 * Writes the number of boxes through *n; caller frees the result.
 * NOTE(review): calloc/realloc results are not checked — confirm OOM policy. */
box_label *read_boxes(char *filename, int *n)
{
    FILE *file = fopen(filename, "r");
    if(!file) file_error(filename);
    float x, y, h, w;
    int id;
    int count = 0;
    int size = 64;
    box_label *boxes = calloc(size, sizeof(box_label));
    while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){
        if(count == size) {
            size = size * 2;
            boxes = realloc(boxes, size*sizeof(box_label));
        }
        boxes[count].id = id;
        boxes[count].x = x;
        boxes[count].y = y;
        boxes[count].h = h;
        boxes[count].w = w;
        boxes[count].left   = x - w/2;
        boxes[count].right  = x + w/2;
        boxes[count].top    = y - h/2;
        boxes[count].bottom = y + h/2;
        ++count;
    }
    fclose(file);
    *n = count;
    return boxes;
}

/* In-place random shuffle of n boxes (swap each slot with a random slot). */
void randomize_boxes(box_label *b, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        box_label swap = b[i];
        int index = rand()%n;
        b[i] = b[index];
        b[index] = swap;
    }
}

/* Map box coordinates through the crop transform (scale sx/sy, shift dx/dy)
 * and an optional horizontal flip; boxes at exactly (0,0) are pushed far
 * off-image (999999) as a sentinel for "no box". */
void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip)
{
    int i;
    for(i = 0; i < n; ++i){
        if(boxes[i].x == 0 && boxes[i].y == 0) {
            boxes[i].x = 999999;
            boxes[i].y = 999999;
            boxes[i].w = 999999;
            boxes[i].h = 999999;
            continue;
        }
        boxes[i].left   = boxes[i].left  * sx - dx;
        boxes[i].right  = boxes[i].right * sx - dx;
        boxes[i].top    = boxes[i].top   * sy - dy;
        boxes[i].bottom = boxes[i].bottom* sy - dy;

        if(flip){
            float swap = boxes[i].left;
            boxes[i].left = 1. - boxes[i].right;
            boxes[i].right = 1. - swap;
        }

        /* Clamp to the unit square, then rebuild center/size form. */
        boxes[i].left =  constrain(0, 1, boxes[i].left);
        boxes[i].right = constrain(0, 1, boxes[i].right);
        boxes[i].top =   constrain(0, 1, boxes[i].top);
        boxes[i].bottom =   constrain(0, 1, boxes[i].bottom);

        boxes[i].x = (boxes[i].left+boxes[i].right)/2;
        boxes[i].y = (boxes[i].top+boxes[i].bottom)/2;
        boxes[i].w = (boxes[i].right - boxes[i].left);
        boxes[i].h = (boxes[i].bottom - boxes[i].top);

        boxes[i].w = constrain(0, 1, boxes[i].w);
        boxes[i].h = constrain(0, 1, boxes[i].h);
    }
}

/* Fill a flat truth vector for the "swag" detector layout: up to 30 boxes,
 * each encoded as [x, y, w, h, one-hot class] = (4+classes) floats.
 * The label file is derived from the image path by string substitution. */
void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);

    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;

    for (i = 0; i < count && i < 30; ++i) {
        x =  boxes[i].x;
        y =  boxes[i].y;
        w =  boxes[i].w;
        h =  boxes[i].h;
        id = boxes[i].id;

        /* NOTE(review): .0 threshold only rejects negative sizes — confirm a
         * small positive minimum was not intended (cf. .001/.005 elsewhere). */
        if (w < .0 || h < .0) continue;

        int index = (4+classes) * i;

        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;

        if (id < classes) truth[index+id] = 1;
    }
    free(boxes);
}

/* Fill truth for a region/grid detector: the image is divided into
 * num_boxes x num_boxes cells, each cell holding
 * [objectness, one-hot class, x, y, w, h] = (5+classes) floats; only the
 * first box falling into a cell is kept. */
void fill_truth_region(char *path, float *truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);

    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".png", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    float x,y,w,h;
    int id;
    int i;

    for (i = 0; i < count; ++i) {
        x =  boxes[i].x;
        y =  boxes[i].y;
        w =  boxes[i].w;
        h =  boxes[i].h;
        id = boxes[i].id;

        if (w < .005 || h < .005) continue;

        /* Cell index and coordinates relative to the cell. */
        int col = (int)(x*num_boxes);
        int row = (int)(y*num_boxes);

        x = x*num_boxes - col;
        y = y*num_boxes - row;

        int index = (col+row*num_boxes)*(5+classes);
        if (truth[index]) continue;   /* cell already occupied */
        truth[index++] = 1;

        if (id < classes) truth[index+id] = 1;
        index += classes;

        truth[index++] = x;
        truth[index++] = y;
        truth[index++] = w;
        truth[index++] = h;
    }
    free(boxes);
}

/* Decode a binary run-length encoding into im.data: rle[i] gives the length
 * of the i-th run, runs alternate 0/1 starting at 0; the tail of the image
 * is filled with the final value. */
void load_rle(image im, int *rle, int n)
{
    int count = 0;
    int curr = 0;
    int i,j;
    for(i = 0; i < n; ++i){
        for(j = 0; j < rle[i]; ++j){
            im.data[count++] = curr;
        }
        curr = 1 - curr;
    }
    for(; count < im.h*im.w*im.c; ++count){
        im.data[count] = curr;
    }
}

/* OR the single-channel src mask into channel c of dest. */
void or_image(image src, image dest, int c)
{
    int i;
    for(i = 0; i < src.w*src.h; ++i){
        if(src.data[i]) dest.data[dest.w*dest.h*c + i] = 1;
    }
}

/* Make channels mutually exclusive: a pixel set in channel k is cleared in
 * all later channels (earlier channels win). */
void exclusive_image(image src)
{
    int k, j, i;
    int s = src.w*src.h;
    for(k = 0; k < src.c-1; ++k){
        for(i = 0; i < s; ++i){
            if (src.data[k*s + i]){
                for(j = k+1; j < src.c; ++j){
                    src.data[j*s + i] = 0;
                }
            }
        }
    }
}

/* Bounding box (in pixels) of the nonzero region of a single-channel image.
 * If the image is empty, the box degenerates (w = 1-im.w etc.). */
box bound_image(image im)
{
    int x,y;
    int minx = im.w;
    int miny = im.h;
    int maxx = 0;
    int maxy = 0;
    for(y = 0; y < im.h; ++y){
        for(x = 0; x < im.w; ++x){
            if(im.data[y*im.w + x]){
                minx = (x < minx) ? x : minx;
                miny = (y < miny) ? y : miny;
                maxx = (x > maxx) ? x : maxx;
                maxy = (y > maxy) ?
                y : maxy;
            }
        }
    }
    box b = {minx, miny, maxx-minx + 1, maxy-miny + 1};
    //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
    return b;
}

/* Fill instance-segmentation truth: for each RLE-encoded instance mask in the
 * label file (up to num_boxes), apply the same augmentation as the image,
 * compute its bounding box, resize the cropped mask to mw*mh, and pack
 * [x, y, w, h, mw*mh mask values, class id] per instance into truth. */
void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    int i = 0;
    image part = make_image(w, h, 1);
    while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect);
        if(flip) flip_image(sized);

        box b = bound_image(sized);
        if(b.w > 0){
            image crop = crop_image(sized, b.x, b.y, b.w, b.h);
            image mask = resize_image(crop, mw, mh);
            truth[i*(4 + mw*mh + 1) + 0] = (b.x + b.w/2.)/sized.w;
            truth[i*(4 + mw*mh + 1) + 1] = (b.y + b.h/2.)/sized.h;
            truth[i*(4 + mw*mh + 1) + 2] = b.w/sized.w;
            truth[i*(4 + mw*mh + 1) + 3] = b.h/sized.h;
            int j;
            for(j = 0; j < mw*mh; ++j){
                truth[i*(4 + mw*mh + 1) + 4 + j] = mask.data[j];
            }
            truth[i*(4 + mw*mh + 1) + 4 + mw*mh] = id;
            free_image(crop);
            free_image(mask);
            ++i;
        }
        free_image(sized);
        free(rle);
    }
    fclose(file);
    free_image(part);
}

/* Fill flat detection truth: up to num_boxes rows of [x, y, w, h, id]
 * (5 floats each), after applying the crop/flip transform. */
void fill_truth_detection(char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);

    find_replace(labelpath, "raw", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".png", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    if(count > num_boxes) count = num_boxes;
    float x,y,w,h;
    int id;
    int i;

    for (i = 0; i < count; ++i) {
        x =  boxes[i].x;
        y =  boxes[i].y;
        w =  boxes[i].w;
        h =  boxes[i].h;
        id = boxes[i].id;

        /* Skip degenerate boxes produced by aggressive crops. */
        if ((w < .001 || h < .001)) continue;

        truth[i*5+0] = x;
        truth[i*5+1] = y;
        truth[i*5+2] = w;
        truth[i*5+3] = h;
        truth[i*5+4] = id;
    }
    free(boxes);
}

/* Alphabet size for captcha labels: 26 letters + 10 digits + 1 "blank". */
#define NUMCHARS 37

/* Print the argmax character of each NUMCHARS-wide slice of pred. */
void print_letters(float *pred, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        int index = max_index(pred+i*NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(index));
    }
    printf("\n");
}

/* Fill one-hot captcha truth from the filename: characters of the basename
 * (up to '.' or n) set one of the first 36 slots; remaining positions get the
 * final "blank" slot. */
void fill_truth_captcha(char *path, int n, float *truth)
{
    char *begin = strrchr(path, '/');
    ++begin;
    int i;
    for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){
        int index = alphanum_to_int(begin[i]);
        if(index > 35) printf("Bad %c\n", begin[i]);
        truth[i*NUMCHARS+index] = 1;
    }
    for(;i < n; ++i){
        truth[i*NUMCHARS + NUMCHARS-1] = 1;
    }
}

/* Load a captcha batch: images resized to w*h plus per-character one-hot
 * labels (k characters). With m != 0, n paths are sampled at random. */
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = make_matrix(n, k*NUMCHARS);
    int i;
    for(i = 0; i < n; ++i){
        fill_truth_captcha(paths[i], k, d.y.vals[i]);
    }
    if(m) free(paths);
    return d;
}

/* Autoencoder variant: target equals the input image matrix.
 * NOTE(review): X.cols is overwritten with the constant 17100 — confirm this
 * matches the expected network input size. */
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.X.cols = 17100;
    d.y = d.X;
    if(m) free(paths);
    return d;
}

/* One-hot truth by substring match: truth[i] = 1 iff labels[i] occurs in
 * path. Warns when the number of matches is not exactly one (except the
 * single-class zero-match case). */
void fill_truth(char *path, char **labels, int k, float *truth)
{
    int i;
    memset(truth, 0, k*sizeof(float));
    int count = 0;
    for(i = 0; i < k; ++i){
        if(strstr(path, labels[i])){
            truth[i] = 1;
            ++count;
        }
    }
    if(count != 1 && (k != 1 || count != 0)) printf("Too many or too few labels: %d, %s\n", count, path);
}

/* Propagate label activations up a class hierarchy, then mark fully-inactive
 * sibling groups with SECRET_NUM (continues below). */
void fill_hierarchy(float *truth, int
k, tree *hierarchy)
{
    int j;
    /* Pass 1: every active label switches on all of its ancestors. */
    for(j = 0; j < k; ++j){
        if(truth[j]){
            int parent = hierarchy->parent[j];
            while(parent >= 0){
                truth[parent] = 1;
                parent = hierarchy->parent[parent];
            }
        }
    }
    int i;
    int count = 0;
    /* Pass 2: groups with no active member are filled with SECRET_NUM
     * (sentinel meaning "don't train on these outputs"). */
    for(j = 0; j < hierarchy->groups; ++j){
        //printf("%d\n", count);
        int mask = 1;
        for(i = 0; i < hierarchy->group_size[j]; ++i){
            if(truth[count + i]){
                mask = 0;
                break;
            }
        }
        if (mask) {
            for(i = 0; i < hierarchy->group_size[j]; ++i){
                truth[count + i] = SECRET_NUM;
            }
        }
        count += hierarchy->group_size[j];
    }
}

/* Load one regression target per path from the derived "targets" .txt file.
 * NOTE(review): fopen/fscanf results are unchecked — a missing file crashes
 * on the NULL FILE*; confirm inputs are guaranteed to exist. */
matrix load_regression_labels_paths(char **paths, int n)
{
    matrix y = make_matrix(n, 1);
    int i;
    for(i = 0; i < n; ++i){
        char labelpath[4096];
        find_replace(paths[i], "images", "targets", labelpath);
        find_replace(labelpath, "JPEGImages", "targets", labelpath);
        find_replace(labelpath, ".jpg", ".txt", labelpath);
        find_replace(labelpath, ".png", ".txt", labelpath);

        FILE *file = fopen(labelpath, "r");
        fscanf(file, "%f", &(y.vals[i][0]));
        fclose(file);
    }
    return y;
}

/* Build an n x k one-hot label matrix from path substrings; optionally
 * expands labels through a class hierarchy. No-op rows if labels is NULL. */
matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy)
{
    matrix y = make_matrix(n, k);
    int i;
    for(i = 0; i < n && labels; ++i){
        fill_truth(paths[i], labels, k, y.vals[i]);
        if(hierarchy){
            fill_hierarchy(y.vals[i], k, hierarchy);
        }
    }
    return y;
}

/* Multi-label tags: read integer tag ids (< k) from a per-image label file,
 * falling back to a "labels2" directory; prints how many files were found. */
matrix load_tags_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i;
    int count = 0;
    for(i = 0; i < n; ++i){
        char label[4096];
        find_replace(paths[i], "imgs", "labels", label);
        find_replace(label, "_iconl.jpeg", ".txt", label);
        FILE *file = fopen(label, "r");
        if(!file){
            find_replace(label, "labels", "labels2", label);
            file = fopen(label, "r");
            if(!file) continue;
        }
        ++count;
        int tag;
        while(fscanf(file, "%d", &tag) == 1){
            if(tag < k){
                y.vals[i][tag] = 1;
            }
        }
        fclose(file);
    }
    printf("%d/%d\n", count, n);
    return y;
}

/* Read label names (one per line) into a NULL-terminated array. */
char **get_labels(char *filename)
{
    list *plist = get_paths(filename);
    char **labels = (char **)list_to_array(plist);
    free_list(plist);
    return labels;
}

/* Free a data batch. Deep batches own their row buffers; shallow ones only
 * own the row-pointer arrays. */
void free_data(data d)
{
    if(!d.shallow){
        free_matrix(d.X);
        free_matrix(d.y);
    }else{
        free(d.X.vals);
        free(d.y.vals);
    }
}

/* Build a w*h*classes mask image from the RLE label file paired with path:
 * each "<id> <rle>" line ORs its decoded mask into channel id. */
image get_segmentation_image(char *path, int w, int h, int classes)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    image mask = make_image(w, h, classes);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    image part = make_image(w, h, 1);
    while(fscanf(file, "%d %s", &id, buff) == 2){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        or_image(part, mask, id);
        free(rle);
    }
    //exclusive_image(mask);
    fclose(file);
    free_image(part);
    return mask;
}

/* Like get_segmentation_image but with an extra trailing "background"
 * channel that starts at 1 and is cleared wherever any class is present. */
image get_segmentation_image2(char *path, int w, int h, int classes)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    image mask = make_image(w, h, classes+1);
    int i;
    for(i = 0; i < w*h; ++i){
        mask.data[w*h*classes + i] = 1;
    }
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    image part = make_image(w, h, 1);
    while(fscanf(file, "%d %s", &id, buff) == 2){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        or_image(part, mask, id);
        for(i = 0; i < w*h; ++i){
            if(part.data[i]) mask.data[w*h*classes + i] = 0;
        }
        free(rle);
    }
    //exclusive_image(mask);
    fclose(file);
    free_image(part);
    return mask;
}

/* Load a semantic-segmentation batch: n random images augmented with a
 * shared random transform, plus their class masks downscaled by div
 * (continues below). */
data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y.rows = n;
    d.y.cols = h*w*classes/div/div;
    d.y.vals = calloc(d.X.rows, sizeof(float*));

    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);

        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;

        /* The mask gets the same geometric transform as the image, scaled
         * down by div so truth matches the network's output resolution. */
        image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes);
        //image mask = make_image(orig.w, orig.h, classes+1);
        image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect);

        if(flip) flip_image(sized_m);
        d.y.vals[i] = sized_m.data;

        free_image(orig);
        free_image(mask);

        /*
        image rgb = mask_to_rgb(sized_m, classes);
        show_image(rgb, "part");
        show_image(sized, "orig");
        cvWaitKey(0);
        free_image(rgb);
        */
    }
    free(random_paths);
    return d;
}

/* Load an instance-segmentation batch: augmented images plus per-instance
 * truth rows built by fill_truth_iseg (masks resized to 14x14). */
data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y = make_matrix(n, (coords+1)*boxes);

    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);

        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        //show_image(sized, "image");

        fill_truth_iseg(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, 14, 14);

        free_image(orig);

        /*
        image rgb = mask_to_rgb(sized_m, classes);
        show_image(rgb, "part");
        show_image(sized, "orig");
        cvWaitKey(0);
        free_image(rgb);
        */
    }
    free(random_paths);
    return d;
}

/* Load a region-detector batch: random jittered crop + flip + HSV distortion,
 * with grid-cell truth written by fill_truth_region. */
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = size*size*(5+classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);

        int oh = orig.h;
        int ow = orig.w;

        /* Random crop window: jitter each edge by up to jitter*dimension. */
        int dw = (ow*jitter);
        int dh = (oh*jitter);

        int pleft  = rand_uniform(-dw, dw);
        int pright = rand_uniform(-dw, dw);
        int ptop   = rand_uniform(-dh, dh);
        int pbot   = rand_uniform(-dh, dh);

        int swidth =  ow - pleft - pright;
        int sheight = oh - ptop - pbot;

        float sx = (float)swidth  / ow;
        float sy = (float)sheight / oh;

        int flip = rand()%2;
        image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

        float dx = ((float)pleft/ow)/sx;
        float dy = ((float)ptop /oh)/sy;

        image sized = resize_image(cropped, w, h);
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;

        fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy);

        free_image(orig);
        free_image(cropped);
    }
    free(random_paths);
    return d;
}

/* Load a comparison batch: pairs of images stacked channel-wise (6 channels),
 * with per-class IoU scores from both label files collapsed into a
 * win/lose/SECRET_NUM target per class. */
data load_data_compare(int n, char **paths, int m, int classes, int w, int h)
{
    if(m) paths = get_random_paths(paths, 2*n, m);
    int i,j;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*6;

    int k = 2*(classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image im1 = load_image_color(paths[i*2],   w, h);
        image im2 = load_image_color(paths[i*2+1], w, h);

        d.X.vals[i] = calloc(d.X.cols, sizeof(float));
        memcpy(d.X.vals[i],         im1.data, h*w*3*sizeof(float));
        memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float));

        int id;
        float iou;

        char imlabel1[4096];
        char imlabel2[4096];
        find_replace(paths[i*2],   "imgs", "labels", imlabel1);
        find_replace(imlabel1, "jpg", "txt", imlabel1);
        FILE *fp1 = fopen(imlabel1, "r");

        /* Keep the best IoU per class for each of the two images. */
        while(fscanf(fp1, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou;
        }

        find_replace(paths[i*2+1], "imgs", "labels", imlabel2);
        find_replace(imlabel2, "jpg", "txt", imlabel2);
        FILE *fp2 = fopen(imlabel2, "r");

        while(fscanf(fp2, "%d %f", &id, &iou) == 2){
            if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou;
        }

        for (j = 0; j < classes; ++j){
            if (d.y.vals[i][2*j] > .5 &&  d.y.vals[i][2*j+1] < .5){
                d.y.vals[i][2*j] = 1;
                d.y.vals[i][2*j+1] = 0;
            } else if (d.y.vals[i][2*j] < .5 &&  d.y.vals[i][2*j+1] > .5){
                d.y.vals[i][2*j] = 0;
                d.y.vals[i][2*j+1] = 1;
            } else {
                d.y.vals[i][2*j]   = SECRET_NUM;
                d.y.vals[i][2*j+1] = SECRET_NUM;
            }
        }
        fclose(fp1);
        fclose(fp2);

        free_image(im1);
        free_image(im2);
    }
    if(m) free(paths);
    return d;
}

/* Load a single random "swag" sample: one jittered crop of one random image
 * with fill_truth_swag targets. */
data load_data_swag(char **paths, int n, int classes, float jitter)
{
    int index = rand()%n;
    char *random_path = paths[index];

    image orig = load_image_color(random_path, 0, 0);
    int h = orig.h;
    int w = orig.w;

    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;

    d.X.rows = 1;
    d.X.vals = calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = (4+classes)*30;
    d.y = make_matrix(1, k);

    int dw = w*jitter;
    int dh = h*jitter;

    int pleft  = rand_uniform(-dw, dw);
    int pright = rand_uniform(-dw, dw);
    int ptop   = rand_uniform(-dh, dh);
    int pbot   = rand_uniform(-dh, dh);

    int swidth =  w - pleft - pright;
    int sheight = h - ptop - pbot;

    float sx = (float)swidth  / w;
    float sy = (float)sheight / h;

    int flip = rand()%2;
    image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

    float dx = ((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;

    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);
    d.X.vals[0] = sized.data;

    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);

    free_image(orig);
    free_image(cropped);

    return d;
}

/* NOTE(review): definition continues past the visible region; signature kept
 * verbatim. */
data load_data_detection(int n, char **paths,
int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure) { char **random_paths = get_random_paths(paths, n, m); int i; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*3; d.y = make_matrix(n, 5*boxes); for(i = 0; i < n; ++i){ image orig = load_image_color(random_paths[i], 0, 0); image sized = make_image(w, h, orig.c); fill_image(sized, .5); float dw = jitter * orig.w; float dh = jitter * orig.h; float new_ar = (orig.w + rand_uniform(-dw, dw)) / (orig.h + rand_uniform(-dh, dh)); float scale = rand_uniform(.25, 2); float nw, nh; if(new_ar < 1){ nh = scale * h; nw = nh * new_ar; } else { nw = scale * w; nh = nw / new_ar; } float dx = rand_uniform(0, w - nw); float dy = rand_uniform(0, h - nh); place_image(orig, nw, nh, dx, dy, sized); random_distort_image(sized, hue, saturation, exposure); int flip = rand()%2; if(flip) flip_image(sized); d.X.vals[i] = sized.data; fill_truth_detection(random_paths[i], boxes, d.y.vals[i], classes, flip, -dx/w, -dy/h, nw/w, nh/h); free_image(orig); } free(random_paths); return d; } void *load_thread(void *ptr) { //printf("Loading data: %d\n", rand()); load_args a = *(struct load_args*)ptr; if(a.exposure == 0) a.exposure = 1; if(a.saturation == 0) a.saturation = 1; if(a.aspect == 0) a.aspect = 1; if (a.type == OLD_CLASSIFICATION_DATA){ *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h); } else if (a.type == REGRESSION_DATA){ *a.d = load_data_regression(a.paths, a.n, a.m, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } else if (a.type == CLASSIFICATION_DATA){ *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.center); } else if (a.type == SUPER_DATA){ *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale); } else if (a.type == WRITING_DATA){ *a.d = load_data_writing(a.paths, a.n, a.m, 
a.w, a.h, a.out_w, a.out_h); } else if (a.type == INSTANCE_DATA){ *a.d = load_data_iseg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.coords, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } else if (a.type == SEGMENTATION_DATA){ *a.d = load_data_seg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.scale); } else if (a.type == REGION_DATA){ *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure); } else if (a.type == DETECTION_DATA){ *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure); } else if (a.type == SWAG_DATA){ *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter); } else if (a.type == COMPARE_DATA){ *a.d = load_data_compare(a.n, a.paths, a.m, a.classes, a.w, a.h); } else if (a.type == IMAGE_DATA){ *(a.im) = load_image_color(a.path, 0, 0); *(a.resized) = resize_image(*(a.im), a.w, a.h); } else if (a.type == LETTERBOX_DATA){ *(a.im) = load_image_color(a.path, 0, 0); *(a.resized) = letterbox_image(*(a.im), a.w, a.h); } else if (a.type == TAG_DATA){ *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure); } free(ptr); return 0; } pthread_t load_data_in_thread(load_args args) { pthread_t thread; struct load_args *ptr = calloc(1, sizeof(struct load_args)); *ptr = args; if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed"); return thread; } void *load_threads(void *ptr) { int i; load_args args = *(load_args *)ptr; if (args.threads == 0) args.threads = 1; data *out = args.d; int total = args.n; free(ptr); data *buffers = calloc(args.threads, sizeof(data)); pthread_t *threads = calloc(args.threads, sizeof(pthread_t)); for(i = 0; i < args.threads; ++i){ args.d = buffers + i; args.n = (i+1) * total/args.threads - i * total/args.threads; threads[i] = 
        /* load_threads (continued): the launch loop ends here; join every
           loader thread, then merge the per-thread buffers. */
        load_data_in_thread(args);
    }
    for(i = 0; i < args.threads; ++i){
        pthread_join(threads[i], 0);
    }
    /* concat_datas shares the buffers' row pointers (concat_matrix copies
       pointers, not rows). *out takes ownership (shallow = 0); each buffer is
       then freed shallowly so the rows are not double-freed. */
    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;
    for(i = 0; i < args.threads; ++i){
        buffers[i].shallow = 1;
        free_data(buffers[i]);
    }
    free(buffers);
    free(threads);
    return 0;
}

/* Run one load request synchronously on the calling thread.
 * The args copy is heap-allocated because load_thread() frees its argument.
 * NOTE(review): calloc result unchecked here and below — crashes on OOM;
 * confirm this matches the project's abort-on-OOM policy. */
void load_data_blocking(load_args args)
{
    struct load_args *ptr = calloc(1, sizeof(struct load_args));
    *ptr = args;
    load_thread(ptr);
}

/* Kick off an asynchronous multi-threaded load (worker = load_threads).
 * Returns the thread handle; the caller must pthread_join it before reading
 * *args.d. */
pthread_t load_data(load_args args)
{
    pthread_t thread;
    struct load_args *ptr = calloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed");
    return thread;
}

/* "Writing" task loader: d.X from the .png inputs at (w, h), d.y from the
 * companion "-label.png" files loaded via load_image_paths_gray at
 * (out_w, out_h). Frees the replace_paths it allocates. */
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    if(m) paths = get_random_paths(paths, n, m);
    char **replace_paths = find_replace_paths(paths, n, ".png", "-label.png");
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
    if(m) free(paths);
    int i;
    for(i = 0; i < n; ++i) free(replace_paths[i]);
    free(replace_paths);
    return d;
}

/* Plain classification loader: images resized to (w, h), labels matched
 * against the `labels` name list (k classes, no hierarchy — final arg 0). */
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_labels_paths(paths, n, labels, k, 0);
    if(m) free(paths);
    return d;
}

/* (dead code retained from upstream)
data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { data d = {0}; d.indexes = calloc(n, sizeof(int)); if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes); d.shallow = 0; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure); d.y = load_labels_paths(paths, n, labels, k); if(m) free(paths); return d; }
*/

/* Super-resolution loader: d.X gets (w x h) downscales, d.y the original
 * (w*scale x h*scale) random crops. */
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
d.shallow = 0; int i; d.X.rows = n; d.X.vals = calloc(n, sizeof(float*)); d.X.cols = w*h*3; d.y.rows = n; d.y.vals = calloc(n, sizeof(float*)); d.y.cols = w*scale * h*scale * 3; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], 0, 0); image crop = random_crop_image(im, w*scale, h*scale); int flip = rand()%2; if (flip) flip_image(crop); image resize = resize_image(crop, w, h); d.X.vals[i] = resize.data; d.y.vals[i] = crop.data; free_image(im); } if(m) free(paths); return d; } data load_data_regression(char **paths, int n, int m, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0); d.y = load_regression_labels_paths(paths, n); if(m) free(paths); return d; } data select_data(data *orig, int *inds) { data d = {0}; d.shallow = 1; d.w = orig[0].w; d.h = orig[0].h; d.X.rows = orig[0].X.rows; d.y.rows = orig[0].X.rows; d.X.cols = orig[0].X.cols; d.y.cols = orig[0].y.cols; d.X.vals = calloc(orig[0].X.rows, sizeof(float *)); d.y.vals = calloc(orig[0].y.rows, sizeof(float *)); int i; for(i = 0; i < d.X.rows; ++i){ d.X.vals[i] = orig[inds[i]].X.vals[i]; d.y.vals[i] = orig[inds[i]].y.vals[i]; } return d; } data *tile_data(data orig, int divs, int size) { data *ds = calloc(divs*divs, sizeof(data)); int i, j; #pragma omp parallel for for(i = 0; i < divs*divs; ++i){ data d; d.shallow = 0; d.w = orig.w/divs * size; d.h = orig.h/divs * size; d.X.rows = orig.X.rows; d.X.cols = d.w*d.h*3; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.y = copy_matrix(orig.y); #pragma omp parallel for for(j = 0; j < orig.X.rows; ++j){ int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2; int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2; image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]); d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data; } ds[i] = 
d; } return ds; } data resize_data(data orig, int w, int h) { data d = {0}; d.shallow = 0; d.w = w; d.h = h; int i; d.X.rows = orig.X.rows; d.X.cols = w*h*3; d.X.vals = calloc(d.X.rows, sizeof(float*)); d.y = copy_matrix(orig.y); #pragma omp parallel for for(i = 0; i < orig.X.rows; ++i){ image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]); d.X.vals[i] = resize_image(im, w, h).data; } return d; } data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.shallow = 0; d.w=size; d.h=size; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, center); d.y = load_labels_paths(paths, n, labels, k, hierarchy); if(m) free(paths); return d; } data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure) { if(m) paths = get_random_paths(paths, n, m); data d = {0}; d.w = size; d.h = size; d.shallow = 0; d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0); d.y = load_tags_paths(paths, n, k); if(m) free(paths); return d; } matrix concat_matrix(matrix m1, matrix m2) { int i, count = 0; matrix m; m.cols = m1.cols; m.rows = m1.rows+m2.rows; m.vals = calloc(m1.rows + m2.rows, sizeof(float*)); for(i = 0; i < m1.rows; ++i){ m.vals[count++] = m1.vals[i]; } for(i = 0; i < m2.rows; ++i){ m.vals[count++] = m2.vals[i]; } return m; } data concat_data(data d1, data d2) { data d = {0}; d.shallow = 1; d.X = concat_matrix(d1.X, d2.X); d.y = concat_matrix(d1.y, d2.y); d.w = d1.w; d.h = d1.h; return d; } data concat_datas(data *d, int n) { int i; data out = {0}; for(i = 0; i < n; ++i){ data new = concat_data(d[i], out); free_data(out); out = new; } return out; } data load_categorical_data_csv(char 
*filename, int target, int k) { data d = {0}; d.shallow = 0; matrix X = csv_to_matrix(filename); float *truth_1d = pop_column(&X, target); float **truth = one_hot_encode(truth_1d, X.rows, k); matrix y; y.rows = X.rows; y.cols = k; y.vals = truth; d.X = X; d.y = y; free(truth_1d); return d; } data load_cifar10_data(char *filename) { data d = {0}; d.shallow = 0; long i,j; matrix X = make_matrix(10000, 3072); matrix y = make_matrix(10000, 10); d.X = X; d.y = y; FILE *fp = fopen(filename, "rb"); if(!fp) file_error(filename); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class = bytes[0]; y.vals[i][class] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i][j] = (double)bytes[j+1]; } } scale_data_rows(d, 1./255); //normalize_data_rows(d); fclose(fp); return d; } void get_random_batch(data d, int n, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = rand()%d.X.rows; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void get_next_batch(data d, int n, int offset, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = offset + j; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); if(y) memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void smooth_data(data d) { int i, j; float scale = 1. 
/ d.y.cols; float eps = .1; for(i = 0; i < d.y.rows; ++i){ for(j = 0; j < d.y.cols; ++j){ d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j]; } } } data load_all_cifar10() { data d = {0}; d.shallow = 0; int i,j,b; matrix X = make_matrix(50000, 3072); matrix y = make_matrix(50000, 10); d.X = X; d.y = y; for(b = 0; b < 5; ++b){ char buff[256]; sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1); FILE *fp = fopen(buff, "rb"); if(!fp) file_error(buff); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class = bytes[0]; y.vals[i+b*10000][class] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i+b*10000][j] = (double)bytes[j+1]; } } fclose(fp); } //normalize_data_rows(d); scale_data_rows(d, 1./255); smooth_data(d); return d; } data load_go(char *filename) { FILE *fp = fopen(filename, "rb"); matrix X = make_matrix(3363059, 361); matrix y = make_matrix(3363059, 361); int row, col; if(!fp) file_error(filename); char *label; int count = 0; while((label = fgetl(fp))){ int i; if(count == X.rows){ X = resize_matrix(X, count*2); y = resize_matrix(y, count*2); } sscanf(label, "%d %d", &row, &col); char *board = fgetl(fp); int index = row*19 + col; y.vals[count][index] = 1; for(i = 0; i < 19*19; ++i){ float val = 0; if(board[i] == '1') val = 1; else if(board[i] == '2') val = -1; X.vals[count][i] = val; } ++count; free(label); free(board); } X = resize_matrix(X, count); y = resize_matrix(y, count); data d = {0}; d.shallow = 0; d.X = X; d.y = y; fclose(fp); return d; } void randomize_data(data d) { int i; for(i = d.X.rows-1; i > 0; --i){ int index = rand()%i; float *swap = d.X.vals[index]; d.X.vals[index] = d.X.vals[i]; d.X.vals[i] = swap; swap = d.y.vals[index]; d.y.vals[index] = d.y.vals[i]; d.y.vals[i] = swap; } } void scale_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ scale_array(d.X.vals[i], d.X.cols, s); } } void translate_data_rows(data d, float s) { int i; for(i = 0; i < d.X.rows; ++i){ 
translate_array(d.X.vals[i], d.X.cols, s); } } data copy_data(data d) { data c = {0}; c.w = d.w; c.h = d.h; c.shallow = 0; c.num_boxes = d.num_boxes; c.boxes = d.boxes; c.X = copy_matrix(d.X); c.y = copy_matrix(d.y); return c; } void normalize_data_rows(data d) { int i; for(i = 0; i < d.X.rows; ++i){ normalize_array(d.X.vals[i], d.X.cols); } } data get_data_part(data d, int part, int total) { data p = {0}; p.shallow = 1; p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total; p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total; p.X.cols = d.X.cols; p.y.cols = d.y.cols; p.X.vals = d.X.vals + d.X.rows * part / total; p.y.vals = d.y.vals + d.y.rows * part / total; return p; } data get_random_data(data d, int num) { data r = {0}; r.shallow = 1; r.X.rows = num; r.y.rows = num; r.X.cols = d.X.cols; r.y.cols = d.y.cols; r.X.vals = calloc(num, sizeof(float *)); r.y.vals = calloc(num, sizeof(float *)); int i; for(i = 0; i < num; ++i){ int index = rand()%d.X.rows; r.X.vals[i] = d.X.vals[index]; r.y.vals[i] = d.y.vals[index]; } return r; } data *split_data(data d, int part, int total) { data *split = calloc(2, sizeof(data)); int i; int start = part*d.X.rows/total; int end = (part+1)*d.X.rows/total; data train; data test; train.shallow = test.shallow = 1; test.X.rows = test.y.rows = end-start; train.X.rows = train.y.rows = d.X.rows - (end-start); train.X.cols = test.X.cols = d.X.cols; train.y.cols = test.y.cols = d.y.cols; train.X.vals = calloc(train.X.rows, sizeof(float*)); test.X.vals = calloc(test.X.rows, sizeof(float*)); train.y.vals = calloc(train.y.rows, sizeof(float*)); test.y.vals = calloc(test.y.rows, sizeof(float*)); for(i = 0; i < start; ++i){ train.X.vals[i] = d.X.vals[i]; train.y.vals[i] = d.y.vals[i]; } for(i = start; i < end; ++i){ test.X.vals[i-start] = d.X.vals[i]; test.y.vals[i-start] = d.y.vals[i]; } for(i = end; i < d.X.rows; ++i){ train.X.vals[i-(end-start)] = d.X.vals[i]; train.y.vals[i-(end-start)] = d.y.vals[i]; } split[0] = 
train; split[1] = test; return split; } /* returns a calloc'd {train, test} pair; both are shallow views into d (shallow == 1) — free the split array, not the rows */
/* ===== appended file boundary: common.h (LightGBM utils header, C++) — the C code above is a separate translation unit ===== */
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_UTILS_COMMON_FUN_H_ #define LIGHTGBM_UTILS_COMMON_FUN_H_ #include <LightGBM/utils/log.h> #include <LightGBM/utils/openmp_wrapper.h> #include <limits> #include <string> #include <algorithm> #include <cmath> #include <cstdint> #include <cstdio> #include <functional> #include <iomanip> #include <iterator> #include <memory> #include <sstream> #include <type_traits> #include <utility> #include <vector> #ifdef _MSC_VER #include "intrin.h" #endif namespace LightGBM { namespace Common { inline static char tolower(char in) { if (in <= 'Z' && in >= 'A') return in - ('Z' - 'z'); return in; } inline static std::string Trim(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1); str.erase(0, str.find_first_not_of(" \f\n\r\t\v")); return str; } inline static std::string RemoveQuotationSymbol(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of("'\"") + 1); str.erase(0, str.find_first_not_of("'\"")); return str; } inline static bool StartsWith(const std::string& str, const std::string prefix) { if (str.substr(0, prefix.size()) == prefix) { return true; } else { return false; } } inline static std::vector<std::string> Split(const char* c_str, char delimiter) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == delimiter) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> SplitLines(const char* c_str) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == '\n' || str[pos] == '\r') { if (i < pos) { ret.push_back(str.substr(i, pos - 
i)); } // skip the line endings while (str[pos] == '\n' || str[pos] == '\r') ++pos; // new begin i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { bool met_delimiters = false; for (int j = 0; delimiters[j] != '\0'; ++j) { if (str[pos] == delimiters[j]) { met_delimiters = true; break; } } if (met_delimiters) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } template<typename T> inline static const char* Atoi(const char* p, T* out) { int sign; T value; while (*p == ' ') { ++p; } sign = 1; if (*p == '-') { sign = -1; ++p; } else if (*p == '+') { ++p; } for (value = 0; *p >= '0' && *p <= '9'; ++p) { value = value * 10 + (*p - '0'); } *out = static_cast<T>(sign * value); while (*p == ' ') { ++p; } return p; } template<typename T> inline static double Pow(T base, int power) { if (power < 0) { return 1.0 / Pow(base, -power); } else if (power == 0) { return 1; } else if (power % 2 == 0) { return Pow(base*base, power / 2); } else if (power % 3 == 0) { return Pow(base*base*base, power / 3); } else { return base * Pow(base, power - 1); } } inline static const char* Atof(const char* p, double* out) { int frac; double sign, value, scale; *out = NAN; // Skip leading white space, if any. while (*p == ' ') { ++p; } // Get sign, if any. sign = 1.0; if (*p == '-') { sign = -1.0; ++p; } else if (*p == '+') { ++p; } // is a number if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') { // Get digits before decimal point or exponent, if any. for (value = 0.0; *p >= '0' && *p <= '9'; ++p) { value = value * 10.0 + (*p - '0'); } // Get digits after decimal point, if any. 
if (*p == '.') { double right = 0.0; int nn = 0; ++p; while (*p >= '0' && *p <= '9') { right = (*p - '0') + right * 10.0; ++nn; ++p; } value += right / Pow(10.0, nn); } // Handle exponent, if any. frac = 0; scale = 1.0; if ((*p == 'e') || (*p == 'E')) { uint32_t expon; // Get sign of exponent, if any. ++p; if (*p == '-') { frac = 1; ++p; } else if (*p == '+') { ++p; } // Get digits of exponent, if any. for (expon = 0; *p >= '0' && *p <= '9'; ++p) { expon = expon * 10 + (*p - '0'); } if (expon > 308) expon = 308; // Calculate scaling factor. while (expon >= 50) { scale *= 1E50; expon -= 50; } while (expon >= 8) { scale *= 1E8; expon -= 8; } while (expon > 0) { scale *= 10.0; expon -= 1; } } // Return signed and scaled floating point result. *out = sign * (frac ? (value / scale) : (value * scale)); } else { size_t cnt = 0; while (*(p + cnt) != '\0' && *(p + cnt) != ' ' && *(p + cnt) != '\t' && *(p + cnt) != ',' && *(p + cnt) != '\n' && *(p + cnt) != '\r' && *(p + cnt) != ':') { ++cnt; } if (cnt > 0) { std::string tmp_str(p, cnt); std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower); if (tmp_str == std::string("na") || tmp_str == std::string("nan") || tmp_str == std::string("null")) { *out = NAN; } else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) { *out = sign * 1e308; } else { Log::Fatal("Unknown token %s in data file", tmp_str.c_str()); } p += cnt; } } while (*p == ' ') { ++p; } return p; } inline static bool AtoiAndCheck(const char* p, int* out) { const char* after = Atoi(p, out); if (*after != '\0') { return false; } return true; } inline static bool AtofAndCheck(const char* p, double* out) { const char* after = Atof(p, out); if (*after != '\0') { return false; } return true; } inline static unsigned CountDecimalDigit32(uint32_t n) { #if defined(_MSC_VER) || defined(__GNUC__) static const uint32_t powers_of_10[] = { 0, 10, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000, 1000000000 }; #ifdef _MSC_VER 
unsigned long i = 0; _BitScanReverse(&i, n | 1); uint32_t t = (i + 1) * 1233 >> 12; #elif __GNUC__ uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12; #endif return t - (n < powers_of_10[t]) + 1; #else if (n < 10) return 1; if (n < 100) return 2; if (n < 1000) return 3; if (n < 10000) return 4; if (n < 100000) return 5; if (n < 1000000) return 6; if (n < 10000000) return 7; if (n < 100000000) return 8; if (n < 1000000000) return 9; return 10; #endif } inline static void Uint32ToStr(uint32_t value, char* buffer) { const char kDigitsLut[200] = { '0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6', '0', '7', '0', '8', '0', '9', '1', '0', '1', '1', '1', '2', '1', '3', '1', '4', '1', '5', '1', '6', '1', '7', '1', '8', '1', '9', '2', '0', '2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9', '3', '0', '3', '1', '3', '2', '3', '3', '3', '4', '3', '5', '3', '6', '3', '7', '3', '8', '3', '9', '4', '0', '4', '1', '4', '2', '4', '3', '4', '4', '4', '5', '4', '6', '4', '7', '4', '8', '4', '9', '5', '0', '5', '1', '5', '2', '5', '3', '5', '4', '5', '5', '5', '6', '5', '7', '5', '8', '5', '9', '6', '0', '6', '1', '6', '2', '6', '3', '6', '4', '6', '5', '6', '6', '6', '7', '6', '8', '6', '9', '7', '0', '7', '1', '7', '2', '7', '3', '7', '4', '7', '5', '7', '6', '7', '7', '7', '8', '7', '9', '8', '0', '8', '1', '8', '2', '8', '3', '8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9', '9', '0', '9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', '7', '9', '8', '9', '9' }; unsigned digit = CountDecimalDigit32(value); buffer += digit; *buffer = '\0'; while (value >= 100) { const unsigned i = (value % 100) << 1; value /= 100; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } if (value < 10) { *--buffer = static_cast<char>(value) + '0'; } else { const unsigned i = value << 1; *--buffer = kDigitsLut[i + 1]; *--buffer = kDigitsLut[i]; } } inline static void Int32ToStr(int32_t value, char* buffer) { 
uint32_t u = static_cast<uint32_t>(value); if (value < 0) { *buffer++ = '-'; u = ~u + 1; } Uint32ToStr(u, buffer); } inline static void DoubleToStr(double value, char* buffer, size_t #ifdef _MSC_VER buffer_len #endif ) { #ifdef _MSC_VER sprintf_s(buffer, buffer_len, "%.17g", value); #else sprintf(buffer, "%.17g", value); #endif } inline static const char* SkipSpaceAndTab(const char* p) { while (*p == ' ' || *p == '\t') { ++p; } return p; } inline static const char* SkipReturn(const char* p) { while (*p == '\n' || *p == '\r' || *p == ' ') { ++p; } return p; } template<typename T, typename T2> inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) { std::vector<T2> ret(arr.size()); for (size_t i = 0; i < arr.size(); ++i) { ret[i] = static_cast<T2>(arr[i]); } return ret; } template<typename T, bool is_float, bool is_unsign> struct __TToStringHelperFast { void operator()(T value, char* buffer, size_t) const { Int32ToStr(value, buffer); } }; template<typename T> struct __TToStringHelperFast<T, true, false> { void operator()(T value, char* buffer, size_t #ifdef _MSC_VER buf_len #endif ) const { #ifdef _MSC_VER sprintf_s(buffer, buf_len, "%g", value); #else sprintf(buffer, "%g", value); #endif } }; template<typename T> struct __TToStringHelperFast<T, false, true> { void operator()(T value, char* buffer, size_t) const { Uint32ToStr(value, buffer); } }; template<typename T> inline static std::string ArrayToStringFast(const std::vector<T>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } __TToStringHelperFast<T, std::is_floating_point<T>::value, std::is_unsigned<T>::value> helper; const size_t buf_len = 16; std::vector<char> buffer(buf_len); std::stringstream str_buf; helper(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { helper(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } inline static std::string ArrayToString(const 
std::vector<double>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } const size_t buf_len = 32; std::vector<char> buffer(buf_len); std::stringstream str_buf; DoubleToStr(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { DoubleToStr(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } template<typename T, bool is_float> struct __StringToTHelper { T operator()(const std::string& str) const { T ret = 0; Atoi(str.c_str(), &ret); return ret; } }; template<typename T> struct __StringToTHelper<T, true> { T operator()(const std::string& str) const { return static_cast<T>(std::stod(str)); } }; template<typename T> inline static std::vector<T> StringToArray(const std::string& str, char delimiter) { std::vector<std::string> strs = Split(str.c_str(), delimiter); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T> inline static std::vector<T> StringToArray(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } std::vector<std::string> strs = Split(str.c_str(), ' '); CHECK(strs.size() == static_cast<size_t>(n)); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T, bool is_float> struct __StringToTHelperFast { const char* operator()(const char*p, T* out) const { return Atoi(p, out); } }; template<typename T> struct __StringToTHelperFast<T, true> { const char* operator()(const char*p, T* out) const { double tmp = 0.0f; auto ret = Atof(p, &tmp); *out = static_cast<T>(tmp); return ret; } }; template<typename T> inline static std::vector<T> StringToArrayFast(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } auto p_str = 
str.c_str(); __StringToTHelperFast<T, std::is_floating_point<T>::value> helper; std::vector<T> ret(n); for (int i = 0; i < n; ++i) { p_str = helper(p_str, &ret[i]); } return ret; } template<typename T> inline static std::string Join(const std::vector<T>& strs, const char* delimiter) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[0]; for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } template<> inline std::string Join<int8_t>(const std::vector<int8_t>& strs, const char* delimiter) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << static_cast<int16_t>(strs[0]); for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << static_cast<int16_t>(strs[i]); } return str_buf.str(); } template<typename T> inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) { if (end - start <= 0) { return std::string(""); } start = std::min(start, static_cast<size_t>(strs.size()) - 1); end = std::min(end, static_cast<size_t>(strs.size())); std::stringstream str_buf; str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[start]; for (size_t i = start + 1; i < end; ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } inline static int64_t Pow2RoundUp(int64_t x) { int64_t t = 1; for (int i = 0; i < 64; ++i) { if (t >= x) { return t; } t <<= 1; } return 0; } /*! * \brief Do inplace softmax transformaton on p_rec * \param p_rec The input/output vector of the values. 
*/
inline static void Softmax(std::vector<double>* p_rec) {
  std::vector<double> &rec = *p_rec;
  // Subtract the running max before exponentiating for numerical stability.
  double wmax = rec[0];
  for (size_t i = 1; i < rec.size(); ++i) {
    wmax = std::max(rec[i], wmax);
  }
  double wsum = 0.0f;
  for (size_t i = 0; i < rec.size(); ++i) {
    rec[i] = std::exp(rec[i] - wmax);
    wsum += rec[i];
  }
  for (size_t i = 0; i < rec.size(); ++i) {
    rec[i] /= static_cast<double>(wsum);
  }
}

/*!
 * \brief Out-of-place softmax: output[i] = exp(input[i]) / sum_j exp(input[j]).
 * \param input Input values (length len); not modified.
 * \param output Destination buffer (length len); may not overlap input.
 * \param len Number of elements; assumed >= 1 (input[0] is read unconditionally).
 */
inline static void Softmax(const double* input, double* output, int len) {
  double wmax = input[0];
  for (int i = 1; i < len; ++i) {
    wmax = std::max(input[i], wmax);
  }
  double wsum = 0.0f;
  for (int i = 0; i < len; ++i) {
    output[i] = std::exp(input[i] - wmax);
    wsum += output[i];
  }
  for (int i = 0; i < len; ++i) {
    output[i] /= static_cast<double>(wsum);
  }
}

/*! \brief Collect raw const observer pointers from a vector of unique_ptr; ownership stays with the input. */
template<typename T>
std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) {
  std::vector<const T*> ret;
  for (size_t i = 0; i < input.size(); ++i) {
    ret.push_back(input.at(i).get());
  }
  return ret;
}

/*!
 * \brief Stable-sort the tail [start, keys->size()) of keys, permuting values in lock-step.
 * \param keys Keys to sort by.
 * \param values Companion values, reordered the same way as keys.
 * \param start First index included in the sort.
 * \param is_reverse Sort descending when true.
 */
template<typename T1, typename T2>
inline static void SortForPair(std::vector<T1>* keys, std::vector<T2>* values, size_t start, bool is_reverse = false) {
  std::vector<std::pair<T1, T2>> arr;
  for (size_t i = start; i < keys->size(); ++i) {
    arr.emplace_back(keys->at(i), values->at(i));
  }
  if (!is_reverse) {
    std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first < b.first; });
  } else {
    std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first > b.first; });
  }
  // NOTE(review): the write-back indexes arr with the same i used for keys
  // and stops at arr.size() (== keys->size() - start). For start == 0 this is
  // correct; for start > 0 the indices are offset and the tail is not fully
  // written back — confirm callers only use start == 0.
  for (size_t i = start; i < arr.size(); ++i) {
    keys->at(i) = arr[i].first;
    values->at(i) = arr[i].second;
  }
}

/*! \brief Return one mutable data() pointer per inner vector. Pointers are invalidated if the inner vectors reallocate. */
template <typename T>
inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>* data) {
  std::vector<T*> ptr(data->size());
  for (size_t i = 0; i < data->size(); ++i) {
    ptr[i] = data->at(i).data();
  }
  return ptr;
}

/*! \brief Return the size of each inner vector, narrowed to int. */
template <typename T>
inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) {
  std::vector<int> ret(data.size());
  for (size_t i = 0; i < data.size(); ++i) {
    ret[i] = static_cast<int>(data[i].size());
  }
  return ret;
}

/*! \brief Clamp x into [-1e300, 1e300]; NaN maps to 0.0. */
inline static double AvoidInf(double x) {
  if (std::isnan(x)) {
    return 0.0;
  } else if (x >= 1e300) {
    return 1e300;
  } else if (x <= -1e300) {
    return -1e300;
  } else {
    return x;
  }
}

/*! \brief Clamp x into [-1e38, 1e38]; NaN maps to 0.0f. */
inline static float AvoidInf(float x) {
  if (std::isnan(x)) {
    return 0.0f;
  } else if (x >= 1e38) {
    return 1e38f;
  } else if (x <= -1e38) {
    return -1e38f;
  } else {
    return x;
  }
}

/*! \brief Yields the iterator's value type as a (null) pointer — used only for overload dispatch in ParallelSort. */
template<typename _Iter>
inline static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) {
  return (0);
}

/*!
 * \brief Parallel merge sort: std::sort per-thread chunks, then merge pairs of
 *        adjacent runs level by level using OpenMP.
 * \param _First, _Last Random-access range to sort.
 * \param _Pred Strict-weak-ordering comparator.
 * \param (unnamed) Value-type tag pointer from IteratorValType; selects _VTRanIt.
 */
template<typename _RanIt, typename _Pr, typename _VTRanIt>
inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) {
  size_t len = _Last - _First;
  const size_t kMinInnerLen = 1024;
  int num_threads = 1;
  #pragma omp parallel
  #pragma omp master
  {
    num_threads = omp_get_num_threads();
  }
  // Small input or single thread: a plain serial sort is cheaper.
  if (len <= kMinInnerLen || num_threads <= 1) {
    std::sort(_First, _Last, _Pred);
    return;
  }
  size_t inner_size = (len + num_threads - 1) / num_threads;
  inner_size = std::max(inner_size, kMinInnerLen);
  num_threads = static_cast<int>((len + inner_size - 1) / inner_size);
  // Phase 1: each thread sorts its own contiguous chunk independently.
  #pragma omp parallel for schedule(static, 1)
  for (int i = 0; i < num_threads; ++i) {
    size_t left = inner_size*i;
    size_t right = left + inner_size;
    right = std::min(right, len);
    if (right > left) {
      std::sort(_First + left, _First + right, _Pred);
    }
  }
  // Buffer for merge.
  // NOTE(review): assigning the vector's iterator to _RanIt assumes _RanIt is
  // compatible with it (e.g. a raw pointer) — confirm at call sites.
  std::vector<_VTRanIt> temp_buf(len);
  _RanIt buf = temp_buf.begin();
  size_t s = inner_size;
  // Recursive merge
  // Phase 2: merge runs of size s pairwise, doubling s each level.
  while (s < len) {
    int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2));
    #pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < loop_size; ++i) {
      size_t left = i * 2 * s;
      size_t mid = left + s;
      size_t right = mid + s;
      right = std::min(len, right);
      if (mid >= right) { continue; }
      // Copy the left run aside, then merge it with the right run back in place.
      std::copy(_First + left, _First + mid, buf + left);
      std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred);
    }
    s *= 2;
  }
}

/*! \brief Convenience overload: deduces the value type via IteratorValType. */
template<typename _RanIt, typename _Pr>
inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) {
  return ParallelSort(_First, _Last, _Pred, IteratorValType(_First));
}

// Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not
template <typename T>
inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) {
  auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) {
    std::ostringstream os;
    os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]";
    Log::Fatal(os.str().c_str(), callername, i);
  };
  // Elements are scanned two at a time: order each pair first, so only the
  // smaller needs comparing against ymin and the larger against ymax.
  for (int i = 1; i < ny; i += 2) {
    if (y[i - 1] < y[i]) {
      if (y[i - 1] < ymin) {
        fatal_msg(i - 1);
      } else if (y[i] > ymax) {
        fatal_msg(i);
      }
    } else {
      if (y[i - 1] > ymax) {
        fatal_msg(i - 1);
      } else if (y[i] < ymin) {
        fatal_msg(i);
      }
    }
  }
  if (ny & 1) {  // odd: the last unpaired element is checked on its own
    if (y[ny - 1] < ymin || y[ny - 1] > ymax) {
      fatal_msg(ny - 1);
    }
  }
}
// One-pass scan over array w with nw elements: find min, max and sum of elements;
// this is useful for checking weight requirements.
/*!
 * \brief One-pass min/max/sum over w[0..nw); any of mi/ma/su may be nullptr to skip that output.
 * Elements are consumed in pairs so each pair costs a single comparison for min/max.
 * Assumes nw >= 1 (w[0] is read unconditionally; the even branch also reads w[1]).
 */
template <typename T1, typename T2>
inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) {
  T1 minw;
  T1 maxw;
  T1 sumw;
  int i;
  if (nw & 1) {  // odd: seed with w[0], start pairing at (w[1], w[2])
    minw = w[0];
    maxw = w[0];
    sumw = w[0];
    i = 2;
  } else {  // even: seed with the ordered pair (w[0], w[1])
    if (w[0] < w[1]) {
      minw = w[0];
      maxw = w[1];
    } else {
      minw = w[1];
      maxw = w[0];
    }
    sumw = w[0] + w[1];
    i = 3;
  }
  for (; i < nw; i += 2) {
    if (w[i - 1] < w[i]) {
      minw = std::min(minw, w[i - 1]);
      maxw = std::max(maxw, w[i]);
    } else {
      minw = std::min(minw, w[i]);
      maxw = std::max(maxw, w[i - 1]);
    }
    sumw += w[i - 1] + w[i];
  }
  if (mi != nullptr) {
    *mi = minw;
  }
  if (ma != nullptr) {
    *ma = maxw;
  }
  if (su != nullptr) {
    *su = static_cast<T2>(sumw);
  }
}

/*! \brief Allocate a zeroed bitset large enough for n bits (32 bits per word). */
inline static std::vector<uint32_t> EmptyBitset(int n) {
  int size = n / 32;
  if (n % 32 != 0) ++size;
  return std::vector<uint32_t>(size);
}

/*! \brief Set bit 'val' in the bitset, growing it with zeroed words if needed. */
template<typename T>
inline static void InsertBitset(std::vector<uint32_t>* vec, const T val) {
  int i1 = val / 32;
  int i2 = val % 32;
  if (static_cast<int>(vec->size()) < i1 + 1) {
    vec->resize(i1 + 1, 0);
  }
  vec->at(i1) |= (1 << i2);
}

/*! \brief Build a bitset with bits vals[0..n) set; sized to the largest value seen. */
template<typename T>
inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) {
  std::vector<uint32_t> ret;
  for (int i = 0; i < n; ++i) {
    int i1 = vals[i] / 32;
    int i2 = vals[i] % 32;
    if (static_cast<int>(ret.size()) < i1 + 1) {
      ret.resize(i1 + 1, 0);
    }
    ret[i1] |= (1 << i2);
  }
  return ret;
}

/*! \brief Test bit 'pos' in a bitset of n 32-bit words; positions beyond the last word return false. */
template<typename T>
inline static bool FindInBitset(const uint32_t* bits, int n, T pos) {
  int i1 = pos / 32;
  if (i1 >= n) {
    return false;
  }
  int i2 = pos % 32;
  return (bits[i1] >> i2) & 1;
}

/*! \brief True when b <= nextafter(a, +inf), i.e. b does not exceed a by more than one ULP. */
inline static bool CheckDoubleEqualOrdered(double a, double b) {
  double upper = std::nextafter(a, INFINITY);
  return b <= upper;
}

/*! \brief Smallest representable double strictly greater than a. */
inline static double GetDoubleUpperBound(double a) {
  return std::nextafter(a, INFINITY);;
}

/*! \brief Length of the current line: number of bytes before the first NUL, LF or CR. */
inline static size_t GetLine(const char* str) {
  auto start = str;
  while (*str != '\0' && *str != '\n' && *str != '\r') {
    ++str;
  }
  return str - start;
}

/*! \brief Skip one line terminator (CR, LF or CRLF) if present and return the advanced pointer. */
inline static const char* SkipNewLine(const char* str) {
  if (*str == '\r') {
    ++str;
  }
  if (*str == '\n') {
    ++str;
  }
  return str;
}

/*! \brief Sign of x: -1, 0 or +1. */
template <typename T>
static int Sign(T x) {
  return (x > T(0)) - (x < T(0));
}

/*! \brief log(x) for x > 0, otherwise -infinity (avoids the domain error). */
template <typename T>
static T SafeLog(T x) {
  if (x > 0) {
    return std::log(x);
  } else {
    return -INFINITY;
  }
}

/*! \brief True when every byte of s is 7-bit ASCII. */
inline bool CheckASCII(const std::string& s) {
  for (auto c : s) {
    if (static_cast<unsigned char>(c) > 127) {
      return false;
    }
  }
  return true;
}

}  // namespace Common

}  // namespace LightGBM

#endif   // LightGBM_UTILS_COMMON_FUN_H_
ThresholdFilter.h
/* * ErosionFilter.h * * Created on: 13.06.2016 * Author: Darius Malysiak */ #ifndef IMAGEPROCESSING_THRESHOLDFILTER_H_ #define IMAGEPROCESSING_THRESHOLDFILTER_H_ #include "../BaseObject.h" #include "../DataStructures/Matrix.h" #include "../DataStructures/Image.h" namespace Lazarus { template<typename T> class ThresholdFilter: public Lazarus::BaseObject { public: ThresholdFilter() { } virtual ~ThresholdFilter(){} /** * Will cut off all values outside (>,<) the range of 'limits', all values outside the given range will * be set to 'val'. * Returns the filtered image in case of success otherwise NULL. **/ Lazarus::Image<T>* filterImage( Lazarus::Image<T>* image, const Lazarus::ChannelLimits<T>& limits, T val ) { unsigned int image_width = image->getm_width(); unsigned int image_heigth = image->getm_height(); unsigned int channel_count = image->getm_channel_count(); Lazarus::Image<T>* output = new Lazarus::Image<T>( image_width, image_heigth, image->getm_data_alignment() ); #pragma omp parallel for for(unsigned int i=0; i<image_width; i++) { Lazarus::FastKTuple<T> new_color(channel_count); Lazarus::FastKTuple<T> color(channel_count); for(unsigned int j=0; j<image_heigth; j++) { image->getPixelFast(color,i,j); //over every color channel for(unsigned int c=0; c<channel_count; c++) { if( color[c] >= limits.m_min_values[c] && color[c] <= limits.m_max_values[c] ) { new_color.setElement(c,color[c]); } else { new_color.setElement(c,val); } } output->setPixelFast(&new_color,i,j); } } return output; } }; } #endif /* IMAGEPROCESSING_EROSIONFILTER_H_ */
convolution_3x3_pack1to4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv3x3s1_pack1to4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const float* bias = _bias; int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p+1); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); float32x4_t _bias1 = bias ? 
vld1q_f32((const float*)bias + (p+1) * 4) : vdupq_n_f32(0.f); out0.fill(_bias0); out1.fill(_bias1); const float* k0 = kernel.channel(p); const float* k1 = kernel.channel(p+1); for (int q=0; q<inch; q++) { float* outptr0 = out0; float* outptr1 = out1; const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); float32x4_t _k00_0 = vld1q_f32(k0); float32x4_t _k01_0 = vld1q_f32(k0+4); float32x4_t _k02_0 = vld1q_f32(k0+8); float32x4_t _k10_0 = vld1q_f32(k0+12); float32x4_t _k11_0 = vld1q_f32(k0+16); float32x4_t _k12_0 = vld1q_f32(k0+20); float32x4_t _k20_0 = vld1q_f32(k0+24); float32x4_t _k21_0 = vld1q_f32(k0+28); float32x4_t _k22_0 = vld1q_f32(k0+32); float32x4_t _k00_1 = vld1q_f32(k1); float32x4_t _k01_1 = vld1q_f32(k1+4); float32x4_t _k02_1 = vld1q_f32(k1+8); float32x4_t _k10_1 = vld1q_f32(k1+12); float32x4_t _k11_1 = vld1q_f32(k1+16); float32x4_t _k12_1 = vld1q_f32(k1+20); float32x4_t _k20_1 = vld1q_f32(k1+24); float32x4_t _k21_1 = vld1q_f32(k1+28); float32x4_t _k22_1 = vld1q_f32(k1+32); int i = 0; for (; i < outh; i++) { int j = 0; for (; j+3<outw; j+=4) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0] \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%1] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" "ld1 {v1.2s}, [%2] \n" "fmla v24.4s, %10.4s, v0.s[0] \n" "fmla v25.4s, %10.4s, v0.s[1] \n" "fmla v26.4s, %10.4s, v0.s[2] \n" "fmla v27.4s, %10.4s, v0.s[3] \n" "fmla v28.4s, %19.4s, v0.s[0] \n" "fmla v29.4s, %19.4s, v0.s[1] \n" "fmla v30.4s, %19.4s, v0.s[2] \n" "fmla v31.4s, %19.4s, v0.s[3] \n" "fmla v24.4s, %11.4s, v0.s[1] \n" "fmla v25.4s, %11.4s, v0.s[2] \n" "fmla v26.4s, %11.4s, v0.s[3] \n" "fmla v27.4s, %11.4s, v1.s[0] \n" "fmla v28.4s, %20.4s, v0.s[1] \n" "fmla v29.4s, %20.4s, v0.s[2] \n" "fmla v30.4s, %20.4s, v0.s[3] \n" "fmla v31.4s, %20.4s, v1.s[0] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v2.4s}, 
[%3], #16 \n" "ld1 {v3.2s}, [%3] \n" "fmla v24.4s, %12.4s, v0.s[2] \n" "fmla v25.4s, %12.4s, v0.s[3] \n" "fmla v26.4s, %12.4s, v1.s[0] \n" "fmla v27.4s, %12.4s, v1.s[1] \n" "fmla v28.4s, %21.4s, v0.s[2] \n" "fmla v29.4s, %21.4s, v0.s[3] \n" "fmla v30.4s, %21.4s, v1.s[0] \n" "fmla v31.4s, %21.4s, v1.s[1] \n" "fmla v24.4s, %13.4s, v2.s[0] \n" "fmla v25.4s, %13.4s, v2.s[1] \n" "fmla v26.4s, %13.4s, v2.s[2] \n" "fmla v27.4s, %13.4s, v2.s[3] \n" "fmla v28.4s, %22.4s, v2.s[0] \n" "fmla v29.4s, %22.4s, v2.s[1] \n" "fmla v30.4s, %22.4s, v2.s[2] \n" "fmla v31.4s, %22.4s, v2.s[3] \n" "fmla v24.4s, %14.4s, v2.s[1] \n" "fmla v25.4s, %14.4s, v2.s[2] \n" "fmla v26.4s, %14.4s, v2.s[3] \n" "fmla v27.4s, %14.4s, v3.s[0] \n" "fmla v28.4s, %23.4s, v2.s[1] \n" "fmla v29.4s, %23.4s, v2.s[2] \n" "fmla v30.4s, %23.4s, v2.s[3] \n" "fmla v31.4s, %23.4s, v3.s[0] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v0.4s}, [%4], #16 \n" "ld1 {v1.2s}, [%4] \n" "fmla v24.4s, %15.4s, v2.s[2] \n" "fmla v25.4s, %15.4s, v2.s[3] \n" "fmla v26.4s, %15.4s, v3.s[0] \n" "fmla v27.4s, %15.4s, v3.s[1] \n" "fmla v28.4s, %24.4s, v2.s[2] \n" "fmla v29.4s, %24.4s, v2.s[3] \n" "fmla v30.4s, %24.4s, v3.s[0] \n" "fmla v31.4s, %24.4s, v3.s[1] \n" "fmla v24.4s, %16.4s, v0.s[0] \n" "fmla v25.4s, %16.4s, v0.s[1] \n" "fmla v26.4s, %16.4s, v0.s[2] \n" "fmla v27.4s, %16.4s, v0.s[3] \n" "fmla v28.4s, %25.4s, v0.s[0] \n" "fmla v29.4s, %25.4s, v0.s[1] \n" "fmla v30.4s, %25.4s, v0.s[2] \n" "fmla v31.4s, %25.4s, v0.s[3] \n" "fmla v24.4s, %17.4s, v0.s[1] \n" "fmla v25.4s, %17.4s, v0.s[2] \n" "fmla v26.4s, %17.4s, v0.s[3] \n" "fmla v27.4s, %17.4s, v1.s[0] \n" "fmla v28.4s, %26.4s, v0.s[1] \n" "fmla v29.4s, %26.4s, v0.s[2] \n" "fmla v30.4s, %26.4s, v0.s[3] \n" "fmla v31.4s, %26.4s, v1.s[0] \n" "fmla v24.4s, %18.4s, v0.s[2] \n" "fmla v25.4s, %18.4s, v0.s[3] \n" "fmla v26.4s, %18.4s, v1.s[0] \n" "fmla v27.4s, %18.4s, v1.s[1] \n" "fmla v28.4s, %27.4s, v0.s[2] \n" "fmla v29.4s, %27.4s, v0.s[3] \n" "fmla v30.4s, %27.4s, v1.s[0] \n" "fmla 
v31.4s, %27.4s, v1.s[1] \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%1], #64 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "w"(_k00_0), // %10 "w"(_k01_0), // %11 "w"(_k02_0), // %12 "w"(_k10_0), // %13 "w"(_k11_0), // %14 "w"(_k12_0), // %15 "w"(_k20_0), // %16 "w"(_k21_0), // %17 "w"(_k22_0), // %18 "w"(_k00_1), // %19 "w"(_k01_1), // %20 "w"(_k02_1), // %21 "w"(_k10_1), // %22 "w"(_k11_1), // %23 "w"(_k12_1), // %24 "w"(_k20_1), // %25 "w"(_k21_1), // %26 "w"(_k22_1) // %27 : "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } for (; j+1<outw; j+=2) { asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v24.4s, v25.4s}, [%0] \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v26.4s, v27.4s}, [%1] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2] \n" "add %2, %2, #8 \n" "fmla v24.4s, %10.4s, v0.s[0] \n" "fmla v25.4s, %10.4s, v0.s[1] \n" "fmla v26.4s, %19.4s, v0.s[0] \n" "fmla v27.4s, %19.4s, v0.s[1] \n" "fmla v24.4s, %11.4s, v0.s[1] \n" "fmla v25.4s, %11.4s, v0.s[2] \n" "fmla v26.4s, %20.4s, v0.s[1] \n" "fmla v27.4s, %20.4s, v0.s[2] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v1.4s}, [%3] \n" "fmla v24.4s, %12.4s, v0.s[2] \n" "fmla v25.4s, %12.4s, v0.s[3] \n" "fmla v26.4s, %21.4s, v0.s[2] \n" "fmla v27.4s, %21.4s, v0.s[3] \n" "add %3, %3, #8 \n" "fmla v24.4s, %13.4s, v1.s[0] \n" "fmla v25.4s, %13.4s, v1.s[1] \n" "fmla v26.4s, %22.4s, v1.s[0] \n" "fmla v27.4s, %22.4s, v1.s[1] \n" "fmla v24.4s, %14.4s, v1.s[1] \n" "fmla v25.4s, %14.4s, v1.s[2] \n" "fmla v26.4s, %23.4s, v1.s[1] \n" "fmla v27.4s, %23.4s, v1.s[2] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v0.4s}, [%4] \n" "fmla v24.4s, %15.4s, v1.s[2] \n" "fmla v25.4s, %15.4s, v1.s[3] \n" "fmla v26.4s, %24.4s, v1.s[2] \n" "fmla v27.4s, %24.4s, v1.s[3] \n" "add %4, %4, #8 \n" "fmla v24.4s, %16.4s, v0.s[0] \n" "fmla v25.4s, 
%16.4s, v0.s[1] \n" "fmla v26.4s, %25.4s, v0.s[0] \n" "fmla v27.4s, %25.4s, v0.s[1] \n" "fmla v24.4s, %17.4s, v0.s[1] \n" "fmla v25.4s, %17.4s, v0.s[2] \n" "fmla v26.4s, %26.4s, v0.s[1] \n" "fmla v27.4s, %26.4s, v0.s[2] \n" "fmla v24.4s, %18.4s, v0.s[2] \n" "fmla v25.4s, %18.4s, v0.s[3] \n" "fmla v26.4s, %27.4s, v0.s[2] \n" "fmla v27.4s, %27.4s, v0.s[3] \n" "st1 {v24.4s, v25.4s}, [%0], #32 \n" "st1 {v26.4s, v27.4s}, [%1], #32 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(outptr0), "1"(outptr1), "2"(r0), "3"(r1), "4"(r2), "w"(_k00_0), // %10 "w"(_k01_0), // %11 "w"(_k02_0), // %12 "w"(_k10_0), // %13 "w"(_k11_0), // %14 "w"(_k12_0), // %15 "w"(_k20_0), // %16 "w"(_k21_0), // %17 "w"(_k22_0), // %18 "w"(_k00_1), // %19 "w"(_k01_1), // %20 "w"(_k02_1), // %21 "w"(_k10_1), // %22 "w"(_k11_1), // %23 "w"(_k12_1), // %24 "w"(_k20_1), // %25 "w"(_k21_1), // %26 "w"(_k22_1) // %27 : "memory", "v0", "v1", "v24", "v25", "v26", "v27" ); } for (; j<outw; j++) { float32x4_t _sum00 = vld1q_f32(outptr0); float32x4_t _sum10 = vld1q_f32(outptr1); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r2 = vld1q_f32(r2); _sum00 = vfmaq_laneq_f32(_sum00, _k00_0, _r0, 0); _sum00 = vfmaq_laneq_f32(_sum00, _k01_0, _r0, 1); _sum00 = vfmaq_laneq_f32(_sum00, _k02_0, _r0, 2); _sum00 = vfmaq_laneq_f32(_sum00, _k10_0, _r1, 0); _sum00 = vfmaq_laneq_f32(_sum00, _k11_0, _r1, 1); _sum00 = vfmaq_laneq_f32(_sum00, _k12_0, _r1, 2); _sum00 = vfmaq_laneq_f32(_sum00, _k20_0, _r2, 0); _sum00 = vfmaq_laneq_f32(_sum00, _k21_0, _r2, 1); _sum00 = vfmaq_laneq_f32(_sum00, _k22_0, _r2, 2); _sum10 = vfmaq_laneq_f32(_sum10, _k00_1, _r0, 0); _sum10 = vfmaq_laneq_f32(_sum10, _k01_1, _r0, 1); _sum10 = vfmaq_laneq_f32(_sum10, _k02_1, _r0, 2); _sum10 = vfmaq_laneq_f32(_sum10, _k10_1, _r1, 0); _sum10 = vfmaq_laneq_f32(_sum10, _k11_1, _r1, 1); _sum10 = vfmaq_laneq_f32(_sum10, _k12_1, _r1, 2); _sum10 = vfmaq_laneq_f32(_sum10, _k20_1, _r2, 0); 
_sum10 = vfmaq_laneq_f32(_sum10, _k21_1, _r2, 1); _sum10 = vfmaq_laneq_f32(_sum10, _k22_1, _r2, 2); vst1q_f32(outptr0, _sum00); vst1q_f32(outptr1, _sum10); r0 += 1; r1 += 1; r2 += 1; outptr0 += 4; outptr1 += 4; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9*4; k1 += 9*4; } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out0 = top_blob.channel(p); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); out0.fill(_bias0); const float* k0 = kernel.channel(p); for (int q=0; q<inch; q++) { float* outptr0 = out0.row(0); const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0+4); float32x4_t _k02 = vld1q_f32(k0+8); float32x4_t _k10 = vld1q_f32(k0+12); float32x4_t _k11 = vld1q_f32(k0+16); float32x4_t _k12 = vld1q_f32(k0+20); float32x4_t _k20 = vld1q_f32(k0+24); float32x4_t _k21 = vld1q_f32(k0+28); float32x4_t _k22 = vld1q_f32(k0+32); int i = 0; for (; i < outh; i++) { int j = 0; #if __aarch64__ for (; j+7<outw; j+=8) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n" "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.4s, v1.4s}, [%1], #32 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0] \n" "fmla v24.4s, %8.4s, v0.s[0] \n" "fmla v25.4s, %8.4s, v0.s[1] \n" "fmla v26.4s, %8.4s, v0.s[2] \n" "fmla v27.4s, %8.4s, v0.s[3] \n" "fmla v28.4s, %8.4s, v1.s[0] \n" "fmla v29.4s, %8.4s, v1.s[1] \n" "fmla v30.4s, %8.4s, v1.s[2] \n" "fmla v31.4s, %8.4s, v1.s[3] \n" "ld1 {v2.2s}, [%1] \n" "fmla v24.4s, %9.4s, v0.s[1] \n" "fmla v25.4s, %9.4s, v0.s[2] \n" "fmla v26.4s, %9.4s, v0.s[3] \n" "fmla v27.4s, %9.4s, v1.s[0] \n" "fmla v28.4s, %9.4s, v1.s[1] \n" "fmla v29.4s, %9.4s, v1.s[2] \n" "fmla v30.4s, %9.4s, v1.s[3] \n" "fmla v31.4s, %9.4s, v2.s[0] \n" "prfm 
pldl1keep, [%2, #256] \n" "ld1 {v4.4s, v5.4s}, [%2], #32 \n" "fmla v24.4s, %10.4s, v0.s[2] \n" "fmla v25.4s, %10.4s, v0.s[3] \n" "fmla v26.4s, %10.4s, v1.s[0] \n" "fmla v27.4s, %10.4s, v1.s[1] \n" "fmla v28.4s, %10.4s, v1.s[2] \n" "fmla v29.4s, %10.4s, v1.s[3] \n" "fmla v30.4s, %10.4s, v2.s[0] \n" "fmla v31.4s, %10.4s, v2.s[1] \n" "ld1 {v2.2s}, [%2] \n" "fmla v24.4s, %11.4s, v4.s[0] \n" "fmla v25.4s, %11.4s, v4.s[1] \n" "fmla v26.4s, %11.4s, v4.s[2] \n" "fmla v27.4s, %11.4s, v4.s[3] \n" "fmla v28.4s, %11.4s, v5.s[0] \n" "fmla v29.4s, %11.4s, v5.s[1] \n" "fmla v30.4s, %11.4s, v5.s[2] \n" "fmla v31.4s, %11.4s, v5.s[3] \n" "fmla v24.4s, %12.4s, v4.s[1] \n" "fmla v25.4s, %12.4s, v4.s[2] \n" "fmla v26.4s, %12.4s, v4.s[3] \n" "fmla v27.4s, %12.4s, v5.s[0] \n" "fmla v28.4s, %12.4s, v5.s[1] \n" "fmla v29.4s, %12.4s, v5.s[2] \n" "fmla v30.4s, %12.4s, v5.s[3] \n" "fmla v31.4s, %12.4s, v2.s[0] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4s, v1.4s}, [%3], #32 \n" "fmla v24.4s, %13.4s, v4.s[2] \n" "fmla v25.4s, %13.4s, v4.s[3] \n" "fmla v26.4s, %13.4s, v5.s[0] \n" "fmla v27.4s, %13.4s, v5.s[1] \n" "fmla v28.4s, %13.4s, v5.s[2] \n" "fmla v29.4s, %13.4s, v5.s[3] \n" "fmla v30.4s, %13.4s, v2.s[0] \n" "fmla v31.4s, %13.4s, v2.s[1] \n" "ld1 {v2.2s}, [%3] \n" "fmla v24.4s, %14.4s, v0.s[0] \n" "fmla v25.4s, %14.4s, v0.s[1] \n" "fmla v26.4s, %14.4s, v0.s[2] \n" "fmla v27.4s, %14.4s, v0.s[3] \n" "fmla v28.4s, %14.4s, v1.s[0] \n" "fmla v29.4s, %14.4s, v1.s[1] \n" "fmla v30.4s, %14.4s, v1.s[2] \n" "fmla v31.4s, %14.4s, v1.s[3] \n" "fmla v24.4s, %15.4s, v0.s[1] \n" "fmla v25.4s, %15.4s, v0.s[2] \n" "fmla v26.4s, %15.4s, v0.s[3] \n" "fmla v27.4s, %15.4s, v1.s[0] \n" "fmla v28.4s, %15.4s, v1.s[1] \n" "fmla v29.4s, %15.4s, v1.s[2] \n" "fmla v30.4s, %15.4s, v1.s[3] \n" "fmla v31.4s, %15.4s, v2.s[0] \n" "sub %0, %0, #64 \n" "fmla v24.4s, %16.4s, v0.s[2] \n" "fmla v25.4s, %16.4s, v0.s[3] \n" "fmla v26.4s, %16.4s, v1.s[0] \n" "fmla v27.4s, %16.4s, v1.s[1] \n" "fmla v28.4s, %16.4s, v1.s[2] \n" 
"fmla v29.4s, %16.4s, v1.s[3] \n" "fmla v30.4s, %16.4s, v2.s[0] \n" "fmla v31.4s, %16.4s, v2.s[1] \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v2", "v4", "v5", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); } #endif // __aarch64__ for (; j+3<outw; j+=4) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0] \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.4s}, [%1], #16 \n" "fmla v24.4s, %8.4s, v0.s[0] \n" "fmla v25.4s, %8.4s, v0.s[1] \n" "fmla v26.4s, %8.4s, v0.s[2] \n" "fmla v27.4s, %8.4s, v0.s[3] \n" "ld1 {v1.2s}, [%1] \n" "fmla v24.4s, %9.4s, v0.s[1] \n" "fmla v25.4s, %9.4s, v0.s[2] \n" "fmla v26.4s, %9.4s, v0.s[3] \n" "fmla v27.4s, %9.4s, v1.s[0] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v2.4s}, [%2], #16 \n" "fmla v24.4s, %10.4s, v0.s[2] \n" "fmla v25.4s, %10.4s, v0.s[3] \n" "fmla v26.4s, %10.4s, v1.s[0] \n" "fmla v27.4s, %10.4s, v1.s[1] \n" "ld1 {v3.2s}, [%2] \n" "fmla v24.4s, %11.4s, v2.s[0] \n" "fmla v25.4s, %11.4s, v2.s[1] \n" "fmla v26.4s, %11.4s, v2.s[2] \n" "fmla v27.4s, %11.4s, v2.s[3] \n" "fmla v24.4s, %12.4s, v2.s[1] \n" "fmla v25.4s, %12.4s, v2.s[2] \n" "fmla v26.4s, %12.4s, v2.s[3] \n" "fmla v27.4s, %12.4s, v3.s[0] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4s}, [%3], #16 \n" "fmla v24.4s, %13.4s, v2.s[2] \n" "fmla v25.4s, %13.4s, v2.s[3] \n" "fmla v26.4s, %13.4s, v3.s[0] \n" "fmla v27.4s, %13.4s, v3.s[1] \n" "ld1 {v1.2s}, [%3] \n" "fmla v24.4s, %14.4s, v0.s[0] \n" "fmla v25.4s, %14.4s, v0.s[1] \n" "fmla v26.4s, %14.4s, v0.s[2] \n" "fmla v27.4s, %14.4s, v0.s[3] \n" "fmla v24.4s, %15.4s, v0.s[1] \n" "fmla v25.4s, %15.4s, v0.s[2] \n" 
"fmla v26.4s, %15.4s, v0.s[3] \n" "fmla v27.4s, %15.4s, v1.s[0] \n" "fmla v24.4s, %16.4s, v0.s[2] \n" "fmla v25.4s, %16.4s, v0.s[3] \n" "fmla v26.4s, %16.4s, v1.s[0] \n" "fmla v27.4s, %16.4s, v1.s[1] \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27" ); #else // __aarch64__ asm volatile( "pld [%0, #512] \n" "vldm %0, {d24-d31} \n" "pld [%1, #128] \n" "vld1.f32 {d0-d1}, [%1]! \n" "vmla.f32 q12, %q8, d0[0] \n" "vmla.f32 q13, %q8, d0[1] \n" "vmla.f32 q14, %q8, d1[0] \n" "vmla.f32 q15, %q8, d1[1] \n" "vld1.f32 {d2}, [%1] \n" "vmla.f32 q12, %q9, d0[1] \n" "vmla.f32 q13, %q9, d1[0] \n" "vmla.f32 q14, %q9, d1[1] \n" "vmla.f32 q15, %q9, d2[0] \n" "pld [%2, #128] \n" "vld1.f32 {d4-d5}, [%2]! \n" "vmla.f32 q12, %q10, d1[0] \n" "vmla.f32 q13, %q10, d1[1] \n" "vmla.f32 q14, %q10, d2[0] \n" "vmla.f32 q15, %q10, d2[1] \n" "vmla.f32 q12, %q11, d4[0] \n" "vmla.f32 q13, %q11, d4[1] \n" "vmla.f32 q14, %q11, d5[0] \n" "vmla.f32 q15, %q11, d5[1] \n" "vld1.f32 {d3}, [%2] \n" "vmla.f32 q12, %q12, d4[1] \n" "vmla.f32 q13, %q12, d5[0] \n" "vmla.f32 q14, %q12, d5[1] \n" "vmla.f32 q15, %q12, d3[0] \n" "pld [%3, #128] \n" "vld1.f32 {d0-d1}, [%3]! 
\n" "vmla.f32 q12, %q13, d5[0] \n" "vmla.f32 q13, %q13, d5[1] \n" "vmla.f32 q14, %q13, d3[0] \n" "vmla.f32 q15, %q13, d3[1] \n" "vmla.f32 q12, %q14, d0[0] \n" "vmla.f32 q13, %q14, d0[1] \n" "vmla.f32 q14, %q14, d1[0] \n" "vmla.f32 q15, %q14, d1[1] \n" "vld1.f32 {d2}, [%3] \n" "vmla.f32 q12, %q15, d0[1] \n" "vmla.f32 q13, %q15, d1[0] \n" "vmla.f32 q14, %q15, d1[1] \n" "vmla.f32 q15, %q15, d2[0] \n" "vmla.f32 q12, %q16, d1[0] \n" "vmla.f32 q13, %q16, d1[1] \n" "vmla.f32 q14, %q16, d2[0] \n" "vmla.f32 q15, %q16, d2[1] \n" "vstm %0!, {d24-d31} \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "q0", "q1", "q2", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } for (; j+1<outw; j+=2) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v24.4s, v25.4s}, [%0] \n" "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.4s}, [%1] \n" "fmul v26.4s, %8.4s, v0.s[0] \n" "fmul v27.4s, %8.4s, v0.s[1] \n" "fmla v24.4s, %9.4s, v0.s[1] \n" "fmla v25.4s, %9.4s, v0.s[2] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v1.4s}, [%2] \n" "fmla v26.4s, %10.4s, v0.s[2] \n" "fmla v27.4s, %10.4s, v0.s[3] \n" "fmla v24.4s, %11.4s, v1.s[0] \n" "fmla v25.4s, %11.4s, v1.s[1] \n" "add %1, %1, #8 \n" "fmla v26.4s, %12.4s, v1.s[1] \n" "fmla v27.4s, %12.4s, v1.s[2] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4s}, [%3] \n" "fmla v24.4s, %13.4s, v1.s[2] \n" "fmla v25.4s, %13.4s, v1.s[3] \n" "fmla v26.4s, %14.4s, v0.s[0] \n" "fmla v27.4s, %14.4s, v0.s[1] \n" "add %2, %2, #8 \n" "fmla v24.4s, %15.4s, v0.s[1] \n" "fmla v25.4s, %15.4s, v0.s[2] \n" "fmla v26.4s, %16.4s, v0.s[2] \n" "fmla v27.4s, %16.4s, v0.s[3] \n" "add %3, %3, #8 \n" "fadd v24.4s, v24.4s, v26.4s \n" "fadd v25.4s, v25.4s, v27.4s \n" "st1 {v24.4s, v25.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 
"=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "v0", "v1", "v24", "v25", "v26", "v27" ); #else // __aarch64__ asm volatile( "pld [%0, #256] \n" "vld1.f32 {d24-d27}, [%0 :128] \n" "pld [%1, #128] \n" "vld1.f32 {d0-d1}, [%1] \n" "vmul.f32 q14, %q8, d0[0] \n" "vmul.f32 q15, %q8, d0[1] \n" "vmla.f32 q12, %q9, d0[1] \n" "vmla.f32 q13, %q9, d1[0] \n" "pld [%2, #128] \n" "vld1.f32 {d2-d3}, [%2] \n" "vmla.f32 q14, %q10, d1[0] \n" "vmla.f32 q15, %q10, d1[1] \n" "vmla.f32 q12, %q11, d2[0] \n" "vmla.f32 q13, %q11, d2[1] \n" "add %1, %1, #8 \n" "vmla.f32 q14, %q12, d2[1] \n" "vmla.f32 q15, %q12, d3[0] \n" "pld [%3, #128] \n" "vld1.f32 {d0-d1}, [%3] \n" "vmla.f32 q12, %q13, d3[0] \n" "vmla.f32 q13, %q13, d3[1] \n" "vmla.f32 q14, %q14, d0[0] \n" "vmla.f32 q15, %q14, d0[1] \n" "add %2, %2, #8 \n" "vmla.f32 q12, %q15, d0[1] \n" "vmla.f32 q13, %q15, d1[0] \n" "vmla.f32 q14, %q16, d1[0] \n" "vmla.f32 q15, %q16, d1[1] \n" "add %3, %3, #8 \n" "vadd.f32 q12, q12, q14 \n" "vadd.f32 q13, q13, q15 \n" "vst1.f32 {d24-d27}, [%0 :128]! 
\n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2) // %3 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "w"(_k00), // %8 "w"(_k01), // %9 "w"(_k02), // %10 "w"(_k10), // %11 "w"(_k11), // %12 "w"(_k12), // %13 "w"(_k20), // %14 "w"(_k21), // %15 "w"(_k22) // %16 : "memory", "q0", "q1", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ } for (; j<outw; j++) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r2 = vld1q_f32(r2); #if __aarch64__ _sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k01, _r0, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2); #else _sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1); _sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1); _sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0); _sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1); _sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0); #endif vst1q_f32(outptr0, _sum0); r0 += 1; r1 += 1; r2 += 1; outptr0 += 4; } r0 += 2; r1 += 2; r2 += 2; } k0 += 9*4; } } } static void conv3x3s2_pack1to4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = w - 2*outw + w; const float* bias = _bias; int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ 
nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 2; Mat out0 = top_blob.channel(p); Mat out1 = top_blob.channel(p+1); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); float32x4_t _bias1 = bias ? vld1q_f32((const float*)bias + (p+1) * 4) : vdupq_n_f32(0.f); out0.fill(_bias0); out1.fill(_bias1); const float* k0 = kernel.channel(p); const float* k1 = kernel.channel(p+1); for (int q=0; q<inch; q++) { float* outptr0 = out0; float* outptr1 = out1; const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); float32x4_t _k00_0 = vld1q_f32(k0); float32x4_t _k01_0 = vld1q_f32(k0+4); float32x4_t _k02_0 = vld1q_f32(k0+8); float32x4_t _k10_0 = vld1q_f32(k0+12); float32x4_t _k11_0 = vld1q_f32(k0+16); float32x4_t _k12_0 = vld1q_f32(k0+20); float32x4_t _k20_0 = vld1q_f32(k0+24); float32x4_t _k21_0 = vld1q_f32(k0+28); float32x4_t _k22_0 = vld1q_f32(k0+32); float32x4_t _k00_1 = vld1q_f32(k1); float32x4_t _k01_1 = vld1q_f32(k1+4); float32x4_t _k02_1 = vld1q_f32(k1+8); float32x4_t _k10_1 = vld1q_f32(k1+12); float32x4_t _k11_1 = vld1q_f32(k1+16); float32x4_t _k12_1 = vld1q_f32(k1+20); float32x4_t _k20_1 = vld1q_f32(k1+24); float32x4_t _k21_1 = vld1q_f32(k1+28); float32x4_t _k22_1 = vld1q_f32(k1+32); int i = 0; for (; i < outh; i++) { int nn = outw >> 2; int remain = outw & 3; if (nn > 0) { asm volatile( "0: \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1] \n"// sum0 // r0 "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4s, v1.4s}, [%3], #32 \n" "ld1r {v4.4s}, [%3] \n" "fmla v6.4s, %12.4s, v0.s[0] \n" "fmla v7.4s, %12.4s, v0.s[2] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2] \n"// sum1 "fmla v8.4s, %12.4s, v1.s[0] \n" "fmla v9.4s, %12.4s, v1.s[2] \n" "fmla v10.4s, %21.4s, v0.s[0] \n" "fmla v11.4s, %21.4s, v0.s[2] 
\n" "fmla v12.4s, %21.4s, v1.s[0] \n" "fmla v13.4s, %21.4s, v1.s[2] \n" "fmla v6.4s, %13.4s, v0.s[1] \n" "fmla v7.4s, %13.4s, v0.s[3] \n" "fmla v8.4s, %13.4s, v1.s[1] \n" "fmla v9.4s, %13.4s, v1.s[3] \n" "fmla v10.4s, %22.4s, v0.s[1] \n" "fmla v11.4s, %22.4s, v0.s[3] \n" "fmla v12.4s, %22.4s, v1.s[1] \n" "fmla v13.4s, %22.4s, v1.s[3] \n" // r1 "prfm pldl1keep, [%4, #256] \n" "ld1 {v2.4s, v3.4s}, [%4], #32 \n" "ld1r {v5.4s}, [%4] \n" "fmla v6.4s, %14.4s, v0.s[2] \n" "fmla v7.4s, %14.4s, v1.s[0] \n" "fmla v8.4s, %14.4s, v1.s[2] \n" "fmla v9.4s, %14.4s, v4.s[0] \n" "fmla v10.4s, %23.4s, v0.s[2] \n" "fmla v11.4s, %23.4s, v1.s[0] \n" "fmla v12.4s, %23.4s, v1.s[2] \n" "fmla v13.4s, %23.4s, v4.s[0] \n" "fmla v6.4s, %15.4s, v2.s[0] \n" "fmla v7.4s, %15.4s, v2.s[2] \n" "fmla v8.4s, %15.4s, v3.s[0] \n" "fmla v9.4s, %15.4s, v3.s[2] \n" "fmla v10.4s, %24.4s, v2.s[0] \n" "fmla v11.4s, %24.4s, v2.s[2] \n" "fmla v12.4s, %24.4s, v3.s[0] \n" "fmla v13.4s, %24.4s, v3.s[2] \n" "fmla v6.4s, %16.4s, v2.s[1] \n" "fmla v7.4s, %16.4s, v2.s[3] \n" "fmla v8.4s, %16.4s, v3.s[1] \n" "fmla v9.4s, %16.4s, v3.s[3] \n" "fmla v10.4s, %25.4s, v2.s[1] \n" "fmla v11.4s, %25.4s, v2.s[3] \n" "fmla v12.4s, %25.4s, v3.s[1] \n" "fmla v13.4s, %25.4s, v3.s[3] \n" // r2 "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.4s, v1.4s}, [%5], #32 \n" "ld1r {v4.4s}, [%5] \n" "fmla v6.4s, %17.4s, v2.s[2] \n" "fmla v7.4s, %17.4s, v3.s[0] \n" "fmla v8.4s, %17.4s, v3.s[2] \n" "fmla v9.4s, %17.4s, v5.s[0] \n" "fmla v10.4s, %26.4s, v2.s[2] \n" "fmla v11.4s, %26.4s, v3.s[0] \n" "fmla v12.4s, %26.4s, v3.s[2] \n" "fmla v13.4s, %26.4s, v5.s[0] \n" "fmla v6.4s, %18.4s, v0.s[0] \n" "fmla v7.4s, %18.4s, v0.s[2] \n" "fmla v8.4s, %18.4s, v1.s[0] \n" "fmla v9.4s, %18.4s, v1.s[2] \n" "fmla v10.4s, %27.4s, v0.s[0] \n" "fmla v11.4s, %27.4s, v0.s[2] \n" "fmla v12.4s, %27.4s, v1.s[0] \n" "fmla v13.4s, %27.4s, v1.s[2] \n" "fmla v6.4s, %19.4s, v0.s[1] \n" "fmla v7.4s, %19.4s, v0.s[3] \n" "fmla v8.4s, %19.4s, v1.s[1] \n" "fmla v9.4s, %19.4s, 
v1.s[3] \n" "fmla v10.4s, %28.4s, v0.s[1] \n" "fmla v11.4s, %28.4s, v0.s[3] \n" "fmla v12.4s, %28.4s, v1.s[1] \n" "fmla v13.4s, %28.4s, v1.s[3] \n" "fmla v6.4s, %20.4s, v0.s[2] \n" "fmla v7.4s, %20.4s, v1.s[0] \n" "fmla v8.4s, %20.4s, v1.s[2] \n" "fmla v9.4s, %20.4s, v4.s[0] \n" "fmla v10.4s, %29.4s, v0.s[2] \n" "fmla v11.4s, %29.4s, v1.s[0] \n" "fmla v12.4s, %29.4s, v1.s[2] \n" "fmla v13.4s, %29.4s, v4.s[0] \n" "subs %w0, %w0, #1 \n" "st1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1], #64 \n" "st1 {v10.4s, v11.4s, v12.4s, v13.4s}, [%2], #64 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(r0), // %3 "=r"(r1), // %4 "=r"(r2) // %5 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(r0), "4"(r1), "5"(r2), "w"(_k00_0), // %12 "w"(_k01_0), // %13 "w"(_k02_0), // %14 "w"(_k10_0), // %15 "w"(_k11_0), // %16 "w"(_k12_0), // %17 "w"(_k20_0), // %18 "w"(_k21_0), // %19 "w"(_k22_0), // %20 "w"(_k00_1), // %21 "w"(_k01_1), // %22 "w"(_k02_1), // %23 "w"(_k10_1), // %24 "w"(_k11_1), // %25 "w"(_k12_1), // %26 "w"(_k20_1), // %27 "w"(_k21_1), // %28 "w"(_k22_1) // %29 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13" ); } for (; remain>0; remain--) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _sum1 = vld1q_f32(outptr1); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r2 = vld1q_f32(r2); _sum0 = vfmaq_laneq_f32(_sum0, _k00_0, _r0, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k01_0, _r0, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k02_0, _r0, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k10_0, _r1, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k11_0, _r1, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k12_0, _r1, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k20_0, _r2, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k21_0, _r2, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k22_0, _r2, 2); _sum1 = vfmaq_laneq_f32(_sum1, _k00_1, _r0, 0); _sum1 = vfmaq_laneq_f32(_sum1, _k01_1, _r0, 1); _sum1 = vfmaq_laneq_f32(_sum1, _k02_1, _r0, 2); _sum1 = 
vfmaq_laneq_f32(_sum1, _k10_1, _r1, 0); _sum1 = vfmaq_laneq_f32(_sum1, _k11_1, _r1, 1); _sum1 = vfmaq_laneq_f32(_sum1, _k12_1, _r1, 2); _sum1 = vfmaq_laneq_f32(_sum1, _k20_1, _r2, 0); _sum1 = vfmaq_laneq_f32(_sum1, _k21_1, _r2, 1); _sum1 = vfmaq_laneq_f32(_sum1, _k22_1, _r2, 2); vst1q_f32(outptr0, _sum0); vst1q_f32(outptr1, _sum1); r0 += 2; r1 += 2; r2 += 2; outptr0 += 4; outptr1 += 4; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } k0 += 9*4; k1 += 9*4; } } #endif // __ARM_NEON && __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out0 = top_blob.channel(p); float32x4_t _bias0 = bias ? vld1q_f32((const float*)bias + p * 4) : vdupq_n_f32(0.f); out0.fill(_bias0); const float* k0 = kernel.channel(p); for (int q=0; q<inch; q++) { float* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const float* r0 = img0.row(0); const float* r1 = img0.row(1); const float* r2 = img0.row(2); float32x4_t _k00 = vld1q_f32(k0); float32x4_t _k01 = vld1q_f32(k0+4); float32x4_t _k02 = vld1q_f32(k0+8); float32x4_t _k10 = vld1q_f32(k0+12); float32x4_t _k11 = vld1q_f32(k0+16); float32x4_t _k12 = vld1q_f32(k0+20); float32x4_t _k20 = vld1q_f32(k0+24); float32x4_t _k21 = vld1q_f32(k0+28); float32x4_t _k22 = vld1q_f32(k0+32); int i = 0; for (; i < outh; i++) { int nn = outw >> 2; int remain = outw & 3; #if __aarch64__ if (nn > 0) { asm volatile( "0: \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1] \n"// sum0 // r0 "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4s, v1.4s}, [%2], #32 \n" "ld1r {v4.4s}, [%2] \n" "fmla v6.4s, %10.4s, v0.s[0] \n" "fmla v7.4s, %10.4s, v0.s[2] \n" "fmla v8.4s, %10.4s, v1.s[0] \n" "fmla v9.4s, %10.4s, v1.s[2] \n" "fmla v6.4s, %11.4s, v0.s[1] \n" "fmla v7.4s, %11.4s, v0.s[3] \n" "fmla v8.4s, %11.4s, v1.s[1] \n" "fmla v9.4s, %11.4s, v1.s[3] \n" // r1 "prfm pldl1keep, [%3, #256] \n" "ld1 {v2.4s, v3.4s}, [%3], #32 \n" "ld1r {v5.4s}, [%3] \n" "fmla v6.4s, %12.4s, v0.s[2] \n" 
"fmla v7.4s, %12.4s, v1.s[0] \n" "fmla v8.4s, %12.4s, v1.s[2] \n" "fmla v9.4s, %12.4s, v4.s[0] \n" "fmla v6.4s, %13.4s, v2.s[0] \n" "fmla v7.4s, %13.4s, v2.s[2] \n" "fmla v8.4s, %13.4s, v3.s[0] \n" "fmla v9.4s, %13.4s, v3.s[2] \n" "fmla v6.4s, %14.4s, v2.s[1] \n" "fmla v7.4s, %14.4s, v2.s[3] \n" "fmla v8.4s, %14.4s, v3.s[1] \n" "fmla v9.4s, %14.4s, v3.s[3] \n" // r2 "prfm pldl1keep, [%4, #256] \n" "ld1 {v0.4s, v1.4s}, [%4], #32 \n" "ld1r {v4.4s}, [%4] \n" "fmla v6.4s, %15.4s, v2.s[2] \n" "fmla v7.4s, %15.4s, v3.s[0] \n" "fmla v8.4s, %15.4s, v3.s[2] \n" "fmla v9.4s, %15.4s, v5.s[0] \n" "fmla v6.4s, %16.4s, v0.s[0] \n" "fmla v7.4s, %16.4s, v0.s[2] \n" "fmla v8.4s, %16.4s, v1.s[0] \n" "fmla v9.4s, %16.4s, v1.s[2] \n" "fmla v6.4s, %17.4s, v0.s[1] \n" "fmla v7.4s, %17.4s, v0.s[3] \n" "fmla v8.4s, %17.4s, v1.s[1] \n" "fmla v9.4s, %17.4s, v1.s[3] \n" "fmla v6.4s, %18.4s, v0.s[2] \n" "fmla v7.4s, %18.4s, v1.s[0] \n" "fmla v8.4s, %18.4s, v1.s[2] \n" "fmla v9.4s, %18.4s, v4.s[0] \n" "subs %w0, %w0, #1 \n" "st1 {v6.4s, v7.4s, v8.4s, v9.4s}, [%1], #64 \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 "w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // %18 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9" ); } #else // __aarch64__ if (nn > 0) { asm volatile( "0: \n" "pld [%1, #512] \n" "vldm %1, {d0-d7} \n"// sum0 // r0 "pld [%2, #256] \n" "vld1.f32 {d8-d11}, [%2]! 
\n" "vld1.f32 {d12[]}, [%2] \n" "vmla.f32 q0, %q10, d8[0] \n" "vmla.f32 q1, %q10, d9[0] \n" "vmla.f32 q2, %q10, d10[0] \n" "vmla.f32 q3, %q10, d11[0] \n" "vmla.f32 q0, %q11, d8[1] \n" "vmla.f32 q1, %q11, d9[1] \n" "vmla.f32 q2, %q11, d10[1] \n" "vmla.f32 q3, %q11, d11[1] \n" "vmla.f32 q0, %q12, d9[0] \n" "vmla.f32 q1, %q12, d10[0] \n" "vmla.f32 q2, %q12, d11[0] \n" // r1 "pld [%3, #256] \n" "vld1.f32 {d8-d11}, [%3]! \n" "vld1.f32 {d13[]}, [%3] \n" "vmla.f32 q3, %q12, d12[0] \n" "vmla.f32 q0, %q13, d8[0] \n" "vmla.f32 q1, %q13, d9[0] \n" "vmla.f32 q2, %q13, d10[0] \n" "vmla.f32 q3, %q13, d11[0] \n" "vmla.f32 q0, %q14, d8[1] \n" "vmla.f32 q1, %q14, d9[1] \n" "vmla.f32 q2, %q14, d10[1] \n" "vmla.f32 q3, %q14, d11[1] \n" "vmla.f32 q0, %q15, d9[0] \n" "vmla.f32 q1, %q15, d10[0] \n" "vmla.f32 q2, %q15, d11[0] \n" // r2 "pld [%4, #256] \n" "vld1.f32 {d8-d11}, [%4]! \n" "vld1.f32 {d12[]}, [%4] \n" "vmla.f32 q3, %q15, d13[0] \n" "vmla.f32 q0, %q16, d8[0] \n" "vmla.f32 q1, %q16, d9[0] \n" "vmla.f32 q2, %q16, d10[0] \n" "vmla.f32 q3, %q16, d11[0] \n" "vmla.f32 q0, %q17, d8[1] \n" "vmla.f32 q1, %q17, d9[1] \n" "vmla.f32 q2, %q17, d10[1] \n" "vmla.f32 q3, %q17, d11[1] \n" "vmla.f32 q0, %q18, d9[0] \n" "vmla.f32 q1, %q18, d10[0] \n" "vmla.f32 q2, %q18, d11[0] \n" "vmla.f32 q3, %q18, d12[0] \n" "subs %0, %0, #1 \n" "vstm %1!, {d0-d7} \n" "bne 0b \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(r0), // %2 "=r"(r1), // %3 "=r"(r2) // %4 : "0"(nn), "1"(outptr0), "2"(r0), "3"(r1), "4"(r2), "w"(_k00), // %10 "w"(_k01), // %11 "w"(_k02), // %12 "w"(_k10), // %13 "w"(_k11), // %14 "w"(_k12), // %15 "w"(_k20), // %16 "w"(_k21), // %17 "w"(_k22) // %18 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6" ); } #endif // __aarch64__ for (; remain>0; remain--) { float32x4_t _sum0 = vld1q_f32(outptr0); float32x4_t _r0 = vld1q_f32(r0); float32x4_t _r1 = vld1q_f32(r1); float32x4_t _r2 = vld1q_f32(r2); #if __aarch64__ _sum0 = vfmaq_laneq_f32(_sum0, _k00, _r0, 0); _sum0 = 
vfmaq_laneq_f32(_sum0, _k01, _r0, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k02, _r0, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k10, _r1, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k11, _r1, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k12, _r1, 2); _sum0 = vfmaq_laneq_f32(_sum0, _k20, _r2, 0); _sum0 = vfmaq_laneq_f32(_sum0, _k21, _r2, 1); _sum0 = vfmaq_laneq_f32(_sum0, _k22, _r2, 2); #else _sum0 = vmlaq_lane_f32(_sum0, _k00, vget_low_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k01, vget_low_f32(_r0), 1); _sum0 = vmlaq_lane_f32(_sum0, _k02, vget_high_f32(_r0), 0); _sum0 = vmlaq_lane_f32(_sum0, _k10, vget_low_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, _k11, vget_low_f32(_r1), 1); _sum0 = vmlaq_lane_f32(_sum0, _k12, vget_high_f32(_r1), 0); _sum0 = vmlaq_lane_f32(_sum0, _k20, vget_low_f32(_r2), 0); _sum0 = vmlaq_lane_f32(_sum0, _k21, vget_low_f32(_r2), 1); _sum0 = vmlaq_lane_f32(_sum0, _k22, vget_high_f32(_r2), 0); #endif vst1q_f32(outptr0, _sum0); r0 += 2; r1 += 2; r2 += 2; outptr0 += 4; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } k0 += 9*4; } } }
DRACC_OMP_021_Large_Data_Copy_no.c
/* Matrix Addition with large matrices, and copying them whole. All Matrices
 * are too big to fit on the accelerator whole, resulting in a segmentation
 * fault. Executes in host fallback.
 *
 * NOTE: this is a benchmark file; the oversized target mapping below is the
 * intentional behavior under test — do not "fix" the mapping itself. */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdint.h>
#include <omp.h>   /* required for omp_is_initial_device(); was missing,
                      leaving the call implicitly declared */

/* Matrix dimension: C x C elements per matrix (~21 GB of int64_t each). */
#define C 51200L

int64_t *a;
int64_t *b;
int64_t *c;

/* Fill a and b with ones and clear c. Returns 0 (no failure mode). */
int init(){
	for(int64_t i=0; i<C; i++){
		for(int64_t j=0; j<C; j++){
			a[j+i*C]=1;
			b[j+i*C]=1;
			c[j+i*C]=0;
		}
	}
	return 0;
}

/* Element-wise c = a + b inside a target region that maps all three
 * matrices whole — deliberately larger than typical device memory. */
int add(){
#pragma omp target map(to:a[0:C*C],b[0:C*C]) map(from:c[0:C*C]) device(0)
	{
		/* Reports whether the region fell back to host execution. */
#pragma omp teams
		printf("Executed on host: %s\n",omp_is_initial_device() ? "true" : "false");
#pragma omp teams distribute parallel for collapse(2)
		for(int64_t i=0; i<C; i++){
			for(int64_t j=0; j<C; j++){
				c[j+i*C]=b[j+i*C] + a[j+i*C];
			}
		}
	}
	return 0;
}

/* Verify every element of c equals 2; report whether a memory-access
 * issue (any wrong element) was observed. */
int check(){
	bool test = false;
	for(int64_t i=0; i<C*C; i++){
		if(c[i]!=2){
			test = true;
		}
	}
	printf("Memory Access Issue visible: %s\n",test ? "true" : "false");
	return 0;
}

int main(){
	/* NOTE(review): malloc results are not checked; at ~21 GB per matrix
	 * these allocations can plausibly fail — kept as-is to preserve the
	 * benchmark's behavior. */
	a = (int64_t *) malloc(C*C*sizeof(int64_t));
	b = (int64_t *) malloc(C*C*sizeof(int64_t));
	c = (int64_t *) malloc(C*C*sizeof(int64_t));
	init();
	add();
	check();
	free(a);
	free(b);
	free(c);
	return 0;
}
DRB034-truedeplinear-var-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
 * A linear expression is used as array subscription.
 * Data race pair: a[2*i+1]@66:5 vs. a[i]@66:14
 */
#include <stdio.h>   /* printf() was called without a prototype (implicit declaration) */
#include <stdlib.h>

int main(int argc, char* argv[])
{
  int i;
  int len=2000;

  /* Array length is configurable from the command line. */
  if (argc>1)
    len = atoi(argv[1]);

  int a[len];  /* variable-length array sized at runtime */

  /* NOTE(review): the race-pair comment above refers to the a[2*i+1] loop,
   * but the parallel-for here is attached to the initialization loop —
   * confirm pragma placement against the upstream DataRaceBench source. */
#pragma omp parallel for private(i)
  for (i=0; i<len; i++)
    a[i]=i;

  /* True dependence: a[i] is read while a[2*i+1] is written. */
  for (i=0;i<len/2;i++)
    a[2*i+1]=a[i]+1;

  for (i=0; i<len; i++)
    printf("%d\n", a[i]);

  return 0;
}
bucle-forModificado.c
/*
 * Build:  gcc -fopenmp -O2 src/bucle-forModificado.c -o bin/bucle-forModificado
 * Run:    ./bin/bucle-forModificado 8
 *
 * Runs a parallel for loop and prints which thread executes each iteration.
 * The iteration count is taken from the first command-line argument.
 */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main(int argc, char **argv)
{
	/* Guard clause: an iteration count is mandatory. */
	if (argc < 2) {
		fprintf(stderr, "\n[ERROR] - Falta no iteraciones \n");
		exit(-1);
	}

	const int total = atoi(argv[1]);

	#pragma omp parallel for
	for (int iter = 0; iter < total; iter++) {
		printf("thread %d ejecuta la iteración %d del bucle\n",
		       omp_get_thread_num(), iter);
	}

	return 0;
}
omptl_tools.h
// Copyright (C) 2006 Fokko Beekhof // Email contact: Fokko.Beekhof@cui.unige.ch // The OMPTL library is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 2.1 of the License, or (at your option) any later version. // This library is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // Lesser General Public License for more details. // You should have received a copy of the GNU Lesser General Public // License along with this library; if not, write to the Free Software // Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #ifndef OMPTL_TOOLS_H #define OMPTL_TOOLS_H #include <utility> #include <vector> #include <cassert> #include <algorithm> namespace omptl { // Log of the number of operations that is expected to run faster in a single // thread. 
const unsigned int C = 8;

// Heuristic: should a linear-cost operation over [first, last) run serially?
// NOTE(review): the loop below halves l only while the current top bit is set
// in N, which looks suspect as a log2 approximation — verify against upstream
// OMPTL before relying on the exact threshold.
template<typename Iterator>
bool _linear_serial_is_faster(Iterator first, Iterator last,
				const unsigned int P)
{
	const unsigned int N = ::std::distance(first, last);
	// Approximation: (log2(N) - 1) <= l <= log2(N)
	unsigned int l = 1 << (8*sizeof(unsigned int) - 1);
	while(l & N) l /= 2;

	return (N < P) && (l < C);
}

// Heuristic: should a logarithmic-cost operation over [first, last) run
// serially?  Same log2 approximation as above, compared against 2^C.
template<typename Iterator>
bool _logn_serial_is_faster(Iterator first, Iterator last,
				const unsigned int P)
{
	const unsigned int N = ::std::distance(first, last);
	// Approximation: (log2(N) - 1) <= l <= log2(N)
	unsigned int l = 1 << (8*sizeof(unsigned int) - 1);
	while(l & N) l /= 2;

	return (N < P) && (l < (1 << C));
}

// Heuristic: should an N*log(N)-cost operation over [first, last) run
// serially?
template<typename Iterator>
bool _nlogn_serial_is_faster(Iterator first, Iterator last,
				const unsigned int P)
{
	const unsigned int N = ::std::distance(first, last);
	// Approximation: (log2(N) - 1) <= l <= log2(N)
	unsigned int l = 1 << (8*sizeof(unsigned int) - 1);
	while(l & N) l /= 2;

	return (l < P) && (l*N < (1 << C));
}

// Mirror the P source partitions onto a destination range starting at
// `first`: dest_partitions[i] receives the start iterator of the i-th
// destination sub-range, advanced by the length of source partition i.
template<typename Iterator1, typename Iterator2>
void _copy_partitions( const ::std::pair<Iterator1, Iterator1>
		*source_partitions, Iterator2 first,
		Iterator2 *dest_partitions, const unsigned int P)
{
	for (unsigned int i = 0; i < P; ++i)
	{
		dest_partitions[i] = first;

		// The last advance is very important, it may create space
		// if it is an InsertIterator or something like that.
		::std::advance(first, ::std::distance(
					source_partitions[i].first,
					source_partitions[i].second) );
	}
}

// Divide a given range into P partitions of (nearly) equal length.
// All partitions get ceil(N/P) elements except the last, which may be shorter.
template<typename Iterator>
void _partition_range(Iterator first, Iterator last,
			::std::pair<Iterator, Iterator> *partitions,
			const unsigned int P)
{
	typedef ::std::pair<Iterator, Iterator> Partition;

	const unsigned int N = ::std::distance(first, last);
	const unsigned int range = N / P + ((N%P)? 1 : 0);	// ceil(N/P)

	// All but last partition have same range
	Iterator currentLast = first;
	::std::advance(currentLast, range);
	for (unsigned int i = 0; i < P - 1; ++i)
	{
		partitions[i] = Partition(first, currentLast);
		::std::advance(first, range);
		::std::advance(currentLast, range);
	}

	// Last range may be shorter
	partitions[P - 1] = Partition(first, last);
}

// Given a range, re-arrange the items such that all elements smaller than
// the pivot precede all other values. Returns an Iterator to the first
// element not smaller than the pivot.
// NOTE(review): despite the name, the swap below exchanges *first with *last
// (not with *high) — confirm this "stable" variant against upstream OMPTL.
template<typename Iterator>
Iterator _stable_pivot_range(Iterator first, Iterator last,
			const typename Iterator::value_type pivot)
{
	Iterator pivotIt = last;

	while (first < last)
	{
		if (*first < pivot)
			++first;
		else
		{
			// Scan forward past elements not smaller than pivot.
			Iterator high = first;
			while ( (++high < last) && !(*high < pivot) )
				/* nop */;
			if (high < last)
				::std::iter_swap(first, last);
			first = pivotIt = ++high;
		}
	}

	return pivotIt;
}

// Unstable two-pointer partition around `pivot` (Hoare-style): elements
// smaller than the pivot end up before the returned iterator.
template<typename Iterator>
Iterator _pivot_range(Iterator first, Iterator last,
			const typename Iterator::value_type pivot)
{
	while (first < last)
	{
		if (*first < pivot)
			++first;
		else
		{
			// Shrink from the back to find a smaller element.
			while ( (first < --last) && !(*last < pivot) )
				/* nop */;
			::std::iter_swap(first, last);
		}
	}

	return last;
}

// Partition [first, last) into P sub-ranges delimited by the P-1 given
// pivots, recursively doubling the number of partitions each pass and
// pivoting the sub-ranges in parallel with OpenMP.
// Preconditions: pivots.size() == P - 1, pivots sorted.
// NOTE(review): ptable/pvts/used are C-style VLAs sized by runtime values —
// non-standard in C++; kept as in the original.
template<typename Iterator>
void _partition_range_by_pivots(Iterator first, Iterator last,
		const ::std::vector<typename Iterator::value_type> &pivots,
		::std::pair<Iterator, Iterator> *partitions,
		const unsigned int P)
{
	typedef ::std::pair<Iterator, Iterator> Partition;

	Iterator ptable[P];                              // pivot result per partition
	typename Iterator::value_type pvts[pivots.size()];

	::std::vector<Iterator> borders;                 // partition boundaries found so far

	bool used[pivots.size()];                        // which pivots were consumed
	::std::fill(&used[0], used + pivots.size(), false);

	borders.push_back(first);
	borders.push_back(last);

	partitions[0].first  = first;
	partitions[0].second = last;

	// Each pass p splits the current 2^(p-1) partitions in two.
	unsigned int p = 1;
	for (/* nop */; (1 << p) <= (int)P; ++p)
	{
		const int PROC = (1 << p);
		const int PIVOTS = (1 << (p-1));
		OMPTL_ASSERT(PIVOTS <= (int)pivots.size());

		// Select the pivot for each partition of this pass.
		int t;
		#pragma omp parallel for default(shared) private(t)
		for (t = 0; t < PIVOTS; ++t)
		{
			const int index = int(P / PROC) + 2 * t * int(P / PROC) - 1;
			OMPTL_ASSERT(index < (int)pivots.size());
			OMPTL_ASSERT(!used[index]);
			used[index] = true;
			pvts[t] = pivots[index];
		}

		// Pivot each current partition in parallel.
		#pragma omp parallel for default(shared) private(t)
		for (t = 0; t < PIVOTS; ++t)
			ptable[t] = _pivot_range(partitions[t].first,
						partitions[t].second, pvts[t]);

		for (t = 0; t < PIVOTS; ++t)
			borders.push_back(ptable[t]);

		::std::sort(borders.begin(), borders.end());

		// Rebuild the partition table from the sorted borders.
		#pragma omp parallel for default(shared) private(t)
		for (t = 0; t < (int)borders.size() - 1; ++t)
		{
			partitions[t].first  = borders[t];
			partitions[t].second = borders[t + 1];
		}
	}

	// Handle pivots that were not consumed by the doubling passes
	// (needed when P is not a power of two).
	for (unsigned int i = 0; i < pivots.size(); ++i)
		if(!used[i]) pvts[i] = pivots[i];

	int t;
	#pragma omp parallel for default(shared) private(t)
	for (t = 0; t < (int)pivots.size(); ++t)
		if (!used[t])
			ptable[t] = _pivot_range(partitions[t].first,
						partitions[t].second, pvts[t]);

	for (unsigned int i = 0; i < pivots.size(); ++i)
	{
		if (!used[i])
			borders.push_back(ptable[i]);
	}

	::std::sort(borders.begin(), borders.end());

	OMPTL_ASSERT(borders.size() - 1 == P);

	// Final partition table: P consecutive border pairs.
	#pragma omp parallel for default(shared) private(t)
	for (t = 0; t < (int)P; ++t)
	{
		partitions[t].first  = borders[t];
		partitions[t].second = borders[t + 1];
	}
}
// Stable variant of pivot-based range partitioning: partitions [first, last)
// into P sub-ranges using the P-1 given pivots, preserving relative order of
// equal elements as much as the in-place swaps allow. Runs serially.
template<typename Iterator>
void _partition_range_stable_by_pivots(Iterator first, Iterator last,
		const ::std::vector<typename Iterator::value_type> &pivots,
		::std::pair<Iterator, Iterator> *partitions,
		const unsigned int P)
{
	typedef ::std::pair<Iterator, Iterator> Partition;

	Iterator start = first;
	for (unsigned int i = 0; i < P - 1; ++i)
	{
		Iterator low = start;
		while (low < last)
		{
			// Find a value not lower than the pivot.
			while( (*low < pivots[i]) && (low < last) )
				::std::advance(low, 1);

			// Entire range scanned ?
			if (low == last) break;

			// Find a value lower than the pivot, starting from
			// low, working our way up.
			Iterator high = low;
			::std::advance(high, 1);
			while( !(*high < pivots[i]) && (high < last) )
				::std::advance(high, 1);

			// Entire range scanned ?
			if (high == last) break;

			// Swap values
			OMPTL_ASSERT( !(*low < pivots[i]) && (*high < pivots[i]) );
			::std::iter_swap(low, high);
		}
		partitions[i] = Partition(start, low);
		start = low;
	}

	// Last partition receives whatever remains.
	partitions[P - 1] = Partition(start, last);
}

/*
 * The sample ratio is used to sample more data. This way, the pivots can be
 * chosen more wisely, which is our only guarantee we can generate partitions
 * of equal size.
 */
// Choose P-1 pivots for partitioning [first, last) by sampling
// SAMPLE_RATIO * (P + 1) evenly spaced elements, sorting the samples, and
// picking every (size/P)-th one. Preconditions: N > P, SAMPLE_RATIO > 0.
template<typename RandomAccessIterator>
void _find_pivots(RandomAccessIterator first, RandomAccessIterator last,
	::std::vector<typename RandomAccessIterator::value_type> &pivots,
	const unsigned int P, unsigned int SAMPLE_RATIO = 10)
{
	OMPTL_ASSERT(SAMPLE_RATIO > 0);
	const unsigned int N = ::std::distance(first, last);
	OMPTL_ASSERT(N > P);

	// Adjust the constant: shrink the ratio until the sample count fits N.
	while (SAMPLE_RATIO * (P + 1) > N)
		SAMPLE_RATIO /= 2;

	pivots.clear();
	pivots.reserve(P - 1);

	::std::vector<typename RandomAccessIterator::value_type> samples;
	const unsigned int NSAMPLES = SAMPLE_RATIO * P + SAMPLE_RATIO;
	samples.reserve(NSAMPLES);

	// Evenly spaced samples across the whole range (first and last included).
	for (unsigned int i = 0; i < NSAMPLES; ++i)
	{
		const unsigned int offset = i * (N-1) / (NSAMPLES - 1);
		OMPTL_ASSERT(offset < N);
		samples.push_back(*(first + offset));
	}
	OMPTL_ASSERT(samples.size() == NSAMPLES);

	// Sort samples to create relative ordering in data
	::std::sort(samples.begin(), samples.end());

	// Take pivots from sampled data
	for (unsigned int i = 1; i < P; ++i)
	{
		pivots.push_back(samples[i * samples.size() / P]);
	}
	OMPTL_ASSERT(pivots.size() == P - 1);
}

} // namespace omptl

#endif /* OMPTL_TOOLS_H */
GB_serialize_array.c
//------------------------------------------------------------------------------
// GB_serialize_array: serialize an array, with optional compression
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Parallel compression method for an array.  The array is compressed into
// a sequence of independently allocated blocks, or returned as-is if not
// compressed.  Currently, only LZ4 is supported.

#include "GB.h"
#include "GB_serialize.h"
#include "GB_lz4.h"

// Frees everything allocated so far; used on every error path.
#define GB_FREE_ALL                                                     \
{                                                                       \
    GB_FREE (&Sblocks, Sblocks_size) ;                                  \
    GB_serialize_free_blocks (&Blocks, Blocks_size, nblocks, Context) ; \
}

GrB_Info GB_serialize_array
(
    // output:
    GB_blocks **Blocks_handle,          // Blocks: array of size nblocks+1
    size_t *Blocks_size_handle,         // size of Blocks
    int64_t **Sblocks_handle,           // Sblocks: array of size nblocks+1
    size_t *Sblocks_size_handle,        // size of Sblocks
    int32_t *nblocks_handle,            // # of blocks
    int32_t *method_used,               // method used
    size_t *compressed_size,            // size of compressed block, or upper
                                        // bound if dryrun is true
    // input:
    bool dryrun,                        // if true, just estimate the size
    GB_void *X,                         // input array of size len
    int64_t len,                        // size of X, in bytes
    int32_t method,                     // compression method requested
    bool intel,                         // if true, use Intel IPPS
                                        // NOTE(review): not referenced in this
                                        // implementation
    int32_t algo,                       // compression algorithm
    int32_t level,                      // compression level
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT (Blocks_handle != NULL) ;
    ASSERT (Blocks_size_handle != NULL) ;
    ASSERT (Sblocks_handle != NULL) ;
    ASSERT (Sblocks_size_handle != NULL) ;
    ASSERT (nblocks_handle != NULL) ;
    ASSERT (method_used != NULL) ;
    ASSERT (compressed_size != NULL) ;
    GB_blocks *Blocks = NULL ;
    size_t Blocks_size = 0, Sblocks_size = 0 ;
    int32_t nblocks = 0 ;
    int64_t *Sblocks = NULL ;

    //--------------------------------------------------------------------------
    // check for quick return
    //--------------------------------------------------------------------------

    // clear all outputs first so any early return leaves a consistent state
    (*Blocks_handle) = NULL ;
    (*Blocks_size_handle) = 0 ;
    (*Sblocks_handle) = NULL ;
    (*Sblocks_size_handle) = 0 ;
    (*nblocks_handle) = 0 ;
    (*method_used) = GxB_COMPRESSION_NONE ;
    (*compressed_size) = 0 ;
    if (X == NULL || len == 0)
    {
        // input array is empty
        return (GrB_SUCCESS) ;
    }

    //--------------------------------------------------------------------------
    // check for no compression
    //--------------------------------------------------------------------------

    if (method <= GxB_COMPRESSION_NONE || len < 256)
    {
        // no compression, return result as a single block (plus the sentinel)
        if (!dryrun)
        {
            Blocks  = GB_MALLOC (2, GB_blocks, &Blocks_size) ;
            Sblocks = GB_MALLOC (2, int64_t, &Sblocks_size) ;
            if (Blocks == NULL || Sblocks == NULL)
            {
                // out of memory
                GB_FREE_ALL ;
                return (GrB_OUT_OF_MEMORY) ;
            }
            Blocks [0].p = X ;                  // first block is all of the array X
            Blocks [0].p_size_allocated = 0 ;   // p is shallow (caller owns X)
            Sblocks [0] = 0 ;                   // start of first block
            Blocks [1].p = NULL ;               // 2nd block is the final sentinel
            Blocks [1].p_size_allocated = 0 ;   // p is shallow
            Sblocks [1] = len ;                 // first block ends at len-1
            (*Blocks_handle) = Blocks ;
            (*Blocks_size_handle) = Blocks_size ;
            (*Sblocks_handle) = Sblocks ;
            (*Sblocks_size_handle) = Sblocks_size ;
        }
        (*compressed_size) = len ;
        (*nblocks_handle) = 1 ;
        return (GrB_SUCCESS) ;
    }

    (*method_used) = method ;

    //--------------------------------------------------------------------------
    // determine # of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (len, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // determine # of blocks and allocate them
    //--------------------------------------------------------------------------

    // divide the array into blocks, 4 per thread, or a single block if 1 thread
    int64_t blocksize = (nthreads == 1) ? len : GB_ICEIL (len, 4*nthreads) ;

    // ensure the blocksize does not exceed the LZ4 maximum
    ASSERT (LZ4_MAX_INPUT_SIZE < INT32_MAX) ;
    blocksize = GB_IMIN (blocksize, LZ4_MAX_INPUT_SIZE/2) ;

    // ensure the blocksize is not too small
    blocksize = GB_IMAX (blocksize, (64*1024)) ;

    // determine the final # of blocks
    nblocks = GB_ICEIL (len, blocksize) ;
    nthreads = GB_IMIN (nthreads, nblocks) ;
    (*nblocks_handle) = nblocks ;

    // allocate the output Blocks: one per block plus the sentinel block
    if (!dryrun)
    {
        Blocks  = GB_CALLOC (nblocks+1, GB_blocks, &Blocks_size) ;
        Sblocks = GB_CALLOC (nblocks+1, int64_t, &Sblocks_size) ;
        if (Blocks == NULL || Sblocks == NULL)
        {
            // out of memory
            GB_FREE_ALL ;
            return (GrB_OUT_OF_MEMORY) ;
        }
    }

    // allocate the blocks, one at a time (serial: allocation is not
    // thread-safe to interleave with the error bookkeeping below)
    int32_t blockid ;
    bool ok = true ;
    for (blockid = 0 ; blockid < nblocks && ok ; blockid++)
    {
        // allocate a single block for the compression of X [kstart:kend-1]
        int64_t kstart, kend ;
        GB_PARTITION (kstart, kend, len, blockid, nblocks) ;
        size_t uncompressed = kend - kstart ;
        ASSERT (uncompressed < INT32_MAX) ;
        ASSERT (uncompressed > 0) ;
        // worst-case compressed size for this block
        size_t s = (size_t) LZ4_compressBound ((int) uncompressed) ;
        ASSERT (s < INT32_MAX) ;
        if (dryrun)
        {
            // do not allocate the block; just sum up the upper bound sizes
            (*compressed_size) += s ;
        }
        else
        {
            // allocate the block
            size_t size_allocated = 0 ;
            GB_void *p = GB_MALLOC (s, GB_void, &size_allocated) ;
            ok = (p != NULL) ;
            Blocks [blockid].p = p ;
            Blocks [blockid].p_size_allocated = size_allocated ;
        }
    }

    if (dryrun)
    {
        // GrB_Matrix_serializeSize: no more work to do.  (*compressed_size) is
        // an upper bound of the blob_size required when the matrix is
        // compressed, and (*nblocks_handle) is the number of blocks to be used.
        // No space has been allocated.
        return (GrB_SUCCESS) ;
    }

    if (!ok)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // compress the blocks in parallel
    //--------------------------------------------------------------------------

    // each iteration writes only its own block; ok is combined via reduction
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic) \
        reduction(&&:ok)
    for (blockid = 0 ; blockid < nblocks ; blockid++)
    {
        // compress X [kstart:kend-1] into Blocks [blockid].p
        int64_t kstart, kend ;
        GB_PARTITION (kstart, kend, len, blockid, nblocks) ;
        const char *src = (const char *) (X + kstart) ;     // source
        char *dst = (char *) Blocks [blockid].p ;           // destination
        int srcSize = (int) (kend - kstart) ;               // size of source
        size_t dsize = Blocks [blockid].p_size_allocated ;  // size of dest
        int dstCapacity = GB_IMIN (dsize, INT32_MAX) ;
        int s ;
        switch (algo)
        {
            default :
            case GxB_COMPRESSION_LZ4 :
                s = LZ4_compress_default (src, dst, srcSize, dstCapacity) ;
                break ;
            case GxB_COMPRESSION_LZ4HC :
                s = LZ4_compress_HC (src, dst, srcSize, dstCapacity, level) ;
                break ;
        }
        // LZ4 returns 0 on failure, > 0 (the compressed size) on success
        ok = ok && (s > 0) ;
        // compressed block is now in dst [0:s-1], of size s
        Sblocks [blockid] = (int64_t) s ;
    }

    if (!ok)
    {
        // compression failure: this can "never" occur
        GB_FREE_ALL ;
        return (GrB_INVALID_OBJECT) ;
    }

    //--------------------------------------------------------------------------
    // compute cumulative sum of the compressed blocks
    //--------------------------------------------------------------------------

    // after this, Sblocks [b] is the offset of block b in the blob, and
    // Sblocks [nblocks] is the total compressed size
    GB_cumsum (Sblocks, nblocks, NULL, 1, Context) ;

    //--------------------------------------------------------------------------
    // free workspace return result
    //--------------------------------------------------------------------------

    (*Blocks_handle) = Blocks ;
    (*Blocks_size_handle) = Blocks_size ;
    (*Sblocks_handle) = Sblocks ;
    (*Sblocks_size_handle) = Sblocks_size ;
    (*compressed_size) = Sblocks [nblocks] ;    // actual size of the blob
    return (GrB_SUCCESS) ;
}
blackberry_ES10_fmt_plug.c
/* Cracker for BlackBerry Enterprise Server 10 hashes.
 *
 * Thanks to Nicolas RUFF for providing the algorithm details and sample
 * hashes!
 *
 * USE BDSMgmt;
 * SELECT LoginPassword FROM EASUsers;
 *
 * This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com>
 * and it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 */

/* John the Ripper dynamic-plugin stanza: the same file is scanned three
 * times (externs, registration, implementation). */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_blackberry1;
#elif FMT_REGISTERS_H
john_register_one(&fmt_blackberry1);
#else

#include <string.h>
#include <errno.h>
#include "sha2.h"
#include "arch.h"
//#undef _OPENMP
//#undef SIMD_COEF_64
//#undef SIMD_PARA_SHA512
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "simd-intrinsics.h"

#ifdef _OPENMP
#include <omp.h>
// OMP_SCALE tests (intel core i7)
// 8 - 77766
// 64 - 80075
// 128 - 82016 -test=0 is still almost instant.
// 256 - 81753
// 512 - 80537
#ifndef OMP_SCALE
#define OMP_SCALE 128
#endif
#endif

#include "memdbg.h"

#define FORMAT_TAG "$bbes10$"
#define FORMAT_TAG_LENGTH 8
#define FORMAT_LABEL "Blackberry-ES10"
#define FORMAT_NAME ""
#define ALGORITHM_NAME "SHA-512 " SHA512_ALGORITHM_NAME
#define BENCHMARK_COMMENT " (101x)"
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 64
#define BINARY_ALIGN 4
#define MAX_SALT_SIZE 64
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 4

/* With SIMD, one crypt_all() inner step processes a full vector of keys. */
#ifdef SIMD_COEF_64
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_64*SIMD_PARA_SHA512)
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif

/* Known-good self-test vectors: "$bbes10$" + hex(SHA-512 chain) + "$" + salt */
static struct fmt_tests blackberry_tests[] = {
	{"$bbes10$76BDF6BE760FCF5DEE7B20E27632D1FEDD9D64E1BBCC941F42957E87CBFB96F176324B2E2C71976CEBE67CA6F400F33F001D7453D80F4AF5D80C8A93ED0BA0E6$DB1C19C0", "toulouse"},
	{"$bbes10$57ECCAA65BB087E3E506A8C5CEBEE193DD051538CE44F4156D65F1B44E0266DF49337EA11812DF12E39C8B12EB46F19C291FD9529CD4F09B3C8109BE6F4861E5$0wzWUnuQ", "test"},
	{"$bbes10$217A6A0646ACF599B5A05A3D2B47F96B576353C74E4D28E857A476EFDFB36B27930FEDAA8064FFD17F36C7C854BED49FF95029B3310434BB2D05524043AE6E44$A5Dr4lXa", "ripper"},
	{"$bbes10$DE1A954989FFED2D74900463A1AD7B14D852164D84AA0443F0EC59A0875A911C92CEF73E7C082B13864132644FA49DFEBDCF1D2DA0C9711CD4DC348A855F7285$MnphRIkf", "superbadPass"},
	{NULL}
};

// Per-candidate plaintext and computed digests, sized in init().
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];

static struct custom_salt {
	int iterations;                            // unused by this format
	char unsigned salt[MAX_SALT_SIZE + 1];     // NUL-terminated salt string
} *cur_salt;

/* Allocate the key/digest arrays; with OpenMP the arrays are scaled so every
 * thread gets OMP_SCALE batches per crypt_all() call. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt,
			sizeof(*crypt_out));
}
/* Release the buffers allocated in init(). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Sanity-check one ciphertext line: "$bbes10$" + 128 uppercase hex digits
 * + "$" + salt (<= MAX_SALT_SIZE chars). Returns 1 if parseable. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr;
	char *p;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LENGTH) != 0)
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += FORMAT_TAG_LENGTH;
	if (0 < strlen(ctcopy) && '$' == ctcopy[strlen(ctcopy) - 1]) /* Can not end with '$' */
		goto err;
	if ((p = strtokm(ctcopy, "$")) == NULL)	/* hash */
		goto err;
	if(strlen(p) != BINARY_SIZE * 2)
		goto err;
	if (!ishexuc(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL)	/* salt */
		goto err;
	if(strlen(p) > MAX_SALT_SIZE)
		goto err;
	p = strtokm(NULL, "$");
	if (p)
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}

/* Extract the salt (text after the last '$'); length was bounded by valid().
 * Returns a pointer to static storage, as the formats API expects. */
static void *get_salt(char *ciphertext)
{
	char *p;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	p = strrchr(ciphertext, '$') + 1;
	strcpy((char*)cs.salt, p);
	return (void *)&cs;
}

/* Decode the 128 hex digits after the tag into a 64-byte binary digest.
 * Returns a pointer to static storage. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD_32 dummy;   // forces alignment for word-wise compares
	} buf;
	unsigned char *out = buf.c;
	int i;
	char *p = ciphertext + FORMAT_TAG_LENGTH;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

/* Partial-hash accessors over the first 32 bits of the computed digest. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Core hash: h = SHA512(password || salt), then 100 further rounds of
 * h = SHA512(h) — 101 SHA-512 calls total per candidate. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
	{
		int j;
		SHA512_CTX ctx;
#ifdef SIMD_COEF_64
		unsigned int i;
		// Interleaved SIMD input buffer, cache-aligned; one 128-byte
		// SHA-512 block per lane.
		unsigned char _IBuf[128*MAX_KEYS_PER_CRYPT+MEM_ALIGN_CACHE], *keys, tmpBuf[128];
		ARCH_WORD_64 *keys64, *tmpBuf64=(ARCH_WORD_64*)tmpBuf, *p64;

		keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);
		keys64 = (ARCH_WORD_64*)keys;
		memset(keys, 0, 128*MAX_KEYS_PER_CRYPT);
		for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
			// First (scalar) round: SHA512(password || salt).
			SHA512_Init(&ctx);
			SHA512_Update(&ctx, saved_key[index+i], strlen(saved_key[index+i]));
			SHA512_Update(&ctx, cur_salt->salt, strlen((char*)cur_salt->salt));
			SHA512_Final(tmpBuf, &ctx);
			// Scatter the 64-byte digest into this key's SIMD lane
			// (big-endian words), then append padding bit and the
			// message length (512 bits = 0x200).
			p64 = &keys64[i%SIMD_COEF_64+i/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64];
			for (j = 0; j < 8; ++j)
				p64[j*SIMD_COEF_64] = JOHNSWAP64(tmpBuf64[j]);
			p64[8*SIMD_COEF_64] = 0x8000000000000000ULL;
			p64[15*SIMD_COEF_64] = 0x200;
		}
		// 98 vectorized in-place rounds...
		for (j = 0; j < 98; j++)
			SIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
		// Last one with FLAT_OUT
		SIMDSHA512body(keys, (ARCH_WORD_64*)crypt_out[index], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT|SSEi_FLAT_OUT);
#else
		SHA512_Init(&ctx);
		SHA512_Update(&ctx, saved_key[index], strlen(saved_key[index]));
		SHA512_Update(&ctx, cur_salt->salt, strlen((char*)cur_salt->salt));
		SHA512_Final((unsigned char *)crypt_out[index], &ctx);

		/* now "h" (crypt_out[index] becomes our input
		 * total SHA-512 calls => 101 */
		for (j = 0; j < 99; j++) {
			SHA512_CTX ctx;
			SHA512_Init(&ctx);
			SHA512_Update(&ctx, (unsigned char*)crypt_out[index], 64);
			SHA512_Final((unsigned char *)crypt_out[index], &ctx);
		}
#endif
	}
	return count;
}

/* Quick scan: compare only the first machine word of each digest. */
static int cmp_all(void *binary, int count)
{
	int index = 0;
	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

/* Full 64-byte comparison for a single candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Store a candidate password, truncated to PLAINTEXT_LENGTH. */
static void blackberry_set_key(char *key, int index)
{
	int saved_len = strlen(key);
	if (saved_len > PLAINTEXT_LENGTH)
		saved_len = PLAINTEXT_LENGTH;
	memcpy(saved_key[index], key, saved_len);
	saved_key[index][saved_len] = 0;
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Format registration: parameters block, then the method table. */
struct fmt_main fmt_blackberry1 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		blackberry_tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		blackberry_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
GB_unaryop__abs_int64_bool.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): code below is left untouched for that reason; only comments
// were added.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__abs_int64_bool
// op(A') function: GB_tran__abs_int64_bool

// C type: int64_t
// A type: bool
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = GB_IABS (aij)

#define GB_ATYPE \
    bool

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    bool aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, aij) \
    int64_t z = (int64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)                   \
{                                           \
    /* aij = Ax [pA] */                     \
    GB_GETA (aij, Ax, pA) ;                 \
    /* Cx [pC] = op (cast (aij)) */         \
    GB_CASTING (z, aij) ;                   \
    GB_OP (GB_CX (pC), z) ;                 \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_BOOL)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_int64_bool
(
    int64_t *Cx,       // Cx and Ax may be aliased
    bool *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // Element-wise, embarrassingly parallel over the anz entries.
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_int64_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // The transpose kernel body is generated by the shared template below.
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
BSSNScalar_Field.c
// Part P0: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER
#define NGHOSTS 3

// Step P1a: Import needed header files
#include "stdio.h"
#include "stdlib.h"
#include <stdint.h>
#include "math.h"
#include "time.h"
#ifndef M_PI
#define M_PI 3.14159265358979323846  // not guaranteed by strict ISO C <math.h>
#endif
// Step P1b: Import necessary gsl libraries for interpolating the initial data onto the grid
#include "gsl/gsl_spline.h"
#include "gsl/gsl_errno.h"
#include "gsl/gsl_interp.h"

// Step P2: Add needed #define's to set data type, the IDX4() macro, and the gridfunctions
// Step P2a: set REAL=double, so that all floating point numbers are stored to at least ~16 significant digits.
#define REAL double

// Step P3: Set free parameters
// Step P3a: Free parameters for the numerical grid
// Spherical coordinates parameter
const REAL RMAX    = 30.;
/* Set to approximately the time you wish to evolve for,
 * so that at t=t_final data at the origin is not
 * affected by the boundary conditions */
// Time coordinate parameters
const REAL t_final =  50.;
const REAL CFL_FACTOR = 0.5; // Set the CFL Factor

// Step P3b: Free parameters for the spacetime evolution
const REAL eta = 2.; // Gamma-driving shift condition parameter.

// Step P4: Implement the algorithm for upwinding.
// *NOTE*: This upwinding is backwards from
// usual upwinding algorithms, because the
// upwinding control vector in BSSN (the shift)
// acts like a *negative* velocity.
// BUGFIX (macro hygiene): argument and full expansion are now parenthesized.
// The original, unparenthesized "UpwindVecU > 0.0 ? 1.0 : 0.0" mis-parses as
// soon as the macro is expanded inside a larger expression (e.g. multiplied
// by a coefficient), due to ?:'s very low precedence.
#define UPWIND_ALG(UpwindVecU) ((UpwindVecU) > 0.0 ? 1.0 : 0.0)

// Step P5: Set free parameters for Psi initial data
const REAL psi_posn_x = 0.0,psi_posn_y = 0.0,psi_posn_z = 0.0;
// Step P5b: Set free parameters for the scalar field
const REAL scalar_posn_x = 0.0;
const REAL scalar_posn_y = 0.0;
const REAL scalar_posn_z = 0.0;
const REAL br_on   = 1.; // Turn on(1.)/off(0.) scalar field backreaction on the metric
const REAL pot1_on = 0.; // Turn on(1.)/off(0.) quadratic potential
const REAL pot2_on = 0.; // Turn on(1.)/off(0.) self-interacting potential
// Make sure only one potential is on at a time

// Variables for the scalar field potential
const REAL scalarmass = 1.;  // Scalar mass, \mu = c/\hbar m
const REAL fa = 0.05;        // Decay constant, only relevant for the self-interacting potential

//Step P5c: Declare vars for initial data arrays
// We use initial data profiles for the scalar
// and the conformal factor that is known to
// lead to stable scalar field evolution
// NOTE(review): these are file-scope *scratch* variables written at every
// grid point inside initial_data() (and presumably read by the BSSN_ID()
// routine included below, since they are not passed as arguments — TODO
// confirm against ID_array_psi.h).  They make any loop that writes them
// unsafe to parallelize.
REAL uu_in;
REAL vv_in;
REAL psi_in;
REAL alpha_in;
REAL r_scalar;
REAL r_psi;

// Step P6: Declare the IDX4(gf,i,j,k) macro, which enables us to store 4-dimensions of
//          data in a 1D array. In this case, consecutive values of "i"
//          (all other indices held to a fixed value) are consecutive in memory, where
//          consecutive values of "j" (fixing all other indices) are separated by
//          Nxx_plus_2NGHOSTS[0] elements in memory. Similarly, consecutive values of
//          "k" are separated by Nxx_plus_2NGHOSTS[0]*Nxx_plus_2NGHOSTS[1] in memory, etc.
#define IDX4(g,i,j,k) \
( (i) + Nxx_plus_2NGHOSTS[0] * ( (j) + Nxx_plus_2NGHOSTS[1] * ( (k) + Nxx_plus_2NGHOSTS[2] * (g) ) ) )
#define IDX3(i,j,k) ( (i) + Nxx_plus_2NGHOSTS[0] * ( (j) + Nxx_plus_2NGHOSTS[1] * (k) ) )
// Assuming idx = IDX3(i,j,k). Much faster if idx can be reused over and over:
#define IDX4pt(g,idx)   ( (idx) + (Nxx_plus_2NGHOSTS[0]*Nxx_plus_2NGHOSTS[1]*Nxx_plus_2NGHOSTS[2]) * (g) )

// Step P7: Set #define's for BSSN gridfunctions. C code generated above
#include "gridfunction_defines.h"

// Triple loop over a 3D index region; loop variables i0,i1,i2 are introduced
// by the macro itself (i0 fastest, matching the memory layout above).
#define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \
  for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++)

// Map grid indices (i0,i1,i2) to Cartesian coordinates xCart[3]; the actual
// coordinate transformation is NRPy+-generated code in xxCart.h, which reads
// xx0,xx1,xx2 and writes xCart[].
void xxCart(REAL *xx[3],const int i0,const int i1,const int i2, REAL xCart[3]) {
  REAL xx0 = xx[0][i0];
  REAL xx1 = xx[1][i1];
  REAL xx2 = xx[2][i2];
#include "xxCart.h"
}

// Step P8: Include basic functions needed to impose curvilinear
// parity and boundary conditions.
#include "curvilinear_parity_and_outer_boundary_conditions.h"
#include "enforce_detgammabar_constraint.h"

// Step P9: Find the CFL-constrained timestep.
// Scans all interior points, computes the proper distance to the neighboring
// point in each direction (ds_dirn.h), and returns CFL_FACTOR times the
// global minimum.
REAL find_timestep(const int Nxx_plus_2NGHOSTS[3],const REAL dxx[3],REAL *xx[3], const REAL CFL_FACTOR) {
  const REAL dxx0 = dxx[0], dxx1 = dxx[1], dxx2 = dxx[2];
  REAL dsmin = 1e38; // Start with a crazy high value... close to the largest number in single precision.
  LOOP_REGION(NGHOSTS,Nxx_plus_2NGHOSTS[0]-NGHOSTS, NGHOSTS,Nxx_plus_2NGHOSTS[1]-NGHOSTS, NGHOSTS,Nxx_plus_2NGHOSTS[2]-NGHOSTS) {
    const REAL xx0 = xx[0][i0], xx1 = xx[1][i1], xx2 = xx[2][i2];
    REAL ds_dirn0, ds_dirn1, ds_dirn2;
#include "ds_dirn.h"
#define MIN(A, B) ( ((A) < (B)) ? (A) : (B) )
    // Set dsmin = MIN(dsmin, ds_dirn0, ds_dirn1, ds_dirn2);
    dsmin = MIN(dsmin,MIN(ds_dirn0,MIN(ds_dirn1,ds_dirn2)));
  }
  return dsmin*CFL_FACTOR;
}

// Contains BSSN_ID() for arbitrary initial data array
#include "ID_array_psi.h"

// Step P10: Declare the function for the exact solution. time==0 corresponds to the initial data.
void initial_data(const int Nxx_plus_2NGHOSTS[3],REAL *xx[3], REAL *in_gfs) { // Step P11a: Declare initial data arrays FILE *uu_file = fopen("BSSN_SF/InitialData/phiCC9.csv", "r"); FILE *vv_file = fopen("BSSN_SF/InitialData/PiCC9.csv", "r"); FILE *psi_file = fopen("BSSN_SF/InitialData/psiCC9.csv", "r"); FILE *alpha_file = fopen("BSSN_SF/InitialData/alphaCC9.csv", "r"); int temp; int alen = 0; while(fscanf(uu_file,"%lf\n",&temp)==1){ alen++; } double r_arr[alen]; double uu_in_arr[alen]; double vv_in_arr[alen]; double psi_in_arr[alen]; double alpha_in_arr[alen]; rewind(uu_file); for(int i=0;i<alen;i++){ r_arr[i] = 0.01*i; fscanf(uu_file, "%lf\n", &uu_in_arr[i]); fscanf(vv_file, "%lf\n", &vv_in_arr[i]); fscanf(psi_file, "%lf\n", &psi_in_arr[i]); fscanf(alpha_file, "%lf\n", &alpha_in_arr[i]); } // Step P11b: Declare splines to interpolate onto the cartesian grid gsl_interp_accel *acc = gsl_interp_accel_alloc (); gsl_spline *spline_u = gsl_spline_alloc (gsl_interp_cspline, alen); gsl_spline_init(spline_u, r_arr, uu_in_arr, alen); gsl_spline *spline_v = gsl_spline_alloc (gsl_interp_cspline, alen); gsl_spline_init(spline_v, r_arr, vv_in_arr, alen); gsl_spline *spline_psi = gsl_spline_alloc (gsl_interp_cspline, alen); gsl_spline_init(spline_psi, r_arr, psi_in_arr, alen); gsl_spline *spline_alpha = gsl_spline_alloc (gsl_interp_cspline, alen); gsl_spline_init(spline_alpha, r_arr, alpha_in_arr, alen); #pragma omp parallel for LOOP_REGION(0,Nxx_plus_2NGHOSTS[0], 0,Nxx_plus_2NGHOSTS[1], 0,Nxx_plus_2NGHOSTS[2]) { const int idx = IDX3(i0,i1,i2); REAL xCart[3]; xxCart(xx, i0,i1,i2, xCart); { r_psi = sqrt(pow(-psi_posn_x + xCart[0], 2) + pow(-psi_posn_y + xCart[1], 2) + pow(-psi_posn_z + xCart[2], 2)); psi_in = gsl_spline_eval (spline_psi, r_psi, acc); alpha_in = gsl_spline_eval (spline_alpha, r_psi, acc); } BSSN_ID(xx[0][i0],xx[1][i1],xx[2][i2],xCart[0],xCart[1],xCart[2], &in_gfs[IDX4pt(HDD00GF,idx)],&in_gfs[IDX4pt(HDD01GF,idx)],&in_gfs[IDX4pt(HDD02GF,idx)], 
&in_gfs[IDX4pt(HDD11GF,idx)],&in_gfs[IDX4pt(HDD12GF,idx)],&in_gfs[IDX4pt(HDD22GF,idx)], &in_gfs[IDX4pt(TRKGF,idx)], &in_gfs[IDX4pt(ADD00GF,idx)],&in_gfs[IDX4pt(ADD01GF,idx)],&in_gfs[IDX4pt(ADD02GF,idx)], &in_gfs[IDX4pt(ADD11GF,idx)],&in_gfs[IDX4pt(ADD12GF,idx)],&in_gfs[IDX4pt(ADD22GF,idx)], &in_gfs[IDX4pt(LAMBDAU0GF,idx)],&in_gfs[IDX4pt(LAMBDAU1GF,idx)],&in_gfs[IDX4pt(LAMBDAU2GF,idx)], &in_gfs[IDX4pt(VETU0GF,idx)],&in_gfs[IDX4pt(VETU1GF,idx)],&in_gfs[IDX4pt(VETU2GF,idx)], &in_gfs[IDX4pt(BETU0GF,idx)],&in_gfs[IDX4pt(BETU1GF,idx)],&in_gfs[IDX4pt(BETU2GF,idx)], &in_gfs[IDX4pt(ALPHAGF,idx)],&in_gfs[IDX4pt(CFGF,idx)]); REAL xx0 = xCart[0]; REAL xx1 = xCart[1]; REAL xx2 = xCart[2]; { r_scalar = sqrt(pow(-scalar_posn_x + xx0, 2) + pow(-scalar_posn_y + xx1, 2) + pow(-scalar_posn_z + xx2, 2)); in_gfs[IDX4(UUGF, i0, i1, i2)] = gsl_spline_eval (spline_u, r_scalar, acc); in_gfs[IDX4(VVGF, i0, i1, i2)] = gsl_spline_eval (spline_v, r_scalar, acc); } } } // Step P12: Implement Hamiltonian constraint diagnostic void Hamiltonian_constraint(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],const REAL dxx[3], REAL *xx[3], REAL *in_gfs, REAL *aux_gfs) { #include "Hamiltonian.h" } // Step P13: Declare the function to evaluate the BSSN RHSs void rhs_eval(const int Nxx[3],const int Nxx_plus_2NGHOSTS[3],const REAL dxx[3], REAL *xx[3], const REAL *in_gfs,REAL *rhs_gfs) { #include "BSSN_RHSs.h" } #include "ID_array_ADM.h" // main() function: // Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates // Step 1: Set up scalar wave initial data // Step 2: Evolve scalar wave initial data forward in time using Method of Lines with RK4 algorithm, // applying quadratic extrapolation outer boundary conditions. // Step 3: Output relative error between numerical and exact solution. 
// Step 4: Free all allocated memory
int main(int argc, const char *argv[]) {
  // Step 0a: Read command-line input, error out if nonconformant
  if(argc != 4 || atoi(argv[1]) < NGHOSTS) {
    printf("Error: Expected one command-line argument: ./BSSNCurvilinear_Playground Nx0 Nx1 Nx2,\n");
    printf("where Nx[0,1,2] is the number of grid points in the 0, 1, and 2 directions.\n");
    printf("Nx[] MUST BE larger than NGHOSTS (= %d)\n",NGHOSTS);
    exit(1);
  }
  // Step 0b: Set up numerical grid structure, first in space...
  const int Nx0 = atoi(argv[1]);
  const int Nx1 = atoi(argv[2]);
  const int Nx2 = atoi(argv[3]);
  // Cell-centered grids need even point counts so symmetry zones line up.
  if(Nx0%2 != 0 || Nx1%2 != 0 || Nx2%2 != 0) {
    printf("Error: Cannot guarantee a proper cell-centered grid if number of grid cells not set to even number.\n");
    printf(" For example, in case of angular directions, proper symmetry zones will not exist.\n");
    exit(1);
  }
  const int Nxx[3] = { Nx0, Nx1, Nx2 };
  const int Nxx_plus_2NGHOSTS[3] = { Nxx[0]+2*NGHOSTS, Nxx[1]+2*NGHOSTS, Nxx[2]+2*NGHOSTS };
  const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS[0]*Nxx_plus_2NGHOSTS[1]*Nxx_plus_2NGHOSTS[2];
#include "xxminmax.h"

  // Step 0c: Allocate memory for gridfunctions
  // NOTE(review): none of these malloc() results are checked; a failed
  // allocation (large grids) would crash on first use — TODO add checks.
  REAL *evol_gfs    = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);
  REAL *next_in_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);
  REAL *aux_gfs     = (REAL *)malloc(sizeof(REAL) * NUM_AUX_GFS  * Nxx_plus_2NGHOSTS_tot);
  REAL *k1_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);
  REAL *k2_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);
  REAL *k3_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);
  REAL *k4_gfs = (REAL *)malloc(sizeof(REAL) * NUM_EVOL_GFS * Nxx_plus_2NGHOSTS_tot);

  // Step 0d: Set up space and time coordinates
  // Step 0d.i: Set \Delta x^i on uniform grids.
  REAL dxx[3];
  for(int i=0;i<3;i++) dxx[i] = (xxmax[i] - xxmin[i]) / ((REAL)Nxx[i]);

  // Step 0d.ii: Set up uniform coordinate grids
  REAL *xx[3];
  for(int i=0;i<3;i++) {
    xx[i] = (REAL *)malloc(sizeof(REAL)*Nxx_plus_2NGHOSTS[i]);
    for(int j=0;j<Nxx_plus_2NGHOSTS[i];j++) {
      xx[i][j] = xxmin[i] + ((REAL)(j-NGHOSTS) + (1.0/2.0))*dxx[i]; // Cell-centered grid.
    }
  }

  // Step 0d.iii: Set timestep based on smallest proper distance between gridpoints and CFL factor
  REAL dt = find_timestep(Nxx_plus_2NGHOSTS, dxx,xx, CFL_FACTOR);
  //printf("# Timestep set to = %e\n",(double)dt);
  int N_final = (int)(t_final / dt + 0.5); // The number of iterations in time.
                                           //Add 0.5 to account for C rounding down integers.

  // Step 0e: Find ghostzone mappings and parities:
  gz_map *bc_gz_map = (gz_map *)malloc(sizeof(gz_map)*Nxx_plus_2NGHOSTS_tot);
  parity_condition *bc_parity_conditions = (parity_condition *)malloc(sizeof(parity_condition)*Nxx_plus_2NGHOSTS_tot);
  set_up_bc_gz_map_and_parity_conditions(Nxx_plus_2NGHOSTS,xx,dxx,xxmin,xxmax, bc_gz_map, bc_parity_conditions);

  // Step 1: Set up initial data to be exact solution at time=0:
  initial_data(Nxx_plus_2NGHOSTS, xx, evol_gfs);

  // Step 1b: Apply boundary conditions *FOR VALIDATION PURPOSES*
  apply_bcs(Nxx, Nxx_plus_2NGHOSTS, bc_gz_map,bc_parity_conditions, evol_gfs);
  enforce_detgammabar_constraint(Nxx_plus_2NGHOSTS, xx, evol_gfs);

  // Step 2: Evaluate Hamiltonian constraint violation
  //Hamiltonian_constraint(Nxx,Nxx_plus_2NGHOSTS,dxx, xx, evol_gfs, aux_gfs);

  // Step 3: Start the timer, for keeping track of how fast the simulation is progressing.
  // NOTE(review): both clock_gettime() calls are commented out, so "start"
  // and "end" are read uninitialized by the progress indicator below
  // (undefined behavior; the timing numbers printed are garbage) — TODO
  // either re-enable the timer or drop the timing printout.
  struct timespec start, end;
  //clock_gettime(CLOCK_REALTIME, &start);

  // Step 4: Integrate the initial data forward in time using the Method of Lines and RK4
  char filename2[100];
  sprintf(filename2,"BSSN_SF-evolution/quad_pot_uu_vv_cf.txt");
  FILE *evol = fopen(filename2, "w");
  for(int n=0;n<=N_final;n++) { // Main loop to progress forward in time.
    /***************************************************/
    /* Implement RK4 for Method of Lines timestepping: */
    /***************************************************/
    /* -= RK4: Step 1 of 4 =- */
    /* First evaluate k1 = RHSs expression */
    rhs_eval(Nxx,Nxx_plus_2NGHOSTS,dxx, xx,evol_gfs, k1_gfs);
    /* Next k1 -> k1*dt, and then set the input for */
    /* the next RHS eval call to y_n+k1/2 */
#pragma omp parallel for
    for(int i=0;i<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;i++) {
      k1_gfs[i] *= dt;
      next_in_gfs[i] = evol_gfs[i] + k1_gfs[i]*0.5;
    }
    /* Finally, apply boundary conditions to */
    /* next_in_gfs, so its data are set everywhere. */
    apply_bcs(Nxx, Nxx_plus_2NGHOSTS, bc_gz_map,bc_parity_conditions, next_in_gfs);
    enforce_detgammabar_constraint(Nxx_plus_2NGHOSTS, xx, next_in_gfs);

    /* -= RK4: Step 2 of 4 =- */
    rhs_eval(Nxx,Nxx_plus_2NGHOSTS,dxx, xx,next_in_gfs, k2_gfs);
#pragma omp parallel for
    for(int i=0;i<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;i++) {
      k2_gfs[i] *= dt;
      next_in_gfs[i] = evol_gfs[i] + k2_gfs[i]*0.5;
    }
    apply_bcs(Nxx, Nxx_plus_2NGHOSTS, bc_gz_map,bc_parity_conditions, next_in_gfs);
    enforce_detgammabar_constraint(Nxx_plus_2NGHOSTS, xx, next_in_gfs);

    /* -= RK4: Step 3 of 4 =- */
    rhs_eval(Nxx,Nxx_plus_2NGHOSTS,dxx, xx,next_in_gfs, k3_gfs);
#pragma omp parallel for
    for(int i=0;i<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;i++) {
      k3_gfs[i] *= dt;
      next_in_gfs[i] = evol_gfs[i] + k3_gfs[i];
    }
    apply_bcs(Nxx, Nxx_plus_2NGHOSTS, bc_gz_map,bc_parity_conditions, next_in_gfs);
    enforce_detgammabar_constraint(Nxx_plus_2NGHOSTS, xx, next_in_gfs);

    /* -= RK4: Step 4 of 4 =- */
    rhs_eval(Nxx,Nxx_plus_2NGHOSTS,dxx, xx,next_in_gfs, k4_gfs);
#pragma omp parallel for
    for(int i=0;i<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;i++) {
      k4_gfs[i] *= dt;
      // y_{n+1} = y_n + (k1 + 2 k2 + 2 k3 + k4)/6
      evol_gfs[i] += (1.0/6.0)*(k1_gfs[i] + 2.0*k2_gfs[i] + 2.0*k3_gfs[i] + k4_gfs[i]);
    }
    Hamiltonian_constraint(Nxx,Nxx_plus_2NGHOSTS,dxx, xx, evol_gfs, aux_gfs);
    apply_bcs(Nxx, Nxx_plus_2NGHOSTS, bc_gz_map,bc_parity_conditions, evol_gfs);
    enforce_detgammabar_constraint(Nxx_plus_2NGHOSTS, xx, evol_gfs);

    /* Output the solution of the scalar field and the conformal factor
       at diffrent time slices on a 2D grid */
    if(n%10 == 0) {
      char filename[100];
      sprintf(filename,"BSSN_SF-output2D/quad_pot_2d_t-%08d.txt",n);
      FILE *out2D = fopen(filename, "w");
      // NOTE(review): i0MIN/i1mid/i2mid are computed but unused below.
      const int i0MIN=NGHOSTS; // In spherical, r=Delta r/2.
      const int i1mid=Nxx_plus_2NGHOSTS[1]/2;
      const int i2mid=Nxx_plus_2NGHOSTS[2]/2;
      LOOP_REGION(NGHOSTS,Nxx_plus_2NGHOSTS[0]-NGHOSTS, NGHOSTS,Nxx_plus_2NGHOSTS[1]-NGHOSTS, NGHOSTS,Nxx_plus_2NGHOSTS[2]-NGHOSTS) {
        REAL xx0 = xx[0][i0];
        REAL xx1 = xx[1][i1];
        REAL xx2 = xx[2][i2];
        REAL xCart[3];
#include "xxCart.h"
        int idx = IDX3(i0,i1,i2);
        // Write ADM/Cartesian form of all evolved fields at this point.
        ADMCart_ID(out2D, n*(double)dt , xx0, xx1, xx2, xCart[0], xCart[1], xCart[2],
                   evol_gfs[IDX4pt(HDD00GF,idx)], evol_gfs[IDX4pt(HDD01GF,idx)], evol_gfs[IDX4pt(HDD02GF,idx)],
                   evol_gfs[IDX4pt(HDD11GF,idx)], evol_gfs[IDX4pt(HDD12GF,idx)], evol_gfs[IDX4pt(HDD22GF,idx)],
                   evol_gfs[IDX4pt(ADD00GF,idx)], evol_gfs[IDX4pt(ADD01GF,idx)], evol_gfs[IDX4pt(ADD02GF,idx)],
                   evol_gfs[IDX4pt(ADD11GF,idx)], evol_gfs[IDX4pt(ADD12GF,idx)], evol_gfs[IDX4pt(ADD22GF,idx)],
                   evol_gfs[IDX4pt(TRKGF,idx)],
                   evol_gfs[IDX4pt(LAMBDAU0GF,idx)],evol_gfs[IDX4pt(LAMBDAU1GF,idx)],evol_gfs[IDX4pt(LAMBDAU2GF,idx)],
                   evol_gfs[IDX4pt(VETU0GF,idx)],evol_gfs[IDX4pt(VETU1GF,idx)],evol_gfs[IDX4pt(VETU2GF,idx)],
                   evol_gfs[IDX4pt(BETU0GF,idx)],evol_gfs[IDX4pt(BETU1GF,idx)],evol_gfs[IDX4pt(BETU2GF,idx)],
                   evol_gfs[IDX4pt(ALPHAGF,idx)],evol_gfs[IDX4pt(CFGF,idx)],
                   evol_gfs[IDX4pt(UUGF,idx)],evol_gfs[IDX4pt(VVGF,idx)]);
      }
      fclose(out2D);
    }

    // Output time evolution at r=0
    int idx0 = IDX3(0,0,0);
    fprintf(evol,"%e %e %e %e\n", n*dt, evol_gfs[IDX4pt(UUGF,idx0)],evol_gfs[IDX4pt(VVGF,idx0)],evol_gfs[IDX4pt(CFGF,idx0)]);

    // Progress indicator printing to stdout
    // Measure average time per iteration
    //clock_gettime(CLOCK_REALTIME, &end);
    // NOTE(review): start/end are uninitialized here (see above), and on the
    // first pass n==0 so (REAL)time_in_ns/(REAL)n divides by zero.
    const long long unsigned int time_in_ns = 1000000000L * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec;
    const REAL s_per_iteration_avg = ((REAL)time_in_ns / (REAL)n) / 1.0e9;

    const int iterations_remaining = N_final - n;
    const REAL time_remaining_in_mins = s_per_iteration_avg * (REAL)iterations_remaining / 60.0;

    const REAL num_RHS_pt_evals = (REAL)(Nxx[0]*Nxx[1]*Nxx[2]) * 4.0 * (REAL)n; // 4 RHS evals per gridpoint for RK4
    const REAL RHS_pt_evals_per_sec = num_RHS_pt_evals / ((REAL)time_in_ns / 1.0e9);

    // Progress indicator printing to stdout
    printf("%c[2K", 27); // Clear the line
    printf("It: %d t=%.2f | %.1f%%; ETA %.0f s | t/h %.2f | gp/s %.2e\r",  // \r is carriage return, move cursor to the beginning of the line
           n, n * (double)dt, (double)(100.0 * (REAL)n / (REAL)N_final),
           (double)time_remaining_in_mins*60, (double)(dt * 3600.0 / s_per_iteration_avg),
           (double)RHS_pt_evals_per_sec);
    fflush(stdout); // Flush the stdout buffer
  } // End main loop to progress forward in time.
  printf("\n"); // Clear the line.
  fclose(evol);

  /* Step 4: Free all allocated memory */
  free(bc_parity_conditions);
  free(bc_gz_map);
  free(k4_gfs);
  free(k3_gfs);
  free(k2_gfs);
  free(k1_gfs);
  free(aux_gfs);
  free(next_in_gfs);
  free(evol_gfs);
  for(int i=0;i<3;i++) free(xx[i]);
  return 0;
}
pr46032-2.c
/* { dg-do compile } */ /* { dg-options "-O2 -fopenmp -std=c99 -fipa-pta -fdump-tree-optimized" } */ #define N 2 int foo (void) { int a[N], b[N], c[N]; int *ap = &a[0]; int *bp = &b[0]; int *cp = &c[0]; #pragma omp parallel for for (unsigned int idx = 0; idx < N; idx++) { ap[idx] = 1; bp[idx] = 2; cp[idx] = ap[idx]; } return *cp; } /* { dg-final { scan-tree-dump-times "\\] = 1;" 2 "optimized" } } */ /* { dg-final { scan-tree-dump-times "\\] = 2;" 1 "optimized" } } */ /* { dg-final { scan-tree-dump-times "\\] = _\[0-9\]*;" 0 "optimized" } } */ /* { dg-final { scan-tree-dump-times "\\] = " 3 "optimized" } } */
drt_dft_solver.h
#ifndef _DRT_DFT_SOLVER_ #define _DRT_DFT_SOLVER_ #include <complex> #include "spectral/spectral.h" #include "blueprint.h" #include "equations.h" namespace spectral { /*! @brief Solver for dirichlet type x-boundary conditions of the spectral equations. * @ingroup solvers */ template< size_t n> class DRT_DFT_Solver { public: typedef Matrix<double, TL_DRT_DFT> Matrix_Type; /*! @brief Construct a solver for dirichlet type boundary conditions * * The constructor allocates storage for the solver * and initializes all fourier coefficients as well as * all low level solvers needed. * @param blueprint Contains all the necessary parameters. * @throw Message If your parameters are inconsistent. */ DRT_DFT_Solver( const Blueprint& blueprint); /*! @brief Prepare Solver for execution * * This function takes the fields and computes the missing * one according to the target parameter passed. After that * it performs three initializing steps (one onestep-, * one twostep-method and the threestep-method used in the step function) * in order to initialize the karniadakis scheme. The actual time is * thus T_0 + 3*dt after initialisation. * @param v Container with three non void matrices * @param t which Matrix is missing? */ void init( std::array< Matrix<double,TL_DRT_DFT>, n>& v, enum target t); /** * @brief Perform first initializing step * */ void first_step(); /** * @brief Perform second initializing step * * After that the step function can be used */ void second_step(); /*! @brief Perform a step by the 3 step Karniadakis scheme * * @attention At least one call of first_step() and second_step() is necessary * */ void step(){ step_<TL_ORDER3>();} /*! @brief Get the result You get the solution matrix of the current timestep. @param t The field you want @return A Read only reference to the field @attention The reference is only valid until the next call to the step() function! */ const Matrix<double, TL_DRT_DFT>& getField( enum target t) const; /*! 
@brief Get the result Use this function when you want to call step() without destroying the solution. @param m In exchange for the solution matrix you have to provide storage for further calculations. The field is swapped in. @param t The field you want. @attention The fields you get are not the ones of the current timestep. You get the fields that are not needed any more. This means the densities are 4 timesteps "old" whereas the potential is the one of the last timestep. */ void getField( Matrix<double, TL_DRT_DFT>& m, enum target t); /*! @brief Get the parameters of the solver. @return The parameters in use. @note You cannot change parameters once constructed. */ const Blueprint& blueprint() const { return blue;} private: typedef std::complex<double> complex; //methods void init_coefficients( const Boundary& bound, const Physical& phys); void compute_cphi();//multiply cphi //void first_steps(); template< enum stepper S> void step_(); //members const size_t rows, cols; const size_t crows, ccols; const Blueprint blue; /////////////////fields////////////////////////////////// //GhostMatrix<double, TL_DRT_DFT> ghostdens, ghostphi; std::array< Matrix<double, TL_DRT_DFT>, n> dens, phi, nonlinear; /////////////////Complex (void) Matrices for fourier transforms/////////// std::array< Matrix< complex>, n> cdens, cphi; ///////////////////Solvers//////////////////////// Arakawa arakawa; Karniadakis<n, complex, TL_DRT_DFT> karniadakis; DRT_DFT drt_dft; /////////////////////Coefficients////////////////////// Matrix< std::array< double, n> > phi_coeff; std::array< Matrix< double>, n-1> gamma_coeff; }; template< size_t n> DRT_DFT_Solver<n>::DRT_DFT_Solver( const Blueprint& bp): rows( bp.algorithmic().ny ), cols( bp.algorithmic().nx ), crows( cols), ccols( rows/2+1), blue( bp), //fields dens( MatrixArray<double, TL_DRT_DFT,n>::construct( rows, cols)), phi( dens), nonlinear( dens), cdens( MatrixArray<complex, TL_NONE, n>::construct( crows, ccols)), cphi(cdens), //Solvers 
arakawa( bp.algorithmic().h), karniadakis(rows, cols, crows, ccols, bp.algorithmic().dt), drt_dft( rows, cols, fftw_convert( bp.boundary().bc_x), FFTW_MEASURE), //Coefficients phi_coeff( crows, ccols), gamma_coeff( MatrixArray< double, TL_NONE, n-1>::construct( crows, ccols)) { bp.consistencyCheck(); Physical phys = bp.physical(); if( bp.isEnabled( TL_GLOBAL)) { std::cerr << "WARNING: GLOBAL solver not implemented yet! \n\ Switch to local solver...\n"; } init_coefficients( bp.boundary(), phys); } //aware of BC template< size_t n> void DRT_DFT_Solver<n>::init_coefficients( const Boundary& bound, const Physical& phys) { Matrix< QuadMat< complex, n> > coeff( crows, ccols); double laplace; const complex dymin( 0, 2.*M_PI/bound.ly); const double kxmin2 = M_PI*M_PI/(double)(bound.lx*bound.lx), kymin2 = 4.*M_PI*M_PI/(double)(bound.ly*bound.ly); double add; if( bound.bc_x == TL_DST00 || bound.bc_x == TL_DST10) add = 1.0; else add = 0.5; Equations e( phys, blue.isEnabled( TL_MHW)); Poisson p( phys); // drt_dft is transposing so i is the x index for( unsigned i = 0; i<crows; i++) for( unsigned j = 0; j<ccols; j++) { laplace = - kxmin2*(double)((i+add)*(i+add)) - kymin2*(double)(j*j); if( n == 2) gamma_coeff[0](i,j) = p.gamma1_i( laplace); else if( n == 3) { gamma_coeff[0](i,j) = p.gamma1_i( laplace); gamma_coeff[1](i,j) = p.gamma1_z( laplace); } e( coeff( i,j), laplace, (double)j*dymin); p( phi_coeff(i,j), laplace); } double norm = fftw_normalisation( bound.bc_x, cols)*(double)rows; karniadakis.init_coeff( coeff, norm); } //unaware of BC except FFT template< size_t n> void DRT_DFT_Solver<n>::init( std::array< Matrix<double, TL_DRT_DFT>,n>& v, enum target t) { //fourier transform input into cdens for( unsigned k=0; k<n; k++) { #ifdef TL_DEBUG if( v[k].isVoid()) throw Message("You gave me a void Matrix!!", _ping_); #endif drt_dft.r2c_T( v[k], cdens[k]); } //don't forget to normalize coefficients!! 
    // undo the unnormalised forward transform
    double norm = fftw_normalisation( blue.boundary().bc_x, cols)*(double)rows;
    for( unsigned k=0; k<n; k++)
        for( unsigned i=0; i<crows; i++)
            for( unsigned j=0; j<ccols;j++)
                cdens[k](i,j) /= norm;
    switch( t) //which field must be computed?
    {
        case( TL_ELECTRONS):
            //bring cdens and cphi in the right order
            swap_fields( cphi[0], cdens[n-1]);
            for( unsigned k=n-1; k>0; k--)
                swap_fields( cdens[k], cdens[k-1]);
            //now solve for cdens[0]
            for( unsigned i=0; i<crows; i++)
                for( unsigned j=0; j<ccols; j++)
                {
                    cdens[0](i,j) = cphi[0](i,j)/phi_coeff(i,j)[0];
                    // NOTE(review): the condition "k<n && k!=0" is false on the
                    // very first iteration (k==0), so this loop never executes.
                    // It looks like the intent was to *skip* k==0 (e.g.
                    // "if(k!=0)" inside the loop) — confirm against upstream.
                    for( unsigned k=0; k<n && k!=0; k++)
                        cdens[0](i,j) -= cdens[k](i,j)*phi_coeff(i,j)[k]/phi_coeff(i,j)[0];
                }
            break;
        case( TL_IONS):
            //bring cdens and cphi in the right order
            swap_fields( cphi[0], cdens[n-1]);
            for( unsigned k=n-1; k>1; k--)
                swap_fields( cdens[k], cdens[k-1]);
            //solve for cdens[1]
            for( unsigned i=0; i<crows; i++)
                for( unsigned j=0; j<ccols; j++)
                {
                    cdens[1](i,j) = cphi[0](i,j) /phi_coeff(i,j)[1];
                    // NOTE(review): "k<n && k!=1" stops the loop at k==1, so only
                    // k==0 contributes; for n==3 the k==2 term is never added.
                    // Presumably k==1 should be skipped, not terminate the loop.
                    for( unsigned k=0; k<n && k!=1; k++)
                        cdens[1](i,j) -= cdens[k](i,j)*phi_coeff(i,j)[k]/phi_coeff(i,j)[1];
                }
            break;
        case( TL_IMPURITIES):
            //bring cdens and cphi in the right order
            swap_fields( cphi[0], cdens[n-1]);
            for( unsigned k=n-1; k>2; k--) //i.e. never for n = 3
                swap_fields( cdens[k], cdens[k-1]);
            //solve for cdens[2]
            for( unsigned i=0; i<crows; i++)
                for( unsigned j=0; j<ccols; j++)
                {
                    cdens[2](i,j) = cphi[0](i,j) /phi_coeff(i,j)[2];
                    for( unsigned k=0; k<n && k!=2; k++)
                        cdens[2](i,j) -= cdens[k](i,j)*phi_coeff(i,j)[k]/phi_coeff(i,j)[2];
                }
            break;
        case( TL_POTENTIAL):
            //solve for cphi
            for( unsigned i=0; i<crows; i++)
                // NOTE(review): the bound "ccols/2+1" differs from the "ccols"
                // used by every other loop over j in this function — verify
                // whether this is intentional or a truncated mode range.
                for( unsigned j=0; j<ccols/2+1; j++)
                {
                    cphi[0](i,j) = 0;
                    for( unsigned k=0; k<n && k!=2; k++)
                        cphi[0](i,j) += cdens[k](i,j)*phi_coeff(i,j)[k];
                }
            break;
        case( TL_ALL):
            throw Message( "TL_ALL not treated yet!", _ping_);
    }
    //compute the rest cphi[k] by applying the gamma operators to cphi[0]
    for( unsigned k=0; k<n-1; k++)
        for( size_t i = 0; i < crows; i++)
            for( size_t j = 0; j < ccols; j++)
                cphi[k+1](i,j) = gamma_coeff[k](i,j)*cphi[0](i,j);
    //backtransform to x-space
    for( unsigned k=0; k<n; k++)
    {
        drt_dft.c_T2r( cdens[k], dens[k]);
        drt_dft.c_T2r( cphi[k], phi[k]);
    }
    //now the density and the potential is given in x-space
    //first_steps();
}

// Swap the caller's storage with an internal (no longer needed) field.
// See the declaration's doc comment for the aging semantics of the fields.
template< size_t n>
void DRT_DFT_Solver<n>::getField( Matrix<double, TL_DRT_DFT>& m, enum target t)
{
#ifdef TL_DEBUG
    if(m.isVoid())
        throw Message( "You may not swap in a void Matrix!\n", _ping_);
#endif
    switch( t)
    {
        case( TL_ELECTRONS):  swap_fields( m, nonlinear[0]); break;
        case( TL_IONS):       swap_fields( m, nonlinear[1]); break;
        case( TL_IMPURITIES): swap_fields( m, nonlinear[2]); break;
        // NOTE(review): this swaps a real matrix with the *complex* cphi[0],
        // unlike the other cases which use the real nonlinear fields —
        // confirm this is intended (upstream variants use phi here).
        case( TL_POTENTIAL):  swap_fields( m, cphi[0]); break;
        case( TL_ALL):        throw Message( "TL_ALL not allowed here", _ping_);
    }
}

// Read-only access to the current real-space field selected by t.
template< size_t n>
const Matrix<double, TL_DRT_DFT>& DRT_DFT_Solver<n>::getField( enum target t) const
{
    Matrix<double, TL_DRT_DFT> const * m = 0;
    switch( t)
    {
        case( TL_ELECTRONS):  m = &dens[0]; break;
        case( TL_IONS):       m = &dens[1]; break;
        case( TL_IMPURITIES): m = &dens[2]; break;
        case( TL_POTENTIAL):  m = &phi[0];  break;
        case( TL_ALL):        throw Message( "TL_ALL not allowed here", _ping_);
    }
    return *m;
}

// First time step: bootstrap the multistep scheme with an Euler step.
template< size_t n>
void DRT_DFT_Solver<n>::first_step()
{
    karniadakis.template invert_coeff<TL_EULER>(
);
    step_<TL_EULER>();
}

// Second time step: one 2nd-order step, then pre-invert the 3rd-order
// coefficients so that subsequent (regular) steps run at full order.
template< size_t n>
void DRT_DFT_Solver<n>::second_step()
{
    karniadakis.template invert_coeff<TL_ORDER2>();
    step_<TL_ORDER2>();
    karniadakis.template invert_coeff<TL_ORDER3>();
}

// Recompute the potentials cphi[0..n-1] in Fourier space from the current
// densities, using the precomputed Poisson and gamma coefficients.
template< size_t n>
void DRT_DFT_Solver<n>::compute_cphi()
{
    if( n==2)
    {
#pragma omp parallel for
        for( size_t i = 0; i < crows; i++)
            for( size_t j = 0; j < ccols; j++)
                cphi[0](i,j) = phi_coeff(i,j)[0]*cdens[0](i,j)
                             + phi_coeff(i,j)[1]*cdens[1](i,j);
#pragma omp parallel for
        for( size_t i = 0; i < crows; i++)
            for( size_t j = 0; j < ccols; j++)
                cphi[1](i,j) = gamma_coeff[0](i,j)*cphi[0](i,j);
    }
    else if( n==3)
    {
#pragma omp parallel for
        for( size_t i = 0; i < crows; i++)
            for( size_t j = 0; j < ccols; j++)
                cphi[0](i,j) = phi_coeff(i,j)[0]*cdens[0](i,j)
                             + phi_coeff(i,j)[1]*cdens[1](i,j)
                             + phi_coeff(i,j)[2]*cdens[2](i,j);
#pragma omp parallel for
        for( size_t i = 0; i < crows; i++)
            for( size_t j = 0; j < ccols; j++)
            {
                cphi[1](i,j) = gamma_coeff[0](i,j)*cphi[0](i,j);
                cphi[2](i,j) = gamma_coeff[1](i,j)*cphi[0](i,j);
            }
    }
}

//unaware of BC except FFT
// One full time step: nonlinearity via Arakawa brackets in real space,
// Karniadakis multistep update, then the linear solve in Fourier space.
template< size_t n>
template< enum stepper S>
void DRT_DFT_Solver<n>::step_()
{
    //1. Compute nonlinearity
    // each thread builds its own ghost matrices, so the species loop is
    // safe to parallelise; fields are swapped out and back around the call
#pragma omp parallel for
    for( unsigned k=0; k<n; k++)
    {
        GhostMatrix<double, TL_DRT_DFT> ghostphi{ rows, cols, TL_PERIODIC, blue.boundary().bc_x};
        GhostMatrix<double, TL_DRT_DFT> ghostdens{ rows, cols, TL_PERIODIC, blue.boundary().bc_x};
        swap_fields( dens[k], ghostdens); //now dens[j] is void
        swap_fields( phi[k], ghostphi);   //now phi[j] is void
        ghostdens.initGhostCells( );
        ghostphi.initGhostCells( );
        arakawa( ghostdens, ghostphi, nonlinear[k]);
        swap_fields( dens[k], ghostdens); //now ghostdens is void
        swap_fields( phi[k], ghostphi);   //now ghostphi is void
    }
    //2. perform karniadakis step
    karniadakis.template step_i<S>( dens, nonlinear);
    //3. solve linear equation
    //3.1. transform v_hut
#pragma omp parallel for
    for( unsigned k=0; k<n; k++)
        drt_dft.r2c_T( dens[k], cdens[k]);
    //3.2. perform karniadaksi step and multiply coefficients for phi
    karniadakis.step_ii( cdens);
    compute_cphi();
    //3.3. backtransform
#pragma omp parallel for
    for( unsigned k=0; k<n; k++)
    {
        drt_dft.c_T2r( cdens[k], dens[k]);
        drt_dft.c_T2r( cphi[k], phi[k]);
    }
}

}//namespace spectral

#endif //_DRT_DFT_SOLVER_
restriction.c
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// Restrict one 3D block from the fine level (read side) to the coarse level
// (write side) using piecewise-constant restriction: each coarse value is the
// average of the 8 (cell-centered) or 4 (face-centered) underlying fine values.
static inline void restriction_pc_block(level_type *level_c, int id_c, level_type *level_f, int id_f, blockCopy_type *block, int restrictionType){
  // restrict 3D array from read_i,j,k of read[] to write_i,j,k in write[] using piecewise constant restriction (cell averaged)
  int dim_i       = block->dim.i; // calculate the dimensions of the resultant coarse block
  int dim_j       = block->dim.j;
  int dim_k       = block->dim.k;

  int  read_i       = block->read.i;
  int  read_j       = block->read.j;
  int  read_k       = block->read.k;
  int  read_jStride = block->read.jStride;
  int  read_kStride = block->read.kStride;

  int write_i       = block->write.i;
  int write_j       = block->write.j;
  int write_k       = block->write.k;
  int write_jStride = block->write.jStride;
  int write_kStride = block->write.kStride;

  // general case is a copy to/from a general pointer...
  double * __restrict__  read = block->read.ptr;
  double * __restrict__ write = block->write.ptr;

  // copies to/from boxes need a different pointer (and the box's own strides)...
  if(block->read.box >=0){
     read_jStride = level_f->my_boxes[block->read.box ].jStride;
     read_kStride = level_f->my_boxes[block->read.box ].kStride;
     // skip past the ghost zone to the first interior element
     read = level_f->my_boxes[ block->read.box].vectors[id_f] + level_f->my_boxes[ block->read.box].ghosts*(1+ read_jStride+ read_kStride);
  }
  if(block->write.box>=0){
    write_jStride = level_c->my_boxes[block->write.box].jStride;
    write_kStride = level_c->my_boxes[block->write.box].kStride;
    write = level_c->my_boxes[block->write.box].vectors[id_c] + level_c->my_boxes[block->write.box].ghosts*(1+write_jStride+write_kStride);
  }

  // shift pointers by starting coordinates of the block...
  read  += ( read_i) + ( read_j)* read_jStride + ( read_k)* read_kStride;
  write += (write_i) + (write_j)*write_jStride + (write_k)*write_kStride;

  int i,j,k;
  int ii,jj,kk; // fine-grid indices advance by 2 for every coarse index
  switch(restrictionType){
    case RESTRICT_CELL: // average the 2x2x2 fine cells under each coarse cell
         for(k=0,kk=0;k<dim_k;k++,kk+=2){
         for(j=0,jj=0;j<dim_j;j++,jj+=2){
         for(i=0,ii=0;i<dim_i;i++,ii+=2){
           int write_ijk = i  + j *write_jStride + k *write_kStride;
           int  read_ijk = ii + jj* read_jStride + kk* read_kStride;
           write[write_ijk] = ( read[read_ijk                            ]+read[read_ijk+1                           ] +
                                read[read_ijk  +read_jStride             ]+read[read_ijk+1+read_jStride              ] +
                                read[read_ijk               +read_kStride]+read[read_ijk+1             +read_kStride ] +
                                read[read_ijk  +read_jStride+read_kStride]+read[read_ijk+1+read_jStride+read_kStride ] ) * 0.125;
         }}}break;
    case RESTRICT_FACE_I: // i-face: average the 2x2 fine faces (vary j,k only)
         for(k=0,kk=0;k<dim_k;k++,kk+=2){
         for(j=0,jj=0;j<dim_j;j++,jj+=2){
         for(i=0,ii=0;i<dim_i;i++,ii+=2){
           int write_ijk = i  + j *write_jStride + k *write_kStride;
           int  read_ijk = ii + jj* read_jStride + kk* read_kStride;
           write[write_ijk] = ( read[read_ijk                           ] +
                                read[read_ijk+read_jStride              ] +
                                read[read_ijk             +read_kStride ] +
                                read[read_ijk+read_jStride+read_kStride ] ) * 0.25;
         }}}break;
    case RESTRICT_FACE_J: // j-face: average the 2x2 fine faces (vary i,k only)
         for(k=0,kk=0;k<dim_k;k++,kk+=2){
         for(j=0,jj=0;j<dim_j;j++,jj+=2){
         for(i=0,ii=0;i<dim_i;i++,ii+=2){
           int write_ijk = i  + j *write_jStride + k *write_kStride;
           int  read_ijk = ii + jj* read_jStride + kk* read_kStride;
           write[write_ijk] = ( read[read_ijk               ] +
                                read[read_ijk+1             ] +
                                read[read_ijk  +read_kStride] +
                                read[read_ijk+1+read_kStride] ) * 0.25;
         }}}break;
    case RESTRICT_FACE_K: // k-face: average the 2x2 fine faces (vary i,j only)
         for(k=0,kk=0;k<dim_k;k++,kk+=2){
         for(j=0,jj=0;j<dim_j;j++,jj+=2){
         for(i=0,ii=0;i<dim_i;i++,ii+=2){
           int write_ijk = i  + j *write_jStride + k *write_kStride;
           int  read_ijk = ii + jj* read_jStride + kk* read_kStride;
           write[write_ijk] = ( read[read_ijk               ] +
                                read[read_ijk+1             ] +
                                read[read_ijk  +read_jStride] +
                                read[read_ijk+1+read_jStride] ) * 0.25;
         }}}break;
  }
}

//------------------------------------------------------------------------------------------------------------------------------
//
perform a (inter-level) restriction on vector id_f of the fine level and stores the result in vector id_c on the coarse level
// restrictionType specifies whether this is either cell-averaged restriction, or one of three face-averaged restrictions
// piecewise constant restriction requires neither a ghost zone exchange nor a boundary condition
// This is a rather bulk synchronous implementation which packs all MPI buffers before initiating any sends
// Similarly, it waits for all remote data before copying any into local boxes.
// It does however attempt to overlap local restriction with MPI
void restriction(level_type * level_c, int id_c, level_type *level_f, int id_f, int restrictionType){
  double _timeCommunicationStart = getTime();
  double _timeStart,_timeEnd;
  int buffer=0;
  int n;
  int my_tag = (level_f->tag<<4) | 0x5;

  #ifdef USE_MPI
  // by convention, level_f allocates a combined array of requests for both level_f sends and level_c recvs...
  int nMessages = level_c->restriction[restrictionType].num_recvs + level_f->restriction[restrictionType].num_sends;
  MPI_Request *recv_requests = level_f->restriction[restrictionType].requests;
  MPI_Request *send_requests = level_f->restriction[restrictionType].requests + level_c->restriction[restrictionType].num_recvs;

  // loop through packed list of MPI receives and prepost Irecv's...
  if(level_c->restriction[restrictionType].num_recvs>0){
    _timeStart = getTime();
    #ifdef USE_MPI_THREAD_MULTIPLE
    #pragma omp parallel for schedule(dynamic,1)
    #endif
    for(n=0;n<level_c->restriction[restrictionType].num_recvs;n++){
      MPI_Irecv(level_c->restriction[restrictionType].recv_buffers[n],
                level_c->restriction[restrictionType].recv_sizes[n],
                MPI_DOUBLE,
                level_c->restriction[restrictionType].recv_ranks[n],
                my_tag,
                MPI_COMM_WORLD,
                &recv_requests[n]
      );
    }
    _timeEnd = getTime();
    level_f->timers.restriction_recv += (_timeEnd-_timeStart);
  }

  // pack MPI send buffers (blocks list [0] = restrictions destined for other ranks)...
  if(level_f->restriction[restrictionType].num_blocks[0]>0){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->restriction[restrictionType].num_blocks[0])
    for(buffer=0;buffer<level_f->restriction[restrictionType].num_blocks[0];buffer++){
      restriction_pc_block(level_c,id_c,level_f,id_f,&level_f->restriction[restrictionType].blocks[0][buffer],restrictionType);
    }
    _timeEnd = getTime();
    level_f->timers.restriction_pack += (_timeEnd-_timeStart);
  }

  // loop through MPI send buffers and post Isend's...
  if(level_f->restriction[restrictionType].num_sends>0){
    _timeStart = getTime();
    #ifdef USE_MPI_THREAD_MULTIPLE
    #pragma omp parallel for schedule(dynamic,1)
    #endif
    for(n=0;n<level_f->restriction[restrictionType].num_sends;n++){
      MPI_Isend(level_f->restriction[restrictionType].send_buffers[n],
                level_f->restriction[restrictionType].send_sizes[n],
                MPI_DOUBLE,
                level_f->restriction[restrictionType].send_ranks[n],
                my_tag,
                MPI_COMM_WORLD,
                &send_requests[n]
      );
    }
    _timeEnd = getTime();
    level_f->timers.restriction_send += (_timeEnd-_timeStart);
  }
  #endif

  // perform local restriction[restrictionType]... try and hide within Isend latency...
  // (blocks list [1] = fine blocks whose coarse destination is on this rank)
  if(level_f->restriction[restrictionType].num_blocks[1]>0){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_f->restriction[restrictionType].num_blocks[1])
    for(buffer=0;buffer<level_f->restriction[restrictionType].num_blocks[1];buffer++){
      restriction_pc_block(level_c,id_c,level_f,id_f,&level_f->restriction[restrictionType].blocks[1][buffer],restrictionType);
    }
    _timeEnd = getTime();
    level_f->timers.restriction_local += (_timeEnd-_timeStart);
  }

  // wait for MPI to finish...
  #ifdef USE_MPI
  if(nMessages){
    _timeStart = getTime();
    MPI_Waitall(nMessages,level_f->restriction[restrictionType].requests,level_f->restriction[restrictionType].status);
    _timeEnd = getTime();
    level_f->timers.restriction_wait += (_timeEnd-_timeStart);
  }

  // unpack MPI receive buffers (blocks list [2], owned by the coarse level)
  // NOTE(review): the threading pragma is parameterized with level_f while the
  // loop bound and blocks come from level_c — presumably intentional (level_f
  // drives thread scheduling by convention here), but verify against the macro.
  if(level_c->restriction[restrictionType].num_blocks[2]>0){
    _timeStart = getTime();
    PRAGMA_THREAD_ACROSS_BLOCKS(level_f,buffer,level_c->restriction[restrictionType].num_blocks[2])
    for(buffer=0;buffer<level_c->restriction[restrictionType].num_blocks[2];buffer++){
      CopyBlock(level_c,id_c,&level_c->restriction[restrictionType].blocks[2][buffer]);
    }
    _timeEnd = getTime();
    level_f->timers.restriction_unpack += (_timeEnd-_timeStart);
  }
  #endif

  level_f->timers.restriction_total += (double)(getTime()-_timeCommunicationStart);
}
rar_fmt_plug.c
/* RAR 3.x cracker patch for JtR. Hacked together during * April of 2011 by Dhiru Kholia <dhiru.kholia at gmail.com> for GSoC. * magnum added -p mode support, using code based on libclamav * and OMP, AES-NI and OpenCL support. * jimf added dyna_salt support, Oct 2014. * * This software is Copyright (c) 2011, Dhiru Kholia <dhiru.kholia at gmail.com> * and Copyright (c) 2012, magnum and it is hereby released to the general public * under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * This code is based on the work of Alexander L. Roshal (C) * * The unRAR sources may be used in any software to handle RAR * archives without limitations free of charge, but cannot be used * to re-create the RAR compression algorithm, which is proprietary. * Distribution of modified unRAR sources in separate form or as a * part of other software is permitted, provided that it is clearly * stated in the documentation and source comments that the code may * not be used to develop a RAR (WinRAR) compatible archiver. * * Huge thanks to Marc Bevand <m.bevand (at) gmail.com> for releasing unrarhp * (http://www.zorinaq.com/unrarhp/) and documenting the RAR encryption scheme. * This patch is made possible by unrarhp's documentation. * * http://anrieff.net/ucbench/technical_qna.html is another useful reference * for RAR encryption scheme. * * Thanks also to Pavel Semjanov for crucial help with Huffman table checks. * * For type = 0 for files encrypted with "rar -hp ..." option * archive_name:$RAR3$*type*hex(salt)*hex(partial-file-contents):type::::archive_name * * For type = 1 for files encrypted with "rar -p ..." 
option * archive_name:$RAR3$*type*hex(salt)*hex(crc)*PACK_SIZE*UNP_SIZE*archive_name*offset-for-ciphertext*method:type::file_name * * or (inlined binary) * * archive_name:$RAR3$*type*hex(salt)*hex(crc)*PACK_SIZE*UNP_SIZE*1*hex(full encrypted file)*method:type::file_name * */ #if FMT_EXTERNS_H extern struct fmt_main fmt_rar; #elif FMT_REGISTERS_H john_register_one(&fmt_rar); #else #include <string.h> #include <assert.h> #include <errno.h> #include <openssl/engine.h> #include <openssl/evp.h> #include <openssl/ssl.h> #include "arch.h" #include "sha.h" #if AC_BUILT #include "autoconfig.h" #endif #if _MSC_VER || __MINGW32__ || __MINGW64__ || __CYGWIN__ || HAVE_WINDOWS_H #include "win32_memmap.h" #ifndef __CYGWIN__ #include "mmap-windows.c" #elif defined HAVE_MMAP #include <sys/mman.h> #endif #elif defined(HAVE_MMAP) #include <sys/mman.h> #endif #include "crc32.h" #include "misc.h" #include "common.h" #include "formats.h" #include "dyna_salt.h" #include "memory.h" #include "params.h" #include "options.h" #include "unicode.h" #include "johnswap.h" #include "unrar.h" #include "config.h" #include "jumbo.h" #define FORMAT_LABEL "rar" #define FORMAT_NAME "RAR3" #define ALGORITHM_NAME "SHA1 AES 32/" ARCH_BITS_STR #ifdef DEBUG #define BENCHMARK_COMMENT " (1-16 characters)" #else #define BENCHMARK_COMMENT " (4 characters)" #endif #define BENCHMARK_LENGTH -1 #define PLAINTEXT_LENGTH 125 #define UNICODE_LENGTH (2 * PLAINTEXT_LENGTH) #define BINARY_SIZE 0 #define BINARY_ALIGN MEM_ALIGN_NONE #define SALT_SIZE sizeof(rarfile*) #define SALT_ALIGN sizeof(rarfile*) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define ROUNDS 0x40000 #define MIN(a, b) (((a) > (b)) ? (b) : (a)) #define MAX(a, b) (((a) > (b)) ? (a) : (b)) /* The reason we want to bump OMP_SCALE in this case is to even out the difference in processing time for different length keys. 
It doesn't boost performance in other ways */ #ifdef _MSC_VER #undef _OPENMP #endif #ifdef _OPENMP #include <omp.h> #include <pthread.h> #define OMP_SCALE 4 static pthread_mutex_t *lockarray; #endif #include "memdbg.h" static int omp_t = 1; static unsigned char *saved_salt; static unsigned char *saved_key; static int (*cracked); static unpack_data_t (*unpack_data); static unsigned int *saved_len; static unsigned char *aes_key; static unsigned char *aes_iv; typedef struct { dyna_salt dsalt; /* must be first. allows dyna_salt to work */ /* place all items we are NOT going to use for salt comparison, first */ unsigned char *blob; /* data from this point on, is part of the salt for compare reasons */ unsigned char salt[8]; int type; /* 0 = -hp, 1 = -p */ /* for rar -p mode only: */ union { unsigned int w; unsigned char c[4]; } crc; unsigned long long pack_size; unsigned long long unp_size; int method; unsigned char blob_hash[20]; // holds an sha1, but could be 'any' hash. // raw_data should be word aligned, and 'ok' unsigned char raw_data[1]; } rarfile; static rarfile *cur_file; /* cRARk use 4-char passwords for CPU benchmark */ static struct fmt_tests cpu_tests[] = { {"$RAR3$*0*b109105f5fe0b899*d4f96690b1a8fe1f120b0290a85a2121", "test"}, {"$RAR3$*0*42ff7e92f24fb2f8*9d8516c8c847f1b941a0feef064aaf0d", "1234"}, {"$RAR3$*0*56ce6de6ddee17fb*4c957e533e00b0e18dfad6accc490ad9", "john"}, /* -p mode tests, -m0 and -m3 (in that order) */ {"$RAR3$*1*c47c5bef0bbd1e98*965f1453*48*47*1*c5e987f81d316d9dcfdb6a1b27105ce63fca2c594da5aa2f6fdf2f65f50f0d66314f8a09da875ae19d6c15636b65c815*30", "test"}, {"$RAR3$*1*b4eee1a48dc95d12*965f1453*64*47*1*0fe529478798c0960dd88a38a05451f9559e15f0cf20b4cac58260b0e5b56699d5871bdcc35bee099cc131eb35b9a116adaedf5ecc26b1c09cadf5185b3092e6*33", "test"}, #ifdef DEBUG /* Various lengths, these should be in self-test but not benchmark */ /* from CMIYC 2012 */ 
{"$RAR3$*1*0f263dd52eead558*834015cd*384*693*1*e28e9648f51b59e32f573b302f0e94aadf1050678b90c38dd4e750c7dd281d439ab4cccec5f1bd1ac40b6a1ead60c75625666307171e0fe2639d2397d5f68b97a2a1f733289eac0038b52ec6c3593ff07298fce09118c255b2747a02c2fa3175ab81166ebff2f1f104b9f6284a66f598764bd01f093562b5eeb9471d977bf3d33901acfd9643afe460e1d10b90e0e9bc8b77dc9ac40d40c2d211df9b0ecbcaea72c9d8f15859d59b3c85149b5bb5f56f0218cbbd9f28790777c39e3e499bc207289727afb2b2e02541b726e9ac028f4f05a4d7930efbff97d1ffd786c4a195bbed74997469802159f3b0ae05b703238da264087b6c2729d9023f67c42c5cbe40b6c67eebbfc4658dfb99bfcb523f62133113735e862c1430adf59c837305446e8e34fac00620b99f574fabeb2cd34dc72752014cbf4bd64d35f17cef6d40747c81b12d8c0cd4472089889a53f4d810b212fb314bf58c3dd36796de0feeefaf26be20c6a2fd00517152c58d0b1a95775ef6a1374c608f55f416b78b8c81761f1d*33:1::to-submit-challenges.txt", "wachtwoord"}, {"$RAR3$*1*9759543e04fe3a22*834015cd*384*693*1*cdd2e2478e5153a581c47a201490f5d9b69e01584ae488a2a40203da9ba8c5271ed8edc8f91a7bd262bb5e5de07ecbe9e2003d054a314d16caf2ea1de9f54303abdee1ed044396f7e29c40c38e638f626442efd9f511b4743758cd4a6025c5af81d1252475964937d80bfd50d10c171e7e4041a66c02a74b2b451ae83b6807990fb0652a8cdab530c5a0c497575a6e6cbe2db2035217fe849d2e0b8693b70f3f97b757229b4e89c8273197602c23cc04ff5f24abf3d3c7eb686fc3eddce1bfe710cc0b6e8bd012928127da38c38dd8f056095982afacb4578f6280d51c6739739e033674a9413ca88053f8264c5137d4ac018125c041a3489daaf175ef75e9282d245b92948c1bbcf1c5f25b7028f6d207d87fe9598c2c7ccd1553e842a91ab8ca9261a51b14601a756070388d08039466dfa36f0b4c7ea7dd9ff25c9d98687203c58f9ec8757cafe4d2ed785d5a9e6d5ea838e4cc246a9e6d3c30979dcce56b380b05f9103e6443b35357550b50229c47f845a93a48602790096828d9d6bef0*33:1::to-submit-challenges.txt", "Sleepingbaby210"}, 
{"$RAR3$*1*79e17c26407a7d52*834015cd*384*693*1*6844a189e732e9390b5a958b623589d5423fa432d756fd00940ac31e245214983507a035d4e0ee09469491551759a66c12150fe6c5d05f334fb0d8302a96d48ef4da04954222e0705507aaa84f8b137f284dbec344eee9cea6b2c4f63540c64df3ee8be3013466d238c5999e9a98eb6375ec5462869bba43401ec95077d0c593352339902c24a3324178e08fe694d11bfec646c652ffeafbdda929052c370ffd89168c83194fedf7c50fc7d9a1fbe64332063d267a181eb07b5d70a5854067db9b66c12703fde62728d3680cf3fdb9933a0f02bfc94f3a682ad5e7c428d7ed44d5ff554a8a445dea28b81e3a2631870e17f3f3c0c0204136802c0701590cc3e4c0ccd9f15e8be245ce9caa6969fab9e8443ac9ad9e73e7446811aee971808350c38c16c0d3372c7f44174666d770e3dd321e8b08fb2dc5e8a6a5b2a1720bad66e54abc194faabc5f24225dd8fee137ba5d4c2ed48c6462618e6333300a5b8dfc75c65608925e786eb0988f7b3a5ab106a55168d1001adc47ce95bba77b38c35b*33:1::to-submit-challenges.txt", "P-i-r-A-T-E"}, {"$RAR3$*1*e1df79fd9ee1dadf*771a163b*64*39*1*edc483d67b94ab22a0a9b8375a461e06fa1108fa72970e16d962092c311970d26eb92a033a42f53027bdc0bb47231a12ed968c8d530a9486a90cbbc00040569b*33", "333"}, {"$RAR3$*1*c83c00534d4af2db*771a163b*64*39*1*05244526d6b32cb9c524a15c79d19bba685f7fc3007a9171c65fc826481f2dce70be6148f2c3497f0d549aa4e864f73d4e4f697fdb66ff528ed1503d9712a414*33", "11eleven111"}, {"$RAR3$*0*c203c4d80a8a09dc*49bbecccc08b5d893f308bce7ad36c0f", "sator"}, {"$RAR3$*0*672fca155cb74ac3*8d534cd5f47a58f6493012cf76d2a68b", "arepo"}, {"$RAR3$*0*c203c4d80a8a09dc*c3055efe7ca6587127fd541a5b88e0e4", "tenet"}, {"$RAR3$*0*672fca155cb74ac3*c760267628f94060cca57be5896003c8", "opera"}, {"$RAR3$*0*c203c4d80a8a09dc*1f406154556d4c895a8be207fd2b5d0c", "rotas"}, {"$RAR3$*0*345f5f573a077ad7*638e388817cc7851e313406fd77730b9", "Boustrophedon"}, {"$RAR3$*0*c9dea41b149b53b4*fcbdb66122d8ebdb32532c22ca7ab9ec", "password"}, {"$RAR3$*0*7ce241baa2bd521b*f2b26d76424efa351c728b321671d074", "@"}, {"$RAR3$*0*ea0ea55ce549c8ab*cf89099c620fcc244bdcbae55a616e76", "ow"}, {"$RAR3$*0*ea0ea55ce549c8ab*6a35a76b1ce9ddc4229b9166d60dc113", "aes"}, 
{"$RAR3$*0*ea0ea55ce549c8ab*1830771da109f53e2d6e626be16c2666", "sha1"}, {"$RAR3$*0*7e52d3eba9bad316*ee8e1edd435cfa9b8ab861d958a4d588", "fiver"}, {"$RAR3$*0*7e52d3eba9bad316*01987735ab0be7b6538470bd5f5fbf80", "magnum"}, {"$RAR3$*0*7e52d3eba9bad316*f2fe986ed266c6617c48d04a429cf2e3", "7777777"}, {"$RAR3$*0*7e52d3eba9bad316*f0ad6e7fdff9f82fff2aa990105fde21", "password"}, {"$RAR3$*0*7ce241baa2bd521b*3eb0017fa8843017952c53a3ac8332b6", "nine9nine"}, {"$RAR3$*0*7ce241baa2bd521b*ccbf0c3f8e059274606f33cc388b8a2f", "10tenten10"}, {"$RAR3$*0*5fa43f823a60da63*af2630863e12046e42c4501c915636c9", "eleven11111"}, {"$RAR3$*0*5fa43f823a60da63*88c0840d0bd98844173d35f867558ec2", "twelve121212"}, {"$RAR3$*0*4768100a172fa2b6*48edcb5283ee2e4f0e8edb25d0d85eaa", "subconsciousness"}, #endif {NULL} }; #if defined (_OPENMP) static void lock_callback(int mode, int type, const char *file, int line) { (void)file; (void)line; if (mode & CRYPTO_LOCK) pthread_mutex_lock(&(lockarray[type])); else pthread_mutex_unlock(&(lockarray[type])); } static unsigned long thread_id(void) { return omp_get_thread_num(); } static void init_locks(void) { int i; lockarray = (pthread_mutex_t*) OPENSSL_malloc(CRYPTO_num_locks() * sizeof(pthread_mutex_t)); for (i = 0; i < CRYPTO_num_locks(); i++) pthread_mutex_init(&(lockarray[i]), NULL); CRYPTO_set_id_callback(thread_id); CRYPTO_set_locking_callback(lock_callback); } #endif /* _OPENMP */ /* Use AES-NI if available. This is not supported with low-level calls, we have to use EVP) */ static void init_aesni(void) { ENGINE *e; const char *engine_id = "aesni"; ENGINE_load_builtin_engines(); e = ENGINE_by_id(engine_id); if (!e) { //fprintf(stderr, "AES-NI engine not available\n"); return; } if (!ENGINE_init(e)) { fprintf(stderr, "AES-NI engine could not init\n"); ENGINE_free(e); return; } if (!ENGINE_set_default(e, ENGINE_METHOD_ALL & ~ENGINE_METHOD_RAND)) { /* This should only happen when 'e' can't initialise, but the * previous statement suggests it did. 
*/ fprintf(stderr, "AES-NI engine initialized but then failed\n"); abort(); } ENGINE_finish(e); ENGINE_free(e); } #ifndef __APPLE__ /* Apple segfaults on this :) */ static void openssl_cleanup(void) { ENGINE_cleanup(); ERR_free_strings(); CRYPTO_cleanup_all_ex_data(); EVP_cleanup(); } #endif #undef set_key static void set_key(char *key, int index) { int plen; UTF16 buf[PLAINTEXT_LENGTH + 1]; /* UTF-16LE encode the password, encoding aware */ plen = enc_to_utf16(buf, PLAINTEXT_LENGTH, (UTF8*) key, strlen(key)); if (plen < 0) plen = strlen16(buf); memcpy(&saved_key[UNICODE_LENGTH * index], buf, UNICODE_LENGTH); saved_len[index] = plen << 1; } static void *get_salt(char *ciphertext) { unsigned int i, type, ex_len; static unsigned char *ptr; /* extract data from "salt" */ char *encoded_salt; char *saltcopy = strdup(ciphertext); char *keep_ptr = saltcopy; rarfile *psalt; unsigned char tmp_salt[8]; int inlined = 1; SHA_CTX ctx; if (!ptr) ptr = mem_alloc_tiny(sizeof(rarfile*),sizeof(rarfile*)); saltcopy += 7; /* skip over "$RAR3$*" */ type = atoi(strtok(saltcopy, "*")); encoded_salt = strtok(NULL, "*"); for (i = 0; i < 8; i++) tmp_salt[i] = atoi16[ARCH_INDEX(encoded_salt[i * 2])] * 16 + atoi16[ARCH_INDEX(encoded_salt[i * 2 + 1])]; if (type == 0) { /* rar-hp mode */ char *encoded_ct = strtok(NULL, "*"); psalt = mem_calloc(sizeof(*psalt)+16); psalt->type = type; ex_len = 16; memcpy(psalt->salt, tmp_salt, 8); for (i = 0; i < 16; i++) psalt->raw_data[i] = atoi16[ARCH_INDEX(encoded_ct[i * 2])] * 16 + atoi16[ARCH_INDEX(encoded_ct[i * 2 + 1])]; psalt->blob = psalt->raw_data; psalt->pack_size = 16; } else { char *p = strtok(NULL, "*"); char crc_c[4]; unsigned long long pack_size; unsigned long long unp_size; for (i = 0; i < 4; i++) crc_c[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; pack_size = atoll(strtok(NULL, "*")); unp_size = atoll(strtok(NULL, "*")); inlined = atoi(strtok(NULL, "*")); ex_len = pack_size; /* load ciphertext. 
We allocate and load all files here, and they are freed when password found. */ #if HAVE_MMAP psalt = mem_calloc(sizeof(*psalt) + (inlined ? ex_len : 0)); #else psalt = mem_calloc(sizeof(*psalt)+ex_len); #endif psalt->type = type; memcpy(psalt->salt, tmp_salt, 8); psalt->pack_size = pack_size; psalt->unp_size = unp_size; memcpy(psalt->crc.c, crc_c, 4); if (inlined) { unsigned char *d = psalt->raw_data; p = strtok(NULL, "*"); for (i = 0; i < psalt->pack_size; i++) *d++ = atoi16[ARCH_INDEX(p[i * 2])] * 16 + atoi16[ARCH_INDEX(p[i * 2 + 1])]; psalt->blob = psalt->raw_data; } else { FILE *fp; char *archive_name = strtok(NULL, "*"); long long pos = atoll(strtok(NULL, "*")); #if HAVE_MMAP if (!(fp = fopen(archive_name, "rb"))) { fprintf(stderr, "! %s: %s\n", archive_name, strerror(errno)); error(); } #ifdef DEBUG fprintf(stderr, "RAR mmap() len %llu offset 0\n", pos + psalt->pack_size); #endif psalt->blob = mmap(NULL, pos + psalt->pack_size, PROT_READ, MAP_SHARED, fileno(fp), 0); if (psalt->blob == MAP_FAILED) { fprintf(stderr, "Error loading file from " "archive '%s'. Archive possibly " "damaged.\n", archive_name); error(); } psalt->blob += pos; #else size_t count; if (!(fp = fopen(archive_name, "rb"))) { fprintf(stderr, "! %s: %s\n", archive_name, strerror(errno)); error(); } jtr_fseek64(fp, pos, SEEK_SET); count = fread(psalt->raw_data, 1, psalt->pack_size, fp); if (count != psalt->pack_size) { fprintf(stderr, "Error loading file from archive '%s', expected %llu bytes, got %zu. 
Archive possibly damaged.\n", archive_name, psalt->pack_size, count); error(); } psalt->blob = psalt->raw_data; #endif fclose(fp); } p = strtok(NULL, "*"); psalt->method = atoi16[ARCH_INDEX(p[0])] * 16 + atoi16[ARCH_INDEX(p[1])]; if (psalt->method != 0x30) #if ARCH_LITTLE_ENDIAN psalt->crc.w = ~psalt->crc.w; #else psalt->crc.w = JOHNSWAP(~psalt->crc.w); #endif } SHA1_Init(&ctx); SHA1_Update(&ctx, psalt->blob, psalt->pack_size); SHA1_Final(psalt->blob_hash, &ctx); MEM_FREE(keep_ptr); #if HAVE_MMAP psalt->dsalt.salt_alloc_needs_free = inlined; #else psalt->dsalt.salt_alloc_needs_free = 1; #endif psalt->dsalt.salt_cmp_offset = SALT_CMP_OFF(rarfile, salt); psalt->dsalt.salt_cmp_size = SALT_CMP_SIZE(rarfile, salt, raw_data, 0); memcpy(ptr, &psalt, sizeof(rarfile*)); return (void*)ptr; } static void set_salt(void *salt) { cur_file = *((rarfile**)salt); memcpy(saved_salt, cur_file->salt, 8); } static void init(struct fmt_main *self) { #if defined (_OPENMP) omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; self->params.max_keys_per_crypt = omp_t * OMP_SCALE * MAX_KEYS_PER_CRYPT; init_locks(); #endif /* _OPENMP */ if (pers_opts.target_enc == UTF_8) self->params.plaintext_length = MIN(125, 3 * PLAINTEXT_LENGTH); unpack_data = mem_calloc_tiny(sizeof(unpack_data_t) * omp_t, MEM_ALIGN_WORD); cracked = mem_calloc_tiny(sizeof(*cracked) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); saved_key = mem_calloc_tiny(UNICODE_LENGTH * self->params.max_keys_per_crypt, MEM_ALIGN_NONE); saved_len = mem_calloc_tiny(sizeof(*saved_len) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); saved_salt = mem_calloc_tiny(8, MEM_ALIGN_NONE); aes_key = mem_calloc_tiny(16 * self->params.max_keys_per_crypt, MEM_ALIGN_NONE); aes_iv = mem_calloc_tiny(16 * self->params.max_keys_per_crypt, MEM_ALIGN_NONE); #ifdef DEBUG self->params.benchmark_comment = " (1-16 characters)"; #endif /* OpenSSL init */ init_aesni(); SSL_load_error_strings(); SSL_library_init(); 
OpenSSL_add_all_algorithms(); #ifndef __APPLE__ atexit(openssl_cleanup); #endif /* CRC-32 table init, do it before we start multithreading */ { CRC32_t crc; CRC32_Init(&crc); } } static int hexlen(char *q) { char *s = q; size_t len = strlen(q); while (atoi16[ARCH_INDEX(*q)] != 0x7F) q++; return (len == (size_t)(q - s)) ? (int)(q - s) : -1 - (int)(q - s); } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *ptr, *keeptr; int mode; if (strncmp(ciphertext, "$RAR3$*", 7)) return 0; if (!(ctcopy = strdup(ciphertext))) { fprintf(stderr, "Memory allocation failed in %s, unable to check if hash is valid!", FORMAT_LABEL); return 0; } keeptr = ctcopy; ctcopy += 7; if (!(ptr = strtok(ctcopy, "*"))) /* -p or -h mode */ goto error; if (hexlen(ptr) != 1) goto error; mode = atoi(ptr); if (mode < 0 || mode > 1) goto error; if (!(ptr = strtok(NULL, "*"))) /* salt */ goto error; if (hexlen(ptr) != 16) /* 8 bytes of salt */ goto error; if (!(ptr = strtok(NULL, "*"))) goto error; if (mode == 0) { if (hexlen(ptr) != 32) /* 16 bytes of encrypted known plain */ goto error; MEM_FREE(keeptr); return 1; } else { int inlined; long long plen, ulen; if (hexlen(ptr) != 8) /* 4 bytes of CRC */ goto error; if (!(ptr = strtok(NULL, "*"))) /* pack_size */ goto error; if (strlen(ptr) > 12) { // pack_size > 1 TB? Really? 
fprintf(stderr, "pack_size > 1TB not supported (%s)\n", FORMAT_NAME); goto error; } if ((plen = atoll(ptr)) < 16) goto error; if (!(ptr = strtok(NULL, "*"))) /* unp_size */ goto error; if (strlen(ptr) > 12) { fprintf(stderr, "unp_size > 1TB not supported (%s)\n", FORMAT_NAME); goto error; } if ((ulen = atoll(ptr)) < 1) goto error; if (!(ptr = strtok(NULL, "*"))) /* inlined */ goto error; if (hexlen(ptr) != 1) goto error; inlined = atoi(ptr); if (inlined < 0 || inlined > 1) goto error; if (!(ptr = strtok(NULL, "*"))) /* pack_size / archive_name */ goto error; if (inlined) { if (hexlen(ptr) != plen * 2) goto error; } else { FILE *fp; char *archive_name; archive_name = ptr; if (!(fp = fopen(archive_name, "rb"))) { fprintf(stderr, "! %s: %s, skipping.\n", archive_name, strerror(errno)); goto error; } if (!(ptr = strtok(NULL, "*"))) /* pos */ goto error; /* We could go on and actually try seeking to pos but this is enough for now */ fclose(fp); } if (!(ptr = strtok(NULL, "*"))) /* method */ goto error; } MEM_FREE(keeptr); return 1; error: #ifdef RAR_DEBUG { char buf[68]; strnzcpy(buf, ciphertext, sizeof(buf)); fprintf(stderr, "rejecting %s\n", buf); } #endif MEM_FREE(keeptr); return 0; } static char *get_key(int index) { UTF16 tmpbuf[PLAINTEXT_LENGTH + 1]; memcpy(tmpbuf, &((UTF16*) saved_key)[index * PLAINTEXT_LENGTH], saved_len[index]); memset(&tmpbuf[saved_len[index] >> 1], 0, 2); return (char*) utf16_to_enc(tmpbuf); } #define ADD_BITS(n) \ { \ if (bits < 9) { \ hold |= ((unsigned int)*next++ << (24 - bits)); \ bits += 8; \ } \ hold <<= n; \ bits -= n; \ } /* * This function is loosely based on JimF's check_inflate_CODE2() from * pkzip_fmt. Together with the other bit-checks, we are rejecting over 96% * of the candidates without resorting to a slow full check (which in turn * may reject semi-early, especially if it's a PPM block) * * Input is first 16 bytes of RAR buffer decrypted, as-is. 
It also contains the
 * first 2 bits, which have already been decoded, and have told us we had an
 * LZ block (RAR always uses a dynamic Huffman table) and keepOldTable was
 * not set.
 *
 * RAR uses 20 x (4 bits length, optionally 4 bits zerocount), and reversed
 * byte order.
 */
static MAYBE_INLINE int check_huffman(unsigned char *next)
{
	unsigned int bits, hold, i;
	int left;
	unsigned int ncount[4];
	unsigned char *count = (unsigned char*)ncount; /* byte view of ncount */
	unsigned char bit_length[20];
#ifdef DEBUG
	unsigned char *was = next;
#endif

	/* Load the first 32 bits, big-endian bit order */
#if ARCH_LITTLE_ENDIAN && ARCH_ALLOWS_UNALIGNED
	hold = JOHNSWAP(*(unsigned int*)next);
#else
	hold = next[3] + (((unsigned int)next[2]) << 8) +
		(((unsigned int)next[1]) << 16) +
		(((unsigned int)next[0]) << 24);
#endif
	next += 4;	// we already have the first 32 bits
	hold <<= 2;	// we already processed 2 bits, PPM and keepOldTable
	bits = 32 - 2;

	/* First, read 20 pairs of (bitlength[, zerocount]) */
	for (i = 0 ; i < 20 ; i++) {
		int length, zero_count;

		length = hold >> 28;
		ADD_BITS(4);
		if (length == 15) {
			/* 15 is an escape: next nibble is a run of zeros */
			zero_count = hold >> 28;
			ADD_BITS(4);
			if (zero_count == 0) {
				bit_length[i] = 15;
			} else {
				zero_count += 2;
				while (zero_count-- > 0 &&
				       i < sizeof(bit_length) /
				       sizeof(bit_length[0]))
					bit_length[i++] = 0;
				i--; /* loop's i++ will re-advance */
			}
		} else {
			bit_length[i] = length;
		}
	}

#ifdef DEBUG
	if (next - was > 16) {
		fprintf(stderr, "*** (possible) BUG: check_huffman() needed %u bytes, we only have 16 (bits=%d, hold=0x%08x)\n", (int)(next - was), bits, hold);
		dump_stuff_msg("complete buffer", was, 16);
		error();
	}
#endif

	/* Count the number of codes for each code length */
	memset(count, 0, 16);
	for (i = 0; i < 20; i++) {
		++count[bit_length[i]];
	}

	count[0] = 0;
	if (!ncount[0] && !ncount[1] && !ncount[2] && !ncount[3])
		return 0; /* No codes at all */

	/* Kraft-style completeness check of the code-length distribution */
	left = 1;
	for (i = 1; i < 16; ++i) {
		left <<= 1;
		left -= count[i];
		if (left < 0) {
			return 0; /* over-subscribed */
		}
	}
	if (left) {
		return 0; /* incomplete set */
	}
	return 1; /* Passed this check! */
}

/*
 * Main work horse. Phase 1 derives the AES key and IV for every candidate
 * (SHA-1 over password+salt+counter, ROUNDS iterations, IV bytes taken from
 * 16 intermediate digests). Phase 2 decrypts the stored blob and marks
 * cracked[index] on a match.
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		int i16 = index*16;
		unsigned int i;
		unsigned char RawPsw[UNICODE_LENGTH + 8 + 3];
		int RawLength;
		SHA_CTX ctx, tempctx;
		unsigned int digest[5];
		unsigned char *PswNum, tempout[20];

		RawLength = saved_len[index] + 8 + 3;
		/* 3-byte little-endian iteration counter appended after salt */
		PswNum = (unsigned char*) &RawPsw[saved_len[index] + 8];
		PswNum[1] = PswNum[2] = 0;

		/* derive IV and key for AES from saved_key and saved_salt,
		   this code block is based on unrarhp's and unrar's sources */
		memcpy(RawPsw, &saved_key[UNICODE_LENGTH * index],
		       saved_len[index]);
		memcpy(RawPsw + saved_len[index], saved_salt, 8);
		SHA1_Init(&ctx);
		for (i = 0; i < ROUNDS; i++) {
			PswNum[0] = (unsigned char) i;
			/* upper counter bytes only change when low byte wraps */
			if ( ((unsigned char) i) == 0) {
				PswNum[1] = (unsigned char) (i >> 8);
				PswNum[2] = (unsigned char) (i >> 16);
			}
			SHA1_Update(&ctx, RawPsw, RawLength);
			if (i % (ROUNDS / 16) == 0) {
				/* snapshot digest; last byte becomes an IV byte */
				tempctx = ctx;
				SHA1_Final(tempout, &tempctx);
				aes_iv[i16 + i / (ROUNDS / 16)] = tempout[19];
			}
		}
		SHA1_Final((unsigned char*)digest, &ctx);
		for (i = 0; i < 4; i++)	/* reverse byte order */
			digest[i] = JOHNSWAP(digest[i]);
		memcpy(&aes_key[i16], (unsigned char*)digest, 16);
	}

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		int i16 = index*16;
		unsigned int inlen = 16;
		int outlen;
		EVP_CIPHER_CTX aes_ctx;

		EVP_CIPHER_CTX_init(&aes_ctx);
		EVP_DecryptInit_ex(&aes_ctx, EVP_aes_128_cbc(), NULL,
		                   &aes_key[i16], &aes_iv[i16]);
		EVP_CIPHER_CTX_set_padding(&aes_ctx, 0);

		//fprintf(stderr, "key %s\n", utf16_to_enc((UTF16*)&saved_key[index * UNICODE_LENGTH]));

		/* AES decrypt, uses aes_iv, aes_key and blob */
		if (cur_file->type == 0) {	/* rar-hp mode */
			unsigned char plain[16];

			outlen = 0;
			EVP_DecryptUpdate(&aes_ctx, plain, &outlen,
			                  cur_file->blob, inlen);
			EVP_DecryptFinal_ex(&aes_ctx, &plain[outlen], &outlen);
			/* known plaintext marker for -hp archives */
			cracked[index] = !memcmp(plain,
			    "\xc4\x3d\x7b\x00\x40\x07\x00", 7);
		} else {
			if (cur_file->method == 0x30) {	/* stored, not deflated */
				CRC32_t crc;
				unsigned char crc_out[4];
				unsigned char plain[0x8010];
				unsigned long long size = cur_file->unp_size;
				unsigned char *cipher = cur_file->blob;

				/* Use full decryption with CRC check.
				   Compute CRC of the decompressed plaintext */
				CRC32_Init(&crc);
				outlen = 0;
				/* 32 KB chunks; last partial chunk below */
				while (size > 0x8000) {
					inlen = 0x8000;
					EVP_DecryptUpdate(&aes_ctx, plain,
					    &outlen, cipher, inlen);
					CRC32_Update(&crc, plain,
					    outlen > size ? size : outlen);
					size -= outlen;
					cipher += inlen;
				}
				/* remainder, rounded up to AES block size */
				EVP_DecryptUpdate(&aes_ctx, plain, &outlen,
				    cipher, (size + 15) & ~0xf);
				EVP_DecryptFinal_ex(&aes_ctx, &plain[outlen],
				    &outlen);
				size += outlen;
				CRC32_Update(&crc, plain, size);
				CRC32_Final(crc_out, crc);

				/* Compare computed CRC with stored CRC */
				cracked[index] = !memcmp(crc_out,
				    &cur_file->crc.c, 4);
			} else {
				const int solid = 0;
				unpack_data_t *unpack_t;
				unsigned char plain[20];

				cracked[index] = 0;

				/* Decrypt just one block for early rejection */
				outlen = 0;
				EVP_DecryptUpdate(&aes_ctx, plain, &outlen,
				    cur_file->blob, 16);
				EVP_DecryptFinal_ex(&aes_ctx, &plain[outlen],
				    &outlen);

#if 1
				/* Early rejection */
				if (plain[0] & 0x80) {
					// PPM checks here.
					if (!(plain[0] & 0x20) ||	// Reset bit must be set
					    (plain[1] & 0x80))	// MaxMB must be < 128
						goto bailOut;
				} else {
					// LZ checks here.
					if ((plain[0] & 0x40) ||	// KeepOldTable can't be set
					    !check_huffman(plain))	// Huffman table check
						goto bailOut;
				}
#endif
				/* Reset stuff for full check */
				EVP_DecryptInit_ex(&aes_ctx, EVP_aes_128_cbc(),
				    NULL, &aes_key[i16], &aes_iv[i16]);
				EVP_CIPHER_CTX_set_padding(&aes_ctx, 0);
#ifdef _OPENMP
				unpack_t = &unpack_data[omp_get_thread_num()];
#else
				unpack_t = unpack_data;
#endif
				unpack_t->max_size = cur_file->unp_size;
				unpack_t->dest_unp_size = cur_file->unp_size;
				unpack_t->pack_size = cur_file->pack_size;
				unpack_t->iv = &aes_iv[i16];
				unpack_t->ctx = &aes_ctx;
				unpack_t->key = &aes_key[i16];

				/* full decompression; compare resulting CRC */
				if (rar_unpack29(cur_file->blob, solid, unpack_t))
					cracked[index] = !memcmp(
					    &unpack_t->unp_crc,
					    &cur_file->crc.c, 4);
				bailOut:;
			}
		}
		EVP_CIPHER_CTX_cleanup(&aes_ctx);
	}
	return count;
}

/* Any candidate cracked in this batch? */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (cracked[index])
			return 1;
	return 0;
}

/* Was this particular candidate cracked? */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* Full check already done in crypt_all(); always confirm. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* John the Ripper format descriptor for RAR3 */
struct fmt_main fmt_rar = {
{
	FORMAT_LABEL,
	FORMAT_NAME,
	ALGORITHM_NAME,
	BENCHMARK_COMMENT,
	BENCHMARK_LENGTH,
	PLAINTEXT_LENGTH,
	BINARY_SIZE,
	BINARY_ALIGN,
	SALT_SIZE,
	SALT_ALIGN,
	MIN_KEYS_PER_CRYPT,
	MAX_KEYS_PER_CRYPT,
	FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP | FMT_DYNA_SALT,
#if FMT_MAIN_VERSION > 11
	{ NULL },
#endif
	cpu_tests
},{
	init,
	fmt_default_done,
	fmt_default_reset,
	fmt_default_prepare,
	valid,
	fmt_default_split,
	fmt_default_binary,
	get_salt,
#if FMT_MAIN_VERSION > 11
	{ NULL },
#endif
	fmt_default_source,
	{
		fmt_default_binary_hash
	},
	fmt_default_dyna_salt_hash,
	set_salt,
	set_key,
	get_key,
	fmt_default_clear_keys,
	crypt_all,
	{
		fmt_default_get_hash
	},
	cmp_all,
	cmp_one,
	cmp_exact
}
};

#endif /* plugin stanza */
/* ==== file: image_pyramid.h ==== */
/* * * This file is part of the open-source SeetaFace engine, which includes three *modules: * SeetaFace Detection, SeetaFace Alignment, and SeetaFace Identification. * * This file is part of the SeetaFace Detection module, containing codes *implementing the * face detection method described in the following paper: * * * Funnel-structured cascade for multi-view face detection with alignment *awareness, * Shuzhe Wu, Meina Kan, Zhenliang He, Shiguang Shan, Xilin Chen. * In Neurocomputing (under review) * * * Copyright (C) 2016, Visual Information Processing and Learning (VIPL) group, * Institute of Computing Technology, Chinese Academy of Sciences, Beijing, *China. * * The codes are mainly developed by Shuzhe Wu (a Ph.D supervised by Prof. *Shiguang Shan) * * As an open-source face recognition engine: you can redistribute SeetaFace *source codes * and/or modify it under the terms of the BSD 2-Clause License. * * You should have received a copy of the BSD 2-Clause License along with the *software. * If not, see < https://opensource.org/licenses/BSD-2-Clause>. * * Contact Info: you can send an email to SeetaFace@vipl.ict.ac.cn for any *problems. * * Note: the above information must be kept whenever or wherever the codes are *used. 
*
 */

#ifndef SEETA_FD_UTIL_IMAGE_PYRAMID_H_
#define SEETA_FD_UTIL_IMAGE_PYRAMID_H_

#include <cstdint>
#include <string>
#include <cstring>

#include "common.h"

namespace seeta {
namespace fd {

// Bilinear resize from src into dest. dest->width/height select the target
// size; if it equals the source size the pixels are copied verbatim.
// Single-channel 8-bit data only (one byte per pixel is assumed by the
// indexing). Rows are processed in parallel with OpenMP.
static void ResizeImage(const seeta::ImageData& src, seeta::ImageData* dest) {
  int32_t src_width = src.width;
  int32_t src_height = src.height;
  int32_t dest_width = dest->width;
  int32_t dest_height = dest->height;
  // Fast path: same size, plain copy.
  if (src_width == dest_width && src_height == dest_height) {
    std::memcpy(dest->data, src.data,
        src_width * src_height * sizeof(uint8_t));
    return;
  }

  double lf_x_scl = static_cast<double>(src_width) / dest_width;
  double lf_y_Scl = static_cast<double>(src_height) / dest_height;
  const uint8_t* src_data = src.data;
  uint8_t* dest_data = dest->data;

#pragma omp parallel num_threads(SEETA_NUM_THREADS)
  {
#pragma omp for nowait
    for (int32_t y = 0; y < dest_height; y++) {
      for (int32_t x = 0; x < dest_width; x++) {
        // Source coordinates corresponding to destination pixel (x, y).
        double lf_x_s = lf_x_scl * x;
        double lf_y_s = lf_y_Scl * y;

        // Top-left corner of the 2x2 interpolation neighborhood, clamped so
        // the +1 accesses below stay in bounds (assumes src is >= 2x2 —
        // TODO confirm callers guarantee this).
        int32_t n_x_s = static_cast<int>(lf_x_s);
        n_x_s = (n_x_s <= (src_width - 2) ? n_x_s : (src_width - 2));
        int32_t n_y_s = static_cast<int>(lf_y_s);
        n_y_s = (n_y_s <= (src_height - 2) ? n_y_s : (src_height - 2));

        double lf_weight_x = lf_x_s - n_x_s;
        double lf_weight_y = lf_y_s - n_y_s;

        // Standard bilinear blend of the four neighbors.
        double dest_val = (1 - lf_weight_y) * ((1 - lf_weight_x) *
            src_data[n_y_s * src_width + n_x_s] +
            lf_weight_x * src_data[n_y_s * src_width + n_x_s + 1]) +
            lf_weight_y * ((1 - lf_weight_x) *
            src_data[(n_y_s + 1) * src_width + n_x_s] +
            lf_weight_x * src_data[(n_y_s + 1) * src_width + n_x_s + 1]);

        dest_data[y * dest_width + x] = static_cast<uint8_t>(dest_val);
      }
    }
  }
}

// Multi-scale image pyramid: holds the 1x image plus a scratch buffer for
// the current scaled level; GetNextScaleImage() walks scales from max_scale_
// down by factors of scale_step_. Owns both buffers (raw new[]/delete[]).
class ImagePyramid {
 public:
  ImagePyramid()
      : max_scale_(1.0f), min_scale_(1.0f),
        scale_factor_(1.0f), scale_step_(0.8f),
        width1x_(0), height1x_(0),
        width_scaled_(0), height_scaled_(0),
        buf_img_width_(2), buf_img_height_(2),
        buf_scaled_width_(2), buf_scaled_height_(2) {
    buf_img_ = new uint8_t[buf_img_width_ * buf_img_height_];
    buf_img_scaled_ = new uint8_t[buf_scaled_width_ * buf_scaled_height_];
  }

  ~ImagePyramid() {
    delete[] buf_img_;
    buf_img_ = nullptr;
    buf_img_width_ = 0;
    buf_img_height_ = 0;

    delete[] buf_img_scaled_;
    buf_img_scaled_ = nullptr;
    buf_scaled_width_ = 0;
    buf_scaled_height_ = 0;

    // img_scaled_ only aliases buf_img_scaled_; never owns memory.
    img_scaled_.data = nullptr;
    img_scaled_.width = 0;
    img_scaled_.height = 0;
  }

  // Step must lie in (0, 1]; out-of-range values are ignored.
  inline void SetScaleStep(float step) {
    if (step > 0.0f && step <= 1.0f)
      scale_step_ = step;
  }

  inline void SetMinScale(float min_scale) {
    min_scale_ = min_scale;
  }

  // Also resets the iteration state (scale_factor_) and resizes the
  // scaled-image scratch buffer.
  inline void SetMaxScale(float max_scale) {
    max_scale_ = max_scale;
    scale_factor_ = max_scale;
    UpdateBufScaled();
  }

  void SetImage1x(const uint8_t* img_data, int32_t width, int32_t height);

  inline float min_scale() const { return min_scale_; }
  inline float max_scale() const { return max_scale_; }

  // Non-owning view of the stored 1x image (data points into buf_img_).
  inline seeta::ImageData image1x() {
    seeta::ImageData img(width1x_, height1x_, 1);
    img.data = buf_img_;
    return img;
  }

  const seeta::ImageData* GetNextScaleImage(float* scale_factor = nullptr);

 private:
  void UpdateBufScaled();

  float max_scale_;
  float min_scale_;
  float scale_factor_;   // current scale during pyramid iteration
  float scale_step_;     // multiplicative step between levels, in (0, 1]

  int32_t width1x_;      // dimensions of the original (1x) image
  int32_t height1x_;
  int32_t width_scaled_; // dimensions of the current scaled level
  int32_t height_scaled_;

  uint8_t* buf_img_;         // owned storage for the 1x image
  int32_t buf_img_width_;
  int32_t buf_img_height_;

  uint8_t* buf_img_scaled_;  // owned scratch storage for scaled levels
  int32_t buf_scaled_width_;
  int32_t buf_scaled_height_;

  seeta::ImageData img_scaled_;  // view returned by GetNextScaleImage()
};

}  // namespace fd
}  // namespace seeta

#endif  // SEETA_FD_UTIL_IMAGE_PYRAMID_H_
/* ==== file: energy.h ==== */
#pragma once #include "bonds.h" #include "externalpotential.h" // Energybase implemented here #include "sasa.h" #include "space.h" #include "aux/iteratorsupport.h" #include "aux/pairmatrix.h" #include "smart_montecarlo.h" #include <range/v3/range/conversion.hpp> #include <range/v3/view/iota.hpp> #include <range/v3/view/subrange.hpp> #include <range/v3/algorithm/any_of.hpp> #include <Eigen/Dense> #include <spdlog/spdlog.h> #include <numeric> #include <algorithm> struct freesasa_parameters_fwd; // workaround for freesasa unnamed struct that cannot be forward declared #if defined(__cpp_lib_parallel_algorithm) && __has_include(<tbb/tbb.h>) #include <execution> #endif #if defined(__cpp_lib_parallel_algorithm) && \ __has_include(<tbb/tbb.h>) && ((defined(__clang__) && __clang_major__ >= 10) || (defined(__GNUC__) && __GNUC__ >= 10)) #define HAS_PARALLEL_TRANSFORM_REDUCE #endif namespace Faunus { namespace ReactionCoordinate { class ReactionCoordinateBase; } namespace Potential { struct PairPotentialBase; } /** * @par Non-bonded energy * * Several classes (class templates) are used together to allow computation in change of the non-bonded energy upon * a MC move. * * The energy change is calculated by the Nonbonded class. It internally uses one of the pairing policies * to efficiently get all pair interactions affected by the MC move (as described by the Change object). * * Pairing policies allow efficient summation of pair energies over the whole system, between groups, inside a group, * etc. The pairing policy is optimized for performance in a different execution environment, e.g., sequential or * OMP parallelism. * * Policies have direct access to the pair interaction energy functor represented by a simple PairEnergy template. * Furthermore, the GroupCutoff object is provided to limit free energy computation using a cutoff distance between * respective groups. 
* * @see Nonbonded, PairingBasePolicy, PairEnergy, GroupCutoff */ namespace Energy { class Hamiltonian; /** * @brief Check if particles are outside the simulation container * * If any particles is ouside, infinite energy is returned; zero otherwirse. * This is not needed for cuboidal geometry as particles are always wrapped using PBC. */ class ContainerOverlap : public Energybase { private: const Space& spc; bool groupIsOutsideContainer(const Change::GroupChange& group_change) const; double energyOfAllGroups() const; public: explicit ContainerOverlap(const Space& spc); double energy(Change& change) override; }; /** * @brief Data class for Ewald k-space calculations * * Currently, the Eigen policies map to the non-eigen * variants, e.g. `PBCEigen == PBC`. * * Related reading: * - PBC Ewald (DOI:10.1063/1.481216) * - IPBC Ewald (DOI:10/css8) * - Update optimization (DOI:10.1063/1.481216, Eq. 24) */ struct EwaldData { typedef std::complex<double> Tcomplex; Eigen::Matrix3Xd k_vectors; //!< k-vectors, 3xK Eigen::VectorXd Aks; //!< 1xK for update optimization (see Eq.24, DOI:10.1063/1.481216) Eigen::VectorXcd Q_ion, Q_dipole; //!< Complex 1xK vectors double r_cutoff = 0; //!< Real-space cutoff double n_cutoff = 0; //!< Inverse space cutoff double surface_dielectric_constant = 0; //!< Surface dielectric constant; double bjerrum_length = 0; //!< Bjerrum length double kappa = 0; //!< Inverse Debye screening length double kappa_squared = 0; //!< Squared inverse Debye screening length double alpha = 0; double const_inf = 0; double check_k2_zero = 0; bool use_spherical_sum = true; int num_kvectors = 0; Point box_length = {0.0, 0.0, 0.0}; //!< Box dimensions enum Policies { PBC, PBCEigen, IPBC, IPBCEigen, INVALID }; //!< Possible k-space updating schemes Policies policy = PBC; //!< Policy for updating k-space EwaldData(const json &); //!< Initialize from json }; NLOHMANN_JSON_SERIALIZE_ENUM(EwaldData::Policies, { {EwaldData::INVALID, nullptr}, {EwaldData::PBC, "PBC"}, 
{EwaldData::PBCEigen, "PBCEigen"}, {EwaldData::IPBC, "IPBC"}, {EwaldData::IPBCEigen, "IPBCEigen"}, }) void to_json(json &, const EwaldData &); /** * @brief Base class for Ewald k-space updates policies */ class EwaldPolicyBase { public: std::string cite; //!< Optional reference, preferably DOI, to further information virtual ~EwaldPolicyBase() = default; virtual void updateBox(EwaldData &, const Point &) const = 0; //!< Prepare k-vectors according to given box vector virtual void updateComplex(EwaldData&, Space::GroupVector&) const = 0; //!< Update all k vectors virtual void updateComplex(EwaldData&, Change&, Space::GroupVector&, Space::GroupVector&) const = 0; //!< Update subset of k vectors. Require `old` pointer virtual double selfEnergy(const EwaldData&, Change&, Space::GroupVector&) = 0; //!< Self energy contribution due to a change virtual double surfaceEnergy(const EwaldData&, Change&, Space::GroupVector&) = 0; //!< Surface energy contribution due to a change virtual double reciprocalEnergy(const EwaldData &) = 0; //!< Total reciprocal energy /** * @brief Represent charges and positions using an Eigen facade (Map) * * Requires that all groups are fully active, i.e. does not work for GCMC. 
* * @param groups Vector of groups to represent * @return tuple with positions, charges */ auto mapGroupsToEigen(Space::GroupVector& groups) const { auto is_partially_inactive = [](const Group& group) { return group.size() != group.capacity(); }; if (ranges::cpp20::any_of(groups, is_partially_inactive)) { throw std::runtime_error("Eigen optimized Ewald not available with inactive groups"); } auto first_particle = groups.front().begin(); auto last_particle = groups.back().end(); auto pos = asEigenMatrix(first_particle, last_particle, &Particle::pos); // N x 3 auto charge = asEigenVector(first_particle, last_particle, &Particle::charge); // N x 1 return std::make_tuple(pos, charge); } static std::unique_ptr<EwaldPolicyBase> makePolicy(EwaldData::Policies); //!< Policy factory }; /** * @brief Ion-Ion Ewald using periodic boundary conditions (PBC) */ struct PolicyIonIon : public EwaldPolicyBase { PolicyIonIon(); void updateBox(EwaldData &, const Point &) const override; void updateComplex(EwaldData&, Space::GroupVector&) const override; void updateComplex(EwaldData&, Change&, Space::GroupVector&, Space::GroupVector&) const override; double selfEnergy(const EwaldData&, Change&, Space::GroupVector&) override; double surfaceEnergy(const EwaldData&, Change&, Space::GroupVector&) override; double reciprocalEnergy(const EwaldData &) override; }; /** * @brief Ion-Ion Ewald with periodic boundary conditions (PBC) using Eigen * operations * @warning Will not work with Space with inactive particles (GCMC, for example) * * For compilers that offer good vectorization (gcc on linux) this brings a 4-5 * fold speed increase. * Status on February, 2020: * - Clang9: Eigen version is slower than generic version (macos/ubuntu) * - GCC9: Eigen is 4-5 times faster on x86 linux; ~1.5 times *lower on macos. 
*/ struct PolicyIonIonEigen : public PolicyIonIon { using PolicyIonIon::updateComplex; void updateComplex(EwaldData&, Space::GroupVector&) const override; double reciprocalEnergy(const EwaldData &) override; }; /** * @brief Ion-Ion Ewald with isotropic periodic boundary conditions (IPBC) */ struct PolicyIonIonIPBC : public PolicyIonIon { using PolicyIonIon::updateComplex; PolicyIonIonIPBC(); void updateBox(EwaldData &, const Point &) const override; void updateComplex(EwaldData&, Space::GroupVector&) const override; void updateComplex(EwaldData&, Change&, Space::GroupVector&, Space::GroupVector&) const override; }; /** * @brief Ion-Ion Ewald with isotropic periodic boundary conditions (IPBC) using Eigen operations * @warning Incomplete and under construction */ struct PolicyIonIonIPBCEigen : public PolicyIonIonIPBC { using PolicyIonIonIPBC::updateComplex; void updateComplex(EwaldData&, Space::GroupVector&) const override; }; /** @brief Ewald summation reciprocal energy */ class Ewald : public Energybase { private: EwaldData data; std::shared_ptr<EwaldPolicyBase> policy; //!< Policy for updating k-space Space &spc; Space::GroupVector* old_groups = nullptr; public: Ewald(const json &, Space &); void init() override; double energy(Change &) override; void sync(Energybase*, const Change&) override; //!< Called after a move is rejected/accepted //! 
as well as before simulation void to_json(json &) const override; void force(std::vector<Point> &) override; // update forces on all particles }; /** * @brief Pressure term for NPT ensemble */ class Isobaric : public Energybase { private: const Space& spc; double pressure = 0.0; //!< Applied pressure static const std::map<std::string, double> pressure_units; //!< Possible ways pressure can be given public: Isobaric(const json& j, const Space& spc); double energy(Change& change) override; void to_json(json& j) const override; }; /** * @brief Constrain system using reaction coordinates * * If outside specified `range`, infinity energy is returned, causing rejection. */ class Constrain : public Energybase { private: std::string type; std::unique_ptr<ReactionCoordinate::ReactionCoordinateBase> coordinate; public: Constrain(const json& j, Space& space); double energy(Change& change) override; void to_json(json& j) const override; }; /** * The keys of the `intra` map are group index and the values * is a vector of `BondData`. For bonds between groups, fill * in `inter` which is evaluated for every update of call to * `energy`. * * @todo Optimize. 
*/ class Bonded : public Energybase { private: using BondVector = BasePointerVector<Potential::BondData>; const Space& spc; BondVector external_bonds; //!< inter-molecular bonds std::map<int, BondVector> internal_bonds; //!< intra-molecular bonds; key is group index void updateGroupBonds(const Space::GroupType& group); //!< Update/set bonds internally in group void updateInternalBonds(); //!< finds and adds all intra-molecular bonds of active molecules double sumBondEnergy(const BondVector& bonds) const; //!< sum energy in vector of BondData double internalGroupEnergy(const Change::GroupChange& changed); //!< Energy from internal bonds template <typename Indices> double sum_energy(const BondVector& bonds, const Indices& particle_indices) const; public: Bonded(const Space& spc, const BondVector& external_bonds); Bonded(const json& j, const Space& spc); void to_json(json& j) const override; double energy(Change& change) override; //!< brute force -- refine this! void force(std::vector<Point>& forces) override; //!< Calculates the forces on all particles }; /** * @brief Sum energy in vector of BondData for matching particle indices * @param bonds List of bonds * @param particle_indices Particle indices to calculate the energy for * * To speed up the bond search, the given indices must be ordered which allows * for binary search which on large systems provides superior performance compared * to simplistic search which scales as number_of_bonds x number_of_moved_particles */ template <typename Indices> double Bonded::sum_energy(const Bonded::BondVector& bonds, const Indices& particle_indices) const { assert(std::is_sorted(particle_indices.begin(), particle_indices.end())); auto index_is_included = [&](auto index) { return std::binary_search(particle_indices.begin(), particle_indices.end(), index); }; auto affected_bonds = bonds | ranges::cpp20::views::filter([&](const auto& bond) { return std::any_of(bond->indices.begin(), bond->indices.end(), index_is_included); }); 
auto bond_energy = [dist = spc.geometry.getDistanceFunc()](const auto& bond) { return bond->energyFunc(dist); }; #if (defined(__clang__) && __clang_major__ >= 10) || (defined(__GNUC__) && __GNUC__ >= 10) return std::transform_reduce(affected_bonds.begin(), affected_bonds.end(), 0.0, std::plus<>(), bond_energy); #else double energy = 0.0; for (const auto& bond : affected_bonds) { energy += bond_energy(bond); } return energy; #endif } /** * @brief Provides a complementary set of ints with respect to the iota set of a given size. * @remark It is used as a helper function for pair interactions. * * @tparam TSize a number castable to int * @tparam TSet a finite iterable container on ints * @param size the iota superset contains all integers in the range [0, size) * @param set an original set of integers * @return a set of ints complementary to the original set */ template <typename TSize, typename TSet> inline auto indexComplement(const TSize size, const TSet &set) { assert(size <= std::numeric_limits<int>::max()); return ranges::views::ints(0, static_cast<int>(size)) | ranges::views::remove_if([&set](TSize i) { return std::binary_search(set.begin(), set.end(), i); }); } /** * @brief Interface for energy accumulators * * The energy accumulator is used to add up energies between two particles. * This can be done instantly (see `InstantEnergyAccumulator`) or delaying * the evaluation until the energy is needed (`DelayedEnergyAccumulator`). * The latter may be used with parallelism. * * @todo See https://www.youtube.com/watch?v=3LsRYnRDSRA for a bizarre example * where a custom `struct Tpair { const Particle &first, second; };` * outperforms `std::pair` due to missed compiler optimization. 
*/ class EnergyAccumulatorBase { protected: double value = 0.0; //!< accumulated energy using ParticleRef = const std::reference_wrapper<const Particle>; //!< Particle reference using ParticlePair = std::pair<ParticleRef, ParticleRef>; //!< References to two particles public: enum class Scheme { SERIAL, OPENMP, PARALLEL, INVALID }; Scheme scheme = Scheme::SERIAL; EnergyAccumulatorBase(double value); virtual ~EnergyAccumulatorBase() = default; virtual void reserve(size_t number_of_particles); virtual void clear(); virtual void from_json(const json &j); virtual void to_json(json &j) const; virtual explicit operator double(); virtual EnergyAccumulatorBase& operator=(double new_value) = 0; virtual EnergyAccumulatorBase& operator+=(double new_value) = 0; virtual EnergyAccumulatorBase& operator+=(ParticlePair&& pair) = 0; template <typename TOtherAccumulator> inline EnergyAccumulatorBase& operator+=(TOtherAccumulator& acc) { value += static_cast<double>(acc); return *this; } }; NLOHMANN_JSON_SERIALIZE_ENUM(EnergyAccumulatorBase::Scheme, {{EnergyAccumulatorBase::Scheme::INVALID, nullptr}, {EnergyAccumulatorBase::Scheme::SERIAL, "serial"}, {EnergyAccumulatorBase::Scheme::OPENMP, "openmp"}, {EnergyAccumulatorBase::Scheme::PARALLEL, "parallel"}}) /** * @brief A basic accumulator which immediately computes and adds energy of a pair of particles upon addition using * the PairEnergy templated class. * * Generally this is the original way how the pairwise nonbonded energy has been computed in Faunus. Due to compiler * optimization, templated class method 'PairEnergy.potential' may be inlined to significantly improve performance. 
* * @tparam PairEnergy pair energy implementing a potential(a, b) method for particles a and b */ template <typename PairEnergy> class InstantEnergyAccumulator : public EnergyAccumulatorBase { private: const PairEnergy& pair_energy; //!< recipe to compute non-bonded energy between two particles, see PairEnergy public: InstantEnergyAccumulator(const PairEnergy& pair_energy, const double value = 0.0) : EnergyAccumulatorBase(value), pair_energy(pair_energy) {} inline InstantEnergyAccumulator& operator=(const double new_value) override { value = new_value; return *this; } inline InstantEnergyAccumulator& operator+=(const double new_value) override { value += new_value; return *this; } inline InstantEnergyAccumulator& operator+=(ParticlePair&& pair) override { // keep this short to get inlined value += pair_energy.potential(pair.first.get(), pair.second.get()); return *this; } void from_json(const json &j) override { EnergyAccumulatorBase::from_json(j); if (scheme != Scheme::SERIAL) { faunus_logger->warn("unsupported summation scheme; falling back to 'serial'"); } } }; /** * Stores a vector of particle pairs and postpones the energy evaluation until * `operator double()` is called. Looping over the vector can be done in serial (as a fallback); * using OpenMP; or using C++17 parallel algorithms if available. 
*/ template <typename PairEnergy> class DelayedEnergyAccumulator : public EnergyAccumulatorBase { private: std::vector<ParticlePair> particle_pairs; const PairEnergy& pair_energy; //!< recipe to compute non-bonded energy between two particles, see PairEnergy const size_t max_particles_in_buffer = 10000; //!< this can be modified to suit memory requirements public: explicit DelayedEnergyAccumulator(const PairEnergy& pair_energy, const double value = 0.0) : EnergyAccumulatorBase(value), pair_energy(pair_energy) {} /** Reserve memory for (N-1)*N/2 interaction pairs */ void reserve(size_t number_of_particles) override { try { number_of_particles = std::min(number_of_particles, max_particles_in_buffer); const auto number_of_pairs = (number_of_particles - 1U) * number_of_particles / 2U; faunus_logger->debug(fmt::format("reserving memory for {} energy pairs ({} MB)", number_of_pairs, number_of_pairs * sizeof(ParticlePair) / (1024U * 1024U))); particle_pairs.reserve(number_of_pairs); } catch (std::exception& e) { throw std::runtime_error( fmt::format("cannot allocate memory for energy pairs: {}. 
Use another summation policy.", e.what())); } } void clear() override { value = 0.0; particle_pairs.clear(); } DelayedEnergyAccumulator& operator=(const double new_value) override { clear(); value = new_value; return *this; } inline DelayedEnergyAccumulator& operator+=(const double new_value) override { value += new_value; return *this; } inline DelayedEnergyAccumulator& operator+=(ParticlePair&& pair) override { assert(particle_pairs.capacity() > 0); if (particle_pairs.size() == particle_pairs.capacity()) { operator double(); // sum stored pairs and reset buffer } particle_pairs.template emplace_back(std::move(pair)); return *this; } explicit operator double() override { switch (scheme) { case Scheme::OPENMP: value += accumulateOpenMP(); break; case Scheme::PARALLEL: value += accumulateParallel(); break; default: value += accumulateSerial(); } particle_pairs.clear(); return value; } private: double accumulateSerial() const { double sum = 0.0; for (const auto [particle1, particle2] : particle_pairs) { sum += pair_energy.potential(particle1.get(), particle2.get()); } return sum; } double accumulateParallel() const { #if defined(HAS_PARALLEL_TRANSFORM_REDUCE) return std::transform_reduce( std::execution::par, particle_pairs.cbegin(), particle_pairs.cend(), 0.0, std::plus<double>(), [&](const auto& pair) { return pair_energy.potential(pair.first.get(), pair.second.get()); }); #else return accumulateSerial(); // fallback #endif } double accumulateOpenMP() const { double sum = 0.0; #pragma omp parallel for reduction(+ : sum) for (const auto& pair : particle_pairs) { sum += pair_energy.potential(pair.first.get(), pair.second.get()); } return sum; } }; template <typename TPairEnergy> std::unique_ptr<EnergyAccumulatorBase> createEnergyAccumulator(const json& j, const TPairEnergy& pair_energy, double initial_value) { std::unique_ptr<EnergyAccumulatorBase> accumulator; if (j.value("summation_policy", EnergyAccumulatorBase::Scheme::SERIAL) != 
EnergyAccumulatorBase::Scheme::SERIAL) { accumulator = std::make_unique<DelayedEnergyAccumulator<TPairEnergy>>(pair_energy, initial_value); faunus_logger->debug("activated delayed energy summation"); } else { accumulator = std::make_unique<InstantEnergyAccumulator<TPairEnergy>>(pair_energy, initial_value); faunus_logger->debug("activated instant energy summation"); } accumulator->from_json(j); return accumulator; } /** * @brief Determines if two groups are separated beyond the cutoff distance. * * The distance between centers of mass is considered. The cutoff distance can be specified independently for each * group pair to override the default value. * * @see GroupPairingPolicy */ class GroupCutoff { double default_cutoff_squared = pc::max_value; PairMatrix<double> cutoff_squared; //!< matrix with group-to-group cutoff distances squared in angstrom squared Space::GeometryType& geometry; //!< geometry to compute the inter group distance with friend void from_json(const json&, GroupCutoff&); friend void to_json(json&, const GroupCutoff&); void setSingleCutoff(const double cutoff); public: /** * @brief Determines if two groups are separated beyond the cutoff distance. * @return true if the group-to-group distance is beyond the cutoff distance, false otherwise */ inline bool cut(const Group& group1, const Group& group2) { if (group1.isAtomic() || group2.isAtomic()) { return false; // atomic groups have ill-defined mass centers } return geometry.sqdist(group1.mass_center, group2.mass_center) >= cutoff_squared(group1.id, group2.id); } double getCutoff(size_t id1, size_t id2) const; /** * @brief A functor alias for cut(). * @see cut() */ template <typename... Args> inline auto operator()(Args &&... args) { return cut(std::forward<Args>(args)...); } /** * @brief Sets the geometry. 
* @param geometry geometry to compute the inter group distance with */ GroupCutoff(Space::GeometryType& geometry); }; void from_json(const json&, GroupCutoff &); void to_json(json&, const GroupCutoff &); /** * @brief Provides a fast inlineable interface for non-bonded pair potential energy computation. * * @tparam TPairPotential a pair potential to compute with * @tparam allow_anisotropic_pair_potential pass also a distance vector to the pair potential, slower */ template <typename TPairPotential, bool allow_anisotropic_pair_potential = true> class PairEnergy { const Space::GeometryType& geometry; //!< geometry to operate with TPairPotential pair_potential; //!< pair potential function/functor Space &spc; //!< space to init ParticleSelfEnergy with addPairPotentialSelfEnergy BasePointerVector<Energybase> &potentials; //!< registered non-bonded potentials, see addPairPotentialSelfEnergy public: /** * @param spc * @param potentials registered non-bonded potentials */ PairEnergy(Space& spc, BasePointerVector<Energybase>& potentials) : geometry(spc.geometry), spc(spc), potentials(potentials) {} /** * @brief Computes pair potential energy. 
 *
 * @param a particle
 * @param b particle
 * @return pair potential energy between particles a and b
 */
template <typename T> inline double potential(const T &a, const T &b) const {
    assert(&a != &b); // a and b cannot be the same particle
    if constexpr (allow_anisotropic_pair_potential) {
        const Point r = geometry.vdist(a.pos, b.pos); // distance vector required by anisotropic potentials
        return pair_potential(a, b, r.squaredNorm(), r);
    } else {
        return pair_potential(a, b, geometry.sqdist(a.pos, b.pos), {0, 0, 0}); // dummy zero vector
    }
}

// just a temporary placement until a PairForce class template is implemented
template <typename ParticleType> inline Point force(const ParticleType& a, const ParticleType& b) const {
    assert(&a != &b); // a and b cannot be the same particle
    const Point b_towards_a = geometry.vdist(a.pos, b.pos); // vector b -> a = a - b
    return pair_potential.force(a, b, b_towards_a.squaredNorm(), b_towards_a);
}

/**
 * @brief A functor alias for potential().
 * @see potential()
 */
template <typename... Args> inline auto operator()(Args &&... args) { return potential(std::forward<Args>(args)...); }

/**
 * @brief Registers the potential self-energy to hamiltonian if needed.
 * @see Hamiltonian::Hamiltonian
 */
void addPairPotentialSelfEnergy() {
    if (pair_potential.selfEnergy) { // only add if self energy is defined
        faunus_logger->debug("Adding self-energy from {} to hamiltonian", pair_potential.name);
        potentials.emplace_back<Energy::ParticleSelfEnergy>(spc, pair_potential.selfEnergy);
    }
}

// Configures the pair potential from JSON; rejects anisotropic potentials when the template disallows them
void from_json(const json &j) {
    pair_potential.from_json(j);
    if (!pair_potential.isotropic && !allow_anisotropic_pair_potential) {
        throw std::logic_error("Only isotropic pair potentials are allowed.");
    }
    addPairPotentialSelfEnergy();
}

void to_json(json &j) const { pair_potential.to_json(j); }
};

/**
 * @brief Particle pairing to calculate pairwise interaction using particles' groups internally. Depending on
 * the accumulator provided, raw particle pairs, energy sum, etc. can be obtained.
* * Accumulator is used as the first argument in all methods. Accumulator shall overload '+=' operator to accept a pair * of particle references as used in particle2particle method. * * @remark Method arguments are generally not checked for correctness because of performance reasons. * * @tparam TCutoff a cutoff scheme between groups * @see InstantEnergyAccumulator, GroupCutoff */ template <typename TCutoff> class GroupPairingPolicy { protected: const Space &spc; //!< a space to operate on TCutoff cut; //!< a cutoff functor that determines if energy between two groups can be ignored public: /** * @param spc */ GroupPairingPolicy(Space& spc) : spc(spc), cut(spc.geometry) {} void from_json(const json &j) { Energy::from_json(j, cut); } void to_json(json &j) const { Energy::to_json(j, cut); } /** * @brief Add two interacting particles to the accumulator. * * Due to compiler optimization, the '+=' operator and this function itself may be inlined to significantly * improve performance. * * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references * {T&, T&} * @tparam T an interacting particle * @param pair_accumulator accumulator of interacting pairs of particles * @param a first particle * @param b second particle */ template <typename TAccumulator, typename T> inline void particle2particle(TAccumulator &pair_accumulator, const T &a, const T &b) const { pair_accumulator += {std::cref(a), std::cref(b)}; } /** * @brief All pairings within a group. * * All pair interaction within the group are accumulated. The pair exclusions defined in the molecule * topology are honoured. 
* * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references * {T&, T&} * @tparam TGroup * @param group * @param pair_accumulator accumulator of interacting pairs of particles */ template <typename TAccumulator, typename TGroup> void groupInternal(TAccumulator &pair_accumulator, const TGroup &group) { const auto &moldata = group.traits(); if (!moldata.rigid) { const int group_size = group.size(); for (int i = 0; i < group_size - 1; ++i) { for (int j = i + 1; j < group_size; ++j) { // This compound condition is faster than an outer atomic condition; // tested on bulk example in GCC 9.2. if (group.isAtomic() || !moldata.isPairExcluded(i, j)) { particle2particle(pair_accumulator, group[i], group[j]); } } } } } /** * @brief Pairings of a single particle within the group. * * The pair exclusions defined in the molecule topology are honoured. * * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references * {T&, T&} * @tparam TGroup * @param pair_accumulator accumulator of interacting pairs of particles * @param group * @param index internal index of the selected particle within the group */ template <typename TAccumulator, typename TGroup> void groupInternal(TAccumulator& pair_accumulator, const TGroup& group, const std::size_t index) { const auto &moldata = group.traits(); if (!moldata.rigid) { if (group.isAtomic()) { // speed optimization: non-bonded interaction exclusions do not need to be checked for atomic groups for (int i = 0; i < index; ++i) { particle2particle(pair_accumulator, group[index], group[i]); } for (int i = index + 1; i < group.size(); ++i) { particle2particle(pair_accumulator, group[index], group[i]); } } else { // molecular group for (int i = 0; i < index; ++i) { if (!moldata.isPairExcluded(index, i)) { particle2particle(pair_accumulator, group[index], group[i]); } } for (int i = index + 1; i < group.size(); ++i) { if (!moldata.isPairExcluded(index, i)) 
{ particle2particle(pair_accumulator, group[index], group[i]); } } } } } /** * @brief Pairing in the group involving only the particles present in the index. * * Only such non-bonded pair interactions within the group are considered if at least one particle is present * in the index. The pair exclusions defined in the molecule topology are honoured. * * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references * {T&, T&} * @tparam TGroup * @tparam TIndex * @param pair_accumulator accumulator of interacting pairs of particles * @param group * @param index internal indices of particles within the group */ template <typename TAccumulator, typename TGroup, typename TIndex> void groupInternal(TAccumulator &pair_accumulator, const TGroup &group, const TIndex &index) { auto &moldata = group.traits(); if (!moldata.rigid) { if (index.size() == 1) { groupInternal(pair_accumulator, group, index[0]); } else { // TODO investigate overhead of `index_complement` filtering; // TODO perhaps allow different strategies based on the index-size/group-size ratio auto index_complement = indexComplement(group.size(), index); // moved <-> static for (int i : index) { for (int j : index_complement) { if (!moldata.isPairExcluded(i, j)) { particle2particle(pair_accumulator, group[i], group[j]); } } } // moved <-> moved for (auto i_it = index.begin(); i_it < index.end(); ++i_it) { for (auto j_it = std::next(i_it); j_it < index.end(); ++j_it) { if (!moldata.isPairExcluded(*i_it, *j_it)) { particle2particle(pair_accumulator, group[*i_it], group[*j_it]); } } } } } } /** * @brief Complete cartesian pairing of particles in two groups. * * group1 × group2 * * If the distance between the groups is greater or equal to the group cutoff distance, no calculation is performed. * The group intersection must be an empty set, i.e., no particle is included in both groups. This is not verified * for performance reason. 
* * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references * {T&, T&} * @tparam TGroup * @param pair_accumulator accumulator of interacting pairs of particles * @param group1 * @param group2 */ template <typename TAccumulator, typename TGroup> void group2group(TAccumulator &pair_accumulator, const TGroup &group1, const TGroup &group2) { if (!cut(group1, group2)) { for (auto &particle1 : group1) { for (auto &particle2 : group2) { particle2particle(pair_accumulator, particle1, particle2); } } } } /** * @brief Cross pairing of particles in two groups. Only a cartesian subset of the complete cartesian product is * considered as the particles in the first group must be also present in the index. The aim is to capture only * interactions that involve changing (indexed) particles. * * ⊕group1 × group2, where ⊕ denotes a filter by an index * * If the distance between the groups is greater or equal to the group cutoff distance, no calculation is performed. * The group intersection must be an empty set, i.e., no particle is included in both groups. This is not verified * for performance reason. * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references * {T&, T&} * @tparam TGroup * @param pair_accumulator accumulator of interacting pairs of particles * @param group1 * @param group2 * @param index1 list of particle indices in group1 relative to the group beginning */ template <typename TAccumulator, typename TGroup> void group2group(TAccumulator& pair_accumulator, const TGroup& group1, const TGroup& group2, const std::vector<std::size_t>& index1) { if (!cut(group1, group2)) { for (auto particle1_ndx : index1) { for (auto &particle2 : group2) { particle2particle(pair_accumulator, *(group1.begin() + particle1_ndx), particle2); } } } } /** * @brief Cross pairing of particles in two groups. 
Only a non-cartesian subset of the complete cartesian product * is considered as at least one particles in the pair must be also present in the respective index. The aim is * to capture only interactions that involve changing (indexed) particles, i.e., to avoid pairs containing only * non-indexed particles. * * (⊕group1 × ∁⊕group2) + (∁⊕group1 × ⊕group2) + (⊕group1 × ⊕group2) = * = group1 × group2 − (∁⊕group2 × ∁⊕group2), where ⊕ denotes a filter by an index and ∁ a complement * * If the distance between the groups is greater or equal to the group cutoff distance, no calculation is performed. * The group intersection must be an empty set, i.e., no particle is included in both groups. This is not verified * for performance reason. * * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references * {T&, T&} * @tparam TGroup * @param pair_accumulator accumulator of interacting pairs of particles * @param group1 * @param group2 * @param index1 list of particle indices in group1 relative to the group beginning * @param index2 list of particle indices in group2 relative to the group beginning */ template <typename TAccumulator, typename TGroup> void group2group(TAccumulator& pair_accumulator, const TGroup& group1, const TGroup& group2, const std::vector<std::size_t>& index1, const std::vector<std::size_t>& index2) { if (!cut(group1, group2)) { if (!index2.empty()) { // (∁⊕group1 × ⊕group2) + (⊕group1 × ⊕group2) = group1 × ⊕group2 group2group(pair_accumulator, group2, group1, index2); // + (⊕group1 × ∁⊕group2) auto index2_complement = indexComplement(group2.size(), index2); for (auto particle1_ndx : index1) { for (auto particle2_ndx : index2_complement) { particle2particle(pair_accumulator, group2[particle2_ndx], group1[particle1_ndx]); } } } else if (!index1.empty()) { // (⊕group1 × ∁⊕group2) + (⊕group1 × ⊕group2) = ⊕group1 × group2 group2group(pair_accumulator, group1, group2, index1); // + (∁⊕group1 × ⊕group2) = Ø as ⊕group2 
is empty } else { // both indices empty hence nothing to do } } } /** * @brief Complete cartesian pairing between particles in a group and a union of groups. * * group × (∪ groups) * * If the distance between the groups is greater or equal to the group cutoff distance, the particle pairing between * them is skipped. The internal energy of the group is not computed even if the group is also present in the union * of groups. * * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references * {T&, T&} * @tparam TGroup * @tparam TGroups * @param pair_accumulator accumulator of interacting pairs of particles * @param group * @param groups */ template <typename TAccumulator, typename TGroup, typename TGroups> void group2groups(TAccumulator &pair_accumulator, const TGroup &group, const TGroups &groups) { for (auto &other_group : groups) { if (&other_group != &group) { group2group(pair_accumulator, group, other_group); } } } /** * @brief Cross pairing of particles in a group and a union of groups. Only a cartesian subset of the complete * cartesian product is considered as the particles of the first group must be also present in the index. The aim * is to capture only interactions that involve changing (indexed) particles. * * ⊕group × (∪ groups), where ⊕ denotes a filter by an index * * If the distance between the groups is greater or equal to the group cutoff distance, the particle pairing * between them is skipped. The internal energy of the group is not computed even if the group is also present * in the union of groups. 
* * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references * {T&, T&} * @tparam TGroup * @tparam TGroups * @param pair_accumulator accumulator of interacting pairs of particles * @param group * @param group_index groups as indices in Space::groups * @param index list of particle indices in the group relative to the group beginning */ template <typename TAccumulator, typename TGroup, typename TGroups> void group2groups(TAccumulator& pair_accumulator, const TGroup& group, const TGroups& group_index, const std::vector<std::size_t>& index) { for (auto other_group_ndx : group_index) { const auto &other_group = spc.groups[other_group_ndx]; if (&other_group != &group) { group2group(pair_accumulator, group, other_group, index); } } } /** * @brief Complete cartesian pairing between particles in a group and particles in other groups in space. * * group × (space ∖ group) * * If the distance between the groups is greater or equal to the group cutoff distance, the particle pairing * between them is skipped. * * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references * {T&, T&} * @tparam TGroup * @param pair_accumulator accumulator of interacting pairs of particles * @param group */ template <typename TAccumulator, typename Tgroup> void group2all(TAccumulator &pair_accumulator, const Tgroup &group) { for (auto &other_group : spc.groups) { if (&other_group != &group) { group2group(pair_accumulator, group, other_group); } } } /** * @brief Complete cartesian pairing between a single particle in a group and particles in other groups in space. * * ⊕group × (space ∖ group), where ⊕ denotes a filter by an index (here a single particle) * * If the distance between the groups is greater or equal to the group cutoff distance, the particle pairing * between them is skipped. This method is performance-optimized version of the multiple indices method. 
* * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references * {T&, T&} * @tparam TGroup * @param pair_accumulator accumulator of interacting pairs of particles * @param group * @param index a particle index relative to the group beginning */ template <typename TAccumulator, typename TGroup> void group2all(TAccumulator &pair_accumulator, const TGroup &group, const int index) { const auto &particle = group[index]; for (auto &other_group : spc.groups) { if (&other_group != &group) { // avoid self-interaction if (!cut(other_group, group)) { // check g2g cut-off for (auto &other_particle : other_group) { // loop over particles in other group particle2particle(pair_accumulator, particle, other_particle); } } } } } /** * @brief Complete cartesian pairing between selected particles in a group and particles in other groups in space. * * ⊕group × (space ∖ group), where ⊕ denotes a filter by an index * * If the distance between the groups is greater or equal to the group cutoff distance, the particle pairing * between them is skipped. * * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references * {T&, T&} * @param pair_accumulator accumulator of interacting pairs of particles * @param group * @param index list of particle indices in the group relative to the group beginning */ template <typename TAccumulator, typename Tgroup> void group2all(TAccumulator& pair_accumulator, const Tgroup& group, const std::vector<std::size_t>& index) { if (index.size() == 1) { group2all(pair_accumulator, group, index[0]); } else { for (auto &other_group : spc.groups) { if (&other_group != &group) { group2group(pair_accumulator, group, other_group, index); } } } } /** * @brief Cross pairing of particles among a union of groups. No internal pairs within any group are considered. 
* * If the distance between any two groups is greater or equal to the group cutoff distance, the particle pairing * between them is skipped. * * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references * {T&, T&} * @tparam T * @param pair_accumulator accumulator of interacting pairs of particles * @param group_index list of groups */ template <typename TAccumulator, typename T> void groups2self(TAccumulator &pair_accumulator, const T &group_index) { for (auto group1_ndx_it = group_index.begin(); group1_ndx_it < group_index.end(); ++group1_ndx_it) { //no such move exists that the internal energy has to be recalculated //groupInternal(pair_accumulator, spc.groups[*group1_ndx_it]); for (auto group2_ndx_it = std::next(group1_ndx_it); group2_ndx_it < group_index.end(); group2_ndx_it++) { group2group(pair_accumulator, spc.groups[*group1_ndx_it], spc.groups[*group2_ndx_it]); } } } /** * @brief Cross pairing of particles between a union of groups and its complement in space. * * If the distance between any two groups is greater or equal to the group cutoff distance, the particle pairing * between them is skipped. * * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references * {T&, T&} * @tparam T * @param pair_accumulator accumulator of interacting pairs of particles * @param group_index list of groups */ template <typename TAccumulator, typename T> void groups2all(TAccumulator &pair_accumulator, const T &group_index) { groups2self(pair_accumulator, group_index); auto index_complement = indexComplement(spc.groups.size(), group_index); for (auto group1_ndx : group_index) { for (auto group2_ndx : index_complement) { group2group(pair_accumulator, spc.groups[group1_ndx], spc.groups[group2_ndx]); } } } /** * @brief Cross pairing between all particles in the space. 
 *
 * If the distance between particles' groups is greater or equal to the group cutoff distance, no calculation is
 * performed.
 *
 * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references
 * {T&, T&}
 * @param pair_accumulator accumulator of interacting pairs of particles
 */
template <typename TAccumulator> void all(TAccumulator &pair_accumulator) {
    // upper-triangle sweep over all group pairs; every group also contributes its internal pairings
    for (auto group_it = spc.groups.begin(); group_it < spc.groups.end(); ++group_it) {
        groupInternal(pair_accumulator, *group_it);
        for (auto other_group_it = std::next(group_it); other_group_it < spc.groups.end(); other_group_it++) {
            group2group(pair_accumulator, *group_it, *other_group_it);
        }
    }
}

/**
 * @brief Cross pairing between all particles in the space.
 *
 * If the distance between particles' groups is greater or equal to the group cutoff distance, no calculation is
 * performed.
 *
 * @tparam TAccumulator an accumulator with '+=' operator overloaded to add a pair of particles as references
 * {T&, T&}
 * @tparam TCondition a function returning bool and having a group as an argument
 * @param pair_accumulator accumulator of interacting pairs of particles
 * @param condition a group filter if internal energy of the group shall be added
 */
template <typename TAccumulator, typename TCondition> void all(TAccumulator &pair_accumulator, TCondition condition) {
    for (auto group_it = spc.groups.begin(); group_it < spc.groups.end(); ++group_it) {
        if (condition(*group_it)) { // internal pairings only for groups passing the filter
            groupInternal(pair_accumulator, *group_it);
        }
        for (auto other_group_it = std::next(group_it); other_group_it < spc.groups.end(); other_group_it++) {
            group2group(pair_accumulator, *group_it, *other_group_it);
        }
    }
}
};

/**
 * @brief Computes pair quantity difference for a system perturbation. Such a quantity can be the energy
 * computed with a non-bonded pair potential.
* @tparam TPolicy a pairing policy */ template <typename TPolicy> class GroupPairing { const Space &spc; TPolicy pairing; protected: /** * @brief Computes pair quantity difference if only a single group has changed. * * @tparam TAccumulator * @param pair_accumulator accumulator of interacting pairs of particles * @param change */ template <typename TAccumulator> void accumulateGroup(TAccumulator &pair_accumulator, const Change &change) { const auto &change_data = change.groups.at(0); const auto& group = spc.groups.at(change_data.group_index); if (change_data.relative_atom_indices.size() == 1) { // faster algorithm if only a single particle moves pairing.group2all(pair_accumulator, group, change_data.relative_atom_indices[0]); if (change_data.internal) { pairing.groupInternal(pair_accumulator, group, change_data.relative_atom_indices[0]); } } else { const bool change_all = change_data.relative_atom_indices.empty(); // all particles or only their subset? if (change_all) { pairing.group2all(pair_accumulator, group); if (change_data.internal) { pairing.groupInternal(pair_accumulator, group); } } else { pairing.group2all(pair_accumulator, group, change_data.relative_atom_indices); if (change_data.internal) { pairing.groupInternal(pair_accumulator, group, change_data.relative_atom_indices); } } } } /** * @brief Computes pair quantity difference if the number of particles has changed. * * Particles have to be explicitly enumerated in the atom indices of the changed group. Implicit addition of atoms * with a group is not supported yet. Note that we do not have to care about missing (removed) particles at all. * They are taken into account in the original (old) space where they are present. 
* * @param pair_accumulator accumulator of interacting pairs of particles * @param change */ template <typename TAccumulator> void accumulateSpeciation(TAccumulator &pair_accumulator, const Change &change) { assert(change.matter_change); const auto &moved = change.touchedGroupIndex(); // index of moved groups const auto& fixed = indexComplement(spc.groups.size(), moved) | ranges::to<std::vector>; // index of static groups auto filter_active = [](int size) { return ranges::views::filter([size](const auto i) { return i < size; }); }; // loop over all changed groups for (auto change_group1_it = change.groups.begin(); change_group1_it < change.groups.end(); ++change_group1_it) { auto& group1 = spc.groups.at(change_group1_it->group_index); // filter only active particles const auto index1 = change_group1_it->relative_atom_indices | filter_active(group1.size()) | ranges::to<std::vector>; if (!index1.empty()) { // particles added into the group: compute (changed group) <-> (static group) pairing.group2groups(pair_accumulator, group1, fixed, index1); } // loop over successor changed groups (hence avoid double counting group1×group2 and group2×group1) for (auto change_group2_it = std::next(change_group1_it); change_group2_it < change.groups.end(); ++change_group2_it) { auto& group2 = spc.groups.at(change_group2_it->group_index); const auto index2 = change_group2_it->relative_atom_indices | filter_active(group2.size()) | ranges::to<std::vector>; if (!index1.empty() || !index2.empty()) { // particles added into one or other group: compute (changed group) <-> (changed group) pairing.group2group(pair_accumulator, group1, group2, index1, index2); } } if (!index1.empty() && !molecules.at(group1.id).rigid) { // compute internal energy in the changed group if (change_group1_it->all) { pairing.groupInternal(pair_accumulator, group1); } else { pairing.groupInternal(pair_accumulator, group1, index1); }; } } } public: /** * @brief Computes pair quantity difference from changed 
particles.
 *
 * The internal energy contribution, i.e., the contribution from the intra group interactions, is added
 * only if a single group is changed or if all changed.
 *
 * @param change
 * @param pair_accumulator accumulator of interacting pairs of particles
 */
template <typename TAccumulator> void accumulate(TAccumulator &pair_accumulator, const Change &change) {
    assert(std::is_sorted(change.groups.begin(), change.groups.end()));
    if (change.everything) {
        pairing.all(pair_accumulator);
    } else if (change.volume_change) {
        // sum all interaction energies except the internal energies of incompressible molecules
        pairing.all(pair_accumulator, [](auto& group) { return group.isAtomic() || group.traits().compressible; });
    } else if (!change.matter_change) {
        if (change.groups.size() == 1) {
            // if only a single group changes use faster algorithm and optionally add the internal energy
            accumulateGroup(pair_accumulator, change);
        } else {
            // if multiple groups move, no internal energies are computed
            const auto &moved = change.touchedGroupIndex(); // index of moved groups
            pairing.groups2all(pair_accumulator, moved);
        }
    } else { // change.dN
        accumulateSpeciation(pair_accumulator, change);
    }
}

GroupPairing(Space &spc) : spc(spc), pairing(spc) {}

void from_json(const json &j) { pairing.from_json(j); }
void to_json(json &j) const { pairing.to_json(j); }

// FIXME a temporary fix for the non-refactored NonbondedCached
// NOTE(review): std::forward with an explicit lvalue-reference type collapses to plain lvalue
// passing, so these forwards are equivalent to passing the arguments directly
template <typename Accumulator>
void group2group(Accumulator& pair_accumulator, const Space::GroupType& group1, const Space::GroupType& group2) {
    pairing.group2group(std::forward<Accumulator&>(pair_accumulator), std::forward<const Space::GroupType&>(group1),
                        std::forward<const Space::GroupType&>(group2));
}
};

/**
 * @brief Computes change in the non-bonded energy, assuming pairwise additive energy terms.
* * @tparam TPairEnergy a functor to compute non-bonded energy between two particles * @tparam TPairingPolicy pairing policy to effectively sum up the pairwise additive non-bonded energy */ template <typename TPairEnergy, typename TPairingPolicy> class Nonbonded : public Energybase { protected: const Space& spc; //!< space to operate on TPairEnergy pair_energy; //!< a functor to compute non-bonded energy between two particles, see PairEnergy TPairingPolicy pairing; //!< pairing policy to effectively sum up the pairwise additive non-bonded energy std::shared_ptr<EnergyAccumulatorBase> energy_accumulator; //!< energy accumulator used for storing and summing pair-wise energies public: Nonbonded(const json& j, Space& spc, BasePointerVector<Energybase>& pot) : spc(spc), pair_energy(spc, pot), pairing(spc) { name = "nonbonded"; from_json(j); energy_accumulator = createEnergyAccumulator(j, pair_energy, 0.0); energy_accumulator->reserve(spc.numParticles()); // attempt to reduce memory fragmentation } void from_json(const json &j) { pair_energy.from_json(j); pairing.from_json(j); } void to_json(json &j) const override { pair_energy.to_json(j); pairing.to_json(j); energy_accumulator->to_json(j); } double energy(Change& change) override { energy_accumulator->clear(); // down-cast to avoid slow, virtual function calls: if (auto ptr = std::dynamic_pointer_cast<InstantEnergyAccumulator<TPairEnergy>>(energy_accumulator)) { pairing.accumulate(*ptr, change); } else if (auto ptr = std::dynamic_pointer_cast<DelayedEnergyAccumulator<TPairEnergy>>(energy_accumulator)) { pairing.accumulate(*ptr, change); } else { pairing.accumulate(*energy_accumulator, change); } return static_cast<double>(*energy_accumulator); } /** * @brief Calculates the force on all particles. * * @todo A stub. Change to reflect only active particle, see Space::activeParticles(). 
 */
void force(std::vector<Point> &forces) override {
    // just a temporary hack; perhaps better to allow PairForce instead of the PairEnergy template
    // NOTE(review): spc.particles.size() - 1 underflows (size_t) for an empty particle vector --
    // confirm callers guarantee at least one particle
    assert(forces.size() == spc.particles.size() && "the forces size must match the particle size");
    for (size_t i = 0; i < spc.particles.size() - 1; ++i) {
        for (size_t j = i + 1; j < spc.particles.size(); ++j) {
            const Point f = pair_energy.force(spc.particles[i], spc.particles[j]); // force on i from j
            forces[i] += f;
            forces[j] -= f; // Newton's third law
        }
    }
}
};

/**
 * @brief Computes non-bonded energy contribution from changed particles. Cache group2group energy once calculated,
 * until a new trial configuration is provided. Not for general use as only partially implemented!
 *
 * Original implementation, only refurbished. Generally suboptimal as only PairingPolicy::group2group method
 * may be called.
 * No internal energy is ever computed. Cannot deal with particle count changes. And other unmentioned constraints.
 *
 * @tparam TPairEnergy a functor to compute non-bonded energy between two particles
 * @tparam TPairingPolicy pairing policy to effectively sum up the pairwise additive non-bonded energy
 */
template <typename TPairEnergy, typename TPairingPolicy>
class NonbondedCached : public Nonbonded<TPairEnergy, TPairingPolicy> {
    typedef Nonbonded<TPairEnergy, TPairingPolicy> Base;
    typedef InstantEnergyAccumulator<TPairEnergy> TAccumulator;
    Eigen::MatrixXf energy_cache; //!< strictly-upper-triangular group-to-group energy cache (single precision)
    using Base::spc;

    //! Returns the group-to-group energy; recomputed and written back to the cache in the TRIAL state
    template <typename TGroup> double g2g(const TGroup &g1, const TGroup &g2) {
        // derive matrix indices from the groups' addresses within the contiguous group vector
        int i = &g1 - spc.groups.data();
        int j = &g2 - spc.groups.data();
        if (j < i) { // only the strictly-upper triangle of the cache is used
            std::swap(i, j);
        }
        if (Energybase::state == Energybase::MonteCarloState::TRIAL) { // if this is from the trial system
            TAccumulator energy_accumulator(Base::pair_energy);
            Base::pairing.group2group(energy_accumulator, g1, g2);
            energy_cache(i, j) = static_cast<double>(energy_accumulator); // update the cache (truncated to float)
        }
        return energy_cache(i, j); // return (cached) value
    }

    //! Index-aware overload; the index is ignored since partial evaluation is not implemented here
    template <typename TGroup> double g2g(const TGroup& g1, const TGroup& g2,
[[maybe_unused]] const std::vector<std::size_t>& index) { // index not implemented
    return g2g(g1, g2);
}

public:
NonbondedCached(const json &j, Space &spc, BasePointerVector<Energybase> &pot) : Base(j, spc, pot) {
    Base::name += "EM";
    init(); // pre-compute the full group-to-group energy matrix
}

/**
 * @brief Cache pair interactions in matrix.
 */
void init() override {
    const auto groups_size = spc.groups.size();
    energy_cache.resize(groups_size, groups_size);
    energy_cache.setZero();
    TAccumulator u(Base::pair_energy);
    // NOTE(review): if groups_size == 0, groups_size - 1 underflows (size_t) and groups.at(0) would
    // throw std::out_of_range -- confirm a non-empty group vector is guaranteed here
    for (auto i = 0; i < groups_size - 1; ++i) {
        for (auto j = i + 1; j < groups_size; ++j) {
            u = 0.0; // reset the accumulator before each group pair
            Base::pairing.group2group(u, spc.groups.at(i), spc.groups.at(j));
            energy_cache(i, j) = static_cast<double>(u);
        }
    }
}

double energy(Change &change) override {
    // Only g2g may be called there to compute (and cache) energy!
    double energy_sum = 0.0;
    if (change) {
        if (change.everything || change.volume_change) {
            // full upper-triangle sweep over all group pairs
            for (auto i = spc.groups.begin(); i < spc.groups.end(); ++i) {
                for (auto j = std::next(i); j < Base::spc.groups.end(); ++j) {
                    energy_sum += g2g(*i, *j);
                }
            }
        } else {
            if (change.groups.size() == 1) { // if exactly ONE molecule is changed
                auto &d = change.groups[0];
                auto& g1 = spc.groups.at(d.group_index);
                for (auto g2_it = spc.groups.begin(); g2_it < spc.groups.end(); ++g2_it) {
                    if (&g1 != &(*g2_it)) { // avoid self-interaction
                        energy_sum += g2g(g1, *g2_it, d.relative_atom_indices);
                    }
                }
            } else { // many molecules are changed
                auto moved = change.touchedGroupIndex(); // index of moved groups
                // moved<->moved
                if (change.moved_to_moved_interactions) {
                    for (auto i = moved.begin(); i < moved.end(); ++i) {
                        for (auto j = std::next(i); j < moved.end(); ++j) {
                            energy_sum += g2g(spc.groups[*i], spc.groups[*j]);
                        }
                    }
                }
                // moved<->static
#if true // classic version
                auto fixed = indexComplement(spc.groups.size(), moved); // index of static groups
                for (auto i : moved) {
                    for (auto j : fixed) {
                        energy_sum += g2g(spc.groups[i], spc.groups[j]);
                    }
                }
#else // OMP-ready version
                auto fixed = indexComplement(spc.groups.size(), moved) |
ranges::to<std::vector>; // index of static groups const size_t moved_size = moved.size(); const size_t fixed_size = fixed.size(); for (auto i = 0; i < moved_size; ++i) { for (auto j = 0; j < fixed_size; ++j) { energy_sum += g2g(spc.groups[moved[i]], spc.groups[fixed[j]]); } } #endif } } // more todo! } return energy_sum; } /** * @brief Copy energy matrix from other * @param base_ptr * @param change */ void sync(Energybase* base_ptr, const Change& change) override { auto other = dynamic_cast<decltype(this)>(base_ptr); assert(other); if (change.everything || change.volume_change) { energy_cache.triangularView<Eigen::StrictlyUpper>() = (other->energy_cache).template triangularView<Eigen::StrictlyUpper>(); } else { for (auto &d : change.groups) { for (int i = 0; i < d.group_index; i++) { energy_cache(i, d.group_index) = other->energy_cache(i, d.group_index); } for (size_t i = d.group_index + 1; i < spc.groups.size(); i++) { energy_cache(d.group_index, i) = other->energy_cache(d.group_index, i); } } } } }; #ifdef ENABLE_FREESASA /** * @brief Interface to the FreeSASA C-library. Experimental and unoptimized. 
* https://freesasa.github.io/ * * @todo - Implement partial evaluation refelcting `change` object * - Average volume currently mixes accepted/rejected states */ class FreeSASAEnergy : public Energybase { private: std::vector<double> positions; //!< Flattened position buffer for all particles std::vector<double> radii; //!< Radii buffer for all particles std::vector<double> sasa; //!< Target buffer for calculated surface areas const Space& spc; double cosolute_molarity = 0.; //!< co-solute concentration (mol/l) std::unique_ptr<freesasa_parameters_fwd> parameters; //!< Parameters for freesasa Average<double> mean_surface_area; void to_json(json &j) const override; void sync(Energybase* energybase_ptr, const Change& change) override; void updateSASA(const Change& change); void init() override; /** * @brief Copies radii from Space to internal buffer * @param begin Iterator to first particle * @param end Iterator to beyond last particle * @param change Change object (currently unused) */ template <typename Tfirst, typename Tend> void updateRadii(Tfirst begin, Tend end, [[maybe_unused]] const Change& change) { const auto number_of_particles = std::distance(begin, end); radii.clear(); radii.reserve(number_of_particles); std::transform(begin, end, std::back_inserter(radii), [](const Particle& particle) { return particle.traits().sigma * 0.5; }); } /** * @brief Copies positions from Space to internal (flattened) buffer * @param begin Iterator to first particle * @param end Iterator to beyond last particle * @param change Change object (currently unused) */ template <typename Tfirst, typename Tend> void updatePositions(Tfirst begin, Tend end, [[maybe_unused]] const Change& change) { const auto number_of_particles = std::distance(begin, end); positions.clear(); positions.reserve(3 * number_of_particles); for (const auto& particle : spc.activeParticles()) { const auto* xyz = particle.pos.data(); positions.insert(positions.end(), xyz, xyz + 3); } } public: /** * @param spc * 
@param cosolute_molarity in particles per angstrom cubed * @param probe_radius in angstrom */ FreeSASAEnergy(const Space& spc, double cosolute_molarity = 0.0, double probe_radius = 1.4); FreeSASAEnergy(const json& j, const Space& spc); double energy(Change& change) override; const std::vector<double>& getAreas() const { return sasa; } }; //!< SASA energy from transfer free energies #endif /** * @brief class for calculating SASA energies calculating SASA of each particle every step * */ class SASAEnergyBase : public Energybase { public: using index_type = size_t; std::vector<double> areas; //!< Target buffer for calculated surface areas Space& spc; double cosolute_molarity = 0.; //!< co-solute concentration (mol/l) std::unique_ptr<SASA::SASABase> sasa; //!< performs neighbour searching and subsequent sasa calculation private: void to_json(json& j) const override; void sync(Energybase* energybase_ptr, const Change& change) override; void init() override; protected: /** * @brief returns absolute index of particle in ParticleVector * @param particle */ inline auto indexOf(const Particle& particle) const { return static_cast<index_type>(std::addressof(particle) - std::addressof(spc.particles.at(0))); } public: SASAEnergyBase(Space& spc, double cosolute_molarity = 0.0, double probe_radius = 1.4, int slices_per_atom = 20, bool dense_container = true); SASAEnergyBase(const json& j, Space& spc); const std::vector<double>& getAreas() const { return areas; } double energy(Change& change) override; }; //!< SASA energy from transfer free energies with SASA calculation each step /** * @brief class for calculating SASA energies calculating SASA of particles based on change object every step * */ class SASAEnergy : public SASAEnergyBase { private: std::vector<std::vector<index_type>> current_neighbours; //!< holds cached neighbour indices for each particle in ParticleVector std::vector<index_type> changed_indices; //!< paritcle indices whose SASA changed based on change object 
void to_json(json& j) const override; void sync(Energybase* energybase_ptr, const Change& change) override; void init() override; void updateChangedIndices(const Change& change); void insertChangedNeighboursOf(const index_type index, std::set<index_type>& target_indices) const; public: SASAEnergy(Space& spc, double cosolute_molarity = 0.0, double probe_radius = 1.4, int slices_per_atom = 20, bool dense_container = true); SASAEnergy(const json& j, Space& spc); double energy(Change& change) override; }; //!< SASA energy from transfer free energies /** * @brief Oscillating energy on a single particle * * This is 2D version of the oscillating potential used * to illustrate parallel tempering in the book * "Understanding Molecular Simulation" by D. Frenkel. */ class Example2D : public Energybase { private: bool use_2d = true; // Set to false to apply energy only along x (as by the book) double scale_energy = 1.0; // effective temperature const Point &particle; // reference to 1st particle in the system void to_json(json &j) const override; public: Example2D(const json &j, Space &spc); double energy(Change &change) override; }; /** * @brief Aggregate and sum energy terms */ class Hamiltonian : public Energybase, public BasePointerVector<Energybase> { private: double maximum_allowed_energy = pc::infty; //!< Maximum allowed energy change std::vector<double> latest_energies; //!< Placeholder for the lastest energies for each energy term decltype(vec)& energy_terms; //!< Alias for `vec` void addEwald(const json& j, Space& spc); //!< Adds an instance of reciprocal space Ewald energies (if appropriate) void checkBondedMolecules() const; //!< Warn if bonded molecules and no bonded energy term void to_json(json& j) const override; void force(PointVector& forces) override; std::unique_ptr<Energybase> createEnergy(Space& spc, const std::string& name, const json& j); public: Hamiltonian(Space& spc, const json& j); void init() override; void sync(Energybase* other_hamiltonian, const 
Change& change) override; double energy(Change& change) override; //!< Energy due to changes const std::vector<double>& latestEnergies() const; //!< Energies for each term from the latest call to `energy()` }; } // namespace Energy } // namespace Faunus
GB_subassign_12_and_20.c
//------------------------------------------------------------------------------
// GB_subassign_12_and_20: C(I,J)<M or !M,repl> += A ; using S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Method 12: C(I,J)<M,repl> += A ; using S
// Method 20: C(I,J)<!M,repl> += A ; using S

// M: present
// Mask_comp: true or false
// C_replace: true
// accum: present
// A: matrix
// S: constructed

// C: not bitmap: use GB_bitmap_assign instead

// M, A: any sparsity structure.

// NOTE(review): many identifiers used below without a visible declaration
// (pS, pS_end, Si, Svlen, Ab, mij, task_pending, nzombies, taskid, ntasks,
// nthreads, kfirst, klast, ...) are declared and/or assigned by the GB_*
// macros from GB_subassign_methods.h; do not rename or reorder statements
// without consulting those macro definitions.

#include "GB_subassign_methods.h"

GrB_Info GB_subassign_12_and_20
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t ni,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nj,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_Matrix M,
    const bool Mask_struct,         // if true, use only the structure of M
    const bool Mask_comp,           // if true, !M, else use M
    const GrB_BinaryOp accum,
    const GrB_Matrix A,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (!GB_IS_FULL (C)) ;
    ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M
    ASSERT (!GB_aliased (C, A)) ;   // NO ALIAS of C==A

    //--------------------------------------------------------------------------
    // S = C(I,J)
    //--------------------------------------------------------------------------

    GB_EMPTY_TASKLIST ;
    GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_MATRIX_WAIT_IF_JUMBLED (M) ;
    GB_MATRIX_WAIT_IF_JUMBLED (A) ;

    GB_GET_C ;      // C must not be bitmap
    GB_GET_MASK ;
    GB_GET_A ;
    GB_GET_S ;
    GB_GET_ACCUM ;

    //--------------------------------------------------------------------------
    // Method 12: C(I,J)<M,repl> += A ; using S
    // Method 20: C(I,J)<!M,repl> += A ; using S
    //--------------------------------------------------------------------------

    // Time: all entries in S+A must be traversed, so Omega(nnz(S)+nnz(A)) is
    // required.  All cases of the mask (0, 1, or not present) must be
    // considered, because of the C_replace descriptor being true.

    //--------------------------------------------------------------------------
    // Parallel: A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
    //--------------------------------------------------------------------------

    if (A_is_bitmap)
    {
        // all of IxJ must be examined
        GB_SUBASSIGN_IXJ_SLICE ;
    }
    else
    {
        // traverse all A+S
        GB_SUBASSIGN_TWO_SLICE (A, S) ;
    }

    //--------------------------------------------------------------------------
    // phase 1: create zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    if (A_is_bitmap)
    {

        //----------------------------------------------------------------------
        // phase1: A is bitmap
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:nzombies)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t j = kfirst ; j <= klast ; j++)
            {

                //--------------------------------------------------------------
                // get S(iA_start:iA_end,j)
                //--------------------------------------------------------------

                GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
                int64_t pA_start = j * Avlen ;  // A is bitmap: direct indexing

                //--------------------------------------------------------------
                // get M(:,j)
                //--------------------------------------------------------------

                int64_t pM_start, pM_end ;
                GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
                bool mjdense = (pM_end - pM_start) == Mvlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
                //--------------------------------------------------------------

                for (int64_t iA = iA_start ; iA < iA_end ; iA++)
                {
                    int64_t pA = pA_start + iA ;
                    bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
                    bool Afound = Ab [pA] ;
                    if (Sfound && !Afound)
                    {
                        // S (i,j) is present but A (i,j) is not
                        // mij = M(i,j), set by the macro below
                        GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                        if (Mask_comp) mij = !mij ;
                        if (!mij)
                        {
                            // ----[C . 0] or [X . 0]---------------------------
                            // [X . 0]: action: ( X ): still a zombie
                            // [C . 0]: C_repl: action: ( delete ): now zombie
                            GB_C_S_LOOKUP ;
                            GB_DELETE_ENTRY ;
                        }
                        GB_NEXT (S) ;
                    }
                    else if (!Sfound && Afound)
                    {
                        // S (i,j) is not present, A (i,j) is present
                        GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                        if (Mask_comp) mij = !mij ;
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            task_pending++ ;    // counted here, inserted in phase 2
                        }
                    }
                    else if (Sfound && Afound)
                    {
                        // both S (i,j) and A (i,j) present
                        GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                        if (Mask_comp) mij = !mij ;
                        GB_C_S_LOOKUP ;
                        if (mij)
                        {
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =A ): A to C no accum
                            // [C A 1]: action: ( =C+A ): apply accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_withaccum_C_A_1_matrix ;
                        }
                        else
                        {
                            // ----[C A 0] or [X A 0]---------------------------
                            // [X A 0]: action: ( X ): still a zombie
                            // [C A 0]: C_repl: action: ( delete ): now zombie
                            GB_DELETE_ENTRY ;
                        }
                        GB_NEXT (S) ;
                    }
                }
            }
            GB_PHASE1_TASK_WRAPUP ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // phase1: A is hypersparse, sparse, or full
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(+:nzombies)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_TASK_DESCRIPTOR_PHASE1 ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // get A(:,j) and S(:,j)
                //--------------------------------------------------------------

                int64_t j = GBH (Zh, k) ;
                GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
                GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);

                //--------------------------------------------------------------
                // get M(:,j)
                //--------------------------------------------------------------

                int64_t pM_start, pM_end ;
                GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
                bool mjdense = (pM_end - pM_start) == Mvlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(:,j) and A(:,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                // int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
                // (jC not needed in phase 1; kept here for reference)

                // while both list S (:,j) and A (:,j) have entries
                while (pS < pS_end && pA < pA_end)
                {
                    int64_t iS = GBI (Si, pS, Svlen) ;
                    int64_t iA = GBI (Ai, pA, Avlen) ;
                    if (iS < iA)
                    {
                        // S (i,j) is present but A (i,j) is not
                        GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iS) ;
                        if (Mask_comp) mij = !mij ;
                        if (!mij)
                        {
                            // ----[C . 0] or [X . 0]---------------------------
                            // [X . 0]: action: ( X ): still a zombie
                            // [C . 0]: C_repl: action: ( delete ): now zombie
                            GB_C_S_LOOKUP ;
                            GB_DELETE_ENTRY ;
                        }
                        GB_NEXT (S) ;
                    }
                    else if (iA < iS)
                    {
                        // S (i,j) is not present, A (i,j) is present
                        GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                        if (Mask_comp) mij = !mij ;
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            task_pending++ ;
                        }
                        GB_NEXT (A) ;
                    }
                    else
                    {
                        // both S (i,j) and A (i,j) present
                        GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                        if (Mask_comp) mij = !mij ;
                        GB_C_S_LOOKUP ;
                        if (mij)
                        {
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =A ): A to C no accum
                            // [C A 1]: action: ( =C+A ): apply accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_withaccum_C_A_1_matrix ;
                        }
                        else
                        {
                            // ----[C A 0] or [X A 0]---------------------------
                            // [X A 0]: action: ( X ): still a zombie
                            // [C A 0]: C_repl: action: ( delete ): now zombie
                            GB_DELETE_ENTRY ;
                        }
                        GB_NEXT (S) ;
                        GB_NEXT (A) ;
                    }
                }

                // while list S (:,j) has entries.  List A (:,j) exhausted.
                while (pS < pS_end)
                {
                    int64_t iS = GBI (Si, pS, Svlen) ;
                    GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iS) ;
                    if (Mask_comp) mij = !mij ;
                    if (!mij)
                    {
                        // ----[C . 0] or [X . 0]-------------------------------
                        // [X . 0]: action: ( X ): still a zombie
                        // [C . 0]: C_repl: action: ( delete ): becomes zombie
                        GB_C_S_LOOKUP ;
                        GB_DELETE_ENTRY ;
                    }
                    GB_NEXT (S) ;
                }

                // while list A (:,j) has entries.  List S (:,j) exhausted.
                while (pA < pA_end)
                {
                    // S (i,j) is not present, A (i,j) is present
                    int64_t iA = GBI (Ai, pA, Avlen) ;
                    GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                    if (Mask_comp) mij = !mij ;
                    if (mij)
                    {
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        task_pending++ ;
                    }
                    GB_NEXT (A) ;
                }
            }
            GB_PHASE1_TASK_WRAPUP ;
        }
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;

    if (A_is_bitmap)
    {

        //----------------------------------------------------------------------
        // phase2: A is bitmap
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(&&:pending_sorted)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t j = kfirst ; j <= klast ; j++)
            {

                //--------------------------------------------------------------
                // get S(iA_start:iA_end,j)
                //--------------------------------------------------------------

                GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
                int64_t pA_start = j * Avlen ;

                //--------------------------------------------------------------
                // get M(:,j)
                //--------------------------------------------------------------

                int64_t pM_start, pM_end ;
                GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
                bool mjdense = (pM_end - pM_start) == Mvlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                // jC/iC are consumed inside the GB_PENDING_INSERT macro
                int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                for (int64_t iA = iA_start ; iA < iA_end ; iA++)
                {
                    int64_t pA = pA_start + iA ;
                    bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
                    bool Afound = Ab [pA] ;
                    if (!Sfound && Afound)
                    {
                        // S (i,j) is not present, A (i,j) is present
                        GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                        if (Mask_comp) mij = !mij ;
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                            GB_PENDING_INSERT (Ax +(pA*asize)) ;
                        }
                    }
                    else if (Sfound)
                    {
                        // S (i,j) present
                        GB_NEXT (S) ;
                    }
                }
            }
            GB_PHASE2_TASK_WRAPUP ;
        }

    }
    else
    {

        //----------------------------------------------------------------------
        // phase2: A is hypersparse, sparse, or full
        //----------------------------------------------------------------------

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
            reduction(&&:pending_sorted)
        for (taskid = 0 ; taskid < ntasks ; taskid++)
        {

            //------------------------------------------------------------------
            // get the task descriptor
            //------------------------------------------------------------------

            GB_GET_TASK_DESCRIPTOR_PHASE2 ;

            //------------------------------------------------------------------
            // compute all vectors in this task
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // get A(:,j) and S(:,j)
                //--------------------------------------------------------------

                int64_t j = GBH (Zh, k) ;
                GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
                GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);

                //--------------------------------------------------------------
                // get M(:,j)
                //--------------------------------------------------------------

                int64_t pM_start, pM_end ;
                GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
                bool mjdense = (pM_end - pM_start) == Mvlen ;

                //--------------------------------------------------------------
                // do a 2-way merge of S(:,j) and A(:,j)
                //--------------------------------------------------------------

                // jC = J [j] ; or J is a colon expression
                int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;

                // while both list S (:,j) and A (:,j) have entries
                while (pS < pS_end && pA < pA_end)
                {
                    int64_t iS = GBI (Si, pS, Svlen) ;
                    int64_t iA = GBI (Ai, pA, Avlen) ;
                    if (iS < iA)
                    {
                        // S (i,j) is present but A (i,j) is not
                        GB_NEXT (S) ;
                    }
                    else if (iA < iS)
                    {
                        // S (i,j) is not present, A (i,j) is present
                        GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                        if (Mask_comp) mij = !mij ;
                        if (mij)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                            GB_PENDING_INSERT (Ax +(pA*asize)) ;
                        }
                        GB_NEXT (A) ;
                    }
                    else
                    {
                        // both S (i,j) and A (i,j) present; handled in phase 1
                        GB_NEXT (S) ;
                        GB_NEXT (A) ;
                    }
                }

                // while list A (:,j) has entries.  List S (:,j) exhausted.
                while (pA < pA_end)
                {
                    // S (i,j) is not present, A (i,j) is present
                    int64_t iA = GBI (Ai, pA, Avlen) ;
                    GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
                    if (Mask_comp) mij = !mij ;
                    if (mij)
                    {
                        // ----[. A 1]------------------------------------------
                        // [. A 1]: action: ( insert )
                        int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                        GB_PENDING_INSERT (Ax +(pA*asize)) ;
                    }
                    GB_NEXT (A) ;
                }
            }
            GB_PHASE2_TASK_WRAPUP ;
        }
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
dropout-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file dropout-inl.h * \brief * \author Bing Xu, Da Zheng, Hang Zhang */ #ifndef MXNET_OPERATOR_NN_DROPOUT_INL_H_ #define MXNET_OPERATOR_NN_DROPOUT_INL_H_ #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <map> #include <vector> #include <string> #include <utility> #include <algorithm> #include "../mxnet_op.h" #include "../mshadow_op.h" #include "../random/sampler.h" #include "../tensor/elemwise_binary_broadcast_op.h" #if defined(USE_MKL) && defined(_OPENMP) #include <omp.h> #include <mkl_vml_functions.h> #include <mkl_vsl.h> #endif // USE_MKL && _OPENMP namespace dropout { enum DropoutOpInputs {kData}; enum DropoutOpOutputs {kOut, kMask}; enum DropoutOpForwardResource {kRandom}; enum DropoutOpMode {kTraining, kAlways}; } // namespace dropout namespace mxnet { namespace op { const int MAX_DIM = 5; struct DropoutParam : public dmlc::Parameter<DropoutParam> { float p; int mode; TShape axes; DMLC_DECLARE_PARAMETER(DropoutParam) { DMLC_DECLARE_FIELD(p).set_default(0.5) .set_range(0, 1) .describe("Fraction of the input that gets dropped out during training time."); DMLC_DECLARE_FIELD(mode) .add_enum("training", 
dropout::kTraining) .add_enum("always", dropout::kAlways) .set_default(dropout::kTraining) .describe("Whether to only turn on dropout during training or to also turn on for inference."); DMLC_DECLARE_FIELD(axes).set_default(TShape()) .describe("Axes for variational dropout kernel."); } }; // struct DropoutParam template<typename xpu, typename DType> class DropoutOp { #if defined(USE_MKL) && defined(_OPENMP) static void BernoulliGenerate(common::random::RandGenerator<cpu, DType> gen, int n, double p, int* r) { typename RandGenerator<xpu, DType>::Impl genImpl(&gen, 1); const int seed = 17 + abs(genImpl.rand() % 4096); CHECK_GE(seed, 0); const int nthr = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); #pragma omp parallel num_threads(nthr) { const int ithr = omp_get_thread_num(); const int avg_amount = (n + nthr - 1) / nthr; const int my_offset = ithr * avg_amount; const int my_amount = std::min(my_offset + avg_amount, n) - my_offset; if (my_amount > 0) { VSLStreamStatePtr stream; vslNewStream(&stream, VSL_BRNG_MCG31, seed); vslSkipAheadStream(stream, my_offset); viRngBernoulli(VSL_RNG_METHOD_BERNOULLI_ICDF, stream, my_amount, r + my_offset, p); vslDeleteStream(&stream); } } } // MKL forward pass static bool MSHADOW_CINLINE MKLForward(mshadow::Stream<cpu> *s, RandGenerator<cpu, DType> *pgen, const double pkeep, const std::vector<TBlob> &in_data, const std::vector<TBlob> &out_data) { // BernoulliGenerate expects an array int, so for types smaller than int, the mask buffer // will be too small, so we can;t use MKL in those cases if (sizeof(DType) >= sizeof(int)) { Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> data = in_data[dropout::kData].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> out = out_data[dropout::kOut].FlatTo2D<xpu, DType>(s); DType *outptr = out.dptr_; DType *dataptr = data.dptr_; auto maskptr = reinterpret_cast<int *>(mask.dptr_); int count = mask.shape_[0] * mask.shape_[1]; 
BernoulliGenerate(*pgen, count, pkeep, maskptr); const float pk_1 = 1.0f / pkeep; #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = 0; i < count; ++i) { outptr[i] = dataptr[i] * maskptr[i] * pk_1; } return true; } return false; } // MKL backward pass static bool MSHADOW_CINLINE MKLBackward(mshadow::Stream<cpu> *s, const double pkeep, const std::vector<TBlob> &in_grad, const std::vector<TBlob> &out_data, const std::vector<TBlob> &out_grad) { if (sizeof(DType) >= sizeof(int)) { Tensor<xpu, 2, DType> grad = out_grad[dropout::kOut].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> mask = out_data[dropout::kMask].FlatTo2D<xpu, DType>(s); Tensor<xpu, 2, DType> gdata = in_grad[dropout::kData].FlatTo2D<xpu, DType>(s); DType *ingradptr = gdata.dptr_; const DType *outgradptr = grad.dptr_; auto maskptr = reinterpret_cast<int *>(mask.dptr_); int count = mask.shape_[0] * mask.shape_[1]; const float pk_1 = 1.0f / pkeep; #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount()) for (int i = 0; i < count; ++i) { ingradptr[i] = outgradptr[i] * maskptr[i] * pk_1; } return true; } return false; } #ifdef __CUDACC__ // GPU never uses MKL static bool MSHADOW_CINLINE MKLForward(mshadow::Stream<gpu> *s, RandGenerator<gpu, DType> *pgen, const double pkeep, const std::vector<TBlob> &in_data, const std::vector<TBlob> &out_data) { return false; } static bool MSHADOW_CINLINE MKLBackward(mshadow::Stream<gpu> *s, const double pkeep, const std::vector<TBlob> &in_grad, const std::vector<TBlob> &out_data, const std::vector<TBlob> &out_grad) { return false; } #endif // __CUDACC__ #else // #if defined(USE_MKL) && defined(_OPENMP) static bool MSHADOW_CINLINE MKLForward(mshadow::Stream<xpu> *s, RandGenerator<xpu, DType> *pgen, const double pkeep, const std::vector<TBlob> &in_data, const std::vector<TBlob> &out_data) { return false; } static bool MSHADOW_CINLINE MKLBackward(mshadow::Stream<xpu> *s, const double pkeep, 
const std::vector<TBlob> &in_grad, const std::vector<TBlob> &out_data, const std::vector<TBlob> &out_grad) { return false; } #endif // #if defined(USE_MKL) && defined(_OPENMP) public: /*! * \brief Dropout kernel, compute dropout tensor */ struct DropoutKernel { /*! * \brief Dropout kernel function * \param id Thread number (0-based representing count) * \param gen Random number generator * \param N Total number of items in the output * \param step Step between items, related to parallelism * \param dropout_out Output dropout values * \param mask_out Output mask (is multiplied to create dropout output, may be 0) * \param input_data Input data to perform the dropout on * \param pkeep Dropout rate (keep when the generated random number is less than this value) */ MSHADOW_XINLINE static void Map(int id, RandGenerator<xpu, DType> gen, const int N, const int step, DType *dropout_out, DType *mask_out, const DType *input_data, const real_t pkeep) { RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, { const real_t rand_num = static_cast<real_t>(genImpl.uniform()); mask_out[i] = mshadow_op::threshold_eq::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep); dropout_out[i] = input_data[i] * mask_out[i]; }); } }; struct BernoulliKernel { /*! 
\brief Bernoulli kernel for generating mask */ MSHADOW_XINLINE static void Map(int id, RandGenerator<xpu, DType> gen, const int N, const int step, DType *mask_out, const real_t pkeep) { RNG_KERNEL_LOOP(xpu, DType, id, gen, N, step, { const real_t rand_num = static_cast<real_t>(genImpl.uniform()); mask_out[i] = mshadow_op::threshold::Map<real_t>(rand_num, pkeep) * (1.0f / pkeep); }); } }; void Init(const DropoutParam &param) { this->pkeep_ = 1.0f - param.p; this->mode_ = static_cast<dropout::DropoutOpMode>(param.mode); this->axes_ = param.axes; } void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data) { if (req[dropout::kOut] != kNullOp) { CHECK_EQ(in_data.size(), 1U); if (ctx.is_train) { CHECK_EQ(out_data.size(), 2U); } Stream<xpu> *s = ctx.get_stream<xpu>(); const TBlob &out = out_data[dropout::kOut]; if (ctx.is_train || this->mode_ == dropout::kAlways) { RandGenerator<xpu, DType> *pgen = ctx.requested[0].get_parallel_random<xpu, DType>(); CHECK_NOTNULL(pgen); if (this->axes_.ndim() != 0 || !MKLForward(s, pgen, this->pkeep_, in_data, out_data)) { const TBlob &mask = out_data[dropout::kMask]; CHECK(req[dropout::kOut] != kAddTo); if (this->axes_.ndim() == 0) { // standard case for dropout LaunchRNG<DropoutKernel, xpu>(s, pgen, out.Size(), out.dptr<DType>(), mask.dptr<DType>(), in_data[dropout::kData].dptr<DType>(), this->pkeep_); return; } // initialize the mask LaunchRNG<BernoulliKernel, xpu>(s, pgen, mask.Size(), mask.dptr<DType>(), this->pkeep_); // broadcast mul TShape new_lshape, new_rshape, new_oshape; int ndim = BinaryBroadcastShapeCompact(in_data[dropout::kData].shape_, mask.shape_, out.shape_, &new_lshape, &new_rshape, &new_oshape); if (!ndim) { MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, out.Size(), out.dptr<DType>(), in_data[dropout::kData].dptr<DType>(), mask.dptr<DType>()); }); } else 
{ BROADCAST_NDIM_SWITCH(ndim, NDim, { mshadow::Shape<NDim> oshape = new_oshape.get<NDim>(); mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>()); mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, DType, mshadow_op::mul>, xpu>:: template LaunchEx(s, new_oshape.Size(), req[dropout::kOut], lstride, rstride, oshape, in_data[dropout::kData].dptr<DType>(), mask.dptr<DType>(), out.dptr<DType>()); }); } } } else { const TBlob& data = in_data[dropout::kData]; if (req[dropout::kOut] == kWriteTo) { mxnet_op::copy(s, out, data); } else { MXNET_ASSIGN_REQ_SWITCH(req[dropout::kOut], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch( s, out.Size(), out.dptr<DType>(), data.dptr<DType>()); }); } } } } void Backward(const OpContext &ctx, const std::vector<TBlob> &out_grad, const std::vector<TBlob> &out_data, const std::vector<OpReqType> &req, const std::vector<TBlob> &in_grad) { using namespace mshadow; using namespace mshadow::expr; Stream<xpu> *s = ctx.get_stream<xpu>(); if (ctx.is_train || mode_ == dropout::kAlways) { if (this->axes_.ndim() != 0 || !MKLBackward(s, this->pkeep_, in_grad, out_data, out_grad)) { const TBlob &gdata = in_grad[dropout::kData]; const TBlob &grad = out_grad[dropout::kOut]; const TBlob &mask = out_data[dropout::kMask]; if (this->axes_.ndim() == 0) { // standard case for dropout CHECK_EQ(grad.Size(), mask.Size()); MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>()); }); return; } // broardcast mul TShape new_lshape, new_rshape, new_oshape; int ndim = BinaryBroadcastShapeCompact(grad.shape_, mask.shape_, gdata.shape_, &new_lshape, &new_rshape, &new_oshape); if (!ndim) { MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { 
mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::mul, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>(), mask.dptr<DType>()); }); } else { BROADCAST_NDIM_SWITCH(ndim, NDim, { mshadow::Shape<NDim> oshape = new_oshape.get<NDim>(); mshadow::Shape<NDim> lstride = mxnet_op::calc_stride(new_lshape.get<NDim>()); mshadow::Shape<NDim> rstride = mxnet_op::calc_stride(new_rshape.get<NDim>()); mxnet_op::Kernel<mxnet_op::binary_broadcast_kernel<NDim, DType, mshadow_op::mul>, xpu>:: template LaunchEx(s, new_oshape.Size(), req[0], lstride, rstride, oshape, grad.dptr<DType>(), mask.dptr<DType>(), gdata.dptr<DType>()); }); } } } else { const TBlob& gdata = in_grad[dropout::kData]; const TBlob& grad = out_grad[dropout::kOut]; if (req[dropout::kData] == kWriteTo) { mxnet_op::copy(s, gdata, grad); } else { MXNET_ASSIGN_REQ_SWITCH(req[dropout::kData], Req, { mxnet_op::Kernel<mxnet_op::op_with_req<mshadow_op::identity, Req>, xpu>::Launch( s, gdata.Size(), gdata.dptr<DType>(), grad.dptr<DType>()); }); } } } private: /*! \brief Dropout rate (keep when the generated random number is less than this value) */ real_t pkeep_; /*! 
   \brief Dropout mode */
  dropout::DropoutOpMode mode_;
  // Axes over which the dropout mask is broadcast (empty = standard per-element dropout)
  TShape axes_;
};  // class DropoutOp

/*!
 * \brief Forward entry point registered with the operator framework.
 *
 * Unpacks DropoutParam from the node attributes, instantiates a DropoutOp of the
 * matching real dtype, and delegates to DropoutOp::Forward.
 *
 * \param attrs   Node attributes; `attrs.parsed` holds the DropoutParam
 * \param ctx     Operator context (stream, train/inference mode, random resources)
 * \param inputs  Input blobs (inputs[0] is the data tensor)
 * \param req     Write request type per output
 * \param outputs Output blobs (data and, in training, the mask)
 */
template<typename xpu>
void DropoutCompute(const nnvm::NodeAttrs& attrs,
                    const OpContext& ctx,
                    const std::vector<TBlob>& inputs,
                    const std::vector<OpReqType>& req,
                    const std::vector<TBlob>& outputs) {
  const DropoutParam& param = nnvm::get<DropoutParam>(attrs.parsed);
  // Dispatch on the runtime dtype of the input tensor; the op object is
  // stateless apart from the parameters copied in Init().
  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    DropoutOp<xpu, DType> op;
    op.Init(param);
    op.Forward(ctx, inputs, req, outputs);
  });
}

/*!
 * \brief Backward entry point registered with the operator framework.
 *
 * Expects exactly two inputs: the gradient w.r.t. the dropout output and the
 * mask saved by the forward pass. Repackages them into the out_grad/out_data
 * vectors that DropoutOp::Backward indexes with dropout::kOut / dropout::kMask.
 *
 * \param attrs   Node attributes; `attrs.parsed` holds the DropoutParam
 * \param ctx     Operator context
 * \param inputs  inputs[0] = output gradient, inputs[1] = saved mask
 * \param req     Write request type for the single data gradient
 * \param outputs outputs[0] = gradient w.r.t. the input data
 */
template<typename xpu>
void DropoutGradCompute(const nnvm::NodeAttrs& attrs,
                        const OpContext& ctx,
                        const std::vector<TBlob>& inputs,
                        const std::vector<OpReqType>& req,
                        const std::vector<TBlob>& outputs) {
  const DropoutParam& param = nnvm::get<DropoutParam>(attrs.parsed);
  CHECK_EQ(inputs.size(), 2U);
  CHECK_EQ(outputs.size(), 1);
  CHECK_EQ(req.size(), 1);
  // Re-index the flat input list into the slot layout Backward expects.
  std::vector<TBlob> out_grads(2);
  std::vector<TBlob> out_data(2);
  out_grads[dropout::kOut] = inputs[0];
  out_data[dropout::kMask] = inputs[1];
  MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
    DropoutOp<xpu, DType> op;
    op.Init(param);
    op.Backward(ctx, out_grads, out_data, req, outputs);
  });
}
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_NN_DROPOUT_INL_H_
QuadNodePolarEuclid.h
/* * QuadNodePolarEuclid.h * * Created on: 21.05.2014 * Author: Moritz v. Looz (moritz.looz-corswarem@kit.edu) * * Note: This is similar enough to QuadNode.h that one could merge these two classes. */ #ifndef QUADNODEPOLAREUCLID_H_ #define QUADNODEPOLAREUCLID_H_ #include <vector> #include <algorithm> #include <functional> #include <assert.h> #include "../../auxiliary/Log.h" #include "../../geometric/HyperbolicSpace.h" using std::vector; using std::min; using std::max; using std::cos; namespace NetworKit { template <class T> class QuadNodePolarEuclid { friend class QuadTreeGTest; private: double leftAngle; double minR; double rightAngle; double maxR; Point2D<double> a,b,c,d; unsigned capacity; static const unsigned coarsenLimit = 4; count subTreeSize; std::vector<T> content; std::vector<Point2D<double> > positions; std::vector<double> angles; std::vector<double> radii; bool isLeaf; bool splitTheoretical; double balance; index ID; double lowerBoundR; public: std::vector<QuadNodePolarEuclid> children; QuadNodePolarEuclid() { //This should never be called. leftAngle = 0; rightAngle = 0; minR = 0; maxR = 0; capacity = 20; isLeaf = true; subTreeSize = 0; balance = 0.5; splitTheoretical = false; lowerBoundR = maxR; ID = 0; } /** * Construct a QuadNode for polar coordinates. * * * @param leftAngle Minimal angular coordinate of region, in radians from 0 to 2\pi * @param rightAngle Maximal angular coordinate of region, in radians from 0 to 2\pi * @param minR Minimal radial coordinate of region, between 0 and 1 * @param maxR Maximal radial coordinate of region, between 0 and 1 * @param capacity Number of points a leaf cell can store before splitting * @param minDiameter Minimal diameter of a quadtree node. If the node is already smaller, don't split even if over capacity. Default is 0 * @param splitTheoretical Whether to split in a theoretically optimal way or in a way to decrease measured running times * @param alpha dispersion Parameter of the point distribution. 
Only has an effect if theoretical split is true * @param diagnostics Count how many necessary and unnecessary comparisons happen in leaf cells? Will cause race condition and false sharing in parallel use * */ QuadNodePolarEuclid(double leftAngle, double minR, double rightAngle, double maxR, unsigned capacity = 1000, bool splitTheoretical = false, double balance = 0.5) { if (balance <= 0 || balance >= 1) throw std::runtime_error("Quadtree balance parameter must be between 0 and 1."); this->leftAngle = leftAngle; this->minR = minR; this->maxR = maxR; this->rightAngle = rightAngle; this->a = HyperbolicSpace::polarToCartesian(leftAngle, minR); this->b = HyperbolicSpace::polarToCartesian(rightAngle, minR); this->c = HyperbolicSpace::polarToCartesian(rightAngle, maxR); this->d = HyperbolicSpace::polarToCartesian(leftAngle, maxR); this->capacity = capacity; this->splitTheoretical = splitTheoretical; this->balance = balance; this->lowerBoundR = maxR; this->ID = 0; isLeaf = true; subTreeSize = 0; } void split() { assert(isLeaf); //heavy lifting: split up! 
double middleAngle, middleR; if (splitTheoretical) { //Euclidean space is distributed equally middleAngle = (rightAngle - leftAngle) / 2 + leftAngle; middleR = pow(maxR*maxR*(1-balance)+minR*minR*balance, 0.5); } else { //median of points vector<double> sortedAngles = angles; std::sort(sortedAngles.begin(), sortedAngles.end()); middleAngle = sortedAngles[sortedAngles.size()/2]; vector<double> sortedRadii = radii; std::sort(sortedRadii.begin(), sortedRadii.end()); middleR = sortedRadii[sortedRadii.size()/2]; } assert(middleR < maxR); assert(middleR > minR); QuadNodePolarEuclid southwest(leftAngle, minR, middleAngle, middleR, capacity, splitTheoretical, balance); QuadNodePolarEuclid southeast(middleAngle, minR, rightAngle, middleR, capacity, splitTheoretical, balance); QuadNodePolarEuclid northwest(leftAngle, middleR, middleAngle, maxR, capacity, splitTheoretical, balance); QuadNodePolarEuclid northeast(middleAngle, middleR, rightAngle, maxR, capacity, splitTheoretical, balance); children = {southwest, southeast, northwest, northeast}; isLeaf = false; } /** * Add a point at polar coordinates (angle, R) with content input. May split node if capacity is full * * @param input arbitrary content, in our case an index * @param angle angular coordinate of point, between 0 and 2 pi. * @param R radial coordinate of point, between 0 and 1. 
*/ void addContent(T input, double angle, double R) { assert(this->responsible(angle, R)); if (lowerBoundR > R) lowerBoundR = R; if (isLeaf) { if (content.size() + 1 < capacity) { content.push_back(input); angles.push_back(angle); radii.push_back(R); Point2D<double> pos = HyperbolicSpace::polarToCartesian(angle, R); positions.push_back(pos); } else { split(); for (index i = 0; i < content.size(); i++) { this->addContent(content[i], angles[i], radii[i]); } assert(subTreeSize == content.size());//we have added everything twice subTreeSize = content.size(); content.clear(); angles.clear(); radii.clear(); positions.clear(); this->addContent(input, angle, R); } } else { assert(children.size() > 0); for (index i = 0; i < children.size(); i++) { if (children[i].responsible(angle, R)) { children[i].addContent(input, angle, R); break; } } subTreeSize++; } } /** * Remove content at polar coordinates (angle, R). May cause coarsening of the quadtree * * @param input Content to be removed * @param angle Angular coordinate * @param R Radial coordinate * * @return True if content was found and removed, false otherwise */ bool removeContent(T input, double angle, double R) { if (!responsible(angle, R)) return false; if (isLeaf) { index i = 0; for (; i < content.size(); i++) { if (content[i] == input) break; } if (i < content.size()) { assert(angles[i] == angle); assert(radii[i] == R); //remove element content.erase(content.begin()+i); positions.erase(positions.begin()+i); angles.erase(angles.begin()+i); radii.erase(radii.begin()+i); return true; } else { return false; } } else { bool removed = false; bool allLeaves = true; assert(children.size() > 0); for (index i = 0; i < children.size(); i++) { if (!children[i].isLeaf) allLeaves = false; if (children[i].removeContent(input, angle, R)) { assert(!removed); removed = true; } } if (removed) subTreeSize--; //coarsen? if (removed && allLeaves && size() < coarsenLimit) { //coarsen!! 
//why not assert empty containers and then insert directly? vector<T> allContent; vector<Point2D<double> > allPositions; vector<double> allAngles; vector<double> allRadii; for (index i = 0; i < children.size(); i++) { allContent.insert(allContent.end(), children[i].content.begin(), children[i].content.end()); allPositions.insert(allPositions.end(), children[i].positions.begin(), children[i].positions.end()); allAngles.insert(allAngles.end(), children[i].angles.begin(), children[i].angles.end()); allRadii.insert(allRadii.end(), children[i].radii.begin(), children[i].radii.end()); } assert(subTreeSize == allContent.size()); assert(subTreeSize == allPositions.size()); assert(subTreeSize == allAngles.size()); assert(subTreeSize == allRadii.size()); children.clear(); content.swap(allContent); positions.swap(allPositions); angles.swap(allAngles); radii.swap(allRadii); isLeaf = true; } return removed; } } /** * Check whether the region managed by this node lies outside of an Euclidean circle. * * @param query Center of the Euclidean query circle, given in Cartesian coordinates * @param radius Radius of the Euclidean query circle * * @return True if the region managed by this node lies completely outside of the circle */ bool outOfReach(Point2D<double> query, double radius) const { double phi, r; HyperbolicSpace::cartesianToPolar(query, phi, r); if (responsible(phi, r)) return false; //get four edge points double topDistance, bottomDistance, leftDistance, rightDistance; if (phi < leftAngle || phi > rightAngle) { topDistance = min(c.distance(query), d.distance(query)); } else { topDistance = abs(r - maxR); } if (topDistance <= radius) return false; if (phi < leftAngle || phi > rightAngle) { bottomDistance = min(a.distance(query), b.distance(query)); } else { bottomDistance = abs(r - minR); } if (bottomDistance <= radius) return false; double minDistanceR = r*cos(abs(phi-leftAngle)); if (minDistanceR > minR && minDistanceR < maxR) { leftDistance = 
query.distance(HyperbolicSpace::polarToCartesian(phi, minDistanceR)); } else { leftDistance = min(a.distance(query), d.distance(query)); } if (leftDistance <= radius) return false; minDistanceR = r*cos(abs(phi-rightAngle)); if (minDistanceR > minR && minDistanceR < maxR) { rightDistance = query.distance(HyperbolicSpace::polarToCartesian(phi, minDistanceR)); } else { rightDistance = min(b.distance(query), c.distance(query)); } if (rightDistance <= radius) return false; return true; } /** * Check whether the region managed by this node lies outside of an Euclidean circle. * Functionality is the same as in the method above, but it takes polar coordinates instead of Cartesian ones * * @param angle_c Angular coordinate of the Euclidean query circle's center * @param r_c Radial coordinate of the Euclidean query circle's center * @param radius Radius of the Euclidean query circle * * @return True if the region managed by this node lies completely outside of the circle */ bool outOfReach(double angle_c, double r_c, double radius) const { if (responsible(angle_c, r_c)) return false; Point2D<double> query = HyperbolicSpace::polarToCartesian(angle_c, r_c); return outOfReach(query, radius); } /** * @param phi Angular coordinate of query point * @param r_h radial coordinate of query point */ std::pair<double, double> EuclideanDistances(double phi, double r) const { /** * If the query point is not within the quadnode, the distance minimum is on the border. * Need to check whether extremum is between corners. 
*/ double maxDistance = 0; double minDistance = std::numeric_limits<double>::max(); if (responsible(phi, r)) minDistance = 0; auto euclidDistancePolar = [](double phi_a, double r_a, double phi_b, double r_b){ return pow(r_a*r_a+r_b*r_b-2*r_a*r_b*cos(phi_a-phi_b), 0.5); }; auto updateMinMax = [&minDistance, &maxDistance, phi, r, euclidDistancePolar](double phi_b, double r_b){ double extremalValue = euclidDistancePolar(phi, r, phi_b, r_b); //assert(extremalValue <= r + r_b); maxDistance = std::max(extremalValue, maxDistance); minDistance = std::min(minDistance, extremalValue); }; /** * angular boundaries */ //left double extremum = r*cos(this->leftAngle - phi); if (extremum < maxR && extremum > minR) { updateMinMax(this->leftAngle, extremum); } //right extremum = r*cos(this->rightAngle - phi); if (extremum < maxR && extremum > minR) { updateMinMax(this->leftAngle, extremum); } /** * radial boundaries. */ if (phi > leftAngle && phi < rightAngle) { updateMinMax(phi, maxR); updateMinMax(phi, minR); } if (phi + M_PI > leftAngle && phi + M_PI < rightAngle) { updateMinMax(phi + M_PI, maxR); updateMinMax(phi + M_PI, minR); } if (phi - M_PI > leftAngle && phi -M_PI < rightAngle) { updateMinMax(phi - M_PI, maxR); updateMinMax(phi - M_PI, minR); } /** * corners */ updateMinMax(leftAngle, maxR); updateMinMax(rightAngle, maxR); updateMinMax(leftAngle, minR); updateMinMax(rightAngle, minR); //double shortCutGainMax = maxR + r - maxDistance; //assert(minDistance <= minR + r); //assert(maxDistance <= maxR + r); assert(minDistance < maxDistance); return std::pair<double, double>(minDistance, maxDistance); } /** * Does the point at (angle, r) fall inside the region managed by this QuadNode? 
* * @param angle Angular coordinate of input point * @param r Radial coordinate of input points * * @return True if input point lies within the region of this QuadNode */ bool responsible(double angle, double r) const { return (angle >= leftAngle && angle < rightAngle && r >= minR && r < maxR); } /** * Get all Elements in this QuadNode or a descendant of it * * @return vector of content type T */ std::vector<T> getElements() const { if (isLeaf) { return content; } else { assert(content.size() == 0); assert(angles.size() == 0); assert(radii.size() == 0); vector<T> result; for (index i = 0; i < children.size(); i++) { std::vector<T> subresult = children[i].getElements(); result.insert(result.end(), subresult.begin(), subresult.end()); } return result; } } void getCoordinates(vector<double> &anglesContainer, vector<double> &radiiContainer) const { assert(angles.size() == radii.size()); if (isLeaf) { anglesContainer.insert(anglesContainer.end(), angles.begin(), angles.end()); radiiContainer.insert(radiiContainer.end(), radii.begin(), radii.end()); } else { assert(content.size() == 0); assert(angles.size() == 0); assert(radii.size() == 0); for (index i = 0; i < children.size(); i++) { children[i].getCoordinates(anglesContainer, radiiContainer); } } } /** * Main query method, get points lying in a Euclidean circle around the center point. * Optional limits can be given to get a different result or to reduce unnecessary comparisons * * Elements are pushed onto a vector which is a required argument. 
This is done to reduce copying * * Safe to call in parallel if diagnostics are disabled * * @param center Center of the query circle * @param radius Radius of the query circle * @param result Reference to the vector where the results will be stored * @param minAngle Optional value for the minimum angular coordinate of the query region * @param maxAngle Optional value for the maximum angular coordinate of the query region * @param lowR Optional value for the minimum radial coordinate of the query region * @param highR Optional value for the maximum radial coordinate of the query region */ void getElementsInEuclideanCircle(Point2D<double> center, double radius, vector<T> &result, double minAngle=0, double maxAngle=2*M_PI, double lowR=0, double highR = 1) const { if (minAngle >= rightAngle || maxAngle <= leftAngle || lowR >= maxR || highR < lowerBoundR) return; if (outOfReach(center, radius)) { return; } if (isLeaf) { const double rsq = radius*radius; const double queryX = center[0]; const double queryY = center[1]; const count cSize = content.size(); for (int i=0; i < cSize; i++) { const double deltaX = positions[i].getX() - queryX; const double deltaY = positions[i].getY() - queryY; if (deltaX*deltaX + deltaY*deltaY < rsq) { result.push_back(content[i]); } } } else { for (index i = 0; i < children.size(); i++) { children[i].getElementsInEuclideanCircle(center, radius, result, minAngle, maxAngle, lowR, highR); } } } count getElementsProbabilistically(Point2D<double> euQuery, std::function<double(double)> prob, bool suppressLeft, vector<T> &result) const { double phi_q, r_q; HyperbolicSpace::cartesianToPolar(euQuery, phi_q, r_q); if (suppressLeft && phi_q > rightAngle) return 0; TRACE("Getting Euclidean distances"); auto distancePair = EuclideanDistances(phi_q, r_q); double probUB = prob(distancePair.first); double probLB = prob(distancePair.second); assert(probLB <= probUB); if (probUB > 0.5) probUB = 1;//if we are going to take every second element anyway, no use in 
calculating expensive jumps if (probUB == 0) return 0; //TODO: return whole if probLB == 1 double probdenom = std::log(1-probUB); if (probdenom == 0) { DEBUG(probUB, " not zero, but too small too process. Ignoring."); return 0; } TRACE("probUB: ", probUB, ", probdenom: ", probdenom); count expectedNeighbours = probUB*size(); count candidatesTested = 0; if (isLeaf) { const count lsize = content.size(); TRACE("Leaf of size ", lsize); for (index i = 0; i < lsize; i++) { //jump! if (probUB < 1) { double random = Aux::Random::real(); double delta = std::log(random) / probdenom; assert(delta == delta); assert(delta >= 0); i += delta; if (i >= lsize) break; TRACE("Jumped with delta ", delta, " arrived at ", i); } assert(i >= 0); //see where we've arrived candidatesTested++; double distance = positions[i].distance(euQuery); double q = prob(distance); q = q / probUB; //since the candidate was selected by the jumping process, we have to adjust the probabilities assert(q <= 1); assert(q >= 0); //accept? double acc = Aux::Random::real(); if (acc < q) { TRACE("Accepted node ", i, " with probability ", q, "."); result.push_back(content[i]); } } } else { if (expectedNeighbours < 4 || probUB < 1/1000) {//select candidates directly instead of calling recursively TRACE("probUB = ", probUB, ", switching to direct candidate selection."); assert(probUB < 1); const count stsize = size(); for (index i = 0; i < stsize; i++) { double delta = std::log(Aux::Random::real()) / probdenom; assert(delta >= 0); i += delta; TRACE("Jumped with delta ", delta, " arrived at ", i, ". Calling maybeGetKthElement."); if (i < size()) maybeGetKthElement(probUB, euQuery, prob, i, result);//this could be optimized. 
As of now, the offset is subtracted separately for each point else break; candidatesTested++; } } else {//carry on as normal for (index i = 0; i < children.size(); i++) { TRACE("Recursively calling child ", i); candidatesTested += children[i].getElementsProbabilistically(euQuery, prob, suppressLeft, result); } } } //DEBUG("Expected at most ", expectedNeighbours, " neighbours, got ", result.size() - offset); return candidatesTested; } void maybeGetKthElement(double upperBound, Point2D<double> euQuery, std::function<double(double)> prob, index k, vector<T> &circleDenizens) const { TRACE("Maybe get element ", k, " with upper Bound ", upperBound); assert(k < size()); if (isLeaf) { double acceptance = prob(euQuery.distance(positions[k]))/upperBound; TRACE("Is leaf, accept with ", acceptance); if (Aux::Random::real() < acceptance) circleDenizens.push_back(content[k]); } else { TRACE("Call recursively."); index offset = 0; for (index i = 0; i < children.size(); i++) { count childsize = children[i].size(); if (k - offset < childsize) { children[i].maybeGetKthElement(upperBound, euQuery, prob, k - offset, circleDenizens); break; } offset += childsize; } } } /** * Shrink all vectors in this subtree to fit the content. * Call after quadtree construction is complete, causes better memory usage and cache efficiency */ void trim() { content.shrink_to_fit(); positions.shrink_to_fit(); angles.shrink_to_fit(); radii.shrink_to_fit(); if (!isLeaf) { for (index i = 0; i < children.size(); i++) { children[i].trim(); } } } /** * Number of points lying in the region managed by this QuadNode */ count size() const { return isLeaf ? 
content.size() : subTreeSize; } void recount() { subTreeSize = 0; for (index i = 0; i < children.size(); i++) { children[i].recount(); subTreeSize += children[i].size(); } } /** * Height of subtree hanging from this QuadNode */ count height() const { count result = 1;//if leaf node, the children loop will not execute for (auto child : children) result = std::max(result, child.height()+1); return result; } /** * Leaf cells in the subtree hanging from this QuadNode */ count countLeaves() const { if (isLeaf) return 1; count result = 0; for (index i = 0; i < children.size(); i++) { result += children[i].countLeaves(); } return result; } double getLeftAngle() const { return leftAngle; } double getRightAngle() const { return rightAngle; } double getMinR() const { return minR; } double getMaxR() const { return maxR; } index getID() const { return ID; } index indexSubtree(index nextID) { index result = nextID; assert(children.size() == 4 || children.size() == 0); for (int i = 0; i < children.size(); i++) { result = children[i].indexSubtree(result); } this->ID = result; return result+1; } index getCellID(double phi, double r) const { if (!responsible(phi, r)) return NetworKit::none; if (isLeaf) return getID(); else { for (int i = 0; i < children.size(); i++) { index childresult = children[i].getCellID(phi, r); if (childresult != NetworKit::none) return childresult; } throw std::runtime_error("No responsible child node found even though this node is responsible."); } } index getMaxIDInSubtree() const { if (isLeaf) return getID(); else { index result = -1; for (int i = 0; i < 4; i++) { result = std::max(children[i].getMaxIDInSubtree(), result); } return std::max(result, getID()); } } count reindex(count offset) { if (isLeaf) { #pragma omp task { index p = offset; std::generate(content.begin(), content.end(), [&p](){return p++;}); } offset += size(); } else { for (int i = 0; i < 4; i++) { offset = children[i].reindex(offset); } } return offset; } }; } #endif /* QUADNODE_H_ */
trmv_x_csr_n_hi.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Upper-triangular, non-unit-diagonal sparse matrix-vector product for CSR:
 *     y := alpha * triu(A) * x + beta * y
 * Only entries with col >= row participate; the matrix must be square.
 * Rows are independent, so the outer loop parallelizes without synchronization.
 */
static alphasparse_status_t trmv_x_csr_n_hi_omp(const ALPHA_Number alpha,
                                                const ALPHA_SPMAT_CSR *A,
                                                const ALPHA_Number *x,
                                                const ALPHA_Number beta,
                                                ALPHA_Number *y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;

    /* A triangular product is only defined for square matrices. */
    if (m != n)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;

#ifdef _OPENMP
    /* Query the thread count only in OpenMP builds; the original computed it
     * unconditionally, leaving an unused variable in serial builds. */
    const ALPHA_INT num_threads = alpha_get_thread_num();
#pragma omp parallel for num_threads(num_threads)
#endif
    for (ALPHA_INT i = 0; i < m; ++i)
    {
        ALPHA_Number tmp;
        alpha_setzero(tmp);

        /* Accumulate the dot product of row i over upper-triangular entries. */
        for (ALPHA_INT ai = A->rows_start[i]; ai < A->rows_end[i]; ++ai)
        {
            const ALPHA_INT col = A->col_indx[ai];
            if (col >= i)
            {
                alpha_madde(tmp, A->values[ai], x[col]);
            }
        }

        /* y[i] = alpha * tmp + beta * y[i] */
        alpha_mule(tmp, alpha);
        alpha_mule(y[i], beta);
        alpha_adde(y[i], tmp);
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/*
 * Public entry point (name expanded from the ONAME macro by the build system);
 * delegates to the OpenMP-capable kernel above.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_CSR *A,
                           const ALPHA_Number *x,
                           const ALPHA_Number beta,
                           ALPHA_Number *y)
{
    return trmv_x_csr_n_hi_omp(alpha, A, x, beta, y);
}
DRB101-task-value-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
 * Cover an implicitly determined rule: In a task generating construct,
 * a variable without applicable rules is firstprivate.
 *
 */
#include <stdio.h>
#define MYLEN 100
int a[MYLEN];
int b[MYLEN];

/* Writes i+1 into the global array slot a[i]; each call touches a distinct
 * element, so concurrent calls with distinct i do not race. */
void gen_task(int i)
{
  a[i]= i+1;
}

int main()
{
  int i=0;

  /* Each iteration writes a different a[i] via gen_task, and i is private,
   * so this parallel loop is race-free. */
#pragma omp parallel
#pragma omp for private(i)
  for (i=0; i<MYLEN; i++)
  {
    gen_task(i);
  }

  /* checking control flow */
  /* Reads a[] (fully written above, with an implicit barrier at the end of
   * the previous worksharing loop) and conditionally records mismatches in
   * b[i]; again one distinct element per iteration. */
#pragma omp parallel for private(i)
  for (i=0; i<MYLEN; i++)
  {
    //assert (a[i]==i+1);
    if (a[i]!= i+1)
    {
      b[i] = a[i];
    }
  }

  /* Serial verification output: prints each a[i] next to b[i] (b[i] stays 0
   * unless a mismatch was detected). */
  for (i=0; i<MYLEN; i++)
  {
    printf("%d %d\n", a[i], b[i]);
  }

  return 0;
}
decoder.c
/*! @file * @brief * * @version 1.0.0 * * (C) Copyright 2017 GoPro Inc (http://gopro.com/). * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "config.h" #include "timing.h" #if WARPSTUFF #include "WarpLib.h" #endif //#include <stdlib.h> #include <stddef.h> #include <math.h> #include <memory.h> #include <time.h> //#include <stdint.h> #ifndef DEBUG #define DEBUG (1 && _DEBUG) #endif #ifndef TIMING #define TIMING (1 && _TIMING) #endif #ifndef XMMOPT #define XMMOPT (1 && _XMMOPT) #endif #define GEN_LICENSE 0 #ifndef PI #define PI 3.14159265359f #endif #ifdef _WIN32 #include <windows.h> #elif __APPLE__ #include "macdefs.h" #else #ifndef ZeroMemory #define ZeroMemory(p,s) memset(p,0,s) #endif #endif #include <stdio.h> #include <assert.h> #include <emmintrin.h> // Intel aligned alloc and free #include "dump.h" #include "decoder.h" #include "codec.h" #include "vlc.h" #include "codebooks.h" // References to the codebooks #include "debug.h" #include "color.h" // Color formats supported by image processing routines #include "image.h" #include "filter.h" #include "spatial.h" #include "temporal.h" //#include "logo40x5.h" #include "convert.h" #include "wavelet.h" #include "bitstream.h" #include "frame.h" #include "cpuid.h" #include "bayer.h" #include "metadata.h" #include "DemoasicFrames.h" //TODO: Change filename to lower case #include "swap.h" #include "draw.h" #include "RGB2YUV.h" #include "lutpath.h" #include "exception.h" extern void FastVignetteInplaceWP13(DECODER *decoder, 
int displayWidth, int width, int height, int y, float r1, float r2, float gain, int16_t *sptr, int resolution, int pixelsize); extern void FastSharpeningBlurHinplaceWP13(int width, int16_t *sptr, float sharpness, int resolution, int pixelsize); extern void FastSharpeningBlurVWP13(short *Aptr, short *Bptr, short *Cptr, short *Dptr, short *Eptr, int pitch, int edgenear, short *output, int pixels, float sharpness, int resolution, int channel_blend_type); extern void FastSharpeningBlurVW13A(short *Aptr, short *Bptr, short *Cptr, short *Dptr, short *Eptr, int pitch, int edgenear, short *output, int pixels, float sharpness, int resolution, int channel_blend_type); #ifdef SPI_LOADER #include "spi.h" #include "keyframes.h" #endif #ifndef DUMP #define DUMP (0 && _DUMP) #endif #define ERROR_TOLERANT 1 #if defined(_WIN32) && DEBUG #include <tchar.h> // For printing debug string in the console window #endif #define _DECODE_TRANSFORM 1 // Enable concurrent decoding and inverse transform #define _TRANSFORM_FIELDPLUS 1 // Use the field plus transform #if _SIF // In SIF resolution, enable the _DECODE_TRANSFORM switch #if _DECODE_TRANSFORM == 0 #define _DECODE_TRANSFORM 1 #endif #endif #ifndef _FSMBUFFER #define _FSMBUFFER 0 #endif // Turn off saturation in this file #ifdef SATURATE #undef SATURATE #endif //#define SATURATE(x) (assert(PIXEL_MIN <= (x) && (x) <= PIXEL_MAX), (x)) //#define SATURATE8S(x) (assert(PIXEL8S_MIN <= (x) && (x) <= PIXEL8S_MAX), (x)) #define SATURATE8S(x) SATURATE_8S(x) #define SATURATE(x) (x) // Enable or disable function inlining #if 1 //DEBUG #define inline #else #define inline __forceinline #endif // Pixel size used for computing the compression ratio #define BITS_PER_PIXEL 8 // Default processor capabilities #define DEFAULT_FEATURES (_CPU_FEATURE_MMX ) #define DEMOSAIC_DELAYLINES 4 // Forward references void AllocDecoderGroup(DECODER *decoder); bool AllocDecoderBuffer(DECODER *decoder, int width, int height, int format); void EraseDecoderFrames(DECODER 
*decoder); TRANSFORM *AllocGroupTransform(GROUP *group, int channel); void EraseOutputBuffer(uint8_t *buffer, int width, int height, int32_t pitch, int format); #if _DEBUG bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, FILE *logfile); #else bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch); #endif bool DecodeBandFSM16sNoGapHighByte(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant); bool DecodeBandFSM16sNoGap2Pass(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant); void CopyLowpassRGB444ToBuffer(DECODER *decoder, IMAGE *image_array[], int num_channels, uint8_t *output_buffer, int32_t output_pitch, FRAME_INFO *info, int chroma_offset, int precision); extern void Row16uQuarter2OutputFormat(DECODER *decoder, FRAME_INFO *info, int thread_index, uint8_t *output, int pitch, int frame, void *scratch, size_t scratch_size, int threading, uint8_t *channeldata[TRANSFORM_MAX_CHANNELS], // used in quarter res decodes int channelpitch[TRANSFORM_MAX_CHANNELS]); // used in quarter res decodes); //extern void ComputeCube(DECODER *decoder); extern bool NeedCube(DECODER *decoder); extern void LoadTweak(); //extern int g_topdown; //extern int g_bottomup; // Performance measurements #if _TIMING extern TIMER tk_decompress; // Timers extern TIMER tk_decoding; extern TIMER tk_convert; extern TIMER tk_inverse; extern COUNTER decode_byte_count; // Counters extern COUNTER sample_byte_count; extern COUNTER alloc_group_count; extern COUNTER alloc_transform_count; extern COUNTER alloc_buffer_count; extern COUNTER spatial_decoding_count; extern COUNTER temporal_decoding_count; extern COUNTER progressive_decode_count; #endif #if 0 // Table that maps from decoded format to pixel size static const int PixelSize[] = { 0, // DECODED_FORMAT_UNSUPPORTED 2, // DECODED_FORMAT_YUYV 2, // DECODED_FORMAT_UYVY 2, 
// DECODED_FORMAT_420 4, // DECODED_FORMAT_RGB32 3, // DECODED_FORMAT_RGB24 2, // DECODED_FORMAT_RGB555 2, // DECODED_FORMAT_RGB565 #if 0 2, // DECODED_FORMAT_YUYV_INVERTED 2, // DECODED_FORMAT_UYVY_INVERTED 2, // DECODED_FORMAT_420_INVERTED #endif 4, // DECODED_FORMAT_RGB32_INVERTED 3, // DECODED_FORMAT_RGB24_INVERTED 2, // DECODED_FORMAT_RGB555_INVERTED 2, // DECODED_FORMAT_RGB565_INVERTED 3, // DECODED_FORMAT_V210, 4, // DECODED_FORMAT_YU64, // Custom 16 bits per channel (all data scaled up) YUYV format. 4, // DECODED_FORMAT_YR16 // Rows of YUV with 16 bits per channel }; #if _DEBUG char *decoded_format_string[] = { "Unsupported", "YUYV", "UYUV", "420", "RGB32", "RGB24", "RGB555", "RGB565", #if 0 "YUYV Inverted", "UYVY Inverted", "420 Inverted", #endif //#if BUILD_PROSPECT "RGB32 Inverted", "RGB24 Inverted", "RGB555 Inverted", "RGB565 Inverted", "V210" //#endif }; #endif #else static const int pixel_size_table[] = { 0, // COLOR_FORMAT_UNKNOWN 2, // COLOR_FORMAT_UYVY 2, // COLOR_FORMAT_YUYV 2, // COLOR_FORMAT_YVYU 0, // COLOR_FORMAT_YV12 0, // COLOR_FORMAT_I420 2, // COLOR_FORMAT_RGB16 3, // COLOR_FORMAT_RGB24 4, // COLOR_FORMAT_RGB32 0, 3, // COLOR_FORMAT_V210 0, // COLOR_FORMAT_RGB10 4, // COLOR_FORMAT_YU64 4, // COLOR_FORMAT_YR16 4, // COLOR_FORMAT_YUVA }; static const int pixel_size_table_length = sizeof(pixel_size_table)/sizeof(pixel_size_table[0]); static int PixelSize(int format) { int pixel_size = 0; // Mask off the other fields in the format descriptor // Use the lookup table to determine the pixel size (if possible) if (0 <= format && format < pixel_size_table_length) { pixel_size = pixel_size_table[format]; //return pixel_size; } //TODO: Change the rest of this routine into one big switch statement // Is this an Avid format? 
    // Avid-specific 4:2:2 formats (continuation of PixelSize()).
    else if (COLOR_FORMAT_AVID <= format && format <= COLOR_FORMAT_AVID_END)
    {
        switch (format)
        {
        case COLOR_FORMAT_CbYCrY_8bit:
        case COLOR_FORMAT_CbYCrY_10bit_2_8:     // Only valid for the lower plane
            pixel_size = 1;
            break;

        case COLOR_FORMAT_CbYCrY_16bit:
        case COLOR_FORMAT_CbYCrY_16bit_2_14:
        case COLOR_FORMAT_CbYCrY_16bit_10_6:
            pixel_size = 2;
            break;

        default:
            assert(0);
            pixel_size = 2;     // Assume 16 bits per pixel if the format is unknown
            break;
        }
    }
    // Is this a Bayer format?
    else if (COLOR_FORMAT_BAYER <= format && format <= COLOR_FORMAT_BAYER_END)
    {
        // NOTE(review): relies on the Bayer enum values starting at 100 -- confirm
        pixel_size = (format - 100);
        if (pixel_size > 2)
            pixel_size = 2;
    }
    else if (format == COLOR_FORMAT_RG48)
        pixel_size = 6;     // three 16-bit components per pixel
    else if (format == COLOR_FORMAT_RG64)
        pixel_size = 8;     // four 16-bit components per pixel
    else if (format == COLOR_FORMAT_B64A)
    {
        pixel_size = 8;     // four 16-bit components per pixel (b64a)
    }

    return pixel_size;
}
#endif

// Return the size in bytes of one pixel in the specified decoded format.
// Asserts (debug builds) and returns zero for formats that do not have an
// integral number of bytes per pixel (V210, 10-bit 2-8) or are unknown.
int DecodedPixelSize(DECODED_FORMAT format)
{
    int pixel_size = 0;

    // Compute the pixel size
    switch (format)
    {
    case DECODED_FORMAT_YUYV:
        pixel_size = 2;
        break;

    case DECODED_FORMAT_RGB32:
        pixel_size = 4;
        break;

    case DECODED_FORMAT_RG48:
        pixel_size = 6;
        break;

    case DECODED_FORMAT_CT_UCHAR:
        pixel_size = 2;
        break;

    case DECODED_FORMAT_CT_SHORT:
    case DECODED_FORMAT_CT_SHORT_2_14:
    case DECODED_FORMAT_CT_USHORT_10_6:
        pixel_size = 4;
        break;

    case DECODED_FORMAT_CT_10Bit_2_8:
    case DECODED_FORMAT_V210:
        // This routine should not be called to compute the pixel sizes for these formats
        assert(0);
        return 0;
        break;

    case DECODED_FORMAT_ROW16U:
        pixel_size = 4;
        break;

    default:
        assert(0);
        return 0;
        break;
    }

    return pixel_size;
}

#if 0
// Convert FOURCC code to a string (disabled, currently unused)
static void str4cc(char *string, uint32_t marker)
{
    char *p = (char *)&marker + 3;
    char *s = string;
    int i;

    for (i = 0; i < 4; i++)
        *(s++) = *(p--);
    *s = '\0';
}
#endif

// Compute the display aspect ratio (*w : *h) for the decoded frame.  The
// decoded dimensions are scaled back up to the original frame size first,
// then the embedded picture/pixel aspect information is used; if that is
// absent or untrustworthy, a ratio is guessed from common frame sizes.
void GetDisplayAspectRatio(DECODER *decoder, int *w, int *h)
{
    int origw, origh, guess = 0;

    origw = decoder->frame.width;
    origh = decoder->frame.height;

    // Undo the resolution scaling to recover the original frame dimensions
    switch (decoder->frame.resolution)
    {
    case DECODED_RESOLUTION_FULL:
        break;

    case DECODED_RESOLUTION_HALF:
        origw *=
2; origh *= 2; break; case DECODED_RESOLUTION_QUARTER: origw *= 4; origh *= 4; break; case DECODED_RESOLUTION_LOWPASS_ONLY: origw *= 8; origh *= 8; break; case DECODED_RESOLUTION_FULL_DEBAYER: break; case DECODED_RESOLUTION_HALF_NODEBAYER: origw *= 2; origh *= 2; break; case DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED: origw *= 4; origh *= 4; break; case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER: //origw *= 2; //DAN20110129 -- seems the width has been corrected elsewhere or was never halved. break; case DECODED_RESOLUTION_HALF_HORIZONTAL: origw *= 2; break; case DECODED_RESOLUTION_HALF_VERTICAL: origh *= 2; break; } if(decoder->codec.picture_aspect_x <= 0 || decoder->codec.picture_aspect_y <= 0) guess = 1; // if guess default values, we can't trust them if(decoder->codec.picture_aspect_x == 16 && decoder->codec.picture_aspect_y == 9) guess = 1; if(decoder->pixel_aspect_x && decoder->pixel_aspect_y) { int j,den,num; decoder->codec.picture_aspect_x = num = (origw * decoder->pixel_aspect_x) / decoder->pixel_aspect_y; decoder->codec.picture_aspect_y = den = origh; for(j=2; j<num+den; j++) { while(num == (num/j)*j && den == (den/j)*j) { num /= j; den /= j; } } decoder->codec.picture_aspect_x = num; decoder->codec.picture_aspect_y = den; guess = 0; } if(guess) { if(origw > 720) //HD. { if(origh == 1080) { if(origw == 2048) *w=origw,*h=origh; else *w=16,*h=9; // assume 16x9 } else if(origh == 720) { *w=16,*h=9; // assume 16x9 } else { *w=origw,*h=origh; // assume square pixel. } } else { if(origh == 720) { *w=16,*h=9; // assume 16x9 } else { *w=origw,*h=origh; // assume square pixel. 
            }
        }
    }
    else
    {
        // Use the (possibly reduced) picture aspect ratio computed above
        *w = decoder->codec.picture_aspect_x;
        *h = decoder->codec.picture_aspect_y;
    }
}

// Return true if the decoder supports direct decoding to this frame resolution.
bool IsValidFrameResolution(int resolution)
{
    switch (resolution)
    {
    case DECODED_RESOLUTION_FULL:
    case DECODED_RESOLUTION_HALF:
    case DECODED_RESOLUTION_QUARTER:
    case DECODED_RESOLUTION_LOWPASS_ONLY:
    case DECODED_RESOLUTION_HALF_HORIZONTAL:
    case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER:
        return true;

    default:
        return false;
    }
}

// Return true if this decoder can decode to quarter resolution
bool IsQuarterResolutionEnabled(DECODER *decoder)
{
    return true;
}

// Size in bytes of the decoder state (lets callers allocate it opaquely)
size_t DecoderSize()
{
    return sizeof(DECODER);
}

// Initialize a decoder: clears all state (preserving externally-set thread
// control parameters), records the logfile, and installs the codebooks used
// for entropy decoding.  Pass cs == NULL to use the default codeset (cs9).
void InitDecoder(DECODER *decoder, FILE *logfile, CODESET *cs)
{
#if (0 && DEBUG)
    if (logfile)
    {
        fprintf(logfile, "InitDecoder, decoder: 0x%p\n", decoder);
    }
#endif

    {
        //TODO: Clear the decoder before setting the CPU limit and affinity
        int i;

        // Save key parameters across the memset below
        Thread_cntrl saved_params = decoder->thread_cntrl;

        // Clear everything
        memset(decoder, 0, sizeof(DECODER));

        // Restore key parameters
        if (saved_params.set_thread_params == 1)    // used by the DShow interface
        {
            decoder->thread_cntrl = saved_params;
        }

#if _TIMING
        InitTiming();
#endif

        // Set the file for status information during decoding
        decoder->logfile = logfile;

        // Initialize the decoding error to no error
        decoder->error = CODEC_ERROR_OKAY;

        // Most recent marker found during decoding
        decoder->marker = 0;

        // Count of frames decoded
        decoder->frame_count = 0;

        // Set the codebooks that will be used for decoding
        if (cs != NULL)
        {
            // Use the codeset provided in the call
            for (i = 0; i < CODEC_NUM_CODESETS; i++)
            {
                // Codebook for decoding highpass coefficients
                decoder->magsbook[i] = cs[i].magsbook;

                // Codebook for decoding runs of coefficients
                decoder->runsbook[i] = cs[i].runsbook;

                // Lookup table for fast codebook search
                decoder->fastbook[i] = cs[i].fastbook;
            }
        }
        else
        {
            // Use the default codeset
            decoder->magsbook[0] = cs9.magsbook;
            decoder->runsbook[0] =
            cs9.runsbook;
            decoder->fastbook[0] = cs9.fastbook;
        }

        // Initialize the codec state
        InitCodecState(&decoder->codec);

        InitScratchBuffer(&decoder->scratch, NULL, 0);

#if _DUMP
        // Initialize the descriptor for controlling debug output
        decoder->dump.enabled = false;
        decoder->dump.channel_mask = 0;
        decoder->dump.wavelet_mask = 0;
        memset(decoder->dump.directory, 0, sizeof(decoder->dump.directory));
        memset(decoder->dump.filename, 0, sizeof(decoder->dump.filename));
#endif
    }

    //REDTEST
    decoder->frm = 0;
    decoder->run = 1;

#if _ALLOCATOR
    decoder->allocator = NULL;
#endif

    // Mark the decoder as initialized so ClearDecoder() knows there is state to free
    decoder->initialized = 1;   //DAN20060912
}

// Record the license key in the decoder, but only if no license has been set
// yet -- a previously installed (non-zero) key is never overwritten.
void InitDecoderLicense(DECODER *decoder, const unsigned char *licensekey)
{
    if (decoder && licensekey)
    {
        const unsigned char unlicensed[16] = {0};
        //memset(unlicensed, 0, sizeof(unlicensed));

        // Has the license been set?
        if (memcmp(decoder->licensekey, unlicensed, sizeof(decoder->licensekey)) == 0)
        {
            // Copy the license into the decoder
            memcpy(decoder->licensekey, licensekey, sizeof(decoder->licensekey));
        }
    }
}

// Free data allocated within the decoder (transforms, scratch buffers,
// metadata databases, lookup tables).  Safe to call more than once: the
// initialized flag guards against freeing twice.
void ClearDecoder(DECODER *decoder)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
#if _ALLOCATOR
    ALLOCATOR *allocator = decoder->allocator;
#endif

    // Free the transforms allocated in the decoder
    int i;

    if (decoder->initialized == 0)
        return;     // nothing to free //DAN20060912

#if _GRAPHICS
    DrawClose(decoder);
#endif

    // Free the metadata databases
    for (i = 0; i <= METADATA_PRIORITY_MAX; i++)
    {
        if (decoder->DataBases[i])
        {
#if _ALLOCATOR
            Free(decoder->allocator, decoder->DataBases[i]);
#else
            MEMORY_FREE(decoder->DataBases[i]);
#endif
            decoder->DataBases[i] = NULL;
            decoder->DataBasesSize[i] = 0;
            decoder->DataBasesAllocSize[i] = 0;
        }
    }

    if (decoder->sqrttable)
    {
#if _ALLOCATOR
        Free(decoder->allocator, decoder->sqrttable);
#else
        MEMORY_FREE(decoder->sqrttable);
#endif
        decoder->sqrttable = NULL;
    }

    for (i = 0; i < TRANSFORM_MAX_CHANNELS; i++)
    {
#if _ALLOCATOR
        FreeTransform(allocator, decoder->transform[i]);
#else
        FreeTransform(decoder->transform[i]);
#endif decoder->transform[i] = NULL; } if(decoder->aligned_sample_buffer) { #if _ALLOCATOR FreeAligned(decoder->allocator, decoder->aligned_sample_buffer); #else MEMORY_ALIGNED_FREE(decoder->aligned_sample_buffer); #endif decoder->aligned_sample_buffer = NULL; decoder->aligned_sample_buffer_size = 0; } if(decoder->tools) { #if _ALLOCATOR Free(decoder->allocator, decoder->tools); #else MEMORY_FREE(decoder->tools); #endif decoder->tools = NULL; } // Free the buffer allocated for decoding if (decoder->buffer != NULL) { #if DEBUG_BUFFER_USAGE int i; char *ptr = (char *)decoder->buffer; FILE *fp = fopen("C:/free.txt", "a"); fprintf(fp, "decoder->buffer = %08x buffer_size = %d\n", decoder->buffer ,decoder->buffer_size); i = decoder->buffer_size-1; while(ptr[i] == 1) i--; fprintf(fp, "used %2.3f percent\n", 100.0*(float)i/(float)decoder->buffer_size); fclose(fp); #endif #if _ALLOCATOR FreeAligned(allocator, decoder->buffer); #else MEMORY_ALIGNED_FREE(decoder->buffer); #endif decoder->buffer = NULL; decoder->buffer_size = 0; // Clear the fields in the scratch buffer descriptor memset(&decoder->scratch, 0, sizeof(SCRATCH)); // Eventually the buffer and buffer size fields will be obsolete } for(i=0;i<_MAX_CPUS;i++) { if(decoder->threads_buffer[i]) { #if _ALLOCATOR FreeAligned(decoder->allocator, decoder->threads_buffer[i]); #else MEMORY_ALIGNED_FREE(decoder->threads_buffer[i]); #endif decoder->threads_buffer[i] = NULL; } } decoder->threads_buffer_size = 0; // Do not attempt to free the codebooks since the // codebook pointers are references to static tables // Can free some of the data structures allocated by the decoder FreeCodebooks(decoder); #if _INTERLACED_WORKER_THREADS if(decoder->interlaced_worker.lock_init) // threads started { int i; // Signal this thread to stop SetEvent(decoder->interlaced_worker.stop_event); // Free all handles used by the worker threads for(i=0; i<THREADS_IN_LAST_WAVELET; i++) { WaitForSingleObject(decoder->interlaced_worker.handle[i], 
INFINITE); //JY20080307 CloseHandle(decoder->interlaced_worker.handle[i]); CloseHandle(decoder->interlaced_worker.start_event[i]); CloseHandle(decoder->interlaced_worker.done_event[i]); } CloseHandle(decoder->interlaced_worker.row_semaphore); CloseHandle(decoder->interlaced_worker.stop_event); for(i=0; i<THREADS_IN_LAST_WAVELET; i++) { decoder->interlaced_worker.handle[i] = 0; decoder->interlaced_worker.start_event[i] = 0; decoder->interlaced_worker.done_event[i] = 0; } decoder->interlaced_worker.row_semaphore = 0; decoder->interlaced_worker.stop_event = 0; } // Free the critical section used by the worker threads DeleteCriticalSection(&decoder->interlaced_worker.lock); decoder->interlaced_worker.lock_init = 0; #endif #if _THREADED if(decoder->entropy_worker_new.pool.thread_count) { ThreadPoolDelete(&decoder->entropy_worker_new.pool); DeleteLock(&decoder->entropy_worker_new.lock); } if(decoder->worker_thread.pool.thread_count) { ThreadPoolDelete(&decoder->worker_thread.pool); DeleteLock(&decoder->worker_thread.lock); } if(decoder->draw_thread.pool.thread_count) { ThreadPoolDelete(&decoder->draw_thread.pool); DeleteLock(&decoder->draw_thread.lock); } /* if(decoder->qt_convert_worker.pool.thread_count) { ThreadPoolDelete(&decoder->qt_convert_worker.pool); DeleteLock(&decoder->qt_convert_worker.lock); } if(decoder->qt_scale_worker.pool.thread_count) { ThreadPoolDelete(&decoder->qt_scale_worker.pool); DeleteLock(&decoder->qt_scale_worker.lock); } */ if(decoder->parallelDecoder) { if(decoder->parallelDecoder->decoder_thread.pool.thread_count) { ThreadPoolDelete(&decoder->parallelDecoder->decoder_thread.pool); DeleteLock(&decoder->parallelDecoder->decoder_thread.lock); decoder->parallelDecoder->decoder_thread.pool.thread_count = 0; } ClearDecoder(decoder->parallelDecoder); #if _ALLOCATOR Free(decoder->allocator, decoder->parallelDecoder); #else MEMORY_FREE(decoder->parallelDecoder); #endif decoder->parallelDecoder = NULL; } #endif //MEMORY_ALIGNED_FREE(RawBayer16); #if 
_ALLOCATOR if(decoder->RGBFilterBuffer16) { FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16); decoder->RGBFilterBuffer16 = 0; decoder->RGBFilterBufferSize = 0; } if(decoder->RawBayer16) { FreeAligned(decoder->allocator, decoder->RawBayer16); decoder->RawBayer16 = 0; decoder->RawBayerSize = 0; } if(decoder->StereoBuffer) { FreeAligned(decoder->allocator, decoder->StereoBuffer); decoder->StereoBuffer = 0; decoder->StereoBufferSize = 0; } if(decoder->RawCube) { FreeAligned(decoder->allocator, decoder->RawCube); decoder->RawCube = 0; } if(decoder->Curve2Linear) { FreeAligned(decoder->allocator, decoder->Curve2Linear); decoder->Curve2Linear = 0; } if(decoder->Linear2CurveRed) { FreeAligned(decoder->allocator, decoder->Linear2CurveRed); decoder->Linear2CurveRed = NULL; } if(decoder->Linear2CurveGrn) { FreeAligned(decoder->allocator, decoder->Linear2CurveGrn); decoder->Linear2CurveGrn = NULL; } if(decoder->Linear2CurveBlu) { FreeAligned(decoder->allocator, decoder->Linear2CurveBlu); decoder->Linear2CurveBlu = NULL; } if(decoder->BYR4LinearRestore) { FreeAligned(decoder->allocator, decoder->BYR4LinearRestore); decoder->BYR4LinearRestore = NULL; } if(decoder->GammaContrastRed) { FreeAligned(decoder->allocator, decoder->GammaContrastRed); decoder->GammaContrastRed = NULL; } if(decoder->GammaContrastGrn) { FreeAligned(decoder->allocator, decoder->GammaContrastGrn); decoder->GammaContrastGrn = NULL; } if(decoder->GammaContrastBlu) { FreeAligned(decoder->allocator, decoder->GammaContrastBlu); decoder->GammaContrastBlu = NULL; } //3d LUT { if(decoder->LUTcache) Free(decoder->allocator, decoder->LUTcache); decoder->LUTcache = NULL; decoder->LUTcacheCRC = 0; } #if WARPSTUFF { if (decoder->lens_correct_buffer) #if _ALLOCATOR Free(decoder->allocator, decoder->lens_correct_buffer); #else MEMORY_ALIGNED_FREE(decoder->lens_correct_buffer); #endif if (decoder->mesh) geomesh_destroy(decoder->mesh); decoder->lastLensOffsetX = 0; decoder->lastLensOffsetY = 0; 
decoder->lastLensOffsetZ = 0; decoder->lastLensOffsetR = 0; decoder->lastLensZoom = 0; decoder->lastLensFishFOV = 0; decoder->lastLensGoPro = 0; decoder->lastLensSphere = 0; decoder->lastLensFill = 0; decoder->lastLensStyleSel = 0; memset(decoder->lastLensCustomSRC, 0, sizeof(decoder->lastLensCustomSRC)); memset(decoder->lastLensCustomDST, 0, sizeof(decoder->lastLensCustomDST)); decoder->mesh = NULL; decoder->lens_correct_buffer = NULL; } #endif if(decoder->overrideData) { Free(decoder->allocator, decoder->overrideData); decoder->overrideData = NULL; decoder->overrideSize = 0; } for(i=0; i<64; i++) { if(decoder->mdc[i]) Free(decoder->allocator, decoder->mdc[i]); decoder->mdc[i] = NULL; decoder->mdc_size[i] = 0; } #else if(decoder->RGBFilterBuffer16) { MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16); decoder->RGBFilterBuffer16 = NULL; } if(decoder->RawBayer16) { MEMORY_ALIGNED_FREE(decoder->RawBayer16); decoder->RawBayer16 = NULL; } if(decoder->StereoBuffer) { MEMORY_ALIGNED_FREE(decoder->StereoBuffer); decoder->StereoBuffer = NULL; decoder->StereoBufferSize = 0; } if(decoder->RawCube) { MEMORY_ALIGNED_FREE(decoder->RawCube); decoder->RawCube = NULL; } if(decoder->Curve2Linear) { MEMORY_ALIGNED_FREE(decoder->Curve2Linear); decoder->Curve2Linear = NULL; } if(decoder->BYR4LinearRestore) { MEMORY_ALIGNED_FREE(decoder->BYR4LinearRestore); decoder->BYR4LinearRestore = NULL; } if(decoder->Linear2CurveRed) { MEMORY_ALIGNED_FREE(decoder->Linear2CurveRed); decoder->Linear2CurveRed = NULL; } if(decoder->Linear2CurveGrn) { MEMORY_ALIGNED_FREE(decoder->Linear2CurveGrn); decoder->Linear2CurveGrn = NULL; } if(decoder->Linear2CurveBlu) { MEMORY_ALIGNED_FREE(decoder->Linear2CurveBlu); decoder->Linear2CurveBlu = NULL; } if(decoder->GammaContrastRed) { MEMORY_ALIGNED_FREE(decoder->GammaContrastRed); decoder->GammaContrastRed = NULL; } if(decoder->GammaContrastGrn) { MEMORY_ALIGNED_FREE(decoder->GammaContrastGrn); decoder->GammaContrastGrn = NULL; } if(decoder->GammaContrastBlu) { 
MEMORY_ALIGNED_FREE(decoder->GammaContrastBlu); decoder->GammaContrastBlu = NULL; } //3d LUT { if(decoder->LUTcache) MEMORY_FREE(decoder->LUTcache); decoder->LUTcache = NULL; decoder->LUTcacheCRC = 0; } #if WARPSTUFF { if (decoder->lens_correct_buffer) #if _ALLOCATOR Free(decoder->allocator, decoder->lens_correct_buffer); #else MEMORY_ALIGNED_FREE(decoder->lens_correct_buffer); #endif if (decoder->mesh) geomesh_destroy(mesh); decoder->mesh = NULL; decoder->lens_correct_buffer = NULL; decoder->lastLensOffsetX = 0; decoder->lastLensOffsetY = 0; decoder->lastLensOffsetZ = 0; decoder->lastLensOffsetR = 0; decoder->lastLensZoom = 0; decoder->lastLensFishFOV = 0; decoder->lastLlensGoPro = 0; decoder->lastLlensSphere = 0; decoder->lastLlensFill = 0; decoder->lastLlensStyleSel = 0; memset(decoder->lastLensCustomSRC, 0, sizeof(decoder->lastLensCustomSRC)); memset(decoder->lastLensCustomDST, 0, sizeof(decoder->lastLensCustomDST)); } #endif if(decoder->overrideData) { MEMORY_FREE(decoder->overrideData); decoder->overrideData = NULL; decoder->overrideSize = 0; } for(i=0; i<64; i++) { if(decoder->mdc[i]) MEMORY_FREE(decoder->mdc[i]); decoder->mdc[i] = NULL; decoder->mdc_size[i] = 0; } #endif #ifdef SPI_LOADER SPIReleaseAll(decoder); //KeyframesReleaseAll(decoder); #endif decoder->initialized = 0;// cleared } void ExitDecoder(DECODER *decoder) { // Let the caller keep the logfile open or choose to close it //if (logfile) fclose(logfile); // Free data allocated within the decoder ClearDecoder(decoder); } // Allocate the data structures for decoding a group void AllocDecoderGroup(DECODER *decoder) { #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; #endif //CODEC_STATE *codec = &decoder->codec; //int num_channels = codec->num_channels;//DAN07022004 int channel; assert(decoder->codec.num_channels <= TRANSFORM_MAX_CHANNELS); //DAN07022004 for (channel = 0; channel < TRANSFORM_MAX_CHANNELS; channel++)//DAN07022004 { TRANSFORM *transform = decoder->transform[channel]; // Need 
to allocate a transform data structure? if (transform == NULL) { #if _ALLOCATOR transform = (TRANSFORM *)Alloc(allocator, sizeof(TRANSFORM)); #else transform = (TRANSFORM *)MEMORY_ALLOC(sizeof(TRANSFORM)); #endif assert(transform != NULL); if (transform == NULL) { decoder->error = CODEC_ERROR_TRANSFORM_MEMORY; return; } memset(transform, 0, sizeof(TRANSFORM)); decoder->transform[channel] = transform; #if _TIMING alloc_transform_count++; #endif } } } // Allocate the buffer used for intermediate results during decoding bool AllocDecoderBuffer(DECODER *decoder, int width, int height, int format) { int cpus; size_t size; size_t row_size; char *buffer; #if 0 // Allocate a buffer large enough for six rows of cache lines size = width * sizeof(PIXEL); size = ALIGN(size, _CACHE_LINE_SIZE); size = 2 * TRANSFORM_MAX_CHANNELS * size; #else // Allocate a buffer large enough for nine rows of cache lines size = width * sizeof(PIXEL) * 4; size = ALIGN(size, _CACHE_LINE_SIZE); size = 3 * TRANSFORM_MAX_CHANNELS * size; #endif switch (format) { case DECODED_FORMAT_V210: case DECODED_FORMAT_YU64: // Increase the buffer size for decoding to the V210 format row_size = 4 * width * sizeof(PIXEL); row_size = ALIGN(row_size, _CACHE_LINE_SIZE); size += 4 * 2 * row_size; break; case DECODED_FORMAT_YR16: case DECODED_FORMAT_CbYCrY_10bit_2_8: case DECODED_FORMAT_CbYCrY_16bit_2_14: case DECODED_FORMAT_CbYCrY_16bit_10_6: // Increase the buffer size for decoding to the YUV16 format row_size = 4 * width * sizeof(PIXEL); row_size = ALIGN(row_size, _CACHE_LINE_SIZE); size += 8 * 2 * row_size; break; case DECODED_FORMAT_RG48: case DECODED_FORMAT_WP13: // Increase the buffer size for decoding to the YUV16 format row_size = 6 * width * sizeof(PIXEL); row_size = ALIGN(row_size, _CACHE_LINE_SIZE); size += 12 * 2 * row_size; break; case DECODED_FORMAT_RG64: // Increase the buffer size for decoding to the YUV16 format row_size = 8 * width * sizeof(PIXEL); row_size = ALIGN(row_size, _CACHE_LINE_SIZE); size 
+= 16 * 2 * row_size; break; case DECODED_FORMAT_BYR3: // Increase the buffer size for decoding to the YUV16 format row_size = 2 * width * sizeof(PIXEL); row_size = ALIGN(row_size, _CACHE_LINE_SIZE); size += 4 * 2 * row_size; break; case DECODED_FORMAT_BYR4: // Increase the buffer size for decoding to the YUV16 format row_size = 2 * width * sizeof(PIXEL); row_size = ALIGN(row_size, _CACHE_LINE_SIZE); size += 4 * 2 * row_size; break; case DECODED_FORMAT_B64A: case DECODED_FORMAT_W13A: // Increase the buffer size for decoding to the B64A format row_size = 8 * width * sizeof(PIXEL); row_size = ALIGN(row_size, _CACHE_LINE_SIZE); size += 16 * 2 * row_size; break; default: // Increase the buffer size for YUV to RGB conversion row_size = 3 * width * sizeof(PIXEL); row_size = ALIGN(row_size, _CACHE_LINE_SIZE); size += 2 * 2 * row_size; break; } cpus = decoder->thread_cntrl.capabilities >> 16; if(cpus > 4) size *= 4; if(cpus > 16) //DAN20120803 -- 4444 clips size *= 2; // Has a buffer already been allocated? if (decoder->buffer != NULL) { // Is the buffer large enough? 
if (decoder->buffer_size < size) { // Free the previous buffer #if _ALLOCATOR FreeAligned(decoder->allocator, decoder->buffer); #else MEMORY_ALIGNED_FREE(decoder->buffer); #endif decoder->buffer = NULL; decoder->buffer_size = 0; } else { return true; } } buffer = decoder->buffer; if(buffer == NULL) { // Allocate the decoding buffer #if _ALLOCATOR buffer = (char *)AllocAligned(decoder->allocator, size, _CACHE_LINE_SIZE); #else buffer = (char *)MEMORY_ALIGNED_ALLOC(size, _CACHE_LINE_SIZE); #endif if(buffer == NULL) { return false; } } #if DEBUG_BUFFER_USAGE memset(buffer, 1, size); #endif // Save the buffer and its size in the decoder decoder->buffer = buffer; decoder->buffer_size = size; // Initialize the scratch space descriptor InitScratchBuffer(&decoder->scratch, buffer, size); // allocate buffer for each debayer/color formating thread { int i; size = (width+16)*3*2*4*2*4;// sixteen lines if(height*4 > width*3) //square or tall images where running out of scratch space for zooms. size *= 1 + ((height+(width/2))/width); if (decoder->threads_buffer_size < size) { for(i=0;i<_MAX_CPUS;i++) { if(decoder->threads_buffer[i]) { #if _ALLOCATOR FreeAligned(decoder->allocator, decoder->threads_buffer[i]); #else MEMORY_ALIGNED_FREE(decoder->threads_buffer[i]); #endif decoder->threads_buffer[i] = NULL; } } decoder->threads_buffer_size = 0; } for(i=0;i<cpus;i++) { if(decoder->threads_buffer[i] == NULL) { #if _ALLOCATOR decoder->threads_buffer[i] = (char *)AllocAligned(decoder->allocator, size, _CACHE_LINE_SIZE); #else decoder->threads_buffer[i] = (char *)MEMORY_ALIGNED_ALLOC(size, _CACHE_LINE_SIZE); #endif if(decoder->threads_buffer[i] == NULL) { return false; } } } decoder->threads_buffer_size = size; } // Eventually the scratch space descriptor will replace the buffer and buffer_size fields return true; } bool ResizeDecoderBuffer(DECODER *decoder, int width, int height, int format) { // Check that the dimensions are valid assert(width > 0); assert(height > 0); // Just call 
    // ...the allocation routine (continuation of the "Just call" comment above)
    return AllocDecoderBuffer(decoder, width, height, format);
}

// Clear the per-band valid/started flags on every wavelet in every channel
// transform so the next sample decodes into a clean state.
void ClearTransformFlags(DECODER *decoder)
{
    TRANSFORM **transform_array = decoder->transform;
    int channel;

    for (channel = 0; channel < TRANSFORM_MAX_CHANNELS; channel++)
    {
        TRANSFORM *transform = transform_array[channel];
        int index;

        // Channels are allocated contiguously; the first empty slot ends the scan
        if (transform == NULL)
            break;

        for (index = 0; index < TRANSFORM_MAX_WAVELETS; index++)
        {
            IMAGE *wavelet = transform->wavelet[index];
            if (wavelet != NULL)
            {
                wavelet->band_valid_flags = 0;
                wavelet->band_started_flags = 0;
            }
        }
    }
}

// Initialize the tables for decoding the wavelet transforms
// NOTE(review): assumes num_subbands does not exceed the capacity of the
// decoder's subband index arrays -- the memcpy calls are not bounds-checked;
// confirm at the call sites.
void InitWaveletDecoding(DECODER *decoder, int subband_wavelet_index[], int subband_band_index[],
                         int num_subbands)
{
    size_t subband_table_size = num_subbands * sizeof(int);

    memset(decoder->subband_wavelet_index, 0, sizeof(decoder->subband_wavelet_index));
    memcpy(decoder->subband_wavelet_index, subband_wavelet_index, subband_table_size);

    memset(decoder->subband_band_index, 0, sizeof(decoder->subband_band_index));
    memcpy(decoder->subband_band_index, subband_band_index, subband_table_size);
}

#if 0
// Return true if the decoder can produce the specified output color format (disabled)
static bool IsValidFormat(int format)
{
    bool valid_format = true;

    //TODO: Change this routine into a switch statement
    if (format == COLOR_FORMAT_BYR5) return true;   // can decode to BYR5
    if (format == COLOR_FORMAT_BYR4) return true;   // can decode to BYR4
    if (format == COLOR_FORMAT_BYR3) return true;   // can decode to BYR3
    if (format == COLOR_FORMAT_BYR2) return true;   // can decode to BYR2
    if (format == COLOR_FORMAT_RG48) return true;   // can decode to RGB48
    if (format == COLOR_FORMAT_RG64) return true;   // can decode to RGBA64
    if (format == COLOR_FORMAT_B64A)
    {
        return true;    // Can decode to B64A
    }

    if (!(COLOR_FORMAT_UNKNOWN < format && format <= MAX_DECODED_COLOR_FORMAT))
    {
        valid_format = false;
    }

    return valid_format;
}
#endif

#if _INTERLACED_WORKER_THREADS
// Start the pool of worker threads used for the last-wavelet stage of
// interlaced decoding (Win32 threading API).
void StartInterlaceWorkerThreads(DECODER *decoder)
{
    int i;

    if (decoder->interlaced_worker.lock_init == 0)
    {
        // Create events for
starting the worker threads for(i=0; i<THREADS_IN_LAST_WAVELET; i++) { decoder->interlaced_worker.start_event[i] = CreateEvent(NULL, false, false, NULL); } // Create a semaphore to signal the worker threads to process rows decoder->interlaced_worker.row_semaphore = CreateSemaphore(NULL, 0, LONG_MAX, NULL); // Create an event for each worker thread to signal that it has finished for(i=0; i<THREADS_IN_LAST_WAVELET; i++) { decoder->interlaced_worker.done_event[i] = CreateEvent(NULL, false, false, NULL); } // Create an event for forcing the worker threads to terminate decoder->interlaced_worker.stop_event = CreateEvent(NULL, true, false, NULL); // Zero the count of worker threads that are active decoder->interlaced_worker.thread_count = 0; // Initialize the lock for controlling access to the worker thread data InitializeCriticalSection(&decoder->interlaced_worker.lock); decoder->interlaced_worker.lock_init = 1; for (i = 0; i < THREADS_IN_LAST_WAVELET; i++) { decoder->interlaced_worker.id[i] = 0; decoder->interlaced_worker.handle[i] = CreateThread(NULL, 0, InterlacedWorkerThreadProc, decoder, 0, &decoder->interlaced_worker.id[i]); assert(decoder->interlaced_worker.handle[i] != NULL); } } } #endif #if 0 int TestException(int x) { static volatile int y1 = 100; volatile int x1 = x; return y1 / x1; } #endif // Process device driver request to initialize the decoder #if _ALLOCATOR bool DecodeInit(ALLOCATOR *allocator, DECODER *decoder, int width, int height, int format, int resolution, FILE *logfile) #else bool DecodeInit(DECODER *decoder, int width, int height, int format, int resolution, FILE *logfile) #endif { CODESET codesets[CODEC_NUM_CODESETS]; int i; int cpus; //int x = 0; #if CODEC_NUM_CODESETS == 3 memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET)); memcpy(&codesets[1], &SECOND_CODESET, sizeof(CODESET)); memcpy(&codesets[2], &THIRD_CODESET, sizeof(CODESET)); #elif CODEC_NUM_CODESETS == 2 memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET)); 
	memcpy(&codesets[1], &SECOND_CODESET, sizeof(CODESET));
#else
	memcpy(&codesets[0], &CURRENT_CODESET, sizeof(CODESET));
#endif

#ifdef _WIN32
	// Set the handler for system exceptions
	SetDefaultExceptionHandler();
#endif

	//TestException(x);

	// Clear all decoder fields except the logfile and set the codebooks for decoding
	InitDecoder(decoder, logfile, &codesets[0]);

#if _ALLOCATOR
	decoder->allocator = allocator;
#endif

	if(decoder->thread_cntrl.capabilities == 0)
	{
		// Determine the processor capabilities
		SetDecoderCapabilities(decoder);
	}
	// CPU count is packed into the upper 16 bits of the capabilities word
	cpus = decoder->thread_cntrl.capabilities >> 16;
	assert(cpus > 0 && cpus <= _MAX_CPUS);

	// Decode to half resolution?
	if (resolution == DECODED_RESOLUTION_HALF)
	{
		// Reduce the frame size by half in each dimension
		width = width/2;
		height = height/2;
	}
	else if (resolution == DECODED_RESOLUTION_QUARTER)
	{
		// Reduce the frame size by one fourth in each dimension
		width = width/4;
		height = height/4;
	}

	// Initialize the codebooks
#if _ALLOCATOR
	if (!InitCodebooks(decoder->allocator, codesets)) {
		//decoder->error = CODEC_ERROR_INIT_CODEBOOKS;
		// The subroutine has already set the error code
		return false;
	}
#else
	if (!InitCodebooks(codesets)) {
		//decoder->error = CODEC_ERROR_INIT_CODEBOOKS;
		// The subroutine has already set the error code
		return false;
	}
#endif

	// Initize the FSM
	InitDecoderFSM(decoder, &codesets[0]);

	// Check the frame dimensions and format
	//assert(width > 0);
	//assert(height > 0);
	// assert(IsValidFormat(format));

#if _THREADED_DECODER
	// Create a semaphore to signal the transform thread to begin processing
	// Initialize the transform queue
	decoder->transform_queue.started = 0;
	decoder->transform_queue.num_entries = 0;
	decoder->transform_queue.next_entry = 0;
	decoder->transform_queue.free_entry = 0;
	memset(decoder->transform_queue.queue, 0, sizeof(decoder->transform_queue.queue));
#endif

#if _INTERLACED_WORKER_THREADS && _DELAY_THREAD_START==0
	StartInterlaceWorkerThreads(decoder);
#endif

#if _THREADED
#if !_DELAY_THREAD_START	//start threads now if not _DELAY_THREAD_START
	if(cpus > 1)
	{
		// The entropy pool is capped at four threads
		int threads = cpus;
		if(threads > 4) threads = 4;

		CreateLock(&decoder->entropy_worker_new.lock);

		// Initialize the pool of transform worker threads
		ThreadPoolCreate(&decoder->entropy_worker_new.pool, threads, EntropyWorkerThreadProc, decoder);
	}

	// Initialize the lock that controls access to the generic worker thread data
	CreateLock(&decoder->worker_thread.lock);

	// Initialize the pool of transform worker threads
	ThreadPoolCreate(&decoder->worker_thread.pool, cpus, WorkerThreadProc, decoder);
#endif
#endif

	// Set the frame dimensions and format
	SetDecoderFormat(decoder, width, height, format, resolution);

	// Allocate the data structure for decoding the samples
	AllocDecoderGroup(decoder);

	// Note that this code assumes that the samples to decode are groups
	// as opposed to isolated frames which are not supported in this code

	// Allocate a buffer for storing intermediate results during decoding
	if (!AllocDecoderBuffer(decoder, width, height, format)) {
		return false;
	}

	// Should check that the finite state machine tables were initialized
	assert(decoder->fsm[0].table.flags < 0);

	// Initialize the finite state machine for this decoder
	for(i=0; i<CODEC_NUM_CODESETS; i++)
	{
		InitFSM(&decoder->fsm[i], codesets[i].fsm_table);
#if _COMPANDING
		// Scale the values in the finite state machine entries for companding
		ScaleFSM(&decoder->fsm[i].table);
#endif
	}

	// Indicate that the decoder has been initialized
	decoder->state = DECODER_STATE_INITIALIZED;

#if (1 && DUMP)
	// Write the wavelet bands as images
	SetDumpDirectory(CODEC_TYPE(decoder), DUMP_DECODER_DIRECTORY);
	SetDumpFilename(CODEC_TYPE(decoder), DUMP_DEFAULT_FILENAME);
	SetDumpChannelMask(CODEC_TYPE(decoder), 1/*ULONG_MAX*/);
	// SetDumpWaveletMask(CODEC_TYPE(decoder), 7<<4 | 1/*ULONG_MAX*/);
	SetDumpWaveletMask(CODEC_TYPE(decoder), ULONG_MAX);

	// Set this flag to enable output
	decoder->dump.enabled = true;
#endif

#if _TIMING
	// Initialize the
	// ... global timers and counters
	InitTiming();
#endif

	//DAN20160203 Fix for a memory leak in InitCookbooks
	for (i = 0; i < CODEC_NUM_CODESETS; i++)
	{
#if _ALLOCATOR
		Free(allocator, codesets[i].codebook_runbook);
		codesets[i].codebook_runbook = NULL;
		Free(allocator, codesets[i].fastbook);
		codesets[i].fastbook = NULL;
		Free(allocator, codesets[i].valuebook);
		codesets[i].valuebook = NULL;
#else
		MEMORY_FREE(codesets[i].codebook_runbook);
		codesets[i].codebook_runbook = NULL;
		MEMORY_FREE(codesets[i].fastbook);
		codesets[i].fastbook = NULL;
		MEMORY_FREE(codesets[i].valuebook);
		codesets[i].valuebook = NULL;
#endif
	}

	// The decoder has been initialized successfully
	return true;
}

// Late initialization of the entropy worker thread pool, honoring any
// caller-imposed CPU limit from cfhddata.cpu_limit.
void DecodeEntropyInit(DECODER *decoder)
{
	int cpus = 1;

	if(decoder->thread_cntrl.capabilities == 0)
	{
		// Determine the processor capabilities
		SetDecoderCapabilities(decoder);
	}
	cpus = decoder->thread_cntrl.capabilities >> 16;

	// Clamp the CPU count to the configured limit and repack it into capabilities
	if (cpus > (int)decoder->cfhddata.cpu_limit && decoder->cfhddata.cpu_limit)
	{
		cpus = decoder->cfhddata.cpu_limit;
		decoder->thread_cntrl.limit = cpus;
		decoder->thread_cntrl.set_thread_params = 1;
		decoder->thread_cntrl.capabilities &= 0xffff;
		decoder->thread_cntrl.capabilities |= cpus<<16;
	}
	assert(cpus > 0 && cpus <= _MAX_CPUS);

#if _THREADED
#if _DELAY_THREAD_START	//start threads now if not _DELAY_THREAD_START
	if(cpus > 1 && decoder->entropy_worker_new.pool.thread_count == 0)
	{
		int threads = cpus;
		if(threads > 4) threads = 4;

		CreateLock(&decoder->entropy_worker_new.lock);

		// Initialize the pool of transform worker threads
		ThreadPoolCreate(&decoder->entropy_worker_new.pool, threads, EntropyWorkerThreadProc, decoder);
	}
#endif
#endif
}

// Replace the decoder's metadata override block: free any previous block,
// then either copy in the new data or (when overrideSize is zero) clear the
// override-priority metadata databases.
bool DecodeOverrides(DECODER *decoder, unsigned char *overrideData, int overrideSize)
{
	if(decoder->overrideData)
	{
#if _ALLOCATOR
		Free(decoder->allocator, decoder->overrideData);
#else
		MEMORY_FREE(decoder->overrideData);
#endif
		decoder->overrideData = NULL;
		decoder->overrideSize = 0;
	}

	if(overrideSize)
	{
#if _ALLOCATOR
		decoder->overrideData =
			Alloc(decoder->allocator, overrideSize);
#else
		decoder->overrideData = MEMORY_ALLOC(overrideSize);
#endif
		// Copy the override data only if the allocation succeeded
		if(decoder->overrideData)
		{
			memcpy(decoder->overrideData, overrideData, overrideSize);
			decoder->overrideSize = overrideSize;
		}
	}
	else
	{
		int i;
		for(i=METADATA_PRIORITY_OVERRIDE; i<=METADATA_PRIORITY_MAX; i++)
		//This was 0 to max but that caused right eye primary corrections (side-by-side) mode to flicker.
		// This database clearing was added but I don't know why.
		{
			if(decoder->DataBases[i])
			{
#if _ALLOCATOR
				Free(decoder->allocator, decoder->DataBases[i]);
#else
				MEMORY_FREE(decoder->DataBases[i]);
#endif
				decoder->DataBases[i] = NULL;
				decoder->DataBasesSize[i] = 0;
				decoder->DataBasesAllocSize[i] = 0;
			}
		}
	}

	return true;
}

// Return the transform for the specified channel in the group, allocating
// and zero-initializing it on first use.  Returns NULL on a bad channel
// number or allocation failure.
TRANSFORM *AllocGroupTransform(GROUP *group, int channel)
{
#if _ALLOCATOR
	//TODO:ALLOC Change this routine to take an allocator as the first argument
	ALLOCATOR *allocator = NULL;
#endif
	TRANSFORM *transform;

	// Channel zero is a special case because it may mean
	// that the group header has not been decoded yet
	if (channel != 0)
	{
		// Make sure that the channel number is in range
		assert(0 <= channel && channel < group->header.num_channels);
		if (!(0 <= channel && channel < group->header.num_channels))
			return NULL;
	}

	transform = group->transform[channel];

	// Need to allocate a transform data structure?
if (transform == NULL) { #if _ALLOCATOR transform = (TRANSFORM *)Alloc(allocator, sizeof(TRANSFORM)); #else transform = (TRANSFORM *)MEMORY_ALLOC(sizeof(TRANSFORM)); #endif assert(transform != NULL); if (transform == NULL) return NULL; memset(transform, 0, sizeof(TRANSFORM)); group->transform[channel] = transform; #if _TIMING alloc_transform_count++; #endif } return transform; } //extern FILE *logfile; void EraseOutputBuffer(uint8_t *buffer, int width, int height, int32_t pitch, int format) { size_t size = height * pitch; union { uint8_t byte[4]; uint32_t word; } output; switch (format) { case DECODED_FORMAT_YUYV: output.byte[0] = COLOR_LUMA_BLACK; output.byte[1] = COLOR_CHROMA_ZERO; output.byte[2] = COLOR_LUMA_BLACK; output.byte[3] = COLOR_CHROMA_ZERO; break; default: //if (logfile) fprintf(logfile,"**Unknown format: %d\n", format); //assert(0); output.word = 0; break; } memset(buffer, output.word, size); } // Decode the coefficients in a subband bool DecodeSampleSubband(DECODER *decoder, BITSTREAM *input, int subband); // Decode the coefficients in a lowpass band bool DecodeSampleLowPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet); // Decode the coefficients in a highpass band bool DecodeSampleHighPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band, int threading); // Decode an empty band bool DecodeSampleEmptyBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band); bool DecodeBand16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height); bool DecodeBand16sLossless(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height); // Decode a sample channel header bool DecodeSampleChannelHeader(DECODER *decoder, BITSTREAM *input); // Apply the inverse horizontal-temporal transform to reconstruct the output frame void ReconstructSampleFrameToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch); #if 0 // Reconstruct the frame to quarter resolution 
// ... at full frame rate
void ReconstructQuarterFrame(DECODER *decoder, int num_channels,
							 uint8_t *frame1, uint8_t *frame2, int output_pitch,
							 FRAME_INFO *info, char *buffer, size_t buffer_size);
#else
// Reconstruct the frame to quarter resolution at full frame rate
void ReconstructQuarterFrame(DECODER *decoder, int num_channels, int frame_index,
							 uint8_t *output, int output_pitch, FRAME_INFO *info,
							 const SCRATCH *scratch, int precision);
#endif

// Copy the quarter resolution lowpass channels from the spatial transform
void CopyQuarterFrameToBuffer(TRANSFORM **transform_array, int num_channels,
							  uint8_t *output, int output_pitch, FRAME_INFO *info, int precision);

// Convert the quarter resolution lowpass channels to the specified output format
void ConvertQuarterFrameToBuffer(DECODER *decoder, TRANSFORM **transform_array, int num_channels,
								 uint8_t *output, int output_pitch, FRAME_INFO *info, int precision);

// Routines for converting the new encoded formats to the requested output format
CODEC_ERROR ReconstructSampleFrameRGB444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameRGBA4444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameYUVA4444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);

// The first Bayer routine calls the other Bayer routines for the decoded resolution
CODEC_ERROR ReconstructSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameDeBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerHalfToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR ReconstructSampleFrameBayerQuarterToBuffer(DECODER *decoder, int frame,
													   uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameYUVToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);
CODEC_ERROR UncompressedSampleFrameRGBToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch);

// New code for handling the original YUV 4:2:2 encoded format
CODEC_ERROR ReconstructSampleFrameYUV422ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch);

// Return true if the rest of the channel does not have to be decoded
static bool CanSkipChannel(DECODER *decoder, int resolution)
{
	CODEC_STATE *codec = &decoder->codec;
	int channel = codec->channel;
	TRANSFORM *transform = decoder->transform[channel];
	int transform_type = transform->type;

	// Can the rest of the channel be skipped?
	if (transform_type == TRANSFORM_TYPE_FIELDPLUS)
	{
		switch (resolution)
		{
		case DECODED_RESOLUTION_HALF:
			if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
				return ((codec->decoded_subband_flags & DECODED_SUBBAND_MASK_HALF) == DECODED_SUBBAND_MASK_HALF);
			break;

		case DECODED_RESOLUTION_QUARTER:
			if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
				return ((codec->decoded_subband_flags & DECODED_SUBBAND_MASK_QUARTER) == DECODED_SUBBAND_MASK_QUARTER);
			break;

		case DECODED_RESOLUTION_LOWPASS_ONLY:
			// Only the lowpass band (subband zero) is needed
			return (codec->decoded_subband_flags & 1);
			break;

		default:
			if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
			{
				if(decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY)
				{
					// If we are requesting a YUV decode we don't need the 4th channel
					if(codec->channel == 3)
					{
						return true;
					}
				}
			}
			break;
		}
	}
	else
	{
		const uint32_t decoded_subband_mask_half = 0x7F;
		const uint32_t decoded_subband_mask_quarter = 0x0F;

		//assert(transform_type == TRANSFORM_TYPE_SPATIAL);
		if (transform_type != TRANSFORM_TYPE_SPATIAL) {
			decoder->error = CODEC_ERROR_BAD_FRAME;
			// Report the bad frame and skip the rest of the channel
			return true;
		}

		switch (resolution)
		{
		case DECODED_RESOLUTION_HALF:
			if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
				return ((codec->decoded_subband_flags & decoded_subband_mask_half) == decoded_subband_mask_half);
			break;

		case DECODED_RESOLUTION_QUARTER:
			if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
				return ((codec->decoded_subband_flags & decoded_subband_mask_quarter) == decoded_subband_mask_quarter);
			break;

		case DECODED_RESOLUTION_LOWPASS_ONLY:
			return (codec->decoded_subband_flags & 1);
			break;

		default:
			if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
			{
				if(decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY)
				{
					// If we are requesting a YUV decode we don't need the 4th channel
					if(codec->channel == 3)
					{
						return true;
					}
				}
			}
			break;
		}
	}

	// Cannot skip the rest of the channel
	return false;
}

#if 0
static bool CanSkipSubband(DECODER *decoder, int subband)
{
	// Bitmask indicates which subbands must be decoded for quarter resolution
	static uint32_t quarter_resolution_mask = 0x008F;

	// Convert the subband number into a bitmask (could use a lookup table)
	uint32_t subband_mask = SUBBAND_MASK(subband);

	// Select the resolution of the fully decoded frames
	int resolution = decoder->frame.resolution;

	switch (resolution)
	{
	case DECODED_RESOLUTION_QUARTER:
		//if (4 <= subband && subband <= 6)
		if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
		{
			if ((subband_mask & quarter_resolution_mask) == 0) {
				return true;
			}
		}
		break;

	default:
		// Assume that the subband must be decoded
		break;
	}

	return false;
}
#endif

// Return true if the wavelet exists and all bands are valid
static bool AllBandsValid(IMAGE *wavelet)
{
	return (wavelet != NULL && BANDS_ALL_VALID(wavelet));
}

#if DEBUG || 1
// Return true if every channel has a wavelet allocated at frame_index
// (the stronger all-bands-valid check is commented out below).
static bool AllTransformBandsValid(TRANSFORM *transform_array[], int num_channels, int frame_index)
{
	int channel;

	if (!(1 <= num_channels && num_channels <= TRANSFORM_MAX_CHANNELS)) {
		//assert(0);
		return false;
	}

	if (!(0 <= frame_index
&& frame_index < TRANSFORM_MAX_FRAMES)) { //assert(0); return false; } for (channel = 0; channel < num_channels; channel++) { IMAGE *wavelet = transform_array[channel]->wavelet[frame_index]; //if (!AllBandsValid(wavelet)) if(wavelet == NULL) { return false; } } // All wavelet bands in all channels are valid return true; } static bool AllLowpassBandsValid(TRANSFORM *transform_array[], int num_channels, int frame_index) { int channel; if (!(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS)) { return false; } if (!(0 <= frame_index && frame_index < TRANSFORM_MAX_FRAMES)) { return false; } for (channel = 0; channel < num_channels; channel++) { IMAGE *wavelet = transform_array[channel]->wavelet[frame_index]; if (!(wavelet != NULL && wavelet->band_valid_flags & BAND_VALID_MASK(0))) { return false; } } // All lowpass bands in all channels are valid return true; } #endif static bool ComputeFrameDimensionsFromFirstWavelet(int transform_type, int first_wavelet_width, int first_wavelet_height, int *frame_width_out, int *frame_height_out) { int frame_width; int frame_height; int expansion = 8; switch (transform_type) { case TRANSFORM_TYPE_SPATIAL: frame_width = first_wavelet_width * expansion; frame_height = first_wavelet_height * expansion; break; case TRANSFORM_TYPE_FIELDPLUS: frame_width = first_wavelet_width * expansion; frame_height = first_wavelet_height * expansion; break; default: //assert(0); return false; } // Return the frame dimensions *frame_width_out = frame_width; *frame_height_out = frame_height; return true; } // Decode the sample header to determine the type of sample and other parameters bool ParseSampleHeader(BITSTREAM *input, SAMPLE_HEADER *header) { TAGVALUE segment; int sample_type; int sample_size = 0; // Group index uint32_t channel_size[TRANSFORM_MAX_CHANNELS]; // Number of channels in the group index int channel_count = 0; // Values used for computing the frame width and height (if necessary) int transform_type = -1; int first_wavelet_width 
= 0; int first_wavelet_height = 0; int display_height = 0; int current_channel = 0; int currentVideoChannel = header->videoChannels; int find_lowpass_bands = header->find_lowpass_bands & 1; int find_uncompressed = header->find_lowpass_bands & 2 ? 1 : 0; int find_header_info_only = header->find_lowpass_bands & 4 ? 1 : 0; if (header == NULL) { return false; } if(currentVideoChannel == 0) currentVideoChannel = 1; // Clear the entire sample header to prevent early return from this routine memset(header, 0, sizeof(SAMPLE_HEADER)); // Clear the error code header->error = CODEC_ERROR_OKAY; // Initialize the frame dimensions to unknown header->width = 0; header->height = 0; header->videoChannels = 1; // Initialize the original pixel format to unknown header->input_format = COLOR_FORMAT_UNKNOWN; // Initialize the encoded format to unknown header->encoded_format = ENCODED_FORMAT_UNKNOWN; // Clear the frame number in case it is not present in the sample header->frame_number = 0; // The video is not progressive if the sample flags are not present header->hdr_progressive = false; #if _BITSTREAM_UNALIGNED // Record the alignment of the bitstream within the sample SetBitstreamAlignment(input, 0); #endif sample_size = input->nWordsUsed; // Get the type of sample (should be the first tag value pair) segment = GetTagValue(input); //assert(segment.tuple.tag == CODEC_TAG_SAMPLE); if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE)) { header->error = CodecErrorBitstream(input); return false; } sample_type = segment.tuple.value; switch (sample_type) { case SAMPLE_TYPE_GROUP: // Group of frames header->key_frame = true; header->difference_frame = false; header->droppable_frame = false; break; case SAMPLE_TYPE_FRAME: // The second or later frame in a group header->key_frame = false; header->difference_frame = true; header->droppable_frame = true; break; case SAMPLE_TYPE_IFRAME: // One frame in the group header->key_frame = true; header->difference_frame = false; header->droppable_frame 
				= true;
			break;

		case SAMPLE_TYPE_SEQUENCE_HEADER:
			// Treat the video sequence header like a keyframe that can be dropped
			header->key_frame = true;
			header->difference_frame = false;
			header->droppable_frame = true;
			break;

		default:
			// Unknown type of sample
			header->error = CODEC_ERROR_SAMPLE_TYPE;
			return false;
			break;
	}

	// Continue parsing the sample header until all of the information has been found
	while ( (find_lowpass_bands == 1 && current_channel < 3) || //parse all
			(find_uncompressed == 1 && current_channel < 1) ||
			display_height == 0 ||
			header->width == 0 ||
			header->height == 0 ||
			header->input_format == COLOR_FORMAT_UNKNOWN ||
			header->frame_number == 0 ||
			(header->interlaced_flags == 0 && header->hdr_progressive == 0))
	{
		int chunksize = 0;

		// Get the next tag value pair from the bitstream
		segment = GetSegment(input);

		// Did the bitstream end before the last tag was found?
		if (input->error == BITSTREAM_ERROR_UNDERFLOW) {
			break;
		}

		// Did an error occur while reading the bitstream?
		if (input->error != BITSTREAM_ERROR_OKAY) {
			header->error = CodecErrorBitstream(input);
			return false;
		}

		// Is this an optional tag?
		if (segment.tuple.tag < 0) {
			segment.tuple.tag = NEG(segment.tuple.tag);
		}

		// Sized chunks: bit 0x2000 marks a 24-bit size (low tag byte is the
		// high part), bit 0x4000 marks a 16-bit size
		if(segment.tuple.tag & 0x2000)
		{
			chunksize = segment.tuple.value;
			chunksize &= 0xffff;
			chunksize += ((segment.tuple.tag&0xff)<<16);
		}
		else if(segment.tuple.tag & 0x4000)
		{
			chunksize = segment.tuple.value;
			chunksize &= 0xffff;
		}
	//	else if(tag == CODEC_TAG_INDEX) // handled below
	//	{
	//		chunksize = value;
	//		chunksize &= 0xffff;
	//	}
		else
		{
			chunksize = 0;
		}

		if((int)(segment.tuple.tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || segment.tuple.tag & 0x6000)
		{
			int skip = 1;

			if((segment.tuple.tag & 0xff00) == 0x2200) //sample size
			{
				if(sample_size < chunksize*4)
					find_header_info_only = 1;

				skip = find_header_info_only;

				// Stereo (3D) sample: locate and parse the right-eye sample header
				if(currentVideoChannel <= 1 && header->videoChannels == 2 && !find_header_info_only)
				{
					BITSTREAM input2;
					SAMPLE_HEADER header2;
					BITWORD *eye2 = (BITWORD *)(input->lpCurrentWord + chunksize*4);
					int eye_offset = sample_size - input->nWordsUsed + chunksize*4; //approx
					int eye_sample_size = input->nWordsUsed - eye_offset;

					// Search for first sample of the next frame
					while((eye2[1] != (uint8_t)CODEC_TAG_SAMPLE || eye2[0] != 0 || eye2[2] != 0) && eye_sample_size > 0)
					{
						eye2 += 4;
						chunksize ++;
						eye_offset += 4;
						eye_sample_size -= 4;
					}

					// Save the offset to the right stereo sample
					header->left_sample_size = eye_offset;

					{
						InitBitstreamBuffer(&input2, eye2, eye_sample_size, BITSTREAM_ACCESS_READ);

						memset(&header2, 0, sizeof(SAMPLE_HEADER));
						header2.find_lowpass_bands = 1;
						currentVideoChannel++;
						header2.videoChannels = currentVideoChannel;

						// Recursively parse the second-eye header for its thumbnail offsets
						if(ParseSampleHeader(&input2, &header2))
						{
							int i;
							for(i=0;i<4;i++)
							{
								if(header2.thumbnail_channel_offsets[i])
									header->thumbnail_channel_offsets_2nd_Eye[i] = eye_offset + header2.thumbnail_channel_offsets[i];
							}
						}
					}
				}
			}

			if((segment.tuple.tag & 0xff00) == 0x2300) //uncompressed sample size
			{
				header->hdr_uncompressed = 1;
				skip = 1;
				if(find_lowpass_bands != 1)
					break;
			}

			if((segment.tuple.tag & 0xff00) == 0x2100) //level
			{
				if(find_lowpass_bands == 1)
				{
					skip = 0;
				}
				else
				{
					skip = 1;	// no header data after the fix level
					break;
				}
			}

			if(chunksize)
			{
				// Skip over the sized chunk unless it must be parsed
				if(skip)
				{
					input->lpCurrentWord += chunksize*4;
					input->nWordsUsed -= chunksize*4;
				}
			}
			else
			{
				TAGWORD value = segment.tuple.value;

				switch (segment.tuple.tag)
				{
				case CODEC_TAG_VERSION:
					// Version number of the encoder used in each GOP.
					header->encoder_version = (((value>>12) & 0xf)<<16) | (((value>>8) & 0xf)<<8) | ((value) & 0xff);
					break;

				case CODEC_TAG_INDEX:
					// Get the number of channels in the index to skip
					channel_count = value;
					if (channel_count <= TRANSFORM_MAX_CHANNELS)
						DecodeGroupIndex(input, (uint32_t*)&channel_size[0], channel_count);
					else {
						header->width = header->height = 0;
						return false;
					}
					break;

				case CODEC_TAG_FRAME_WIDTH:
					// Record the frame width in the sample header
					if (value > 0 && value <= 32768)
						header->width = value;
					else {
						header->width = header->height = 0;
						return false;
					}
					break;

				case CODEC_TAG_FRAME_HEIGHT:
					// Record the frame height in the sample header
					if (value > 0 && value <= 32768)
						header->height = value;
					else {
						header->width = header->height = 0;
						return false;
					}
					break;

				case CODEC_TAG_FRAME_DISPLAY_HEIGHT:
					// Display height may trim at most 16 rows from the coded height
					if (value > 0 && (int)value >= (int)header->height-16 && (int)value <= (int)header->height)
						display_height = value;
					else {
						header->width = header->height = 0;
						return false;
					}
					break;

				case CODEC_TAG_LOWPASS_WIDTH:
					// Save the width of the smallest wavelet for computing the frame dimensions
					if (value > 0 && value < (int)header->width / 4)
						first_wavelet_width = value;
					else {
						header->width = header->height = 0;
						return false;
					}
					break;

				case CODEC_TAG_LOWPASS_HEIGHT:
					// Save the height of the smallest wavelet for computing the frame dimensions
					if (value > 0 && value < (int)header->height / 4)
						first_wavelet_height = value;
					else {
						header->width = header->height = 0;
						return false;
					}
					break;

				case CODEC_TAG_TRANSFORM_TYPE:
					// Save the type of transform for computing the frame dimensions (if necessary)
					if (TRANSFORM_TYPE_FIRST <= value && value <= TRANSFORM_TYPE_LAST)
						transform_type = value;
					else {
						header->width = header->height = 0;
						return false;
					}
					break;

				case CODEC_TAG_INPUT_FORMAT:
					// Record the original format of the encoded frames
					header->input_format = (COLOR_FORMAT)value;
					break;

				case CODEC_TAG_ENCODED_FORMAT:
				case CODEC_TAG_OLD_ENCODED_FORMAT:
					// Record the encoded format (internal representation)
					header->encoded_format = (ENCODED_FORMAT)value;
					if(header->encoded_format == ENCODED_FORMAT_RGBA_4444 && channel_count == 3)
						header->encoded_format = ENCODED_FORMAT_RGB_444;
					break;

				case CODEC_TAG_FRAME_NUMBER:
					// Record the frame number for debugging
					header->frame_number = value;
					break;

				case CODEC_TAG_INTERLACED_FLAGS:
					// Record the flags that indicate the field type
					header->interlaced_flags = value;
					break;

				case CODEC_TAG_SAMPLE_FLAGS:
					// The sample flags specify progressive versus interlaced decoding
					header->hdr_progressive = !!(value & SAMPLE_FLAGS_PROGRESSIVE);
					if (header->hdr_progressive) {
						// Clear the interlaced flags
						header->interlaced_flags = 0;
					}
					break;

				case CODEC_TAG_LOWPASS_SUBBAND:
					if(value == 0) // low pass band
					{
						// Scan ahead for the lowpass band marker to record the
						// thumbnail offset for this channel
						int count = 8;
						uint32_t *lptr = (uint32_t *)input->lpCurrentWord;
						do
						{
							uint32_t longword = SwapInt32(lptr[count]);
							unsigned short t,v;
							t = (longword>>16) & 0xffff;
							v = (longword) & 0xffff;
							if (t == CODEC_TAG_MARKER && IsLowPassBandMarker(v) && current_channel < 4)
							{
								header->thumbnail_channel_offsets[current_channel] = (sample_size - input->nWordsUsed) + count*4 + 4;
								break;
							}
							count++;
						} while(count < 32);
						current_channel++;
					}
					break;

				case CODEC_TAG_ENCODED_CHANNELS:
					if(header->videoChannels == 1)
					{
						header->videoChannels = value;
						if(header->videoChannels < 1)
							header->videoChannels = 1;
						if (header->videoChannels > 2) {
							header->width = header->height = 0;
							return false;
						}
					}
					break;

				case CODEC_TAG_QUALITY_L:
					// header->encode_quality &= 0xffff0000;
					header->encode_quality |= value;
					break;

				case CODEC_TAG_QUALITY_H:
					// header->encode_quality &= 0xffff;
					header->encode_quality |= value<<16;
					break;
				}

				// Have the encoded frame dimensions been computed?
				if (header->width == 0 || header->height == 0)
				{
					// Found the first wavelet in the bitstream?
					if (transform_type >= 0 && first_wavelet_width > 0 && first_wavelet_height > 0)
					{
						// The group header did not contain tags for the frame dimensions
						// prior to the release of support for RGB 4:4:4, so must attempt to
						// compute the frame dimensions from the dimensions of the lowpass band.
						int frame_width = 0;
						int frame_height = 0;

						// Use the dimensions of the first wavelet to compute the frame width and height
						if (!ComputeFrameDimensionsFromFirstWavelet(transform_type, first_wavelet_width, first_wavelet_height,
																	&frame_width, &frame_height))
						{
							// Could not compute the frame dimensions
							header->error = CODEC_ERROR_FRAME_DIMENSIONS;
							return false;
						}

						// Save the frame dimensions in the sample header
						header->width = frame_width;
						header->height = frame_height;

						// No more header information after finding the lowpass band
						break;
					}
				}

				if(find_lowpass_bands != 1 && find_uncompressed != 1)
				{
					// No more header information after the first encoded band
					if (segment.tuple.tag == CODEC_TAG_BAND_NUMBER) {
						// Stop looking for header information
						break;
					}

					// No more header information after the frame index
					if (segment.tuple.tag == CODEC_TAG_FRAME_INDEX) {
						// Stop looking for header information
						break;
					}

					// No more header information after the lowpass band header
					if (segment.tuple.tag == CODEC_TAG_PIXEL_DEPTH) {
						// Stop looking for header information
						break;
					}
				}
			}
		}
	}

	if (header->width == 0 || header->height == 0) {
		//assert(0);
		return false;
	}

	// Fill in the encoded format if it was not present in the header
	if (header->encoded_format == ENCODED_FORMAT_UNKNOWN && channel_count > 0)
	{
		header->encoded_format = GetEncodedFormat(header->input_format, header->encode_quality, channel_count);
	}

	if (display_height > 0)
	{
		header->height = display_height;
	}

	if (header->encoded_format == ENCODED_FORMAT_BAYER)
	{
		// Bayer samples are encoded at half dimensions
		header->width *= 2;
		header->height *= 2;
		if(display_height == 0)
		{
			// Trim padded HD height when no display height was given
			if(header->height == 1088)
				header->height = 1080;
		}
	}

	// Return true if the header was parsed completely and correctly
	return (header->width > 0 && header->height > 0 &&
			((sample_type == SAMPLE_TYPE_FRAME) ||
			 (header->input_format != COLOR_FORMAT_UNKNOWN &&
			  header->encoded_format != ENCODED_FORMAT_UNKNOWN)));

	// It is not an error if the frame number was not found in the sample header
}

// Print the header tags of a sample to the logfile, stopping once the
// lowpass band dimensions have been seen.
bool DumpSampleHeader(BITSTREAM *input, FILE *logfile)
{
	TAGVALUE segment;
	int lowpass_width = 0;
	int lowpass_height = 0;

	// Parse the sample header until the lowpass band is found
	while (lowpass_width == 0 && lowpass_height == 0)
	{
		// Get the next tag value pair from the bitstream
		segment = GetSegment(input);

		// Did an error occur while reading the bitstream?
		if (input->error != BITSTREAM_ERROR_OKAY) {
			return false;
		}

		// Is this an optional tag?
		if (segment.tuple.tag < 0) {
			segment.tuple.tag = NEG(segment.tuple.tag);
		}

		// Check that the tag is valid
		assert(CODEC_TAG_ZERO < segment.tuple.tag && segment.tuple.tag <= CODEC_TAG_LAST_NON_SIZED);

		switch (segment.tuple.tag)
		{
		case CODEC_TAG_SAMPLE:
			fprintf(logfile, "Sample type: %d\n", segment.tuple.value);
			break;

		case CODEC_TAG_FRAME_WIDTH:
			fprintf(logfile, "Frame width: %d\n", segment.tuple.value);
			break;

		case CODEC_TAG_FRAME_HEIGHT:
			fprintf(logfile, "Frame height: %d\n", segment.tuple.value);
			break;

		case CODEC_TAG_LOWPASS_WIDTH:
			lowpass_width = segment.tuple.value;
			fprintf(logfile, "Lowpass width: %d\n", lowpass_width);
			break;

		case CODEC_TAG_LOWPASS_HEIGHT:
			lowpass_height = segment.tuple.value;
			fprintf(logfile, "Lowpass height: %d\n", lowpass_height);
			break;

		case CODEC_TAG_TRANSFORM_TYPE:
			fprintf(logfile, "Transform type: %d\n", segment.tuple.value);
			break;

		case CODEC_TAG_INPUT_FORMAT:
			fprintf(logfile, "Input format: %d\n", segment.tuple.value);
			break;

		case CODEC_TAG_ENCODED_FORMAT:
		case CODEC_TAG_OLD_ENCODED_FORMAT:
			fprintf(logfile, "Encoded format: %d\n", segment.tuple.value);
			break;

		case CODEC_TAG_FRAME_NUMBER:
			fprintf(logfile,
				"Frame number: %d\n", segment.tuple.value);
			break;
		}
	}

	return true;
}

// Advance the bitstream past the current (left-eye) video channel so the
// requested channel can be decoded; returns the number of encoded channels.
int SkipVideoChannel(DECODER *decoder, BITSTREAM *input, int skip_to_channel) // 3D work
{
	TAGWORD tag,value=1;
	unsigned char *pos = NULL;
	int readsize = input->nWordsUsed;

	if(readsize > 4096) // only need to scan the first few tuplets
	{
		readsize = 4096;
	}
	else
	{
		//Tiny therefore P-frame, nothing to be read so:
		value=decoder->real_channels; // return the last value.
		return value;
	}

	pos = GetTupletAddr(input->lpCurrentBuffer, readsize, CODEC_TAG_ENCODED_CHANNELS, &value);

	if(pos && value>1 && skip_to_channel>1)
	{
		int chunksize = 0;
		intptr_t offset;
		int count = 0;

		// Scan forward (at most 10 tuplets) for the sample size tag
		do
		{
			tag = *pos++<<8;
			tag |= *pos++;
			value = *pos++<<8;
			value |= *pos++;

			if (tag < 0) {
				tag = NEG(tag);
			}
		} while((tag & 0xff00) != CODEC_TAG_SAMPLE_SIZE && count++ < 10);

		if((tag & 0xff00) == CODEC_TAG_SAMPLE_SIZE)
		{
			// Assemble the 24-bit chunk size from the tag and value fields
			chunksize = value;
			chunksize &= 0xffff;
			chunksize += ((tag&0xff)<<16);

			offset = ((intptr_t)pos - (intptr_t)input->lpCurrentWord) + chunksize*4;

			input->lpCurrentWord += offset;
			input->nWordsUsed -= (int)offset;

			{
				uint8_t *tag = (uint8_t *)input->lpCurrentWord;

				// Search for first sample of the next frame
				while((tag[1] != (uint8_t)CODEC_TAG_SAMPLE || tag[0] != 0 || tag[2] != 0) && input->nWordsUsed > 0)
				{
					input->lpCurrentWord += 4;
					input->nWordsUsed -= 4;
					tag += 4;
				}
			}
		}
	}

	//if(value == 0) value = 1; // old non-stereo file

	return value;
}

#define SUBPIXEL 64

// Subpixel interpolation gains: one four-tap filter per 1/SUBPIXEL phase
// (coefficients scaled by 128; 0x7fff marks the unity-gain endpoint taps).
static short gains[SUBPIXEL+1][4] =
{
	{0*128,0*128,0x7fff,0*128},
	{0*128,2*128,0x7fff,-2*128},
	{0*128,5*128,255*128,-4*128},
	{0*128,8*128,254*128,-6*128},
	{0*128,11*128,253*128,-8*128},
	{0*128,14*128,252*128,-10*128},
	{0*128,18*128,250*128,-12*128},
	{0*128,21*128,248*128,-13*128},
	{-1*128,25*128,247*128,-15*128},
	{-1*128,29*128,244*128,-16*128},
	{-1*128,33*128,241*128,-17*128},
	{-2*128,37*128,239*128,-18*128},
	{-2*128,41*128,236*128,-19*128},
	{-3*128,46*128,233*128,-20*128},
	{-3*128,50*128,229*128,-20*128},
	{-4*128,55*128,226*128,-21*128},
	{-4*128,60*128,221*128,-21*128},
	{-5*128,65*128,217*128,-21*128},
	{-5*128,70*128,213*128,-22*128},
	{-6*128,75*128,209*128,-22*128},
	{-7*128,80*128,205*128,-22*128},
	{-7*128,85*128,199*128,-21*128},
	{-8*128,91*128,194*128,-21*128},
	{-9*128,96*128,190*128,-21*128},
	{-10*128,102*128,185*128,-21*128},
	{-10*128,107*128,179*128,-20*128},
	{-11*128,113*128,174*128,-20*128},
	{-12*128,118*128,169*128,-19*128},
	{-13*128,124*128,164*128,-19*128},
	{-14*128,129*128,159*128,-18*128},
	{-14*128,135*128,152*128,-17*128},
	{-15*128,141*128,147*128,-17*128},
	{-16*128,144*128,144*128,-16*128},
	// Second half mirrors the first (phase > 0.5 reverses the taps).
	{-17*128,147*128,141*128,-15*128},
	{-17*128,152*128,135*128,-14*128},
	{-18*128,159*128,129*128,-14*128},
	{-19*128,164*128,124*128,-13*128},
	{-19*128,169*128,118*128,-12*128},
	{-20*128,174*128,113*128,-11*128},
	{-20*128,179*128,107*128,-10*128},
	{-21*128,185*128,102*128,-10*128},
	{-21*128,190*128,96*128,-9*128},
	{-21*128,194*128,91*128,-8*128},
	{-21*128,199*128,85*128,-7*128},
	{-22*128,205*128,80*128,-7*128},
	{-22*128,209*128,75*128,-6*128},
	{-22*128,213*128,70*128,-5*128},
	{-21*128,217*128,65*128,-5*128},
	{-21*128,221*128,60*128,-4*128},
	{-21*128,226*128,55*128,-4*128},
	{-20*128,229*128,50*128,-3*128},
	{-20*128,233*128,46*128,-3*128},
	{-19*128,236*128,41*128,-2*128},
	{-18*128,239*128,37*128,-2*128},
	{-17*128,241*128,33*128,-1*128},
	{-16*128,244*128,29*128,-1*128},
	{-15*128,247*128,25*128,-1*128},
	{-13*128,248*128,21*128,0*128},
	{-12*128,250*128,18*128,0*128},
	{-10*128,252*128,14*128,0*128},
	{-8*128,253*128,11*128,0*128},
	{-6*128,254*128,8*128,0*128},
	{-4*128,255*128,5*128,0*128},
	{-2*128,0x7fff,2*128,0*128},
	{0*128,0*128,0x7fff,0*128}
};

// Lanczos-style resampling kernel sampled at 64 subpixel phases x 4 taps
// (indexed as lanczos[rmdr], lanczos[rmdr+64], lanczos[rmdr+128],
// lanczos[rmdr+192]); values are signed 15-bit gains used with
// _mm_mulhi_epi16 / _mm_mulhi_pi16.
static int lanczos[256] =
{
	0, -2, -8, -18, -33, -53, -77, -106,
	-141, -179, -223, -272, -325, -384, -447, -514,
	-586, -662, -742, -826, -913, -1004, -1097, -1193,
	-1290, -1389, -1490, -1591, -1692, -1792, -1892, -1990,
	-2086, -2179, -2269, -2355, -2436, -2511, -2580, -2643,
	-2697, -2744, -2781, -2809, -2826, -2832, -2826, -2808,
	-2776, -2730, -2670, -2594, -2503, -2395, -2271, -2129, -1969,
-1790, -1593, -1377, -1141, -886, -611, -315, 0, 336, 692, 1069, 1466, 1884, 2321, 2778, 3255, 3750, 4265, 4797, 5347, 5914, 6498, 7097, 7711, 8340, 8982, 9636, 10301, 10977, 11663, 12357, 13058, 13765, 14477, 15192, 15910, 16630, 17349, 18066, 18781, 18871, 19580, 20285, 20986, 21678, 22361, 23035, 23697, 24348, 24983, 25604, 26206, 26790, 27354, 27898, 28419, 28915, 29387, 29832, 30249, 30638, 30997, 31326, 31623, 31886, 32117, 32314, 32476, 32603, 32695, 32749, 32767, //was 32768, issue for SSE2 32749, 32695, 32603, 32476, 32314, 32117, 31886, 31623, 31326, 30997, 30638, 30249, 29832, 29387, 28915, 28419, 27898, 27354, 26790, 26206, 25604, 24983, 24348, 23697, 23035, 22361, 21678, 20986, 20285, 19580, 18871, 18159, 18066, 17349, 16630, 15910, 15192, 14477, 13765, 13058, 12357, 11663, 10977, 10301, 9636, 8982, 8340, 7711, 7097, 6498, 5914, 5347, 4797, 4265, 3750, 3255, 2778, 2321, 1884, 1466, 1069, 692, 336, 0, -315, -611, -886, -1141, -1377, -1593, -1790, -1969, -2129, -2271, -2395, -2503, -2594, -2670, -2730, -2776, -2808, -2826, -2832, -2826, -2809, -2781, -2744, -2697, -2643, -2580, -2511, -2436, -2355, -2269, -2179, -2086, -1990, -1892, -1792, -1692, -1591, -1490, -1389, -1290, -1193, -1097, -1004, -913, -826, -742, -662, -586, -514, -447, -384, -325, -272, -223, -179, -141, -106, -77, -53, -33, -18, -8, -2, }; void RGB48VerticalShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int widthbytes, int height, int pitch, float offset, float zoom) { float yposf,ystepf; int x; //int endofSSEline = 0; unsigned short *scanline[4]; //int spitch = pitch/2; int neg = 0,step; __m128i lA,lB,lC,lD,gA,gB,gC,gD,o128,t1; __m128i *lineA, *lineB, *lineC, *lineD, *outline128; offset = -offset; yposf = height * offset; yposf = (float)height*(0.5f - 1.0f/(2.0f*zoom) - offset); ystepf = 1.0f/zoom; if(yposf < 0.0) neg = 1; if(pitch < 0) yposf -= ystepf; /* yposi = floor(yposf); remainf = yposf - (float)yposi; tablepos = (remainf*(float)SUBPIXEL); yposi = 
abs(yposi); if(yposi==0 && tablepos == 0) return; // no move required */ // -3 , 0 best small notch at zero? // switch(decoder->StereoBufferFormat) { case DECODED_FORMAT_RGB32: case DECODED_FORMAT_RGB24: case DECODED_FORMAT_YUYV: step = 16; break; case DECODED_FORMAT_W13A: case DECODED_FORMAT_RG64: case DECODED_FORMAT_WP13: case DECODED_FORMAT_RG48: default: step = 32; break; } { static char zeroline[1024] = {0}; int y,yoffset = ((int)(yposf-2.0)),yend = ((int)(yposf+2.0+ystepf*height)); unsigned char *src = (unsigned char *)RGB48; unsigned char *dst = (unsigned char *)RGB48; unsigned char *ptr = (unsigned char *)buffer; if(yoffset < 0) yoffset = 0; if(yend > height) yend = height; src += pitch * yoffset; for(y=yoffset; y<yend; y++) { memcpy(ptr, src, widthbytes); ptr += widthbytes; src += pitch; } ptr = (unsigned char *)buffer; for(y=0;y<height; y++) { int i,t,yp = ((int)yposf); int rmdr = 63-((int)(yposf*64.0) & 63); int gains[4]; yp -= 1; // use -2 cause a image down shift //DAN20100225 t = 0; for(i=0; i<4; i++) { if(yp<0 || yp>= height) // skip 0 line as the top line was zagged { t += gains[i] = lanczos[rmdr]; scanline[i] = (unsigned short *)zeroline; } else { t += gains[i] = lanczos[rmdr]; scanline[i] = (unsigned short *)&ptr[widthbytes*(yp-yoffset)]; } yp++; rmdr+=64; } if(t) { __m128i half; gA = _mm_set1_epi16(gains[0]); gB = _mm_set1_epi16(gains[1]); gC = _mm_set1_epi16(gains[2]); gD = _mm_set1_epi16(gains[3]); outline128 = (__m128i *)dst; lineA = (__m128i *)scanline[0]; lineB = (__m128i *)scanline[1]; lineC = (__m128i *)scanline[2]; lineD = (__m128i *)scanline[3]; switch(decoder->StereoBufferFormat) { case DECODED_FORMAT_W13A: case DECODED_FORMAT_WP13: for(x=0;x<widthbytes; x+=step) { lA = _mm_loadu_si128(lineA++); lB = _mm_loadu_si128(lineB++); lC = _mm_loadu_si128(lineC++); lD = _mm_loadu_si128(lineD++); o128 = _mm_mulhi_epi16(lA, gA); t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lC, gC); o128 = 
_mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1); // upper limit to 32767 o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_slli_epi16(o128,1); _mm_storeu_si128(outline128++, o128); lA = _mm_loadu_si128(lineA++); lB = _mm_loadu_si128(lineB++); lC = _mm_loadu_si128(lineC++); lD = _mm_loadu_si128(lineD++); o128 = _mm_mulhi_epi16(lA, gA); t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1); // upper limit to 32767 o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_slli_epi16(o128,1); _mm_storeu_si128(outline128++, o128); } break; case DECODED_FORMAT_RG64: case DECODED_FORMAT_RG48: for(x=0;x<widthbytes; x+=step) { lA = _mm_loadu_si128(lineA++); lA = _mm_srli_epi16(lA,3); //13-bit unsigned lB = _mm_loadu_si128(lineB++); lB = _mm_srli_epi16(lB,3); //13-bit unsigned lC = _mm_loadu_si128(lineC++); lC = _mm_srli_epi16(lC,3); //13-bit unsigned lD = _mm_loadu_si128(lineD++); lD = _mm_srli_epi16(lD,3); //13-bit unsigned o128 = _mm_mulhi_epi16(lA, gA); t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1); o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_slli_epi16(o128,4); _mm_storeu_si128(outline128++, o128); lA = _mm_loadu_si128(lineA++); lA = _mm_srli_epi16(lA,3); //13-bit unsigned lB = _mm_loadu_si128(lineB++); lB = _mm_srli_epi16(lB,3); //13-bit unsigned lC = _mm_loadu_si128(lineC++); lC = _mm_srli_epi16(lC,3); //13-bit unsigned lD = _mm_loadu_si128(lineD++); lD = _mm_srli_epi16(lD,3); //13-bit unsigned o128 = 
_mm_mulhi_epi16(lA, gA); t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1); o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_slli_epi16(o128,4); _mm_storeu_si128(outline128++, o128); } break; case DECODED_FORMAT_RGB32: case DECODED_FORMAT_RGB24: case DECODED_FORMAT_YUYV: for(x=0;x<widthbytes; x+=step) { lA = _mm_loadu_si128(lineA); lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA); lB = _mm_loadu_si128(lineB); lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB); lC = _mm_loadu_si128(lineC); lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC); lD = _mm_loadu_si128(lineD); lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD); lA = _mm_srli_epi16(lA,3); //13-bit unsigned lB = _mm_srli_epi16(lB,3); //13-bit unsigned lC = _mm_srli_epi16(lC,3); //13-bit unsigned lD = _mm_srli_epi16(lD,3); //13-bit unsigned o128 = _mm_mulhi_epi16(lA, gA); t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1); o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_slli_epi16(o128,4); half = o128; lA = _mm_loadu_si128(lineA++); lA = _mm_unpacklo_epi8 (_mm_setzero_si128(), lA); lB = _mm_loadu_si128(lineB++); lB = _mm_unpacklo_epi8 (_mm_setzero_si128(), lB); lC = _mm_loadu_si128(lineC++); lC = _mm_unpacklo_epi8 (_mm_setzero_si128(), lC); lD = _mm_loadu_si128(lineD++); lD = _mm_unpacklo_epi8 (_mm_setzero_si128(), lD); lA = _mm_srli_epi16(lA,3); //13-bit unsigned lB = _mm_srli_epi16(lB,3); //13-bit unsigned lC = _mm_srli_epi16(lC,3); //13-bit unsigned lD = _mm_srli_epi16(lD,3); //13-bit unsigned o128 = _mm_mulhi_epi16(lA, gA); t1 = _mm_mulhi_epi16(lB, gB); o128 = 
_mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1); o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_slli_epi16(o128,4); half = _mm_srli_epi16(half,8); o128 = _mm_srli_epi16(o128,8); o128 = _mm_packus_epi16(o128, half); _mm_storeu_si128(outline128++, o128); } break; } } else { if(decoder->StereoBufferFormat == DECODED_FORMAT_YUYV) { memset(dst, 0x10801080, widthbytes); } else { memset(dst, 0, widthbytes); } } yposf += ystepf; dst += pitch; } /*ptr = (unsigned char *)buffer; for(y=0;y<height; y++) { int r,g,b,yp = ((int)yposf); yposf += ystepf; if(yp<0 || yp>= height) { memset(dst, 0, widthbytes); } else { memcpy(dst, &ptr[widthbytes*yp], widthbytes); } dst += pitch; }*/ } } void RGB48VerticalShiftZoomFine(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int widthbytes, int height, int pitch, float offset, float zoom, int xx) { float yposf,ystepf; //int endofSSEline = 0; unsigned short *scanline[4]; //int spitch = pitch/2; int neg = 0,step; __m128i lA,lB,lC,lD,gA,gB,gC,gD,o128,t1; uint8_t *lineAPos, *lineBPos, *lineCPos, *lineDPos; uint8_t *outlinePos8; uint16_t *outlinePos16; offset = -offset; //yposf = height * offset; yposf = (float)height*(0.5f - 1.0f/(2.0f*zoom) - offset); ystepf = 1.0f/zoom; if(yposf < 0.0) neg = 1; if(pitch < 0) yposf -= ystepf; /* yposi = floor(yposf); remainf = yposf - (float)yposi; tablepos = (remainf*(float)SUBPIXEL); yposi = abs(yposi); if(yposi==0 && tablepos == 0) return; // no move required */ // -3 , 0 best small notch at zero? 
// switch(decoder->StereoBufferFormat) { case DECODED_FORMAT_RGB32: step = 4; break; case DECODED_FORMAT_RGB24: step = 3; break; case DECODED_FORMAT_YUYV: step = 4; break; case DECODED_FORMAT_W13A: case DECODED_FORMAT_RG64: step = 8; break; case DECODED_FORMAT_WP13: case DECODED_FORMAT_RG48: step = 6; break; default: assert(0); break; } { static char zeroline[1024] = {0}; int y,yoffset = ((int)(yposf-2.0)),yend = ((int)(yposf+2.0+ystepf*height)); unsigned char *src = (unsigned char *)RGB48; unsigned char *dst = (unsigned char *)RGB48; unsigned char *ptr = (unsigned char *)buffer; if(yoffset < 0) yoffset = 0; if(yend > height) yend = height; src += pitch * yoffset; for(y=yoffset; y<yend; y++) { memcpy(ptr, src, widthbytes); ptr += widthbytes; src += pitch; } ptr = (unsigned char *)buffer; for(y=0;y<height; y++) { int i,t,yp = ((int)yposf); int rmdr = 63-((int)(yposf*64.0) & 63); int gains[4]; yp -= 1; // use -2 cause a image down shift //DAN20100225 t = 0; for(i=0; i<4; i++) { if(yp<0 || yp>= height) // skip 0 line as the top line was zagged { t += gains[i] = lanczos[rmdr]; scanline[i] = (unsigned short *)zeroline; } else { t += gains[i] = lanczos[rmdr]; scanline[i] = (unsigned short *)&ptr[widthbytes*(yp-yoffset)]; } yp++; rmdr+=64; } if(t) { gA = _mm_set1_epi16(gains[0]); gB = _mm_set1_epi16(gains[1]); gC = _mm_set1_epi16(gains[2]); gD = _mm_set1_epi16(gains[3]); outlinePos8 = (uint8_t *)dst; outlinePos16 = (uint16_t *)dst; lineAPos = (uint8_t *)scanline[0]; lineBPos = (uint8_t *)scanline[1]; lineCPos = (uint8_t *)scanline[2]; lineDPos = (uint8_t *)scanline[3]; switch(decoder->StereoBufferFormat) { case DECODED_FORMAT_W13A: lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=8; lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=8; lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=8; lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=8; o128 = _mm_mulhi_epi16(lA, gA); t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lC, 
gC); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1); // upper limit to 32767 o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_slli_epi16(o128,1); //_mm_storeu_si128((__m128i *)outlinePos, o128); outlinePos16[0] = _mm_extract_epi16(o128, 0); outlinePos16[1] = _mm_extract_epi16(o128, 1); outlinePos16[2] = _mm_extract_epi16(o128, 2); outlinePos16[3] = _mm_extract_epi16(o128, 3); outlinePos16+=4; break; case DECODED_FORMAT_WP13: lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=6; lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=6; lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=6; lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=6; o128 = _mm_mulhi_epi16(lA, gA); t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1); // upper limit to 32767 o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_slli_epi16(o128,1); //_mm_storeu_si128((__m128i *)outlinePos, o128); outlinePos16[0] = _mm_extract_epi16(o128, 0); outlinePos16[1] = _mm_extract_epi16(o128, 1); outlinePos16[2] = _mm_extract_epi16(o128, 2); outlinePos16+=3; break; case DECODED_FORMAT_RG64: lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=8; lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=8; lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=8; lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=8; lA = _mm_srli_epi16(lA,3); //13-bit unsigned lB = _mm_srli_epi16(lB,3); //13-bit unsigned lC = _mm_srli_epi16(lC,3); //13-bit unsigned lD = _mm_srli_epi16(lD,3); //13-bit unsigned o128 = _mm_mulhi_epi16(lA, gA); t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1); t1 = 
_mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1); o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_slli_epi16(o128,4); //_mm_storeu_si128((__m128i *)outlinePos, o128); outlinePos16[0] = _mm_extract_epi16(o128, 0); outlinePos16[1] = _mm_extract_epi16(o128, 1); outlinePos16[2] = _mm_extract_epi16(o128, 2); outlinePos16[3] = _mm_extract_epi16(o128, 3); outlinePos16+=4; break; case DECODED_FORMAT_RG48: lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=6; lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=6; lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=6; lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=6; lA = _mm_srli_epi16(lA,3); //13-bit unsigned lB = _mm_srli_epi16(lB,3); //13-bit unsigned lC = _mm_srli_epi16(lC,3); //13-bit unsigned lD = _mm_srli_epi16(lD,3); //13-bit unsigned o128 = _mm_mulhi_epi16(lA, gA); t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1); o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_slli_epi16(o128,4); //_mm_storeu_si128((__m128i *)outlinePos, o128); outlinePos16[0] = _mm_extract_epi16(o128, 0); outlinePos16[1] = _mm_extract_epi16(o128, 1); outlinePos16[2] = _mm_extract_epi16(o128, 2); outlinePos16+=3; break; case DECODED_FORMAT_RGB32: case DECODED_FORMAT_YUYV: lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=4; lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA); lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=4; lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB); lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=4; lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC); lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=4; lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD); lA = _mm_srli_epi16(lA,3); //13-bit 
unsigned lB = _mm_srli_epi16(lB,3); //13-bit unsigned lC = _mm_srli_epi16(lC,3); //13-bit unsigned lD = _mm_srli_epi16(lD,3); //13-bit unsigned o128 = _mm_mulhi_epi16(lA, gA); t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1); o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_srli_epi16(o128,4); outlinePos8[0] = _mm_extract_epi16(o128, 0); outlinePos8[1] = _mm_extract_epi16(o128, 1); outlinePos8[2] = _mm_extract_epi16(o128, 2); outlinePos8[3] = _mm_extract_epi16(o128, 3); outlinePos8+=4; break; case DECODED_FORMAT_RGB24: { int r,g,b; b = ((lineAPos[0] * gains[0])>>7) + ((lineBPos[0] * gains[1])>>7) + ((lineCPos[0] * gains[2])>>7) + ((lineDPos[0] * gains[3])>>7); //16-bit g = ((lineAPos[1] * gains[0])>>7) + ((lineBPos[1] * gains[1])>>7) + ((lineCPos[1] * gains[2])>>7) + ((lineDPos[1] * gains[3])>>7); //16-bit r = ((lineAPos[2] * gains[0])>>7) + ((lineBPos[2] * gains[1])>>7) + ((lineCPos[2] * gains[2])>>7) + ((lineDPos[2] * gains[3])>>7); //16-bit if(r<0) r = 0; if(r>65535) r = 65535; if(g<0) g = 0; if(g>65535) g = 65535; if(b<0) b = 0; if(b>65535) b = 65535; lineAPos+=3; lineBPos+=3; lineCPos+=3; lineDPos+=3; outlinePos8[0] = b >> 8; //b outlinePos8[1] = g >> 8; //g outlinePos8[2] = r >> 8; //r outlinePos8+=3; /* SSE2 can't load byte alligned lA = _mm_loadu_si128((__m128i *)lineAPos); lineAPos+=3; lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA); lB = _mm_loadu_si128((__m128i *)lineBPos); lineBPos+=3; lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB); lC = _mm_loadu_si128((__m128i *)lineCPos); lineCPos+=3; lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC); lD = _mm_loadu_si128((__m128i *)lineDPos); lineDPos+=3; lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD); lA = _mm_srli_epi16(lA,3); //13-bit unsigned lB = _mm_srli_epi16(lB,3); //13-bit unsigned lC 
= _mm_srli_epi16(lC,3); //13-bit unsigned lD = _mm_srli_epi16(lD,3); //13-bit unsigned o128 = _mm_mulhi_epi16(lA, gA); t1 = _mm_mulhi_epi16(lB, gB); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lC, gC); o128 = _mm_adds_epi16(o128,t1); t1 = _mm_mulhi_epi16(lD, gD); o128 = _mm_adds_epi16(o128,t1); o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_srli_epi16(o128,4); outlinePos8[0] = _mm_extract_epi16(o128, 0); //b outlinePos8[1] = _mm_extract_epi16(o128, 1); //g outlinePos8[2] = _mm_extract_epi16(o128, 2); //r outlinePos8+=3; */ } break; } } else { if(decoder->StereoBufferFormat == DECODED_FORMAT_YUYV) { memset(dst, 0x10801080, widthbytes); } else { memset(dst, 0, widthbytes); } } yposf += ystepf; dst += pitch; } } } void RGB48VerticalShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int widthbytes, int height, int pitch, float offset) { float yposf,remainf; int yposi,tablepos,x,y; int gainA,gainB,gainC,gainD; //int endofSSEline = 0; unsigned short *scanline[4], *tline; int spitch = pitch/2; int neg = 0,shift = 0,skip,step; int origwidthbytes = widthbytes; int origwidthextra; __m128i lA, lB, lC, lD, gA, gB, gC, gD, o128, t1; __m128i *lineA, *lineB, *lineC, *lineD, *outline128; // offset = -offset; if(offset < 0.0) neg = 1; yposf = height * offset; yposi = (int)floor(yposf); remainf = yposf - (float)yposi; tablepos = (int)(remainf*(float)SUBPIXEL); yposi = abs(yposi); if(yposi==0 && tablepos == 0) return; // no move required // -3 , 0 best small notch at zero? 
	//
	// Pick the 4-tap kernel for the subpixel phase; a negative (upward)
	// shift reverses the taps and biases the integer offset differently.
	if(neg)
	{
		yposi -= 2;
		gainA = gains[tablepos][0];
		gainB = gains[tablepos][1];
		gainC = gains[tablepos][2];
		gainD = gains[tablepos][3];
	}
	else
	{
		yposi -= 1; //offset inherent in the table
		gainD = gains[tablepos][0];
		gainC = gains[tablepos][1];
		gainB = gains[tablepos][2];
		gainA = gains[tablepos][3];
	}
	gA = _mm_set1_epi16(gainA);
	gB = _mm_set1_epi16(gainB);
	gC = _mm_set1_epi16(gainC);
	gD = _mm_set1_epi16(gainD);

	// skip = bytes per pixel component group, step = bytes per SSE pass.
	switch(decoder->StereoBufferFormat)
	{
	case DECODED_FORMAT_RGB32:
		skip = 4;
		step = 16;
		break;
	case DECODED_FORMAT_RGB24:
		skip = 3;
		step = 16;
		break;
	case DECODED_FORMAT_YUYV:
		skip = 2;
		step = 16;
		break;
	case DECODED_FORMAT_WP13:
	case DECODED_FORMAT_RG48:
	case DECODED_FORMAT_W13A:
	case DECODED_FORMAT_RG64:
	default:
		skip = 6;
		step = 32;
		break;
	}

	// scanline[0] = buffer;
	// scanline[1] = buffer + width*skip/2;
	// scanline[2] = buffer + width*skip/2*2;
	// scanline[3] = buffer + width*skip/2*3;

	// Round the working width up to a whole SSE pass; origwidthextra is
	// the tail remainder of the true width handled via bounce buffer.
	widthbytes += (step - 1);
	widthbytes -= (widthbytes % step);
	origwidthextra = (origwidthbytes % step);

	// Four-line ring buffer of source rows inside 'buffer'.
	scanline[0] = buffer;
	scanline[1] = buffer + widthbytes/2;
	scanline[2] = buffer + widthbytes/2*2;
	scanline[3] = buffer + widthbytes/2*3;

	// Prime the ring with the first four contributing rows (zeros when
	// outside the image).  Negative shifts walk the image bottom-up.
	for(y=0; y<4; y++)
	{
		if(yposi+y >=0 && yposi+y<height)
		{
			unsigned short *ptr = RGB48;
			if(neg)	ptr += (height-1-yposi-y)*spitch;
			else	ptr += (yposi+y)*spitch;
			memcpy(scanline[y], ptr, origwidthbytes);
		}
		else
		{
			memset(scanline[y], 0, origwidthbytes);
		}
	}

	{
		for(y=0;y<height; y++)
		{
			unsigned short *ptr = RGB48;
			if(neg)	ptr += (height-y-1)*spitch;
			else	ptr += y*spitch;

			outline128 = (__m128i *)ptr;
			lineA = (__m128i *)scanline[0];
			lineB = (__m128i *)scanline[1];
			lineC = (__m128i *)scanline[2];
			lineD = (__m128i *)scanline[3];

			//for(x=0;x<width*skip/2; x+=step)
			for(x=0;x<widthbytes; x+=step)
			{
				__m128i half;

				// First 16 bytes of the pass: load per format ('shift'
				// records whether data was pre-shifted to 13 bits).
				switch(decoder->StereoBufferFormat)
				{
				case DECODED_FORMAT_W13A:
				case DECODED_FORMAT_WP13:
					{
						lA = _mm_loadu_si128(lineA++);
						lB = _mm_loadu_si128(lineB++);
						lC = _mm_loadu_si128(lineC++);
						lD = _mm_loadu_si128(lineD++);
						shift = 0;
					}
					break;
				case DECODED_FORMAT_RG64:
				case DECODED_FORMAT_RG48:
					{
						lA = _mm_loadu_si128(lineA++);
						lA = _mm_srli_epi16(lA,3); //13-bit unsigned
						lB = _mm_loadu_si128(lineB++);
						lB = _mm_srli_epi16(lB,3); //13-bit unsigned
						lC = _mm_loadu_si128(lineC++);
						lC = _mm_srli_epi16(lC,3); //13-bit unsigned
						lD = _mm_loadu_si128(lineD++);
						lD = _mm_srli_epi16(lD,3); //13-bit unsigned
						shift = 3;
					}
					break;
				case DECODED_FORMAT_RGB32:
				case DECODED_FORMAT_RGB24:
				case DECODED_FORMAT_YUYV:
					// 8-bit data: widen the high halves (pointers not
					// advanced yet — the low halves are read below).
					lA = _mm_loadu_si128(lineA);
					lA = _mm_unpackhi_epi8 (_mm_setzero_si128(), lA);
					lB = _mm_loadu_si128(lineB);
					lB = _mm_unpackhi_epi8 (_mm_setzero_si128(), lB);
					lC = _mm_loadu_si128(lineC);
					lC = _mm_unpackhi_epi8 (_mm_setzero_si128(), lC);
					lD = _mm_loadu_si128(lineD);
					lD = _mm_unpackhi_epi8 (_mm_setzero_si128(), lD);
					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned
					shift = 3;
					break;
				}

				// Weighted 4-tap sum with saturating adds.
				o128 = _mm_mulhi_epi16(lA, gA);
				t1 = _mm_mulhi_epi16(lB, gB);
				o128 = _mm_adds_epi16(o128,t1);
				t1 = _mm_mulhi_epi16(lC, gC);
				o128 = _mm_adds_epi16(o128,t1);
				t1 = _mm_mulhi_epi16(lD, gD);
				o128 = _mm_adds_epi16(o128,t1);

				if(shift)
				{
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_slli_epi16(o128,4);
				}
				else
				{
					// upper limit to 32767
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_slli_epi16(o128,1);
				}

				if(skip == 6) //RGB48 || WP13
				{
					// 16-bit formats store each half directly; partial
					// tail passes bounce through scanline[0] (already
					// consumed) to avoid writing past the true width.
					if(widthbytes == origwidthbytes || x+16 < origwidthbytes)
						_mm_storeu_si128(outline128++, o128);
					else
					{
						//if(x < origwidthbytes+16/*bytes in an SSE2 reg*/)
						_mm_storeu_si128((__m128i *)scanline[0], o128);
						memcpy((char *)outline128, (char *)scanline[0], origwidthextra);
						outline128++;
					}
				}
				else
				{
					half = o128;
				}

				// Second 16 bytes (or low byte halves) of the pass.
				switch(decoder->StereoBufferFormat)
				{
				case DECODED_FORMAT_W13A:
				case DECODED_FORMAT_WP13:
					{
						lA = _mm_loadu_si128(lineA++);
						lB = _mm_loadu_si128(lineB++);
						lC = _mm_loadu_si128(lineC++);
						lD = _mm_loadu_si128(lineD++);
						shift = 0;
					}
					break;
				case DECODED_FORMAT_RG64:
				case DECODED_FORMAT_RG48:
					{
						lA = _mm_loadu_si128(lineA++);
						lA = _mm_srli_epi16(lA,3); //13-bit unsigned
						lB = _mm_loadu_si128(lineB++);
						lB = _mm_srli_epi16(lB,3); //13-bit unsigned
						lC = _mm_loadu_si128(lineC++);
						lC = _mm_srli_epi16(lC,3); //13-bit unsigned
						lD = _mm_loadu_si128(lineD++);
						lD = _mm_srli_epi16(lD,3); //13-bit unsigned
						shift = 3;
					}
					break;
				case DECODED_FORMAT_RGB32:
				case DECODED_FORMAT_RGB24:
				case DECODED_FORMAT_YUYV:
					lA = _mm_loadu_si128(lineA++);
					lA = _mm_unpacklo_epi8 (_mm_setzero_si128(), lA);
					lB = _mm_loadu_si128(lineB++);
					lB = _mm_unpacklo_epi8 (_mm_setzero_si128(), lB);
					lC = _mm_loadu_si128(lineC++);
					lC = _mm_unpacklo_epi8 (_mm_setzero_si128(), lC);
					lD = _mm_loadu_si128(lineD++);
					lD = _mm_unpacklo_epi8 (_mm_setzero_si128(), lD);
					lA = _mm_srli_epi16(lA,3); //13-bit unsigned
					lB = _mm_srli_epi16(lB,3); //13-bit unsigned
					lC = _mm_srli_epi16(lC,3); //13-bit unsigned
					lD = _mm_srli_epi16(lD,3); //13-bit unsigned
					shift = 3;
					break;
				}

				o128 = _mm_mulhi_epi16(lA, gA);
				t1 = _mm_mulhi_epi16(lB, gB);
				o128 = _mm_adds_epi16(o128,t1);
				t1 = _mm_mulhi_epi16(lC, gC);
				o128 = _mm_adds_epi16(o128,t1);
				t1 = _mm_mulhi_epi16(lD, gD);
				o128 = _mm_adds_epi16(o128,t1);

				if(shift)
				{
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
					o128 = _mm_slli_epi16(o128,4);
				}
				else
				{
					// upper limit to 32767
					o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
					o128 = _mm_slli_epi16(o128,1);
				}

				if(skip != 6) //!RGB48 || !WP13
				{
					// 8-bit formats: repack both widened halves to bytes.
					half = _mm_srli_epi16(half,8);
					o128 = _mm_srli_epi16(o128,8);
					o128 = _mm_packus_epi16(o128, half);
				}

				if(widthbytes == origwidthbytes || x+32 < origwidthbytes)
				{
					_mm_storeu_si128(outline128++, o128);
				}
				else
				{
					//if(x+16 < origwidthbytes+16)
					if(origwidthextra > 16)
					{
						_mm_storeu_si128((__m128i *)scanline[0], o128);
						memcpy((char *)outline128, (char *)scanline[0], origwidthextra - 16);
					}
					outline128++;
				}
			}

			// Rotate the 4-line ring and fetch the next source row.
			tline = scanline[0];
			scanline[0] = scanline[1];
			scanline[1] = scanline[2];
			scanline[2] = scanline[3];
			scanline[3] = tline;

			if(yposi+y+4 >=0 && yposi+y+4<height)
			{
				unsigned short *ptr = RGB48;
				if(neg)	ptr += (height-1-(yposi+y+4))*spitch;
				else	ptr += (yposi+y+4)*spitch;
				memcpy(scanline[3], ptr, origwidthbytes);
			}
			else
			{
				memset(scanline[3], 0, origwidthbytes);
			}
		}
	}
}

// Horizontally shift/zoom one RGB48 scanline in place, with optional
// mirror flip, per-eye tilt and a "dynamic stretch" region controlled by
// decoder->cfhddata (FrameHDynamic/FrameHDynCenter/FrameHDynWidth).
// 'buffer' receives a working copy of the input line.
void RGB48HoriShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye)
{
	float xposf,xstepf;
	int x;
	//int endofSSEline = 0;
	unsigned short *scanline = (unsigned short *)buffer;
	short *sscanline = (short *)buffer;
	int neg = 0;
	float offset = hoffset;

	if(flip)
	{
		// Mirror the line: swap 3-component pixels from both ends.
		unsigned short *ptrL = RGB48;
		unsigned short *ptrR = RGB48;
		ptrR += (width*3) - 3;
		for(x=0;x<width/2;x++)
		{
			int t;
			t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t;
			t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t;
			t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t;
			ptrR -= 6;
		}
	}

	// Opposite tilt correction per eye.
	if(eye > 0)
	{
		zoom *= 1.0f + frameTilt;
	}
	else
	{
		zoom /= 1.0f + frameTilt;
	}

	xposf = (float)width*(0.5f - 1.0f/(2.0f*zoom) - offset);
	xposf -= width * roffset * 0.5f / zoom;
	xposf += (float)line * ((float)width* roffset / ((float)height*zoom));
	if(xposf < 0.0) neg = 1;	// NOTE(review): 'neg' appears unused below
	xstepf = 1.0f/zoom;

	memcpy(scanline, RGB48, width*3*2);

	{
		//unsigned short zeroline[3] = {0};
		int xx = 0;
		int ixpos = (int)(xposf * 65536.0f);
		int ixstep = (int)(xstepf * 65536.0f);
		float xbase = xposf / (float)width;
		float xstep = xstepf / (float)width;
		float z = (decoder->cfhddata.FrameHDynamic - 1.0f)*2.0f;
		// int holdstart = width*5/10; // Use to specify a area of uniform stretch
		// int holdend = width*5/10;
		int holdstart = (int)((decoder->cfhddata.FrameHDynCenter - decoder->cfhddata.FrameHDynWidth*0.125)*(float)width);
		int holdend =
		(int)((decoder->cfhddata.FrameHDynCenter + decoder->cfhddata.FrameHDynWidth*0.125)*(float)width);
		float flatxstep;
		float modified_xstep_avg;
		float bottomxstep;
		float basexstepstart;
		float basexstepend;
		float range;
#if MMXSUPPORTED //TODO DANREMOVE
		__m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff);
#endif

		// Clamp the uniform-stretch ("hold") region to the line.
		if(holdstart < 0)
			holdstart = 0, holdend = (int)((decoder->cfhddata.FrameHDynWidth*0.5)*(float)width);
		if(holdend > width)
			holdend = width, holdstart = (int)((1.0 - decoder->cfhddata.FrameHDynWidth*0.5)*(float)width);

		// Inside the hold region the step is flat; outside, the step is
		// ramped so the total advance across the line is preserved.
		range = (float)(holdend - holdstart);
		flatxstep = xstep-z*0.5f*xstep;
		modified_xstep_avg = (xstep * (float)width - range * flatxstep) / ((float)width - range);
		bottomxstep = modified_xstep_avg - (flatxstep - modified_xstep_avg);

		if(holdstart == (width-holdend))
		{
			basexstepstart = bottomxstep;
			basexstepend = bottomxstep;
		}
		else if(holdstart < (width-holdend))
		{
			float a = (float)holdstart / (float)(width-holdend);
			float startavg = a * modified_xstep_avg + (1.0f - a) * flatxstep;
			float endavg = (modified_xstep_avg * ((float)width-range) - startavg * (float)holdstart) / (float)(width-holdend);
			basexstepstart = startavg - (flatxstep - startavg);
			basexstepend = endavg - (flatxstep - endavg);
		}
		else
		{
			float a = (float)(width-holdend) / (float)holdstart;
			float endavg = a * modified_xstep_avg + (1.0f - a) * flatxstep;
			float startavg = (modified_xstep_avg * ((float)width-range) - endavg * (float)(width-holdend)) / (float)holdstart;
			basexstepstart = startavg - (flatxstep - startavg);
			basexstepend = endavg - (flatxstep - endavg);
		}

		// WP13 path: signed 13-bit data in sscanline, <<1 restore.
		if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13)
		{
			float fxpos = xbase;
			for(x=0;x<width; x++) //RGB
			{
				int gains = 0;	// accumulated taps for off-image samples
				int xp, rmdr;

				if(z != 0.0)
				{
					// Dynamic stretch: ramp the step outside the hold region.
					if(x<holdstart)
					{
						fxpos += basexstepstart*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
					}
					else if(x>holdend)
					{
						int diff = width - x;
						int range = width - holdend;
						fxpos += basexstepend*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
					}
					else
					{
						fxpos += flatxstep;
					}
					xp = (int)(fxpos * 65536.0f*(float)width);
					rmdr = 63-((xp>>10) & 63);
					xp >>= 16;
				}
				else
				{
					// Uniform zoom: 16.16 fixed-point position.
					xp = ixpos>>16;
					rmdr = 63-((ixpos>>10) & 63);
					ixpos += ixstep;
				}
				xp -= 1;// was -2 causing a right shift //DAN20100225

#if MMXSUPPORTED //TODO DANREMOVE
				if(xp>4 && xp<width-4 && xx < (width-1)*3) //We need 3 values for RGB< yet we write 4, so the last pixel can't be done with MMX
				{
					__m64 *src64;
					__m64 *dst64;
					__m64 sumx16;
					__m64 rgbx16;
					__m64 gain16;
					int linepos = (xp-1)*3;

					// 4-tap filter over 64-bit groups; note each store
					// writes 4 shorts for a 3-short pixel (see guard above).
					src64 = (__m64 *)&sscanline[linepos];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
					sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit

					src64 = (__m64 *)&sscanline[linepos+3];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);

					src64 = (__m64 *)&sscanline[linepos+6];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);

					src64 = (__m64 *)&sscanline[linepos+9];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);

					sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
					sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
					sumx16 = _mm_slli_pi16(sumx16, 1);
					dst64 = (__m64 *)&RGB48[xx];
					*dst64 = sumx16;
				}
				else
#endif
				{
					// Scalar edge fallback: off-image taps roll their
					// weight into the next in-range sample.
					int i,r=0,g=0,b=0;
					for(i=0; i<4; i++)
					{
						if(xp<=0 || xp>= width)
						{
							gains += lanczos[rmdr]>>1;
						}
						else
						{
							gains += lanczos[rmdr]>>1;
							r += (gains * sscanline[xp*3]);
							g += (gains * sscanline[xp*3+1]);
							b += (gains * sscanline[xp*3+2]);
							gains = 0;
						}
						xp++;
						rmdr+=64;
					}
					r >>= 14;
					g >>= 14;
					b >>= 14;
					if(r<0) r=0; else if(r>65535) r=65535;
					if(g<0) g=0; else if(g>65535) g=65535;
					if(b<0) b=0; else if(b>65535) b=65535;
					RGB48[xx] = r;
					RGB48[xx+1] = g;
					RGB48[xx+2] = b;
				}
				xx+=3;
			}
		}
		else
		{
			// Unsigned 16-bit path: data in scanline, >>1 before the
			// multiply and <<2 restore.
			float fxpos = xbase;
			for(x=0;x<width; x++) //RGB
			{
				int gains = 0;
				int xp, rmdr;

				if(z != 0.0)
				{
					if(x<holdstart)
					{
						fxpos += basexstepstart*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart);
					}
					else if(x>holdend)
					{
						int diff = width - x;
						int range = width - holdend;
						fxpos += basexstepend*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range);
					}
					else
					{
						fxpos += flatxstep;
					}
					xp = (int)(fxpos * 65536.0f*(float)width);
					rmdr = 63-((xp>>10) & 63);
					xp >>= 16;
				}
				else
				{
					xp = ixpos>>16;
					rmdr = 63-((ixpos>>10) & 63);
					ixpos += ixstep;
				}
				xp -= 1; // was -2 causing a right shift //DAN20100225

#if MMXSUPPORTED //TODO DANREMOVE
				if(xp>4 && xp<width-4)
				{
					__m64 *src64;
					__m64 *dst64;
					__m64 sumx16;
					__m64 rgbx16;
					__m64 gain16;
					int linepos = (xp-0)*3; //DAN20102602 -- fix left edge error.

					src64 = (__m64 *)&scanline[linepos];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit

					src64 = (__m64 *)&scanline[linepos+3];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);

					src64 = (__m64 *)&scanline[linepos+6];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);

					src64 = (__m64 *)&scanline[linepos+9];
					rgbx16 = *src64;
					gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit
					rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit
					rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit
					sumx16 = _mm_adds_pi16(sumx16, rgbx16);

					sumx16 = _mm_adds_pi16(sumx16, overflowprotect);
					sumx16 = _mm_subs_pu16(sumx16, overflowprotect);
					sumx16 = _mm_slli_pi16(sumx16, 2);
					dst64 = (__m64 *)&RGB48[xx];
					*dst64 = sumx16;
				}
				else
#endif
				{
					int i,r=0,g=0,b=0;
					for(i=0; i<4; i++)
					{
						if(xp<=0 || xp>= width)
						{
							gains += lanczos[rmdr]>>1;
						}
						else
						{
							gains += lanczos[rmdr]>>1;
							r += (gains * scanline[xp*3]);
							g += (gains * scanline[xp*3+1]);
							b += (gains * scanline[xp*3+2]);
							gains = 0;
						}
						xp++;
						rmdr+=64;
					}
					r >>= 14;
					g >>= 14;
					b >>= 14;
					if(r<0) r=0; else if(r>65535) r=65535;
					if(g<0) g=0; else if(g>65535) g=65535;
					if(b<0) b=0; else if(b>65535) b=65535;
					RGB48[xx] = r;
					RGB48[xx+1] = g;
					RGB48[xx+2] = b;
				}
				xx+=3;
			}
		}
	}
#if MMXSUPPORTED //TODO DANREMOVE
	//_mm_empty();
#endif
}

#if 0 //Why is this not used?
void RGB48HoriShiftZoomFine(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye)
{
	float xposf,remainf,xstepf;
	int xposi,tablepos,x;
	int Ra,Rb,Rc,Rd;
	int Ga,Gb,Gc,Gd;
	int Ba,Bb,Bc,Bd;
	int gainA,gainB,gainC,gainD;
	int endofSSEline = 0;
	unsigned short *scanline = (unsigned short *)buffer;
	short *sscanline = (short *)buffer;
	int neg = 0,shift = 0;
	float offset = hoffset;
	__m128i l1,l2,l3,gA,gB,gC,gD,o128,t1,t2;
	__m128i *line128, *outline128;

	if(flip)
	{
		unsigned short *ptrL = RGB48;
		unsigned short *ptrR = RGB48;
		ptrR += (width*3) - 3;
		for(x=0;x<width/2;x++)
		{
			int t;
			t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t;
			t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t;
			t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t;
			ptrR -= 6;
		}
	}

	if(eye > 0)
	{
		zoom *= 1.0 + frameTilt;
	}
	else
	{
		zoom /= 1.0 + frameTilt;
	}

	xposf = (float)width*(0.5 - 1.0/(2.0*zoom) - offset);
	xposf -= width * roffset * 0.5 / zoom;
	xposf += (float)line * ((float)width* roffset / ((float)height*zoom));
	if(xposf < 0.0) neg = 1;
	xstepf = 1.0/zoom;

	memcpy(scanline, RGB48, width*3*2);

	{
		unsigned short zeroline[3] = {0};
		int xx = 0;
		int ixpos = xposf * 65536.0;
		int ixstep = xstepf * 65536.0;
		float xbase = xposf / (float)width;
		float xstep = xstepf / (float)width;
		float z = (decoder->cfhddata.FrameHDynamic - 1.0)*2.0;
		int holdstart = width*5/10; // Use to specify a area of uniform stretch
		int holdend = width*5/10;
		float flatxstep =
xstep-z*0.5*xstep; float modified_xstep_avg = (xstep * (float)width - (float)(holdend - holdstart) * flatxstep) / (float)(width - (holdend - holdstart)); float bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg); __m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff); if(bottomxstep < 0.0) { bottomxstep = 0.0; flatxstep = modified_xstep_avg + modified_xstep_avg; } if(flatxstep < 0.0) { flatxstep = 0.0; bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg); } if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13) { float fxpos = xbase; for(x=0;x<width; x++) //RGB { int gains = 0; int xp, rmdr; if(z != 0.0) { if(x<holdstart) { fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart); } else if(x>holdend) { int diff = width - x; int range = width - holdend; fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range); } else { fxpos += flatxstep; } /* fxpos = xbase + xstep * x;//(float)ixpos/(65536.0*(float)width); if(fxpos >= 0.0 && fxpos <= 1.0) { if(z > 0.0) { fxpos = 1.8*fxpos - 2.4*fxpos*fxpos + (1.6*fxpos*fxpos*fxpos); fxpos = fxpos * (z) + (xbase + xstep * x) * (1.0-z); } else { fxpos = 3.0*fxpos*fxpos - 2.0*fxpos*fxpos*fxpos; fxpos = fxpos * (-z) + (xbase + xstep * x) * (1.0+z); } } */ xp = (fxpos * 65536.0*(float)width); rmdr = 63-((xp>>10) & 63); xp >>= 16; } else { xp = ixpos>>16; rmdr = 63-((ixpos>>10) & 63); ixpos += ixstep; } xp -= 1;// was -2 causing a right shift //DAN20100225 if(xp>4 && xp<width-4) { __m64 *src64; __m64 *dst64; __m64 sumx16; __m64 rgbx16; __m64 gain16; int linepos = (xp-1)*3; src64 = (__m64 *)&sscanline[linepos]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit src64 = (__m64 *)&sscanline[linepos+3]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); 
src64 = (__m64 *)&sscanline[linepos+6]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&sscanline[linepos+9]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); sumx16 = _mm_adds_pi16(sumx16, overflowprotect); sumx16 = _mm_subs_pu16(sumx16, overflowprotect); sumx16 = _mm_slli_pi16(sumx16, 1); dst64 = (__m64 *)&RGB48[xx]; *dst64 = sumx16; } else { int i,t,r=0,g=0,b=0; for(i=0; i<4; i++) { if(xp<=0 || xp>= width) { /* if(i == 3) //DAN20101112 this code was crashing disparity zoom { gains = lanczos[rmdr]>>1; r += (gains * sscanline[(xp-1)*3]); g += (gains * sscanline[(xp-1)*3+1]); b += (gains * sscanline[(xp-1)*3+2]); } else */ { gains += lanczos[rmdr]>>1; } } else { gains += lanczos[rmdr]>>1; r += (gains * sscanline[xp*3]); g += (gains * sscanline[xp*3+1]); b += (gains * sscanline[xp*3+2]); gains = 0; } xp++; rmdr+=64; } r >>= 14; g >>= 14; b >>= 14; if(r<0) r=0; else if(r>65535) r=65535; if(g<0) g=0; else if(g>65535) g=65535; if(b<0) b=0; else if(b>65535) b=65535; RGB48[xx] = r; RGB48[xx+1] = g; RGB48[xx+2] = b; } xx+=3; } } else { float fxpos = xbase; for(x=0;x<width; x++) //RGB { int gains = 0; int xp, rmdr; if(z != 0.0) { if(x<holdstart) { fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart); } else if(x>holdend) { int diff = width - x; int range = width - holdend; fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range); } else { fxpos += flatxstep; } /* fxpos = xbase + xstep * x;//(float)ixpos/(65536.0*(float)width); if(fxpos >= 0.0 && fxpos <= 1.0) { if(z > 0.0) { fxpos = 1.8*fxpos - 2.4*fxpos*fxpos + (1.6*fxpos*fxpos*fxpos); fxpos = fxpos * (z) + (xbase + xstep * x) * (1.0-z); } else { fxpos = 3.0*fxpos*fxpos - 2.0*fxpos*fxpos*fxpos; 
fxpos = fxpos * (-z) + (xbase + xstep * x) * (1.0+z); } } */ xp = (fxpos * 65536.0*(float)width); rmdr = 63-((xp>>10) & 63); xp >>= 16; } else { xp = ixpos>>16; rmdr = 63-((ixpos>>10) & 63); ixpos += ixstep; } xp -= 1; // was -2 causing a right shift //DAN20100225 if(xp>4 && xp<width-4) { __m64 *src64; __m64 *dst64; __m64 sumx16; __m64 rgbx16; __m64 gain16; int linepos = (xp-0)*3; //DAN20102602 -- fix left edge error. src64 = (__m64 *)&scanline[linepos]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit src64 = (__m64 *)&scanline[linepos+3]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&scanline[linepos+6]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&scanline[linepos+9]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); sumx16 = _mm_adds_pi16(sumx16, overflowprotect); sumx16 = _mm_subs_pu16(sumx16, overflowprotect); sumx16 = _mm_slli_pi16(sumx16, 2); dst64 = (__m64 *)&RGB48[xx]; *dst64 = sumx16; } else { int i,t,r=0,g=0,b=0; for(i=0; i<4; i++) { if(xp<=0 || xp>= width) { /* if(i == 3) //DAN20101112 this code was crashing disparity zoom { gains = lanczos[rmdr]>>1; r += (gains * scanline[(xp-1)*3]); g += (gains * scanline[(xp-1)*3+1]); b += (gains * scanline[(xp-1)*3+2]); } else */ { gains += lanczos[rmdr]>>1; } } else { gains += lanczos[rmdr]>>1; r += (gains * scanline[xp*3]); g += (gains * scanline[xp*3+1]); b += (gains * scanline[xp*3+2]); gains = 0; 
} xp++; rmdr+=64; } r >>= 14; g >>= 14; b >>= 14; if(r<0) r=0; else if(r>65535) r=65535; if(g<0) g=0; else if(g>65535) g=65535; if(b<0) b=0; else if(b>65535) b=65535; RGB48[xx] = r; RGB48[xx+1] = g; RGB48[xx+2] = b; } xx+=3; } } } /* memcpy(scanline, RGB48, width*3*2); { for(x=0;x<width*3; x+=3) //RGB { int r,g,b,xp = ((int)xposf)*3; xposf += xstepf; if(xp<0 || xp>= width*3) { RGB48[x] = 0; RGB48[x+1] = 0; RGB48[x+2] = 0; } else { r = scanline[xp]; g = scanline[xp+1]; b = scanline[xp+2]; RGB48[x] = r; RGB48[x+1] = g; RGB48[x+2] = b; } } } */ //_mm_empty(); } #endif void RGBA64HoriShiftZoom(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, int height, int line, float hoffset, float roffset, float zoom, int flip, float frameTilt, int eye) { float xposf,xstepf; int x; //int endofSSEline = 0; unsigned short *scanline = (unsigned short *)buffer; short *sscanline = (short *)buffer; int neg = 0; float offset = hoffset; if(flip) { unsigned short *ptrL = RGB48; unsigned short *ptrR = RGB48; ptrR += (width*4) - 4; for(x=0;x<width/2;x++) { int t; t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t; t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t; t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t; t = *ptrL; *ptrL++ = *ptrR; *ptrR++ = t; ptrR -= 4; } } if(eye > 0) { zoom *= 1.0f + frameTilt; } else { zoom /= 1.0f + frameTilt; } xposf = (float)width*(0.5f - 1.0f/(2.0f*zoom) - offset); xposf -= width * roffset * 0.5f; xposf += line * (width* roffset / ((float)height*zoom)); if(xposf < 0.0) neg = 1; xstepf = 1.0f/zoom; memcpy(scanline, RGB48, width*4*2); { //unsigned short zeroline[3] = {0}; int xx = 0; int ixpos = (int)(xposf * 65536.0f); int ixstep = (int)(xstepf * 65536.0f); float xbase = xposf / (float)width; float xstep = xstepf / (float)width; float z = (decoder->cfhddata.FrameHDynamic - 1.0f)*2.0f; int holdstart = width*5/10; // Use to specify a area of uniform stretch int holdend = width*5/10; float flatxstep = xstep-z*0.5f*xstep; float modified_xstep_avg = (xstep * 
(float)width - (float)(holdend - holdstart) * flatxstep) / (float)(width - (holdend - holdstart)); float bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg); #if MMXSUPPORTED //TODO DANREMOVE __m64 overflowprotect = _mm_set1_pi16(0x7fff-0x3fff); #endif if(bottomxstep < 0.0) { bottomxstep = 0.0; flatxstep = modified_xstep_avg + modified_xstep_avg; } if(flatxstep < 0.0) { flatxstep = 0.0; bottomxstep = modified_xstep_avg - (flatxstep- modified_xstep_avg); } if(decoder->StereoBufferFormat == DECODED_FORMAT_W13A) { float fxpos = xbase; for(x=0;x<width; x++) //RGB { int gains = 0; int xp, rmdr; if(z != 0.0) { if(x<holdstart) { fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart); } else if(x>holdend) { int diff = width - x; int range = width - holdend; fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range); } else { fxpos += flatxstep; } xp = (int)(fxpos * 65536.0f*(float)width); rmdr = 63-((xp>>10) & 63); xp >>= 16; } else { xp = ixpos>>16; rmdr = 63-((ixpos>>10) & 63); ixpos += ixstep; } xp -= 1;// was -2 causing a right shift //DAN20100225 #if MMXSUPPORTED //TODO DANREMOVE if(xp>4 && xp<width-4) { __m64 *src64; __m64 *dst64; __m64 sumx16; __m64 rgbx16; __m64 gain16; int linepos = (xp-1)*4; src64 = (__m64 *)&sscanline[linepos]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit src64 = (__m64 *)&sscanline[linepos+4]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&sscanline[linepos+8]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //13*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&sscanline[linepos+12]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit rgbx16 = 
_mm_mulhi_pi16(rgbx16, gain16); //13*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); sumx16 = _mm_adds_pi16(sumx16, overflowprotect); sumx16 = _mm_subs_pu16(sumx16, overflowprotect); sumx16 = _mm_slli_pi16(sumx16, 1); dst64 = (__m64 *)&RGB48[xx]; *dst64 = sumx16; } else #endif { int i,r=0,g=0,b=0,a=0; for(i=0; i<4; i++) { if(xp<=0 || xp>= width) { gains += lanczos[rmdr]>>1; } else { gains += lanczos[rmdr]>>1; r += (gains * sscanline[xp*4]); g += (gains * sscanline[xp*4+1]); b += (gains * sscanline[xp*4+2]); a += (gains * sscanline[xp*4+3]); gains = 0; } xp++; rmdr+=64; } r >>= 14; g >>= 14; b >>= 14; a >>= 14; if(r<0) r=0; else if(r>65535) r=65535; if(g<0) g=0; else if(g>65535) g=65535; if(b<0) b=0; else if(b>65535) b=65535; if(a<0) a=0; else if(a>65535) a=65535; RGB48[xx] = r; RGB48[xx+1] = g; RGB48[xx+2] = b; RGB48[xx+3] = a; } xx+=4; } } else { float fxpos = xbase; for(x=0;x<width; x++) //RGB { int gains = 0; int xp, rmdr; if(z != 0.0) { if(x<holdstart) { fxpos += bottomxstep*((float)(holdstart-x)/(float)holdstart) + flatxstep*((float)x/(float)holdstart); } else if(x>holdend) { int diff = width - x; int range = width - holdend; fxpos += bottomxstep*((float)(range-diff)/(float)range) + flatxstep*((float)(diff)/(float)range); } else { fxpos += flatxstep; } xp = (int)(fxpos * 65536.0f*(float)width); rmdr = 63-((xp>>10) & 63); xp >>= 16; } else { xp = ixpos>>16; rmdr = 63-((ixpos>>10) & 63); ixpos += ixstep; } xp -= 1; // was -2 causing a right shift //DAN20100225 #if MMXSUPPORTED //TODO DANREMOVE if(xp>4 && xp<width-4) { __m64 *src64; __m64 *dst64; __m64 sumx16; __m64 rgbx16; __m64 gain16; int linepos = (xp-0)*4; //DAN20102602 -- fix left edge error. 
src64 = (__m64 *)&scanline[linepos]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit sumx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit src64 = (__m64 *)&scanline[linepos+4]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+64]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&scanline[linepos+8]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+128]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); src64 = (__m64 *)&scanline[linepos+12]; rgbx16 = *src64; gain16 = _mm_set1_pi16(lanczos[rmdr+192]); //15-bit rgbx16 = _mm_srli_pi16(rgbx16, 1); //15-bit rgbx16 = _mm_mulhi_pi16(rgbx16, gain16); //15*15-bit sumx16 = _mm_adds_pi16(sumx16, rgbx16); sumx16 = _mm_adds_pi16(sumx16, overflowprotect); sumx16 = _mm_subs_pu16(sumx16, overflowprotect); sumx16 = _mm_slli_pi16(sumx16, 2); dst64 = (__m64 *)&RGB48[xx]; *dst64 = sumx16; } else #endif { int i,r=0,g=0,b=0,a=0; for(i=0; i<4; i++) { if(xp<=0 || xp>= width) { gains += lanczos[rmdr]>>1; } else { gains += lanczos[rmdr]>>1; r += (gains * scanline[xp*4]); g += (gains * scanline[xp*4+1]); b += (gains * scanline[xp*4+2]); a += (gains * scanline[xp*4+3]); gains = 0; } xp++; rmdr+=64; } r >>= 14; g >>= 14; b >>= 14; a >>= 14; if(r<0) r=0; else if(r>65535) r=65535; if(g<0) g=0; else if(g>65535) g=65535; if(b<0) b=0; else if(b>65535) b=65535; if(a<0) a=0; else if(a>65535) a=65535; RGB48[xx] = r; RGB48[xx+1] = g; RGB48[xx+2] = b; RGB48[xx+3] = a; } xx+=4; } } } #if MMXSUPPORTED //TODO DANREMOVE //_mm_empty(); #endif } void RGB48WindowMask(DECODER *decoder, unsigned short *RGB48, int width, int channel, float windowMask) { float line = (float)width * fabsf(windowMask); int pixelbytes = 6; float frac = (float)(line-(float)((int)line)); 
switch(decoder->StereoBufferFormat) { case DECODED_FORMAT_RGB32: case DECODED_FORMAT_W13A: case DECODED_FORMAT_RG64: pixelbytes = 8; break; } if(decoder->StereoBufferFormat == DECODED_FORMAT_W13A || decoder->StereoBufferFormat == DECODED_FORMAT_WP13) // signed math needed { short *ptrL = (short *)RGB48; short *ptrR = (short *)RGB48; if(windowMask < 0) channel = channel == 0 ? 1 : 0; if(pixelbytes == 6) { if(channel == 0) { memset(ptrL, 0, 6*(int)line); ptrL += ((int)line*3); ptrL[0] = (int)((float)ptrL[0] * (1.0-frac)); ptrL[1] = (int)((float)ptrL[1] * (1.0-frac)); ptrL[2] = (int)((float)ptrL[2] * (1.0-frac)); } else { ptrR += ((width-(int)line)*3); memset(ptrR, 0, 6*(int)line); ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac)); ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac)); ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac)); } } else { if(channel == 0) { memset(ptrL, 0, 8*(int)line); ptrL += ((int)line*4); ptrL[0] = (int)((float)ptrL[0] * (1.0-frac)); ptrL[1] = (int)((float)ptrL[1] * (1.0-frac)); ptrL[2] = (int)((float)ptrL[2] * (1.0-frac)); ptrL[3] = (int)((float)ptrL[3] * (1.0-frac)); } else { ptrR += ((width-(int)line)*4); memset(ptrR, 0, 8*(int)line); ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac)); ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac)); ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac)); ptrR[-4] = (int)((float)ptrR[-4] * (1.0-frac)); } } } else { unsigned short *ptrL = RGB48; unsigned short *ptrR = RGB48; if(windowMask < 0) channel = channel == 0 ? 
1 : 0; if(pixelbytes == 6) { if(channel == 0) { memset(ptrL, 0, 6*(int)line); ptrL += ((int)line*3); ptrL[0] = (int)((float)ptrL[0] * (1.0-frac)); ptrL[1] = (int)((float)ptrL[1] * (1.0-frac)); ptrL[2] = (int)((float)ptrL[2] * (1.0-frac)); } else { ptrR += ((width-(int)line)*3); memset(ptrR, 0, 6*(int)line); ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac)); ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac)); ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac)); } } else { if(channel == 0) { memset(ptrL, 0, 8*(int)line); ptrL += ((int)line*4); ptrL[0] = (int)((float)ptrL[0] * (1.0-frac)); ptrL[1] = (int)((float)ptrL[1] * (1.0-frac)); ptrL[2] = (int)((float)ptrL[2] * (1.0-frac)); ptrL[3] = (int)((float)ptrL[3] * (1.0-frac)); } else { ptrR += ((width-(int)line)*4); memset(ptrR, 0, 8*(int)line); ptrR[-1] = (int)((float)ptrR[-1] * (1.0-frac)); ptrR[-2] = (int)((float)ptrR[-2] * (1.0-frac)); ptrR[-3] = (int)((float)ptrR[-3] * (1.0-frac)); ptrR[-4] = (int)((float)ptrR[-4] * (1.0-frac)); } } } } void RGB48HoriShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offset, int flip) { float xposf,remainf; int xposi,tablepos,x; int gainA,gainB,gainC,gainD; //int endofSSEline = 0; unsigned short *scanline = (unsigned short *)buffer; int neg = 0,shift = 0; __m128i l1,l2,l3,gA,gB,gC,gD,o128,t1,t2; __m128i *line128, *outline128; if(flip) { unsigned short *ptrL = RGB48; unsigned short *ptrR = RGB48; ptrR += (width*3) - 3; for(x=0;x<width/2;x++) { int t1,t2,t3; t1 = ptrL[0]; ptrL[0] = ptrR[0]; ptrR[0] = t1; t2 = ptrL[1]; ptrL[1] = ptrR[1]; ptrR[1] = t2; t3 = ptrL[2]; ptrL[2] = ptrR[2]; ptrR[2] = t3; ptrL += 3; ptrR -= 3; } } if(offset < 0.0) neg = 1; xposf = width * offset; xposi = (int)floorf(xposf); remainf = xposf - (float)xposi; tablepos = (int)(remainf*(float)SUBPIXEL); xposi = abs(xposi); if(xposi==0 && tablepos == 0) return; // no move required gainA = gains[tablepos][0]; gainB = gains[tablepos][1]; gainC = gains[tablepos][2]; gainD = gains[tablepos][3]; 
if(neg == 0) { unsigned short *ptr = scanline; int nwidth = width-xposi+16; if(nwidth > width) nwidth = width; for(x=0;x<xposi+2;x++) { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b } memcpy(ptr, RGB48, (nwidth)*3*2); ptr += (nwidth)*3; for(x=0;x<16;x++) { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b } } else { unsigned short *ptr = scanline; for(x=0;x<2;x++) { if(x+xposi-2>=0) { *ptr++ = RGB48[(x+xposi-2)*3];//r *ptr++ = RGB48[(x+xposi-2)*3+1];//g *ptr++ = RGB48[(x+xposi-2)*3+2];//b } else { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b } } memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2); ptr += (width-xposi)*3; for(x=0;x<xposi+16;x++) { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b } } gA = _mm_set1_epi16(gainA); gB = _mm_set1_epi16(gainB); gC = _mm_set1_epi16(gainC); gD = _mm_set1_epi16(gainD); line128 = (__m128i *)&scanline[0]; //outline128 = line128; outline128 = (__m128i *)&RGB48[0]; //l1 = load128;//r1,g1,b1,r2,g2,b2,r3,g3, //l2 = load128;//b3,r4,g4,b4,r5,g5,b5,r6 //l3 = load128;//g6,b6,r7,g7,b7,r8,g8,b8 if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13) { l1 = _mm_loadu_si128(line128++); l2 = _mm_loadu_si128(line128++); l3 = _mm_loadu_si128(line128++); shift = 0; } else { l1 = _mm_loadu_si128(line128++); l1 = _mm_srli_epi16(l1,3); //13-bit unsigned l2 = _mm_loadu_si128(line128++); l2 = _mm_srli_epi16(l2,3); //13-bit unsigned l3 = _mm_loadu_si128(line128++); l3 = _mm_srli_epi16(l3,3); //13-bit unsigned shift = 3; } for(x=0;x<width*3; x+=8) { //o=l1* gainA o128 = _mm_mulhi_epi16(l1, gA); //t1 = l1<<3*16 //t1 = r2,g2,b2,r3,g3, 0 0 0 //t2 = l2>>16*5 //t2 = 0 0 0 0 0 b3,r4,g4 //t1 += t2; //t1 = r2,g2,b2,r3,g3,b3,r4,g4 //l1 = t1 //l1 = r2,g2,b2,r3,g3,b3,r4,g4 //t1 *= gainB //o += t1 t1 = _mm_srli_si128(l1,3*2); t2 = _mm_slli_si128(l2,5*2); t1 = _mm_adds_epi16(t1,t2); l1 = t1; t1 = _mm_mulhi_epi16(t1, gB); o128 = _mm_adds_epi16(o128,t1); //t1 = l1<<3*16 //t1 = r3,g3,b3,r4,g4 0 0 0 //t2 = l2<<3*16; //t2 = b4,r5,g5,b5,r6 0 0 0 //t2 >>= 5*16; //t2 = 0 0 0 0 0 
b4,r5,g5 //t1 += t2 //t1 = r3,g3,b3,r4,g4,b4,r5,g5 //l1 = t1 //l1 = r3,g3,b3,r4,g4,b4,r5,g5 //t1 *= gainC //o += t1 t1 = _mm_srli_si128(l1,3*2); t2 = _mm_srli_si128(l2,3*2); t2 = _mm_slli_si128(t2,5*2); t1 = _mm_adds_epi16(t1,t2); l1 = t1; t1 = _mm_mulhi_epi16(t1, gC); o128 = _mm_adds_epi16(o128,t1); //t1 = l1<<3*16 //t1 = r4,g4,b4,r5,g5 0 0 0 //t2 = l2<<6*16 //t2 = b5,r6 0 0 0 0 0 0 //t2 >>= 5 * 16; //t2 = 0 0 0 0 0 b5,r6 0 //t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6, 0 //t2 = l3>>7*16 //t2 = 0 0 0 0 0 0 0 g6 //t1 += t2 //t1 = r4,g4,b4,r5,g5,b5,r6,g6 //t1 *= gainD //o += t1 t1 = _mm_srli_si128(l1,3*2); t2 = _mm_srli_si128(l2,6*2); t2 = _mm_slli_si128(t2,5*2); t1 = _mm_adds_epi16(t1,t2); t2 = _mm_slli_si128(l3,7*2); t1 = _mm_adds_epi16(t1,t2); t1 = _mm_mulhi_epi16(t1, gD); o128 = _mm_adds_epi16(o128,t1); l1 = l2; l2 = l3; l3 = _mm_loadu_si128(line128++); if(shift) { l3 = _mm_srli_epi16(l3,3); //13-bit unsigned o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_slli_epi16(o128,4); } else { // upper limit to 32767 o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_slli_epi16(o128,1); } _mm_storeu_si128(outline128++, o128); } } void RGBA64HoriShift(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offset, int flip) { float xposf,remainf; int xposi,tablepos,x; int gainA,gainB,gainC,gainD; //int endofSSEline = 0; unsigned short *scanline = (unsigned short *)buffer; int neg = 0,shift = 0; __m128i l1,l2,l3,gA,gB,gC,gD,o128,t1,t2; __m128i *line128, *outline128; if(flip) { unsigned short *ptrL = RGB48; unsigned short *ptrR = RGB48; ptrR += (width*4) - 4; for(x=0;x<width/2;x++) { int t1,t2,t3,t4; t1 = ptrL[0]; ptrL[0] = ptrR[0]; ptrR[0] = t1; t2 = ptrL[1]; ptrL[1] = ptrR[1]; ptrR[1] = t2; t3 = ptrL[2]; ptrL[2] = ptrR[2]; ptrR[2] = t3; t4 = ptrL[2]; ptrL[3] = ptrR[3]; ptrR[3] = t4; 
ptrL += 4; ptrR -= 4; } } if(offset < 0.0) neg = 1; xposf = width * offset; xposi = (int)floorf(xposf); remainf = xposf - (float)xposi; tablepos = (int)(remainf*(float)SUBPIXEL); xposi = abs(xposi); if(xposi==0 && tablepos == 0) return; // no move required gainA = gains[tablepos][0]; gainB = gains[tablepos][1]; gainC = gains[tablepos][2]; gainD = gains[tablepos][3]; if(neg == 0) { unsigned short *ptr = scanline; int nwidth = width-xposi+16; if(nwidth > width) nwidth = width; for(x=0;x<xposi+2;x++) { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b *ptr++ = 0;//a } memcpy(ptr, RGB48, (nwidth)*4*2); ptr += (nwidth)*4; for(x=0;x<16;x++) { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b *ptr++ = 0;//a } } else { unsigned short *ptr = scanline; for(x=0;x<2;x++) { if(x+xposi-2>=0) { *ptr++ = RGB48[(x+xposi-2)*4];//r *ptr++ = RGB48[(x+xposi-2)*4+1];//g *ptr++ = RGB48[(x+xposi-2)*4+2];//b *ptr++ = RGB48[(x+xposi-2)*4+3];//a } else { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b *ptr++ = 0;//a } } memcpy(ptr, &RGB48[xposi*4], (width-xposi)*4*2); ptr += (width-xposi)*4; for(x=0;x<xposi+16;x++) { *ptr++ = 0;//r *ptr++ = 0;//g *ptr++ = 0;//b *ptr++ = 0;//a } } gA = _mm_set1_epi16(gainA); gB = _mm_set1_epi16(gainB); gC = _mm_set1_epi16(gainC); gD = _mm_set1_epi16(gainD); line128 = (__m128i *)&scanline[0]; //outline128 = line128; outline128 = (__m128i *)&RGB48[0]; //l1 = load128;//r1,g1,b1,a1,r2,g2,b2,a2, //l2 = load128;//r3,g3,b3,a3,r4,g4,b4,a4, //l3 = load128;//r5,g5,b5,a5,r6,g6,b6,a6, //l4 = load128;//r7,g7,b7,a7,r8,g8,b8,a8, if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A) { l1 = _mm_loadu_si128(line128++); l2 = _mm_loadu_si128(line128++); l3 = _mm_loadu_si128(line128++); shift = 0; } else { l1 = _mm_loadu_si128(line128++); l1 = _mm_srli_epi16(l1,3); //13-bit unsigned l2 = _mm_loadu_si128(line128++); l2 = _mm_srli_epi16(l2,3); //13-bit unsigned l3 = _mm_loadu_si128(line128++); l3 = _mm_srli_epi16(l3,3); //13-bit unsigned 
shift = 3; } for(x=0;x<width*4; x+=8) { //o=l1* gainA o128 = _mm_mulhi_epi16(l1, gA); //t1 = l1<<4*16 //t1 = r2,g2,b2,a2,0, 0 0 0 //t2 = l2>>4*16 //t2 = 0 0 0 0 r3,g3,b3,a4 //t1 += t2; //t1 = r2,g2,b2,a2,r3,g3,b3,a4 //l1 = t1 //l1 = r2,g2,b2,a2,r3,g3,b3,a4 //t1 *= gainB //o += t1 t1 = _mm_srli_si128(l1,4*2); t2 = _mm_slli_si128(l2,4*2); t1 = _mm_adds_epi16(t1,t2); l1 = t1; t1 = _mm_mulhi_epi16(t1, gB); o128 = _mm_adds_epi16(o128,t1); //t1 = l1<<4*16 //t1 = r3,g3,b3,a3, 0 0 0 0 //t2 = l2<<4*16;//t2 = r4,g4,b4,a4, 0 0 0 0 //t2 >>= 4*16; //t2 = 0 0 0 0 r4,g4,b4,a4 //t1 += t2 //t1 = r3,g3,b3,a4,r4,g4,b4,a4 //l1 = t1 //l1 = r3,g3,b3,a4,r4,g4,b4,a4 //t1 *= gainC //o += t1 t1 = _mm_srli_si128(l1,4*2); t2 = _mm_srli_si128(l2,4*2); t2 = _mm_slli_si128(t2,4*2); t1 = _mm_adds_epi16(t1,t2); l1 = t1; t1 = _mm_mulhi_epi16(t1, gC); o128 = _mm_adds_epi16(o128,t1); //t1 = l1<<4*16 //t1 = r4,g4,b4,a4,0 0 0 0 //t2 = l3>>4*16 //t2 = 0 0 0 0 r5,g5,b5,a5 //t1 += t2 //t1 = r4,g4,b4,a4,r5,g5,b5,a5 //t1 *= gainD //o += t1 t1 = _mm_srli_si128(l1,4*2); t2 = _mm_slli_si128(l3,4*2); t1 = _mm_adds_epi16(t1,t2); t1 = _mm_mulhi_epi16(t1, gD); o128 = _mm_adds_epi16(o128,t1); l1 = l2; l2 = l3; l3 = _mm_loadu_si128(line128++); if(shift) { l3 = _mm_srli_epi16(l3,3); //13-bit unsigned o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff)); o128 = _mm_slli_epi16(o128,4); } else { // upper limit to 32767 o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff)); o128 = _mm_slli_epi16(o128,1); } _mm_storeu_si128(outline128++, o128); } } void RGB48HoriShiftAnaglyph(DECODER *decoder, unsigned short *RGB48, unsigned short *buffer, int width, float offsetR, float offsetG, float offsetB , int flipR, int flipG, int flipB) { float Rxposf,Rremainf; int Rxposi,Rtablepos; float Gxposf,Gremainf; int Gxposi,Gtablepos; float Bxposf,Bremainf; int Bxposi,Btablepos; int x; int 
RgainA,RgainB,RgainC,RgainD; int GgainA,GgainB,GgainC,GgainD; int BgainA,BgainB,BgainC,BgainD; //int endofSSEline = 0; unsigned short *scanline = (unsigned short *)buffer; int negR = 0; int negG = 0; int negB = 0; int shift = 0; __m128i l1,l2,l3,o128,t1,t2; __m128i *line128, *outline128; __m128i gA1,gB1,gC1,gD1,gA2,gB2,gC2,gD2,gA3,gB3,gC3,gD3; if(flipR) { unsigned short *ptrL = RGB48; unsigned short *ptrR = RGB48; ptrR += (width*3) - 3; for(x=0;x<width/2;x++) { int t; t = *ptrL; *ptrL = *ptrR; *ptrR = t; ptrL += 3; ptrR -= 3; } } if(flipG) { unsigned short *ptrL = &RGB48[1]; unsigned short *ptrR = &RGB48[1]; ptrR += (width*3) - 3; for(x=0;x<width/2;x++) { int t; t = *ptrL; *ptrL = *ptrR; *ptrR = t; ptrL += 3; ptrR -= 3; } } if(flipB) { unsigned short *ptrL = &RGB48[2]; unsigned short *ptrR = &RGB48[2]; ptrR += (width*3) - 3; for(x=0;x<width/2;x++) { int t; t = *ptrL; *ptrL = *ptrR; *ptrR = t; ptrL += 3; ptrR -= 3; } } if(offsetR < 0.0) negR = 1; if(offsetG < 0.0) negG = 1; if(offsetB < 0.0) negB = 1; Rxposf = width * offsetR; Rxposi = (int)floorf(Rxposf); Rremainf = Rxposf - (float)Rxposi; Rtablepos = (int)(Rremainf*(float)SUBPIXEL); Gxposf = width * offsetG; Gxposi = (int)floorf(Gxposf); Gremainf = Gxposf - (float)Gxposi; Gtablepos = (int)(Gremainf*(float)SUBPIXEL); Bxposf = width * offsetB; Bxposi = (int)floorf(Bxposf); Bremainf = Bxposf - (float)Bxposi; Btablepos = (int)(Bremainf*(float)SUBPIXEL); Rxposi = abs(Rxposi); Gxposi = abs(Gxposi); Bxposi = abs(Bxposi); if(Rxposi==0 && Rtablepos == 0) return; // no move required RgainA = gains[Rtablepos][0]; RgainB = gains[Rtablepos][1]; RgainC = gains[Rtablepos][2]; RgainD = gains[Rtablepos][3]; GgainA = gains[Gtablepos][0]; GgainB = gains[Gtablepos][1]; GgainC = gains[Gtablepos][2]; GgainD = gains[Gtablepos][3]; BgainA = gains[Btablepos][0]; BgainB = gains[Btablepos][1]; BgainC = gains[Btablepos][2]; BgainD = gains[Btablepos][3]; if(negR == 0) { unsigned short *ptr = scanline; int nwidth = width-Rxposi+16; if(nwidth > 
width) nwidth = width; for(x=0;x<Rxposi+2;x++) { *ptr++ = 0;//r ptr++;//g ptr++;//b } for(x=0;x<nwidth;x++) { *ptr++ = RGB48[x*3];//r ptr++;//g ptr++;//b } for(x=0;x<16;x++) { *ptr++ = 0;//r ptr++;//g ptr++;//b } } else { unsigned short *ptr = scanline; for(x=0;x<2;x++) { if(x+Rxposi-2>=0) { *ptr++ = RGB48[(x+Rxposi-2)*3];//r ptr++;//g ptr++;//b } else { *ptr++ = 0;//r ptr++;//g ptr++;//b } } //memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2); //ptr += (width-xposi)*3; for(x=Rxposi;x<width;x++) { *ptr++ = RGB48[x*3];//r ptr++;//g ptr++;//b } for(x=0;x<Rxposi+16;x++) { *ptr++ = 0;//r ptr++;//g ptr++;//b } } if(negG == 0) { unsigned short *ptr = scanline; int nwidth = width-Gxposi+16; if(nwidth > width) nwidth = width; for(x=0;x<Gxposi+2;x++) { ptr++;//r *ptr++ = 0;//g ptr++;//b } for(x=0;x<nwidth;x++) { ptr++;//r *ptr++ = RGB48[x*3+1];//g ptr++;//b } for(x=0;x<16;x++) { ptr++;//r *ptr++ = 0;//g ptr++;//b } } else { unsigned short *ptr = scanline; for(x=0;x<2;x++) { if(x+Gxposi-2>=0) { ptr++;//r *ptr++ = RGB48[(x+Gxposi-2)*3+1];//g ptr++;//b } else { ptr++;//r *ptr++ = 0;//g ptr++;//b } } //memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2); //ptr += (width-xposi)*3; for(x=Gxposi;x<width;x++) { ptr++;//r *ptr++ = RGB48[x*3+1];//g ptr++;//b } for(x=0;x<Gxposi+16;x++) { ptr++;//r *ptr++ = 0;//g ptr++;//b } } if(negB == 0) { unsigned short *ptr = scanline; int nwidth = width-Bxposi+16; if(nwidth > width) nwidth = width; for(x=0;x<Bxposi+2;x++) { ptr++;//r ptr++;//g *ptr++ = 0;//b } for(x=0;x<nwidth;x++) { ptr++;//r ptr++;//g *ptr++ = RGB48[x*3+2];//b } for(x=0;x<16;x++) { ptr++;//r ptr++;//g *ptr++ = 0;//b } } else { unsigned short *ptr = scanline; for(x=0;x<2;x++) { if(x+Bxposi-2>=0) { ptr++;//r ptr++;//g *ptr++ = RGB48[(x+Bxposi-2)*3+2];//b } else { ptr++;//r ptr++;//g *ptr++ = 0;//b } } //memcpy(ptr, &RGB48[xposi*3], (width-xposi)*3*2); //ptr += (width-xposi)*3; for(x=Bxposi;x<width;x++) { ptr++;//r ptr++;//g *ptr++ = RGB48[x*3+2];//b } for(x=0;x<Bxposi+16;x++) { 
ptr++;//r ptr++;//g *ptr++ = 0;//b } } gA1 = _mm_set_epi16(RgainA,GgainA,BgainA,RgainA,GgainA,BgainA,RgainA,GgainA); gA2 = _mm_set_epi16(BgainA,RgainA,GgainA,BgainA,RgainA,GgainA,BgainA,RgainA); gA3 = _mm_set_epi16(GgainA,BgainA,RgainA,GgainA,BgainA,RgainA,GgainA,BgainA); gB1 = _mm_set_epi16(RgainB,GgainB,BgainB,RgainB,GgainB,BgainB,RgainB,GgainB); gB2 = _mm_set_epi16(BgainB,RgainB,GgainB,BgainB,RgainB,GgainB,BgainB,RgainB); gB3 = _mm_set_epi16(GgainB,BgainB,RgainB,GgainB,BgainB,RgainB,GgainB,BgainB); gC1 = _mm_set_epi16(RgainC,GgainC,BgainC,RgainC,GgainC,BgainC,RgainC,GgainC); gC2 = _mm_set_epi16(BgainC,RgainC,GgainC,BgainC,RgainC,GgainC,BgainC,RgainC); gC3 = _mm_set_epi16(GgainC,BgainC,RgainC,GgainC,BgainC,RgainC,GgainC,BgainC); gD1 = _mm_set_epi16(RgainD,GgainD,BgainD,RgainD,GgainD,BgainD,RgainD,GgainD); gD2 = _mm_set_epi16(BgainD,RgainD,GgainD,BgainD,RgainD,GgainD,BgainD,RgainD); gD3 = _mm_set_epi16(GgainD,BgainD,RgainD,GgainD,BgainD,RgainD,GgainD,BgainD); line128 = (__m128i *)&scanline[0]; //outline128 = line128; outline128 = (__m128i *)&RGB48[0]; //l1 = load128;//r1,g1,b1,r2,g2,b2,r3,g3, //l2 = load128;//b3,r4,g4,b4,r5,g5,b5,r6 //l3 = load128;//g6,b6,r7,g7,b7,r8,g8,b8 if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13) { l1 = _mm_loadu_si128(line128++); l2 = _mm_loadu_si128(line128++); l3 = _mm_loadu_si128(line128++); shift = 0; } else { l1 = _mm_loadu_si128(line128++); l1 = _mm_srli_epi16(l1,3); //13-bit unsigned l2 = _mm_loadu_si128(line128++); l2 = _mm_srli_epi16(l2,3); //13-bit unsigned l3 = _mm_loadu_si128(line128++); l3 = _mm_srli_epi16(l3,3); //13-bit unsigned shift = 3; } for(x=0;x<width*3; x+=8) { //o=l1* gainA o128 = _mm_mulhi_epi16(l1, gA1); //t1 = l1<<3*16 //t1 = r2,g2,b2,r3,g3, 0 0 0 //t2 = l2>>16*5 //t2 = 0 0 0 0 0 b3,r4,g4 //t1 += t2; //t1 = r2,g2,b2,r3,g3,b3,r4,g4 //l1 = t1 //l1 = r2,g2,b2,r3,g3,b3,r4,g4 //t1 *= gainB //o += t1 t1 = _mm_srli_si128(l1,3*2); t2 = _mm_slli_si128(l2,5*2); t1 = _mm_adds_epi16(t1,t2); l1 = t1; t1 = 
_mm_mulhi_epi16(t1, gB1);
o128 = _mm_adds_epi16(o128,t1);

//t1 = l1<<3*16
//t1 = r3,g3,b3,r4,g4 0 0 0
//t2 = l2<<3*16;
//t2 = b4,r5,g5,b5,r6 0 0 0
//t2 >>= 5*16;
//t2 = 0 0 0 0 0 b4,r5,g5
//t1 += t2
//t1 = r3,g3,b3,r4,g4,b4,r5,g5
//l1 = t1
//l1 = r3,g3,b3,r4,g4,b4,r5,g5
//t1 *= gainC
//o += t1
t1 = _mm_srli_si128(l1,3*2);
t2 = _mm_srli_si128(l2,3*2);
t2 = _mm_slli_si128(t2,5*2);
t1 = _mm_adds_epi16(t1,t2);
l1 = t1;
t1 = _mm_mulhi_epi16(t1, gC1);
o128 = _mm_adds_epi16(o128,t1);

//t1 = l1<<3*16
//t1 = r4,g4,b4,r5,g5 0 0 0
//t2 = l2<<6*16
//t2 = b5,r6 0 0 0 0 0 0
//t2 >>= 5 * 16;
//t2 = 0 0 0 0 0 b5,r6 0
//t1 += t2
//t1 = r4,g4,b4,r5,g5,b5,r6, 0
//t2 = l3>>7*16
//t2 = 0 0 0 0 0 0 0 g6
//t1 += t2
//t1 = r4,g4,b4,r5,g5,b5,r6,g6
//t1 *= gainD
//o += t1
t1 = _mm_srli_si128(l1,3*2);
t2 = _mm_srli_si128(l2,6*2);
t2 = _mm_slli_si128(t2,5*2);
t1 = _mm_adds_epi16(t1,t2);
t2 = _mm_slli_si128(l3,7*2);
t1 = _mm_adds_epi16(t1,t2);
t1 = _mm_mulhi_epi16(t1, gD1);
o128 = _mm_adds_epi16(o128,t1);

// Rotate the three phase-shifted gain vectors so the filter taps stay
// aligned with the R,G,B interleave as we advance 8 shorts per iteration.
t1 = gA1; gA1 = gA2; gA2 = gA3; gA3 = t1;
t1 = gB1; gB1 = gB2; gB2 = gB3; gB3 = t1;
t1 = gC1; gC1 = gC2; gC2 = gC3; gC3 = t1;
t1 = gD1; gD1 = gD2; gD2 = gD3; gD3 = t1;

l1 = l2;
l2 = l3;
l3 = _mm_loadu_si128(line128++);
if(shift)
{
	l3 = _mm_srli_epi16(l3,3); //13-bit unsigned
	// saturating add/sub pair clamps to 12 bits before restoring range
	o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
	o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x0fff));
	o128 = _mm_slli_epi16(o128,4);
}
else
{
	// upper limit to 32767
	o128 = _mm_adds_epi16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
	o128 = _mm_subs_epu16(o128, _mm_set1_epi16(0x7fff - 0x3fff));
	o128 = _mm_slli_epi16(o128,1);
}
_mm_storeu_si128(outline128++, o128);
}
}

/*
 * HistogramLine
 *
 * Accumulate one decoded scanline into the analysis tools attached to the
 * decoder: per-channel RGB histograms (histR/histG/histB), per-column RGB
 * waveform monitors (waveR/waveG/waveB) and a UV vectorscope (scopeUV),
 * for a wide range of packed pixel formats.
 *
 *   decoder    - owning decoder; statistics land in decoder->tools
 *   sbase      - base pointer of the scanline (reinterpreted per format)
 *   width      - line width in pixels
 *   format     - pixel format of the data at sbase
 *   whitepoint - 13 selects the 13-bit white-point interpretation
 *                (RG64 -> W13A, everything else -> WP13)
 *
 * The line is subsampled by 'step' (powers of two) so at most 360 columns
 * feed the waveform display.  All samples are reduced to 8 bits before
 * binning; U/V are derived with fixed-point (>>13) matrices.
 * Does nothing when no tools handle is attached.
 */
void HistogramLine(DECODER *decoder, unsigned short *sbase, int width, int format, int whitepoint)
{
	int x,val,ypos=0,upos=1,vpos=3;
	int step = 1,pos=0;
	short *ssbase = (short *)sbase;       // signed 16-bit view (WP13/W13A)
	uint32_t *lbase = (uint32_t *)sbase;  // 32-bit packed view (10-bit formats)
	ToolsHandle *tools = decoder->tools;
	int scaledvectorscope = 0;
	if(tools == NULL)
		return;
	if(whitepoint == 13)
	{
		if(format == DECODED_FORMAT_RG64)
			format = DECODED_FORMAT_W13A;
		else
			format = DECODED_FORMAT_WP13;
	}
	// coarsen the horizontal sampling until the waveform fits 360 columns
	while(width/step > 360)
	{
		step*=2;
	}
	tools->waveformWidth = width/step;
	decoder->tools->blurUVdone = 0;
	switch(format & 0xffffff)
	{
	case DECODED_FORMAT_WP13:
		// signed 13-bit white point RGB, 3 shorts per pixel
		decoder->tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int32_t R,G,B,U,V;
			R = ssbase[0]>>5;
			G = ssbase[1]>>5;
			B = ssbase[2]>>5;
			if(R > 255) R = 255;
			if(R < 0) R = 0;
			if(G > 255) G = 255;
			if(G < 0) G = 0;
			if(B > 255) B = 255;
			if(B < 0) B = 0;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0;
			if(U>255) U=255;
			if(V<0) V=0;
			if(V>255) V=255;
			tools->scopeUV[U][V]++;
			ssbase += step*3;
		}
		break;
	case DECODED_FORMAT_W13A:
		// signed 13-bit white point RGBA, 4 shorts per pixel (alpha ignored)
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int32_t R,G,B,U,V;
			R = ssbase[0]>>5;
			G = ssbase[1]>>5;
			B = ssbase[2]>>5;
			if(R > 255) R = 255;
			if(R < 0) R = 0;
			if(G > 255) G = 255;
			if(G < 0) G = 0;
			if(B > 255) B = 255;
			if(B < 0) B = 0;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0;
			if(U>255) U=255;
			if(V<0) V=0;
			if(V>255) V=255;
			tools->scopeUV[U][V]++;
			ssbase += step*4;
		}
		break;
	case DECODED_FORMAT_RG48:
		// unsigned 16-bit RGB, 3 shorts per pixel; >>8 cannot exceed 255
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int32_t R,G,B,U,V;
			R = sbase[0]>>8;
			G = sbase[1]>>8;
			B = sbase[2]>>8;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0;
			if(U>255) U=255;
			if(V<0) V=0;
			if(V>255) V=255;
			tools->scopeUV[U][V]++;
			sbase += step*3;
		}
		break;
	case DECODED_FORMAT_AB10:
	case DECODED_FORMAT_RG30:
		// 10-bit RGB packed in 32-bit words; top 8 of each field via shifts
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int32_t R,G,B,U,V;
			val = lbase[x];
			R = (val>>22)&0xff;
			G = (val>>12)&0xff;
			B = (val>>02)&0xff;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0;
			if(U>255) U=255;
			if(V<0) V=0;
			if(V>255) V=255;
			tools->scopeUV[U][V]++;
		}
		break;
	case DECODED_FORMAT_AR10:
		// same packing as above with R and B fields swapped
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int32_t R,G,B,U,V;
			val = lbase[x];
			B = (val>>22)&0xff;
			G = (val>>12)&0xff;
			R = (val>>02)&0xff;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0;
			if(U>255) U=255;
			if(V<0) V=0;
			if(V>255) V=255;
			tools->scopeUV[U][V]++;
		}
		break;
	case DECODED_FORMAT_R210:
		// big-endian 10-bit packing; byte-swap each word before extracting
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int32_t R,G,B,U,V;
			val = SwapInt32BtoN(lbase[x]);
			R = (val>>22)&0xff;
			G = (val>>12)&0xff;
			B = (val>>02)&0xff;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0;
			if(U>255) U=255;
			if(V<0) V=0;
			if(V>255) V=255;
			tools->scopeUV[U][V]++;
		}
		break;
	case DECODED_FORMAT_DPX0:
		// DPX 10-bit packing (fields at bits 22/12/2, 2 LSB pad): byte-swapped,
		// top 8 bits of each component taken via >>24 / >>14 / >>4
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int32_t R,G,B,U,V;
			val = SwapInt32BtoN(lbase[x]);
			R = (val>>24)&0xff;
			G = (val>>14)&0xff;
			B = (val>>04)&0xff;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0;
			if(U>255) U=255;
			if(V<0) V=0;
			if(V>255) V=255;
			tools->scopeUV[U][V]++;
		}
		break;
	case DECODED_FORMAT_RG64:
	case DECODED_FORMAT_B64A:
		// 16-bit ARGB, 4 shorts per pixel; components at offsets 1..3
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int32_t R,G,B,U,V;
			R = sbase[1]>>8;
			G = sbase[2]>>8;
			B = sbase[3]>>8;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0;
			if(U>255) U=255;
			if(V<0) V=0;
			if(V>255) V=255;
			tools->scopeUV[U][V]++;
			sbase += step*4;
		}
		break;
	case COLOR_FORMAT_UYVY:
		// UYVY just reorders the bytes; fall through to the shared 4:2:2 path
		ypos=1,upos=0,vpos=2;
	case DECODED_FORMAT_CbYCrY_8bit: // CMD: 20100109
	case COLOR_FORMAT_YUYV:
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int Y,U,V,R,G,B;
			uint8_t *bptr = (uint8_t *)sbase;
			bptr += x * 2;
			Y = bptr[ypos]-16;
			U = bptr[upos]-128;
			Y+= bptr[ypos+2]-16;   // average the luma pair of the 4:2:2 macro-pixel
			Y>>=1;
			V = bptr[vpos]-128;
			R = (9535*Y + 14688*V)>>13; //13-bit white
			G = (9535*Y - 4375*V - 1745*U)>>13;
			B = (9535*Y + 17326*U)>>13;
			//TODO much -20 to 120 RGB range.
			if(R > 255) R = 255;
			if(R < 0) R = 0;
			if(G > 255) G = 255;
			if(G < 0) G = 0;
			if(B > 255) B = 255;
			if(B < 0) B = 0;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			if(scaledvectorscope)
			{
				U *= 255; U /= 314;
				V *= 255; V /= 244;
			}
			//* 255.0/314.0
			//* 255.0/244.0
			U += 128;
			V += 128;
			if(U<0) U=0;
			if(U>255) U=255;
			if(V<0) V=0;
			if(V>255) V=255;
			tools->scopeUV[U][V]++;
		}
		break;
	case COLOR_FORMAT_YU64:
		// 16-bit 4:2:2, 4 shorts per macro-pixel
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int Y,U,V,R,G,B;
			uint8_t *bptr = (uint8_t *)sbase;
			bptr += x * 4;
			bptr++; //read only the high byte out of the 16-bit
			Y = bptr[0]-16;
			V = bptr[2]-128;
			Y+= bptr[4]-16;
			Y>>=1;
			U = bptr[6]-128;
			R = (9535*Y + 14688*V)>>13; //13-bit white
			G = (9535*Y - 4375*V - 1745*U)>>13;
			B = (9535*Y + 17326*U)>>13;
			if(R > 255) R = 255;
			if(R < 0) R = 0;
			if(G > 255) G = 255;
			if(G < 0) G = 0;
			if(B > 255) B = 255;
			if(B < 0) B = 0;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			if(scaledvectorscope)
			{
				U *= 255; U /= 314;
				V *= 255; V /= 244;
			}
			U += 128;
			V += 128;
			if(U<0) U=0;
			if(U>255) U=255;
			if(V<0) V=0;
			if(V>255) V=255;
			tools->scopeUV[U][V]++;
		}
		break;
	case COLOR_FORMAT_V210:
		// 10-bit 4:2:2, six pixels packed in four 32-bit words; the switch
		// locates Y/U/V for each position within the 6-pixel group
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int Y,U,V,R,G,B;
			uint32_t *lptr = (uint32_t *)sbase;
			lptr += (x/6)*4;
			switch(x % 6)
			{
			case 0:
				V = ((*lptr>>02) & 0xff) - 128;
				Y = ((*lptr>>12) & 0xff) - 16;
				U = ((*lptr>>22) & 0xff) - 128;
				lptr++;
				Y+= ((*lptr>>02) & 0xff) - 16;
				Y>>=1;
				break;
			case 1:
				lptr++;
				Y = ((*lptr>>02) & 0xff) - 16;
				V = ((*lptr>>12) & 0xff) - 128;
				Y+= ((*lptr>>22) & 0xff) - 16;
				Y>>=1;
				lptr--;
				U = ((*lptr>>22) & 0xff) - 128;
				break;
			case 2:
				lptr++;
				Y = ((*lptr>>22) & 0xff) - 16;
				lptr++;
				U = ((*lptr>>02) & 0xff) - 128;
				Y+= ((*lptr>>12) & 0xff) - 16;
				Y>>=1;
				V = ((*lptr>>22) & 0xff) - 128;
				break;
			case 3:
				lptr++;
				V = ((*lptr>>12) & 0xff) - 128;
				lptr++;
				U = ((*lptr>>02) & 0xff) - 128;
				Y = ((*lptr>>12) & 0xff) - 16;
				lptr++;
				Y+= ((*lptr>>02) & 0xff) - 16;
				Y>>=1;
				break;
			case 4:
				lptr+=2;
				V = ((*lptr>>22) & 0xff) - 128;
				lptr++;
				Y = ((*lptr>>02) & 0xff) - 16;
				U = ((*lptr>>12) & 0xff) - 128;
				Y+= ((*lptr>>22) & 0xff) - 16;
				Y>>=1;
				break;
			case 5:
				lptr+=2;
				V = ((*lptr>>22) & 0xff) - 128;
				lptr++;
				U = ((*lptr>>12) & 0xff) - 128;
				Y = ((*lptr>>22) & 0xff) - 16;
				lptr++;
				Y+= ((*lptr>>02) & 0xff) - 16;
				Y>>=1;
				break;
			}
			R = (9535*Y + 14688*V)>>13; //13-bit white
			G = (9535*Y - 4375*V - 1745*U)>>13;
			B = (9535*Y + 17326*U)>>13;
			if(R > 255) R = 255;
			if(R < 0) R = 0;
			if(G > 255) G = 255;
			if(G < 0) G = 0;
			if(B > 255) B = 255;
			if(B < 0) B = 0;
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			if(scaledvectorscope)
			{
				U *= 255; U /= 314;
				V *= 255; V /= 244;
			}
			U += 128;
			V += 128;
			if(U<0) U=0;
			if(U>255) U=255;
			if(V<0) V=0;
			if(V>255) V=255;
			tools->scopeUV[U][V]++;
		}
		break;
	case COLOR_FORMAT_RGB24:
		// 8-bit BGR byte order, 3 bytes per pixel
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int R,G,B,U,V;
			uint8_t *bptr = (uint8_t *)sbase;
			bptr += x * 3;
			R = bptr[2];
			G = bptr[1];
			B = bptr[0];
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0;
			if(U>255) U=255;
			if(V<0) V=0;
			if(V>255) V=255;
			tools->scopeUV[U][V]++;
		}
		break;
	case COLOR_FORMAT_RGB32:
		// 8-bit BGRA byte order, 4 bytes per pixel (alpha ignored)
		tools->histogram = 1;
		for(x=0,pos=0; x<width; x+=step,pos++)
		{
			int R,G,B,U,V;
			uint8_t *bptr = (uint8_t *)sbase;
			bptr += x * 4;
			R = bptr[2];
			G = bptr[1];
			B = bptr[0];
			tools->histR[R]++;
			tools->histG[G]++;
			tools->histB[B]++;
			tools->waveR[pos][R]++;
			tools->waveG[pos][G]++;
			tools->waveB[pos][B]++;
			//Y = (((1499 * R) + (5030 * G) + (508 * B))>>13) + 16;
			if(scaledvectorscope)
			{
				U = ((((-672* R) - (2249 * G) + (2920* B))>>13)) + 128; //* 255.0/314.0
				V = ((((3758* R) - (3416 * G) - (343 * B))>>13)) + 128; //* 255.0/244.0
			}
			else
			{
				U = ((((-827* R) - (2769 * G) + (3596* B))>>13)) + 128;
				V = ((((3596* R) - (3269 * G) - (328 * B))>>13)) + 128;
			}
			if(U<0) U=0;
			if(U>255) U=255;
			if(V<0) V=0;
			if(V>255) V=255;
			tools->scopeUV[U][V]++;
		}
		break;
	case COLOR_FORMAT_BYR2:
	case COLOR_FORMAT_BYR4:
		//do nothing
		break;
	default:
		assert(0);
#if (0 && DEBUG)
		fprintf(stderr,"decoder.HistogramLine: Unsupported pixel format\n");
#endif
		break;
	}
}

/*
 * GhostBust
 *
 * Reduce stereo crosstalk ("ghosting") between the left and right eye
 * scanlines, in place.  Each 16-bit component is reduced to 10 bits and
 * squared (20-bit, an approximate linearization - see the note on the
 * disabled SSE path below), then a leak fraction of the opposite eye is
 * subtracted while a matching amount of white is added:
 *
 *     nL = L^2*(1023-leak) + leak*max - R^2*leak      (then >>10, clamp)
 *
 * The result is converted back with a lazily-filled square-root lookup
 * table (unfilled entries hold the 65535 sentinel).
 *
 *   sbaseL/sbaseR  - left/right eye lines, 3 shorts per pixel, modified
 *   width          - pixels per line
 *   ileakL/ileakR  - 16-bit leak amounts, reduced to 10-bit here (>>6)
 *
 * No-op when the decoder has no sqrttable allocated.
 * NOTE(review): the lazy sqrttable fill is unsynchronized - if lines are
 * processed on multiple threads this races (benign: same value written);
 * confirm against the decoder's threading model.
 */
void GhostBust(DECODER *decoder, unsigned short *sbaseL, unsigned short *sbaseR, int width, int ileakL, int ileakR)
{
#if 1
	int x,RL,GL,BL,RR,GR,BR;
	int nRL,nGL,nBL;
	int nRR,nGR,nBR;
	int max = 1024*1024-1;
	unsigned short *sqrttable = decoder->sqrttable;
	ileakL>>=6;
	ileakR>>=6;
	if(sqrttable == NULL) return;
	for(x=0;x<width;x++)
	{
		RL = sbaseL[0]>>6;
		GL = sbaseL[1]>>6; //10-bit
		BL = sbaseL[2]>>6;
		RL*=RL;
		GL*=GL; //20-bit
		BL*=BL;
		RR = sbaseR[0]>>6;
		GR = sbaseR[1]>>6; //10-bit
		BR = sbaseR[2]>>6;
		RR*=RR;
		GR*=GR; //20-bit
		BR*=BR;
		nRL = RL*(1023-ileakL) + ileakL*max - RR*ileakL; //30-bit
		nGL = GL*(1023-ileakL) + ileakL*max - GR*ileakL;
		nBL = BL*(1023-ileakL) + ileakL*max - BR*ileakL;
		nRL >>= 10; //20-bit
		nGL >>= 10;
		nBL >>= 10;
		if(nRL>max) nRL=max;
		if(nRL<0) nRL=0;
		if(nGL>max) nGL=max;
		if(nGL<0) nGL=0;
		if(nBL>max) nBL=max;
		if(nBL<0) nBL=0;
		// fill sqrt LUT entries on demand (65535 == not yet computed)
		if(sqrttable[nRL] == 65535) sqrttable[nRL] = (int)sqrt(nRL);
		if(sqrttable[nGL] == 65535) sqrttable[nGL] = (int)sqrt(nGL);
		if(sqrttable[nBL] == 65535) sqrttable[nBL] = (int)sqrt(nBL);
		sbaseL[0] = sqrttable[nRL]<<6;
		sbaseL[1] = sqrttable[nGL]<<6;
		sbaseL[2] = sqrttable[nBL]<<6;
		sbaseL += 3;
		nRR = RR*(1023-ileakR) + ileakR*max - RL*ileakR; //30-bit
		nGR = GR*(1023-ileakR) + ileakR*max - GL*ileakR;
		nBR = BR*(1023-ileakR) + ileakR*max - BL*ileakR;
		nRR >>= 10; //20-bit
		nGR >>= 10;
		nBR >>= 10;
		if(nRR>max) nRR=max;
		if(nRR<0) nRR=0;
		if(nGR>max) nGR=max;
		if(nGR<0) nGR=0;
		if(nBR>max) nBR=max;
		if(nBR<0) nBR=0;
		if(sqrttable[nRR] == 65535) sqrttable[nRR] = (int)sqrt(nRR);
		if(sqrttable[nGR] == 65535) sqrttable[nGR] = (int)sqrt(nGR);
		if(sqrttable[nBR] == 65535) sqrttable[nBR] = (int)sqrt(nBR);
		sbaseR[0] = sqrttable[nRR]<<6;
		sbaseR[1] = sqrttable[nGR]<<6;
		sbaseR[2] = sqrttable[nBR]<<6;
		sbaseR += 3;
	}
#else
	// works and fast but has not image linearization, not as good
	// NOTE(review): this disabled branch uses `ileak`, which is not a
	// parameter of this function - it would not compile if re-enabled.
	__m128i *ptrL = (__m128i *)sbaseL;
	__m128i *ptrR = (__m128i *)sbaseR;
	__m128i t,L,R,nL,nR;
	int x,width8 = (width*3) & ~7;
	__m128i white_epi16 = _mm_set1_epi16(32767);
	__m128i leak_epi16 = _mm_set1_epi16(ileak>>1);
	__m128i oneNegLeak_epi16 = _mm_set1_epi16(32767-(ileak>>1));
	for(x=0;x<width8;x+=8)
	{
		L = _mm_load_si128(ptrL);
		R = _mm_load_si128(ptrR);
		L = _mm_srli_epi16(L,1); //15-bit
		R = _mm_srli_epi16(R,1); //15-bit
		nL = _mm_mulhi_epi16(L, oneNegLeak_epi16);
		t = _mm_mulhi_epi16(white_epi16, leak_epi16);
		nL = _mm_adds_epi16(nL, t);
		t = _mm_mulhi_epi16(R, leak_epi16);
		nL = _mm_subs_epu16(nL, t);
		nR = _mm_mulhi_epi16(R, oneNegLeak_epi16);
		t = _mm_mulhi_epi16(white_epi16, leak_epi16);
		nR = _mm_adds_epi16(nR, t);
		t = _mm_mulhi_epi16(L, leak_epi16);
		nR = _mm_subs_epu16(nR, t);
		L = _mm_slli_epi16(nL,2);
		R = _mm_slli_epi16(nR,2);
		_mm_store_si128(ptrL++, L);
		_mm_store_si128(ptrR++, R);
	}
#endif
}

/*
 * GhostBustRC
 *
 * Single-buffer ghost-busting variant; the channel arithmetic pairs red
 * against the (G+B)/2 average, which matches a red/cyan anaglyph layout
 * (presumably - confirm against the callers).  Same squared-domain leak
 * subtraction and lazy sqrt LUT as GhostBust above, in place on one
 * interleaved RGB line (3 shorts per pixel).
 *
 *   ileakL - leak applied to R (against cyan), reduced to 10-bit (>>6)
 *   ileakR - leak applied to G and B (against R), reduced to 10-bit (>>6)
 */
void GhostBustRC(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
#if 1
	int x,R,G,B;
	int nR,nG,nB;
	int max = 1024*1024-1;
	unsigned short *sqrttable = decoder->sqrttable;
	ileakL>>=6;
	ileakR>>=6;
	if(sqrttable == NULL) return;
	for(x=0;x<width;x++)
	{
		R = sbase[0]>>6;
		G = sbase[1]>>6; //10-bit
		B = sbase[2]>>6;
		R*=R;
		G*=G; //20-bit
		B*=B;
		nR = R*(1023-ileakL) + ileakL*max - ((G+B)>>1)*ileakL; //30-bit
		nG = G*(1023-ileakR) + ileakR*max - R*ileakR;
		nB = B*(1023-ileakR) + ileakR*max - R*ileakR;
		nR >>= 10; //20-bit
		nG >>= 10;
		nB >>= 10;
		if(nR>max) nR=max;
		if(nR<0) nR=0;
		if(nG>max)
nG=max;
		if(nG<0) nG=0;
		if(nB>max) nB=max;
		if(nB<0) nB=0;
		// fill sqrt LUT entries on demand (65535 == not yet computed)
		if(sqrttable[nR] == 65535) sqrttable[nR] = (int)sqrt(nR);
		if(sqrttable[nG] == 65535) sqrttable[nG] = (int)sqrt(nG);
		if(sqrttable[nB] == 65535) sqrttable[nB] = (int)sqrt(nB);
		sbase[0] = sqrttable[nR]<<6;
		sbase[1] = sqrttable[nG]<<6;
		sbase[2] = sqrttable[nB]<<6;
		sbase += 3;
	}
#elif 0
	// disabled floating-point reference implementation of the same math
	int x;
	float R,G,B;
	float nR,nG,nB;
	float fleakL = (float)ileakL / 65535.0;
	float fleakR = (float)ileakR / 65535.0;
	for(x=0;x<width;x++)
	{
		R = sbase[0];
		G = sbase[1];
		B = sbase[2];
		R /= 65535.0;
		G /= 65535.0;
		B /= 65535.0;
		R *= R;
		G *= G;
		B *= B;
		nR = R*(1.0-fleakL) + fleakL - (G+B)*0.5*fleakL;
		nG = G*(1.0-fleakR) + fleakR - R*fleakR;
		nB = B*(1.0-fleakR) + fleakR - R*fleakR;
		if(nR<0) nR=0;
		if(nG<0) nG=0;
		if(nB<0) nB=0;
		nR = sqrt(nR);
		nG = sqrt(nG);
		nB = sqrt(nB);
		sbase[0] = nR * 65535.0;
		sbase[1] = nG * 65535.0;
		sbase[2] = nB * 65535.0;
		sbase += 3;
	}
#elif 0
	// disabled SSE implementation, two RGB pixels per iteration
	__m128i RGBRGB,rgb_epi32,RGB1,RGB2;
	__m128i zero_epi128 = _mm_setzero_si128();
	int x,width6 = (width*3) / 6 * 6;
	__m128 white_ps = _mm_set1_ps(1.0);
	__m128 mul_neg_leak_ps = _mm_set_ps(1.0 - ((float)ileakL/65536.0), 1.0 - ((float)ileakR/65536.0), 1.0 - ((float)ileakR/65536.0), 1.0 - ((float)ileakL/65536.0));
	__m128 leak_ps = _mm_set_ps((float)ileakL/65536.0, (float)ileakR/65536.0, (float)ileakR/65536.0, (float)ileakL/65536.0);
	__m128 scale_ps = _mm_set1_ps(65535.0);
	__m128 scalehalf_ps = _mm_set1_ps(32767.0);
	__m128 zero_ps = _mm_set1_ps(0.0);
	__m128 rgb_ps, alt_rgb_ps;
	__m128i sub_epi32;
	__m128 sub_ps;
	for(x=0;x<width6;x+=6) // two RGB pairs
	{
		int R,G,B;
		RGBRGB = _mm_loadu_si128((__m128i *)sbase);
		R = _mm_extract_epi16(RGBRGB, 0);
		G = _mm_extract_epi16(RGBRGB, 1);
		B = _mm_extract_epi16(RGBRGB, 2);
		G+=B;
		G>>=1;
		sub_epi32 = _mm_set_epi32(G,R,R,G);
		sub_ps = _mm_cvtepi32_ps(sub_epi32); // range 0 to 65535.0
		sub_ps = _mm_div_ps(sub_ps, scale_ps); // range 0 to 1.0
		sub_ps = _mm_mul_ps(sub_ps, sub_ps); // square
		rgb_epi32 = _mm_unpacklo_epi16(RGBRGB, zero_epi128);
		rgb_ps = _mm_cvtepi32_ps(rgb_epi32); // range 0 to 65535.0
		rgb_ps = _mm_div_ps(rgb_ps, scale_ps); // range 0 to 1.0
		rgb_ps = _mm_mul_ps(rgb_ps, rgb_ps); // square
		rgb_ps = _mm_mul_ps(rgb_ps, mul_neg_leak_ps); // [R*(1.0-fleakL)] + fleakL - (G+B)*0.5*fleakL;
		rgb_ps = _mm_add_ps(rgb_ps, leak_ps); // R*(1.0-fleakL) [+ fleakL] - (G+B)*0.5*fleakL;
		sub_ps = _mm_mul_ps(sub_ps, leak_ps); // R*(1.0-fleakL) + fleakL - [(G+B)*0.5*fleakL;]
		rgb_ps = _mm_sub_ps(rgb_ps, sub_ps); // R*(1.0-fleakL) + fleakL] [- (G+B)*0.5*fleakL;]
		rgb_ps = _mm_max_ps(rgb_ps, zero_ps); // if(x < 0) x= 0;
		rgb_ps = _mm_sqrt_ps(rgb_ps); // sqrt()
		rgb_ps = _mm_mul_ps(rgb_ps, scalehalf_ps); // range 0 to 32767
		RGB1 = _mm_cvtps_epi32(rgb_ps);
		RGB1 = _mm_packs_epi32 (RGB1, zero_epi128);
		RGB1 = _mm_slli_si128(RGB1, 10);
		RGB1 = _mm_srli_si128(RGB1, 10);
		RGBRGB = _mm_srli_si128(RGBRGB, 6);
		R = _mm_extract_epi16(RGBRGB, 0);
		G = _mm_extract_epi16(RGBRGB, 1);
		B = _mm_extract_epi16(RGBRGB, 2);
		G+=B;
		G>>=1;
		sub_epi32 = _mm_set_epi32(G,R,R,G);
		sub_ps = _mm_cvtepi32_ps(sub_epi32); // range 0 to 65535.0
		sub_ps = _mm_div_ps(sub_ps, scale_ps); // range 0 to 1.0
		sub_ps = _mm_mul_ps(sub_ps, sub_ps); // square
		rgb_epi32 = _mm_unpacklo_epi16(RGBRGB, zero_epi128);
		rgb_ps = _mm_cvtepi32_ps(rgb_epi32); // range 0 to 65535.0
		rgb_ps = _mm_div_ps(rgb_ps, scale_ps); // range 0 to 1.0
		rgb_ps = _mm_mul_ps(rgb_ps, rgb_ps); // square
		rgb_ps = _mm_mul_ps(rgb_ps, mul_neg_leak_ps); // [R*(1.0-fleakL)] + fleakL - (G+B)*0.5*fleakL;
		rgb_ps = _mm_add_ps(rgb_ps, leak_ps); // R*(1.0-fleakL) [+ fleakL] - (G+B)*0.5*fleakL;
		sub_ps = _mm_mul_ps(sub_ps, leak_ps); // R*(1.0-fleakL) + fleakL - [(G+B)*0.5*fleakL;]
		rgb_ps = _mm_sub_ps(rgb_ps, sub_ps); // R*(1.0-fleakL) + fleakL] [- (G+B)*0.5*fleakL;]
		rgb_ps = _mm_max_ps(rgb_ps, zero_ps); // if(x < 0) x= 0;
		rgb_ps = _mm_sqrt_ps(rgb_ps); // sqrt()
		rgb_ps = _mm_mul_ps(rgb_ps, scalehalf_ps); // range 0 to 32767
		RGB2 = _mm_cvtps_epi32(rgb_ps);
		RGB2 = _mm_packs_epi32 (RGB2, zero_epi128);
		RGB2 = _mm_slli_si128(RGB2, 6);
		RGB1 = _mm_adds_epi16(RGB1, RGB2);
		RGB1 = _mm_slli_epi16(RGB1, 1);
		RGB1 = _mm_slli_si128(RGB1, 4);
		RGB1 = _mm_srli_si128(RGB1, 4);
		RGBRGB = _mm_srli_si128(RGBRGB, 6);
		RGBRGB = _mm_slli_si128(RGBRGB, 12);
		RGBRGB = _mm_adds_epi16(RGB1, RGBRGB);
		_mm_storeu_si128((__m128i *)sbase, RGBRGB);
		sbase += 6;
	}
#endif
}

/*
 * GhostBustAB
 *
 * Ghost-busting for a layout where R and G leak against B (ileakL) and B
 * leaks against the (R+G)/2 average (ileakR) - consistent with an
 * amber/blue anaglyph (presumably - confirm against the callers).
 * Same squared-domain subtraction and lazy sqrt LUT as GhostBust; works
 * in place on one interleaved RGB line, 3 shorts per pixel.
 */
void GhostBustAB(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
	int x,R,G,B;
	int nR,nG,nB;
	int max = 1024*1024-1;
	unsigned short *sqrttable = decoder->sqrttable;
	ileakL>>=6;
	ileakR>>=6;
	if(sqrttable == NULL) return;
	for(x=0;x<width;x++)
	{
		R = sbase[0]>>6;
		G = sbase[1]>>6; //10-bit
		B = sbase[2]>>6;
		R*=R;
		G*=G; //20-bit
		B*=B;
		nR = R*(1023-ileakL) + ileakL*max - B*ileakL;
		nG = G*(1023-ileakL) + ileakL*max - B*ileakL;
		nB = B*(1023-ileakR) + ileakR*max - ((R+G)>>1)*ileakR;
		nR >>= 10; //20-bit
		nG >>= 10;
		nB >>= 10;
		if(nR>max) nR=max;
		if(nR<0) nR=0;
		if(nG>max) nG=max;
		if(nG<0) nG=0;
		if(nB>max) nB=max;
		if(nB<0) nB=0;
		// fill sqrt LUT entries on demand (65535 == not yet computed)
		if(sqrttable[nR] == 65535) sqrttable[nR] = (int)sqrt(nR);
		if(sqrttable[nG] == 65535) sqrttable[nG] = (int)sqrt(nG);
		if(sqrttable[nB] == 65535) sqrttable[nB] = (int)sqrt(nB);
		sbase[0] = sqrttable[nR]<<6;
		sbase[1] = sqrttable[nG]<<6;
		sbase[2] = sqrttable[nB]<<6;
		sbase += 3;
	}
}

/*
 * GhostBustGM
 *
 * Ghost-busting for a layout where R and B leak against G (ileakL) and G
 * leaks against the (R+B)/2 average (ileakR) - consistent with a
 * green/magenta anaglyph (presumably - confirm against the callers).
 * Same scheme as GhostBustAB; in place, 3 shorts per pixel.
 */
void GhostBustGM(DECODER *decoder, unsigned short *sbase, int width, int ileakL, int ileakR)
{
	int x,R,G,B;
	int nR,nG,nB;
	int max = 1024*1024-1;
	unsigned short *sqrttable = decoder->sqrttable;
	ileakL>>=6;
	ileakR>>=6;
	if(sqrttable == NULL) return;
	for(x=0;x<width;x++)
	{
		R = sbase[0]>>6;
		G = sbase[1]>>6; //10-bit
		B = sbase[2]>>6;
		R*=R;
		G*=G; //20-bit
		B*=B;
		nR = R*(1023-ileakL) + ileakL*max - G*ileakL;
		nG = G*(1023-ileakR) + ileakR*max - ((R+B)>>1)*ileakR;
		nB = B*(1023-ileakL) + ileakL*max - G*ileakL;
		nR >>= 10; //20-bit
		nG >>= 10;
		nB >>= 10;
		if(nR>max) nR=max;
		if(nR<0) nR=0;
		if(nG>max) nG=max;
		if(nG<0) nG=0;
		if(nB>max) nB=max;
		if(nB<0) nB=0;
		if(sqrttable[nR] == 65535) sqrttable[nR] = (int)sqrt(nR);
		if(sqrttable[nG] == 65535) sqrttable[nG] =
(int)sqrt(nG); if(sqrttable[nB] == 65535) sqrttable[nB] = (int)sqrt(nB); sbase[0] = sqrttable[nR]<<6; sbase[1] = sqrttable[nG]<<6; sbase[2] = sqrttable[nB]<<6; sbase += 3; } } void ProcessLine3D(DECODER *decoder, uint8_t *buffer, int bufferremain, uint8_t *output, int pitch, uint8_t *source_buffer, int source_pitch, int channel_offset, int y, int blank) { uint16_t *scratchline,*scratchline2,*scratchline3; uint16_t *sptr; uint16_t *srclineA,*srclineB; uint16_t *dstlineA,*dstlineB; int x,y2; int width = decoder->frame.width; int height = decoder->frame.height; int skip = 3; int sskip = 3; uint8_t *bptr1; uint8_t *bptr2; uint8_t *baseptr1; uint8_t *baseptr2; float windowMaskL = decoder->cfhddata.channel[0].FloatingWindowMaskL; float windowMaskR = decoder->cfhddata.channel[0].FloatingWindowMaskR; float frameTilt = decoder->cfhddata.channel[0].FrameTilt; float horizOffset = decoder->cfhddata.channel[1].HorizontalOffset; float horizOffsetR = decoder->cfhddata.channel[2].HorizontalOffset; float rotOffset = decoder->cfhddata.channel[1].RotationOffset; float rotOffsetR = decoder->cfhddata.channel[2].RotationOffset; float horizOffsetStep = 0; float horizOffsetStepR = 0; int flip1=0,flip2=0; int channel_flip = decoder->cfhddata.channel_flip; int source_pitch1 = source_pitch; int source_pitch2 = source_pitch; uint8_t *outputline = output+y*pitch; uint8_t *outputline2 = NULL; float horizOffsetBase; float rotOffsetBase; float horizOffsetBaseR; float rotOffsetBaseR; int formatdone = 0; float xmin = decoder->cfhddata.channel[0].FrameMask.topLftX; float xmax = decoder->cfhddata.channel[0].FrameMask.topRgtX; //float ymin = decoder->cfhddata.channel[0].FrameMask.topLftY; float ymax = decoder->cfhddata.channel[0].FrameMask.botLftY; float zoom; float zoomR; float frameZoom1 = decoder->cfhddata.channel[1].FrameZoom; float frameZoom2 = decoder->cfhddata.channel[2].FrameZoom; float frameAutoZoom = decoder->cfhddata.channel[0].FrameAutoZoom; float frameDiffZoom1 = 
decoder->cfhddata.channel[1].FrameDiffZoom; float frameDiffZoom2 = decoder->cfhddata.channel[2].FrameDiffZoom; float frameHDynamic = decoder->cfhddata.FrameHDynamic; float frameHDynCenter = decoder->cfhddata.FrameHDynCenter; float frameHDynWidth = decoder->cfhddata.FrameHDynWidth; float frameHScale = decoder->cfhddata.FrameHScale; int alphachannel = 0; int whitepoint = 16; float blursharpenL = decoder->cfhddata.channel[1].user_blur_sharpen; float blursharpenR = decoder->cfhddata.channel[2].user_blur_sharpen; float vignette = decoder->cfhddata.channel[0].user_vignette_start; int flip_LR = 0; float vig_r1; float vig_r2; float vig_gain; if(blank) // blankline, no shifts required { windowMaskL = 0; windowMaskR = 0; frameTilt = 0; horizOffset = 0; horizOffsetR = 0; rotOffset = 0; rotOffsetR = 0; frameZoom1 = 1.0; frameZoom2 = 1.0; frameAutoZoom = 1.0; frameDiffZoom1 = 1.0; frameDiffZoom2 = 1.0; frameHScale = 1.0; frameHDynamic = 1.0; frameHDynCenter = 0.5; frameHDynWidth = 0.0; } if( decoder->StereoBufferFormat == DECODED_FORMAT_RG64 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A || decoder->StereoBufferFormat == DECODED_FORMAT_RGB32) alphachannel = 1; if(xmax == 0.0) xmax = 1.0; if(ymax == 0.0) ymax = 1.0; if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) { width *= 2; } if(decoder->source_channels < 2) // 2D { channel_flip &= 0x3; channel_flip |= channel_flip<<2; decoder->cfhddata.channel_flip = channel_flip; } if(!(decoder->cfhddata.process_path_flags & PROCESSING_COLORMATRIX) || decoder->frame.resolution == DECODED_RESOLUTION_QUARTER || decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY || decoder->frame.resolution == DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED) { blursharpenL = 0.0; blursharpenR = 0.0; } if(!(decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION)) { horizOffset = rotOffset = 0; horizOffsetR = rotOffsetR = 0; frameTilt = 0; frameAutoZoom = 1.0; frameDiffZoom1 = 1.0; frameDiffZoom2 = 1.0; } 
if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS)) { channel_flip = 0; } if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) { horizOffset += decoder->cfhddata.FrameOffsetX; horizOffsetR -= decoder->cfhddata.FrameOffsetX; frameZoom1 += frameHScale - 1.0f; frameZoom2 += frameHScale - 1.0f; if(frameHDynamic != 1.0) { frameZoom1 += 0.00001f; frameZoom2 += 0.00001f; } if(vignette != 0.0) { float vig_diag = sqrtf(1.0f + ((float)decoder->frame.height / (float) decoder->frame.width) * ((float)decoder->frame.height / (float) decoder->frame.width)); vig_r1 = (vignette+1.0f); vig_r2 = (decoder->cfhddata.channel[0].user_vignette_end+1.0f); vig_gain = decoder->cfhddata.channel[0].user_vignette_gain; vig_r1 *= vig_diag; vig_r2 *= vig_diag; } } else { frameZoom1 = 1.0f; frameZoom2 = 1.0f; vignette = 0; } zoom = frameZoom1 * frameAutoZoom * frameDiffZoom1; if(frameDiffZoom2 != 0.0) zoomR = frameZoom2 * frameAutoZoom / frameDiffZoom2; else zoomR = 0.0; if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) { if(decoder->cfhddata.InvertOffset) { rotOffset = -rotOffset; rotOffsetR = -rotOffsetR; rotOffset -= decoder->cfhddata.FrameOffsetR; rotOffsetR -= -decoder->cfhddata.FrameOffsetR; } else { rotOffset += decoder->cfhddata.FrameOffsetR; rotOffsetR += -decoder->cfhddata.FrameOffsetR; } } rotOffsetBase = rotOffset; horizOffsetBase = horizOffset; rotOffsetBaseR = rotOffsetR; horizOffsetBaseR = horizOffsetR; horizOffset -= rotOffset * 0.5f; horizOffsetStep = rotOffset / (float)height; horizOffsetR -= rotOffsetR * 0.5f; horizOffsetStepR = rotOffsetR / (float)height; horizOffset += horizOffsetStep * y; horizOffsetR += horizOffsetStepR * y; assert(bufferremain >= width * 8 * 2 * 2); baseptr1 = source_buffer; baseptr2 = source_buffer + channel_offset; if(channel_flip & 0xf) { if(channel_flip & 1) { flip1 = 1; } if(channel_flip & 4) { flip2 = 1; } } if(source_pitch1 < 0) flip_LR = 1; decoder->sharpen_flip = 0; if(channel_flip & 2) //ProcessLine3D { 
if(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1) // right channel only (stored in baseptr1) { } else { baseptr1 += source_pitch1*(height-1); source_pitch1 = -source_pitch1; decoder->sharpen_flip = 1; } } if(channel_flip & 8) { if(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1) // right channel only (stored in baseptr1) { baseptr1 += source_pitch1*(height-1); source_pitch1 = -source_pitch1; decoder->sharpen_flip = 1; } else { baseptr2 += source_pitch2*(height-1); source_pitch2 = -source_pitch2; } } bptr1 = baseptr1 + y*source_pitch1; bptr2 = baseptr2 + y*source_pitch2; y2 = y; if(decoder->channel_blend_type == BLEND_FREEVIEW) //FreeView { if(y2 < height/4) { blank = 1; y2 = 0; } else { y2 -= height/4; y2 *= 2; if(y2 >= height-1) { blank = 1; y2 = height - 2; } } bptr1 = baseptr1 + y2*source_pitch1; bptr2 = baseptr2 + y2*source_pitch2; } srclineA = (uint16_t *)bptr1; srclineB = (uint16_t *)bptr2; scratchline = (uint16_t *)buffer; scratchline2 = (uint16_t *)(buffer + width * 6 + width) /* as we pad the line */ ;; scratchline3 = (uint16_t *)(buffer + width * 6*2 + width*2) /* as we pad the line */ ; if(alphachannel) { scratchline = (uint16_t *)buffer; scratchline2 = (uint16_t *)(buffer + width * 8 + width) /* as we pad the line */ ;; scratchline3 = (uint16_t *)(buffer + width * 8*2 + width*2) /* as we pad the line */ ; } dstlineA = sptr = scratchline; dstlineB = scratchline3; switch(decoder->StereoBufferFormat) { case DECODED_FORMAT_RG64: whitepoint = 16; skip = 8; sskip = 4; break; case DECODED_FORMAT_W13A: whitepoint = 13; skip = 8; sskip = 4; break; case DECODED_FORMAT_WP13: whitepoint = 13; skip = 6; sskip = 3; break; case DECODED_FORMAT_RG48: skip = 6; sskip = 3; break; case DECODED_FORMAT_RGB32: skip = 4; break; case DECODED_FORMAT_RGB24: skip = 3; break; case DECODED_FORMAT_YUYV: skip = 2; break; } if(blank) { if(srclineA) memset(srclineA, 0, width*skip); if(srclineB && decoder->channel_decodes > 1) 
memset(srclineB, 0, width*skip); } if(blursharpenL != 0.0 || blursharpenR != 0.0) { if(decoder->channel_blend_type == BLEND_FREEVIEW || decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC || decoder->channel_blend_type == BLEND_LINE_INTERLEAVED ) { decoder->doVerticalFilter = 0; } else { decoder->doVerticalFilter = 1; } } { switch(decoder->channel_blend_type) { case BLEND_FREEVIEW: case BLEND_SIDEBYSIDE_ANAMORPHIC: //side by side if(!blank) { if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL || decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) { dstlineA = srclineA; sptr = dstlineA; if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt) { if(!alphachannel) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGB48HoriShift(decoder, srclineA, scratchline2, width/2, -horizOffset, flip1); RGB48HoriShift(decoder, srclineB, scratchline2, width/2, horizOffsetR, flip2); } else { RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width/2, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width/2, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } else { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGBA64HoriShift(decoder, srclineA, scratchline2, width/2, -horizOffset, flip1); RGBA64HoriShift(decoder, srclineB, scratchline2, width/2, horizOffsetR, flip2); } else { RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width/2, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width/2, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } if(vignette != 0.0) { int cwidth= width/2; if(decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC) cwidth= width; FastVignetteInplaceWP13(decoder, width/2, cwidth, height, y, vig_r1, vig_r2, vig_gain, (int16_t *)srclineA, 
decoder->frame.resolution, skip); FastVignetteInplaceWP13(decoder, width/2, cwidth, height, y, vig_r1, vig_r2, vig_gain, (int16_t *)srclineB, decoder->frame.resolution, skip); } if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width/2, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip); if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width/2, (int16_t *)srclineB, blursharpenR, decoder->frame.resolution, skip); memcpy(dstlineA+sskip*(width/2), srclineB, width/2*sskip*2); } else { int16_t *ptr; int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(!alphachannel) { if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2); } else { RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } else { if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2); } else { RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } if(vignette != 0.0) { int cwidth= width/2; if(decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC) cwidth= width; FastVignetteInplaceWP13(decoder, width, cwidth, height, y, vig_r1, vig_r2, 
vig_gain, (int16_t *)srclineA, decoder->frame.resolution, skip); FastVignetteInplaceWP13(decoder, width, cwidth, height, y, vig_r1, vig_r2, vig_gain, (int16_t *)srclineB, decoder->frame.resolution, skip); } if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip); if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineB, blursharpenR, decoder->frame.resolution, skip); dstlineA = srclineA; ptr = (int16_t *)srclineA; for(x=0; x<width/2; x++) { *ptr++ = (ptr1[0]+ptr1[3])>>1; *ptr++ = (ptr1[1]+ptr1[4])>>1; *ptr++ = (ptr1[2]+ptr1[5])>>1 ; ptr1+=sskip*2; } for(; x<width; x++) { *ptr++ = (ptr2[0]+ptr2[3])>>1; *ptr++ = (ptr2[1]+ptr2[4])>>1; *ptr++ = (ptr2[2]+ptr2[5])>>1; ptr2+=sskip*2; } } if(windowMaskL || xmin) { float mask = windowMaskL > xmin ? windowMaskL : xmin; RGB48WindowMask(decoder, dstlineA, width/2, 0, mask); if(windowMaskL < 0) RGB48WindowMask(decoder, dstlineA, width/2, 0, windowMaskL); if(xmin) { RGB48WindowMask(decoder, dstlineA, width/2, 1, xmin); } } if(windowMaskR || (1.0-xmax)) { float mask = windowMaskR > (1.0f-xmax) ? 
windowMaskR : (1.0f-xmax); RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 1, mask); if(windowMaskR < 0) RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 1, windowMaskR); if(xmin) { RGB48WindowMask(decoder, dstlineA+width*sskip/2, width/2, 0, xmin); } } if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST) { if(decoder->ghost_bust_left || decoder->ghost_bust_right) { GhostBust(decoder, dstlineA, dstlineA+width*sskip/2, width/2, decoder->ghost_bust_left, decoder->ghost_bust_right); } } if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { memcpy(scratchline2+width*sskip/2, dstlineA, width*sskip*2/2); memcpy(dstlineA, dstlineA+width*sskip/2, width*sskip*2/2); memcpy(dstlineA+width*sskip/2, scratchline2+width*sskip/2, width*sskip*2/2); } } break; case BLEND_STACKED_ANAMORPHIC: //stacked case BLEND_LINE_INTERLEAVED: //fields if((y & 1) == 1) return; if(!blank) { uint16_t *ptrA1 = (uint16_t *)srclineA; uint16_t *ptrA2 = (uint16_t *)srclineA + (source_pitch1>>1); uint16_t *ptrB1 = (uint16_t *)srclineB; uint16_t *ptrB2 = (uint16_t *)srclineB + (source_pitch2>>1); FastBlendWP13((short *)ptrA1, (short *)ptrA2, (short *)ptrA1/*output*/, width*skip); FastBlendWP13((short *)ptrB1, (short *)ptrB2, (short *)ptrB1/*output*/, width*skip); if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt) { if(!alphachannel) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2); } else { RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } else { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, 
flip1); RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2); } else { RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } if(vignette != 0.0) { FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (short *)srclineA, decoder->frame.resolution, skip); FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (short *)srclineB, decoder->frame.resolution, skip); } if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip); if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip); if(windowMaskL || xmin) { float mask = windowMaskL > xmin ? windowMaskL : xmin; RGB48WindowMask(decoder, srclineA, width, 0, mask); if(windowMaskL < 0) RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL); if(xmin) { RGB48WindowMask(decoder, srclineA, width, 1, xmin); } } if(windowMaskR || (1.0-xmax)) { float mask = windowMaskR > (1.0f-xmax) ? 
windowMaskR : (1.0f-xmax); RGB48WindowMask(decoder, srclineB, width, 1, mask); if(windowMaskR < 0) RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR); if(xmin) { RGB48WindowMask(decoder, srclineB, width, 0, xmin); } } if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST) { if(decoder->ghost_bust_left || decoder->ghost_bust_right) { GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right); } } if(decoder->doVerticalFilter == 0) { if(decoder->channel_blend_type==BLEND_STACKED_ANAMORPHIC) //stacked { if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { outputline2 = output+(y>>1)*pitch; outputline = output+((y>>1)+(height/2))*pitch; } else { outputline = output+(y>>1)*pitch; outputline2 = output+((y>>1)+(height/2))*pitch; } } else //fields { if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { outputline = output+(y)*pitch; outputline2 = output+(y+1)*pitch; } else { outputline2 = output+(y)*pitch; outputline = output+(y+1)*pitch; } } if(flip_LR/*source_pitch1 < 0*/) // flip Left and Right { uint8_t *tmp = outputline2; outputline2 = outputline; outputline = tmp; } } else { if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { memcpy(scratchline2, srclineA, width*skip); memcpy(srclineA, srclineB, width*skip); memcpy(srclineB, scratchline2, width*skip); } } } break; case BLEND_ONION: //onion case BLEND_DIFFERENCE: //difference case BLEND_SPLITVIEW: //splitView if(!blank) { //dstlineA = source_buffer; //dstlineA += (source_pitch>>1) * y; sptr = dstlineA = srclineA; srclineA = (uint16_t *)bptr1; srclineB = (uint16_t *)bptr2; if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt) { if(!alphachannel) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2); } else { RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, 
-horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } else { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGBA64HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); RGBA64HoriShift(decoder, srclineB, scratchline2, width, horizOffsetR, flip2); } else { RGBA64HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGBA64HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } if(vignette != 0.0) { FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (short *)srclineA, decoder->frame.resolution, skip); FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (short *)srclineB, decoder->frame.resolution, skip); } if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip); if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip); if(windowMaskL || xmin) { float mask = windowMaskL > xmin ? windowMaskL : xmin; RGB48WindowMask(decoder, srclineA, width, 0, mask); if(windowMaskL < 0) RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL); if(xmin) { RGB48WindowMask(decoder, srclineA, width, 1, xmin); } } if(windowMaskR || (1.0-xmax)) { float mask = windowMaskR > (1.0f-xmax) ? 
windowMaskR : (1.0f-xmax); RGB48WindowMask(decoder, srclineB, width, 1, mask); if(windowMaskR < 0) RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR); if(xmin) { RGB48WindowMask(decoder, srclineB, width, 0, xmin); } } x = 0; if(decoder->channel_blend_type == BLEND_SPLITVIEW) //split view { int xsplit = width * (decoder->cfhddata.split_pos_xy & 0xff) / 255; for(x = xsplit*sskip; x<width*sskip; x++) { srclineA[x] = srclineB[x]; } } else if(decoder->channel_blend_type == BLEND_ONION) //onion { FastBlendWP13((short *)srclineA, (short *)srclineB, (short *)dstlineA/*output*/, width*skip); } else if(decoder->channel_blend_type == BLEND_DIFFERENCE) //difference { #if XMMOPT int width8 = (width*sskip) & 0xfff8; __m128i mid_epi16; //int unaligned = ((int)sbase) & 15; //unaligned += ((int)in_rgb8) & 15; if(whitepoint == 13) mid_epi16 = _mm_set1_epi16(0x0fff); else mid_epi16 = _mm_set1_epi16(0x1fff); for(x=0; x<width8; x+=8) { __m128i rgb16A = _mm_load_si128((__m128i *)&srclineA[x]); __m128i rgb16B = _mm_load_si128((__m128i *)&srclineB[x]); // 0 to 0xffff if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { rgb16A = _mm_subs_epi16(rgb16B, rgb16A); // -3fff to 3fff } else { rgb16A = _mm_subs_epi16(rgb16A, rgb16B); } rgb16A = _mm_adds_epi16(rgb16A, mid_epi16); // -0x1fff to 0x5fff , avg 0x1fff _mm_store_si128((__m128i *)&dstlineA[x], rgb16A); } #endif for(; x<width*sskip; x++) { int val; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { val = (srclineB[x] - srclineA[x]) + 32768; } else { val = (srclineA[x] - srclineB[x]) + 32768; } if(val > 0x7fff) val = 0x7fff; if(val < 0) val = 0; dstlineA[x] = val; } } } break; case BLEND_ANAGLYPH_RC: case BLEND_ANAGLYPH_RC_BW: case BLEND_ANAGLYPH_AB: case BLEND_ANAGLYPH_AB_BW: case BLEND_ANAGLYPH_GM: case BLEND_ANAGLYPH_GM_BW: case BLEND_ANAGLYPH_DUBOIS: //Optimized { uint16_t *sptr1 = scratchline2; uint16_t *sptr2 = scratchline3; dstlineA = (uint16_t *)bptr1; // dstlineA += (source_pitch>>1) * y; sptr = dstlineA; sptr1 = 
srclineA = (uint16_t *)bptr1; sptr2 = srclineB = (uint16_t *)bptr2; if(zoom != 1.0 || zoomR != 1.0 || horizOffset || horizOffsetR || channel_flip || frameTilt) { if(!alphachannel) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGB48HoriShift(decoder, srclineA, scratchline, width, -horizOffset, flip1); RGB48HoriShift(decoder, srclineB, scratchline, width, horizOffsetR, flip2); } else { RGB48HoriShiftZoom(decoder, srclineA, scratchline, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGB48HoriShiftZoom(decoder, srclineB, scratchline, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } else { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) { RGBA64HoriShift(decoder, scratchline2, scratchline, width, -horizOffset, flip1); RGBA64HoriShift(decoder, scratchline3, scratchline, width, horizOffsetR, flip2); } else { RGBA64HoriShiftZoom(decoder, scratchline2, scratchline, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); RGBA64HoriShiftZoom(decoder, scratchline3, scratchline, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } if(vignette != 0.0) { FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (short *)srclineA, decoder->frame.resolution, skip); FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (short *)srclineB, decoder->frame.resolution, skip); } if(blursharpenL != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineA, blursharpenL, decoder->frame.resolution, skip); if(blursharpenR != 0.0) FastSharpeningBlurHinplaceWP13(width, (short *)srclineB, blursharpenR, decoder->frame.resolution, skip); if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST) { if(decoder->ghost_bust_left || decoder->ghost_bust_right) { GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right); } } if(windowMaskL || xmin) { float mask = 
windowMaskL > xmin ? windowMaskL : xmin; RGB48WindowMask(decoder, srclineA, width, 0, mask); if(windowMaskL < 0) RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL); if(xmin) { RGB48WindowMask(decoder, srclineA, width, 1, xmin); } } if(windowMaskR || (1.0-xmax)) { float mask = windowMaskR > (1.0f-xmax) ? windowMaskR : (1.0f-xmax); RGB48WindowMask(decoder, srclineB, width, 1, mask); if(windowMaskR < 0) RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR); if(xmin) { RGB48WindowMask(decoder, srclineB, width, 0, xmin); } } if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { uint16_t *tmp = srclineA; srclineA = srclineB; srclineB = tmp; } switch(decoder->channel_blend_type) { case BLEND_ANAGLYPH_RC: { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { for(x=0; x<width; x++) { sptr[0] = ptr2[0]; sptr[1] = ptr1[1]; sptr[2] = ptr1[2]; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } else { for(x=0; x<width; x++) { sptr[0] = ptr1[0]; sptr[1] = ptr2[1]; sptr[2] = ptr2[2]; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } } break; case BLEND_ANAGLYPH_RC_BW: { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { for(x=0; x<width; x++) { int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4; int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4; sptr[0] = y2; sptr[1] = y1; sptr[2] = y1; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } else { for(x=0; x<width; x++) { int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4; int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4; sptr[0] = y1; sptr[1] = y2; sptr[2] = y2; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } } break; case BLEND_ANAGLYPH_AB: { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { for(x=0; x<width; x++) { sptr[0] = ptr2[0]; sptr[1] = ptr2[1]; sptr[2] = ptr1[2]; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } else { 
for(x=0; x<width; x++) { sptr[0] = ptr1[0]; sptr[1] = ptr1[1]; sptr[2] = ptr2[2]; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } } break; case BLEND_ANAGLYPH_AB_BW: { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { for(x=0; x<width; x++) { int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4; int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4; sptr[0] = y2; sptr[1] = y2; sptr[2] = y1; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } else { for(x=0; x<width; x++) { int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4; int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4; sptr[0] = y1; sptr[1] = y1; sptr[2] = y2; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } } break; case BLEND_ANAGLYPH_GM: { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { for(x=0; x<width; x++) { sptr[0] = ptr1[0]; sptr[1] = ptr2[1]; sptr[2] = ptr1[2]; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } else { for(x=0; x<width; x++) { sptr[0] = ptr2[0]; sptr[1] = ptr1[1]; sptr[2] = ptr2[2]; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } } break; case BLEND_ANAGLYPH_GM_BW: { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; if(decoder->channel_swapped_flags & FLAG3D_SWAPPED) { for(x=0; x<width; x++) { int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4; int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4; sptr[0] = y1; sptr[1] = y2; sptr[2] = y1; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } else { for(x=0; x<width; x++) { int y1 = (ptr1[0]*5+ptr1[1]*10+ptr1[2])>>4; int y2 = (ptr2[0]*5+ptr2[1]*10+ptr2[2])>>4; sptr[0] = y2; sptr[1] = y1; sptr[2] = y2; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } } break; case BLEND_ANAGLYPH_DUBOIS: //Optimized { int16_t *ptr1 = (int16_t *)srclineA; int16_t *ptr2 = (int16_t *)srclineB; int r,g,b; for(x=0; x<width; x++) { r =(ptr1[0]*456 + ptr1[1]*500 + ptr1[2]*176 + ptr2[0]*-43 + ptr2[1]*-88 + ptr2[2]*-2 ) / 1000; g =(ptr1[0]*-40 + 
ptr1[1]*-38 + ptr1[2]*-16 + ptr2[0]*378 + ptr2[1]*734 + ptr2[2]*-18 ) / 1000; b =(ptr1[0]*-15 + ptr1[1]*-21 + ptr1[2]*-5 + ptr2[0]*-72 + ptr2[1]*-113+ ptr2[2]*1226) / 1000; if(r<0) r=0; if(r>0x3fff) r=0x3fff; if(g<0) g=0; if(g>0x3fff) g=0x3fff; if(b<0) b=0; if(b>0x3fff) b=0x3fff; sptr[0] = r; sptr[1] = g; sptr[2] = b; ptr1 += sskip; ptr2 += sskip; sptr += sskip; } } break; } } break; case BLEND_NONE: default: if(decoder->channel_decodes == 1) // only one channel { if(skip == 8) { //the data is already in the correct format sptr = (unsigned short *)bptr1; // shift if needed. if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt) { if(decoder->channel_current == 0) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) RGBA64HoriShift(decoder, sptr, scratchline2, width, -horizOffset, flip1); else RGBA64HoriShiftZoom(decoder, sptr, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); } else { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) RGBA64HoriShift(decoder, sptr, scratchline2, width, horizOffsetR, flip2); else RGBA64HoriShiftZoom(decoder, sptr, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } } else if(skip == 6) { //the data is already in the correct format dstlineA = sptr = (unsigned short *)srclineA; // shift if needed. 
if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt) { if(decoder->channel_current == 0) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); else RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); } else { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) RGB48HoriShift(decoder, srclineA, scratchline2, width, horizOffsetR, flip2); else RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } } if(vignette != 0.0) { FastVignetteInplaceWP13(decoder, width, width, height, y, vig_r1, vig_r2, vig_gain, (int16_t *)srclineA, decoder->frame.resolution, skip); } if(decoder->channel_current == 0) { if(blursharpenL != 0.0) { FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenL, decoder->frame.resolution, skip); } } else { if(blursharpenR != 0.0) { FastSharpeningBlurHinplaceWP13(width, (int16_t *)srclineA, blursharpenR, decoder->frame.resolution, skip); } } } if ((windowMaskL && decoder->channel_current == 0) || xmin) { float mask = windowMaskL > xmin ? windowMaskL : xmin; if(decoder->channel_current != 0) mask = xmin; if(windowMaskL < 0) RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL); RGB48WindowMask(decoder, srclineA, width, 0, mask); } if ((windowMaskR && decoder->channel_current == 1) || (1.0f-xmax)) { float mask = windowMaskR > (1.0f-xmax) ? 
windowMaskR : (1.0f-xmax); if(decoder->channel_current != 1) mask = (1.0f-xmax); if(windowMaskR < 0) RGB48WindowMask(decoder, srclineA, width, 1, windowMaskR); RGB48WindowMask(decoder, srclineA, width, 1, mask); } } else { outputline2 = output+(y+height)*pitch; if(zoom != 1.0 || zoomR != 1.0 || horizOffsetR || horizOffset || channel_flip || frameTilt) { if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) RGB48HoriShift(decoder, srclineA, scratchline2, width, -horizOffset, flip1); else RGB48HoriShiftZoom(decoder, srclineA, scratchline2, width, height, y, -horizOffsetBase, rotOffsetBase, zoom, flip1, frameTilt, 0); if(zoom == 1.0 && zoomR == 1.0 && frameTilt == 0.0) RGB48HoriShift(decoder, srclineB, scratchline2, width, horizOffset, flip2); else RGB48HoriShiftZoom(decoder, srclineB, scratchline2, width, height, y, horizOffsetBaseR, -rotOffsetBaseR, zoomR, flip2, frameTilt, 1); } if(windowMaskL || xmin) { float mask = windowMaskL > xmin ? windowMaskL : xmin; RGB48WindowMask(decoder, srclineA, width, 0, mask); if(windowMaskL < 0) RGB48WindowMask(decoder, srclineA, width, 0, windowMaskL); } if(windowMaskR || (1.0-xmax)) { float mask = windowMaskR > (1.0f-xmax) ? 
windowMaskR : (1.0f-xmax); RGB48WindowMask(decoder, srclineB, width, 1, mask); if(windowMaskR < 0) RGB48WindowMask(decoder, srclineB, width, 1, windowMaskR); } if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST) { if(decoder->ghost_bust_left || decoder->ghost_bust_right) { GhostBust(decoder, srclineA, srclineB, width, decoder->ghost_bust_left, decoder->ghost_bust_right); } } } break; } } if(!formatdone) { int flags = ACTIVEMETADATA_PRESATURATED; int whitebitdepth = 16; if(decoder->StereoBufferFormat == DECODED_FORMAT_WP13 || decoder->StereoBufferFormat == DECODED_FORMAT_W13A) { flags = 0; whitebitdepth = 13; } if(outputline2) { // if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools) // HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG48, whitebitdepth); if(decoder->doVerticalFilter == 0) // No sharp stage so output now { if(alphachannel) Convert4444LinesToOutput(decoder, width, 1, y, srclineA, outputline, pitch, decoder->frame.format, whitebitdepth, flags); else ConvertLinesToOutput(decoder, width, 1, y, srclineA, outputline, pitch, decoder->frame.format, whitebitdepth, flags); //if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools) // HistogramLine(decoder, dstlineA, width, DECODED_FORMAT_RG48, whitebitdepth); if(alphachannel) Convert4444LinesToOutput(decoder, width, 1, y, srclineB, outputline2, pitch, decoder->frame.format, whitebitdepth, flags); else ConvertLinesToOutput(decoder, width, 1, y, srclineB, outputline2, pitch, decoder->frame.format, whitebitdepth, flags); } } else { //if(decoder->cfhddata.ComputeFlags&2 && (0 == (y&3)) && decoder->tools) //{ // if(alphachannel) // HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG64, whitebitdepth); // else // HistogramLine(decoder, srclineA, width, DECODED_FORMAT_RG48, whitebitdepth); //} if(decoder->doVerticalFilter == 0) // No sharp stage so output now { if(alphachannel) Convert4444LinesToOutput(decoder, width, 1, y, srclineA, outputline, pitch, 
decoder->frame.format, whitebitdepth, flags);
				else
					ConvertLinesToOutput(decoder, width, 1, y, srclineA, outputline, pitch,
						decoder->frame.format, whitebitdepth, flags);
			}
		}
	}
}

// SharpenLine -- applies the optional vertical blur/sharpen pass to one decoded
// row held in the intermediate stereo buffer and converts it to the final
// output pixel format.
//
// decoder        - decoder state (blend type, buffer format, sharpen amounts)
// buffer         - scratch memory; each worker thread uses its own width*skip slice
// bufferremain   - scratch buffer size (not referenced in this routine)
// output         - base pointer of the destination frame
// pitch          - destination row pitch in bytes
// local_output   - intermediate buffer holding the blended lines
// local_pitch    - row pitch of the intermediate buffer in bytes
// channel_offset - byte offset to the second channel inside local_output
// y              - row index to process
// thread_index   - worker thread number (selects the scratch slice)
void SharpenLine(DECODER *decoder, uint8_t *buffer, int bufferremain, uint8_t *output, int pitch,
	uint8_t *local_output, int local_pitch, int channel_offset, int y, int thread_index)
{
	uint16_t *sbase;//*sbase2 = NULL;
	int width = decoder->frame.width;
	int height = decoder->frame.height;
	int skip = 3;	// per-pixel byte stride of the intermediate buffer format (set by the switch below)
	//int flip1=0;//flip2=0;
	int channel_flip = decoder->cfhddata.channel_flip;
	//int local_pitch1 = local_pitch;
	//int local_pitch2 = local_pitch;
	uint8_t *outputline = output+y*pitch;
	//uint8_t *outputline2 = NULL;
	short *scratch;
	//int formatdone = 0;
	//float xmin = decoder->cfhddata.channel[0].FrameMask.topLftX;
	//float xmax = decoder->cfhddata.channel[0].FrameMask.topRgtX;
	//float ymin = decoder->cfhddata.channel[0].FrameMask.topLftY;
	//float ymax = decoder->cfhddata.channel[0].FrameMask.botLftY;
	int alphachannel = 0;
	float blursharpen = 0;
	int line_max = decoder->frame.height;
	int yy = y;

	// Select the vertical sharpen amount for the channel being decoded.
	if(decoder->channel_current == 0)
		blursharpen = decoder->cfhddata.channel[1].user_blur_sharpen; // TODO LEFT and RIGHT separate vertical sharpen
	else
		blursharpen = decoder->cfhddata.channel[2].user_blur_sharpen; // TODO LEFT and RIGHT separate vertical sharpen

	// Disable sharpening when the color-matrix stage is off or the decode
	// resolution is reduced.
	if(!(decoder->cfhddata.process_path_flags & PROCESSING_COLORMATRIX)||
		decoder->frame.resolution == DECODED_RESOLUTION_QUARTER ||
		decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY ||
		decoder->frame.resolution == DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED)
	{
		blursharpen = 0.0;
	}

	if(decoder->channel_mix_half_res == 1)
		line_max *= 2;

	if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
	{
		channel_flip = 0;
	}

	if(decoder->sharpen_flip) //SharpenLine
	{
		//if(!(decoder->channel_blend_type == BLEND_NONE && decoder->channel_current == 1)) // right channel only (stored in baseptr1)
		{
			// Vertical flip: write this row to its mirrored destination line.
			yy = (line_max - 1 - y);
			outputline = output+yy*pitch;
		}
	}

	if( decoder->StereoBufferFormat == DECODED_FORMAT_RG64 ||
		decoder->StereoBufferFormat == DECODED_FORMAT_W13A ||
		decoder->StereoBufferFormat == DECODED_FORMAT_RGB32)
		alphachannel = 1;

	if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
	{
		width *= 2;
	}

	sbase = (uint16_t *)local_output;
	sbase += (local_pitch>>1) * y;

	// Per-pixel byte stride for each supported intermediate buffer format.
	switch(decoder->StereoBufferFormat)
	{
	case DECODED_FORMAT_RG64:
	case DECODED_FORMAT_W13A:
		skip = 8;
		break;
	case DECODED_FORMAT_WP13:
		skip = 6;
		break;
	case DECODED_FORMAT_RG48:
		skip = 6;
		break;
	case DECODED_FORMAT_RGB32:
		skip = 4;
		break;
	case DECODED_FORMAT_RGB24:
		skip = 3;
		break;
	case DECODED_FORMAT_YUYV:
		skip = 2;
		break;
	}

	// Each thread gets its own scratch line inside the shared buffer.
	scratch = (short*)(buffer + width * skip * thread_index);

	{
		int flags = ACTIVEMETADATA_PRESATURATED;
		int whitebitdepth = 16;

		// The vertical filter only runs on the 13-bit white point formats.
		if((decoder->StereoBufferFormat == DECODED_FORMAT_WP13 ||
			decoder->StereoBufferFormat == DECODED_FORMAT_W13A))
		{
			int use_pitch = local_pitch;
			int edgeclose = 0;
			flags = 0;
			whitebitdepth = 13;

			if(blursharpen != 0.0 && local_pitch != 0)
			{
				// Aptr..Eptr form a 5-tap vertical window around the current
				// row (two rows above, current, two below), clamped to the
				// current row near the frame edges; edgeclose flags rows close
				// to the top or bottom.
				short *Aptr,*Bptr,*Cptr,*Dptr,*Eptr;

				switch(decoder->channel_blend_type)
				{
				case BLEND_STACKED_ANAMORPHIC:
					// Rows of one eye are stored two lines apart, so step the
					// window by two lines.
					sbase = (uint16_t *)local_output;
					sbase += (local_pitch>>1) * y * 2;
					if(y<=4) edgeclose = 1;
					if(y>=2) Aptr = (short *)sbase - (local_pitch>>1) * 4;
					else Aptr = (short *)sbase;
					if(y>=1) Bptr = (short *)sbase - (local_pitch>>1) * 2;
					else Bptr = (short *)sbase;
					Cptr = (short *)sbase;
					if(y<height-1) Dptr = (short *)sbase + (local_pitch>>1) * 2;
					else Dptr = (short *)sbase;
					if(y<height-2) Eptr = (short *)sbase + (local_pitch>>1) * 4;
					else Eptr = (short *)sbase;
					if(y>=height-4) edgeclose = 1;
					use_pitch = local_pitch * 2;
					break;

				case BLEND_LINE_INTERLEAVED:
					// Odd rows come from the second channel (channel_offset);
					// the window also steps by two lines.
					sbase = (uint16_t *)local_output;
					if(y & 1)
					{
						y--;
						sbase += (local_pitch>>1) * y;
					}
					else
					{
						sbase += (local_pitch>>1) * y;
						sbase += channel_offset>>1;
					}
					if(y<=8) edgeclose = 1;
					if(y>=4) Aptr = (short *)sbase - (local_pitch>>1) * 4;
					else Aptr = (short *)sbase;
					if(y>=2) Bptr = (short *)sbase - (local_pitch>>1) * 2;
					else Bptr = (short *)sbase;
					Cptr = (short *)sbase;
					if(y<height-2) Dptr = (short *)sbase + (local_pitch>>1) * 2;
					else Dptr = (short *)sbase;
					if(y<height-4) Eptr = (short *)sbase + (local_pitch>>1) * 4;
					else Eptr = (short *)sbase;
					if(y>=height-8) edgeclose = 1;
					use_pitch = local_pitch * 2;
					break;

				default:
					// Progressive layout: window steps by single lines.
					if(y<=4) edgeclose = 1;
					if(y>=2) Aptr = (short *)sbase - (local_pitch>>1) * 2;
					else Aptr = (short *)sbase;
					if(y>=1) Bptr = (short *)sbase - (local_pitch>>1) * 1;
					else Bptr = (short *)sbase;
					Cptr = (short *)sbase;
					if(y<height-1) Dptr = (short *)sbase + (local_pitch>>1) * 1;
					else Dptr = (short *)sbase;
					if(y<height-2) Eptr = (short *)sbase + (local_pitch>>1) * 2;
					else Eptr = (short *)sbase;
					if(y>=height-4) edgeclose = 1;
					use_pitch = local_pitch;
					break;
				}

				// skip == 8 means the buffer carries an alpha channel (W13A).
				if(skip == 8)
				{
					FastSharpeningBlurVW13A(Aptr, Bptr, Cptr, Dptr, Eptr, use_pitch, edgeclose,
						scratch, width, blursharpen,
						decoder->frame.resolution, decoder->channel_blend_type);
				}
				else
				{
					FastSharpeningBlurVWP13(Aptr, Bptr, Cptr, Dptr, Eptr, use_pitch, edgeclose,
						scratch, width, blursharpen,
						decoder->frame.resolution, decoder->channel_blend_type);
				}
				// The filtered line now lives in the scratch buffer.
				sbase = (uint16_t *)scratch;
			}
		}

		// Convert the (possibly filtered) line to the final frame format.
		if(alphachannel)
			Convert4444LinesToOutput(decoder, width, 1, y, sbase, outputline, pitch,
				decoder->frame.format, whitebitdepth, flags);
		else
			ConvertLinesToOutput(decoder, width, 1, y, sbase, outputline, pitch,
				decoder->frame.format, whitebitdepth, flags);
	}
}

#if _GRAPHICS
void PaintFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
	int x,y,v,width, height;
	int maxR=0,maxG=0,maxB=0;
	width = decoder->frame.width;
	height = decoder->frame.height;

	if(decoder->cfhddata.BurninFlags == 0)
		return;

	if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & ~1) // tools
	{
		if(decoder->tools == NULL)
		{
#if _ALLOCATOR
			decoder->tools = (ToolsHandle *)Alloc(decoder->allocator, sizeof(ToolsHandle));
#else
			decoder->tools = (ToolsHandle *)MEMORY_ALLOC(sizeof(ToolsHandle));
#endif
			if(decoder->tools)
			{
				memset(decoder->tools,
0, sizeof(ToolsHandle)); } else { return; } } } decoder->frame.output_format = output_format; #if _THREADED && 1 if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & ~1 && decoder->tools) // histogram/scopes/waveform { WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; int workunits; #if _DELAY_THREAD_START if(decoder->tools->histogram == 0 && decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif { int avgR=0,avgG=0,avgB=0; // Post a message to the mailbox mailbox->output = output; if(height >= 1080) { mailbox->pitch = pitch*4; // only read every 4th scan line workunits = height/4; // only read every 4th scan line } else if(height >= 540) { mailbox->pitch = pitch*2; // only read every 2th scan line workunits = height/2; // only read every 2th scan line } else { mailbox->pitch = pitch; // read every scan line workunits = height; // read every scan line } if(decoder->tools->histogram == 0) { mailbox->jobType = JOB_TYPE_HISTOGRAM; // histogram // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); } for(x=0;x<256;x++) { avgR += decoder->tools->histR[x]; avgG += decoder->tools->histG[x]; avgB += decoder->tools->histB[x]; //if(maxR < decoder->histR[x]) maxR = decoder->histR[x]; //if(maxG < decoder->histG[x]) maxG = decoder->histG[x]; //if(maxB < decoder->histB[x]) maxB = decoder->histB[x]; } avgR /= 256; avgG /= 256; avgB /= 256; //maxR++; //maxG++; //maxB++; decoder->tools->maxR = avgR*3;//maxR; decoder->tools->maxG = avgG*3;//maxG; 
decoder->tools->maxB = avgB*3;//maxB; } } #endif if(decoder->cfhddata.BurninFlags && DrawOpen(decoder)) { if(decoder->cfhddata.BurninFlags & 3) // overlays / tools { #if _THREADED //DrawInit(decoder); //DrawStartThreaded(decoder); if(decoder->draw_thread.pool.thread_count > 0) { DrawWaitThreaded(decoder); } else #endif { DrawInit(decoder); DrawMetadataObjects(decoder); } } else { DrawInit(decoder); } if(decoder->drawSafeMarkers) DrawSafeMarkers(decoder); if(decoder->cfhddata.BurninFlags & 2) // tools { if(decoder->tools) { if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 16) DrawGrid(decoder, 0/*decoder->MDPcurrent.parallax*/); if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 2) DrawHistogram(decoder, 0/*decoder->MDPcurrent.parallax*/); if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 4) DrawWaveform(decoder, 0/*decoder->MDPcurrent.parallax*/); if(decoder->tools->histogram && decoder->cfhddata.ComputeFlags & 8) DrawVectorscope(decoder, 0/*decoder->MDPcurrent.parallax*/); } } DrawScreen(decoder, output, pitch, output_format); } #if 0 #if _THREADED && 1 if(decoder->cfhddata.BurninFlags & 2 && decoder->cfhddata.ComputeFlags & 2 && decoder->tools) // histogram { WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; int workunits; int targetW, targetH; if(width < 256 || height < 256) return; targetW = width / 4; targetH = height / 8; mailbox->output = output; mailbox->pitch = pitch; workunits = targetW; mailbox->jobType = JOB_TYPE_BURNINS; // burnin // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); } #else if(decoder->histogram == 0) { for(y=0; y<height; y+=4) { uint8_t *bptr = output; bptr += pitch * y; HistogramLine(decoder, (unsigned 
short *)bptr, width, output_format); if(decoder->histogram == 0) return; // don't know how to create Histogram for that format } } for(x=1;x<255;x++) { if(maxR < decoder->histR[x]) maxR = decoder->histR[x]; if(maxG < decoder->histG[x]) maxG = decoder->histG[x]; if(maxB < decoder->histB[x]) maxB = decoder->histB[x]; } maxR++; maxG++; maxB++; decoder->maxR = maxR; decoder->maxG = maxG; decoder->maxB = maxB; for(x=0; x<targetW; x++) { HistogramRender(decoder, output, pitch, output_format, x, targetW, targetH); } #endif #endif if(decoder->tools) memset(decoder->tools, 0, sizeof(ToolsHandle)); } #endif extern int geomesh_alloc_cache(void *gm); #define DEG2RAD(d) (PI*(d)/180.0f) #define RAD2DEG(r) (180.0f*(r)/PI) bool approx_equal(int x, int y) { if(y > 1080) { x >>= 6; y >>= 6; } else if(y > 540) { x >>= 5; y >>= 5; } else { x >>= 4; y >>= 4; } if(x == y || x+1 == y || x == y+1) return true; return false; } bool approx_equal_float(float x, float y) { if (x*0.99 < y && y < x*1.01) return true; return false; } #if WARPSTUFF void WarpFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format) { int width, height; //int maxR = 0, maxG = 0, maxB = 0; int status = WARPLIB_SUCCESS; CFHDDATA *cfhddata = &decoder->cfhddata; int backgroundfill = cfhddata->lensFill; float sensorcrop = 1.0; float phi, theta, rho; int srcLens = HERO4; if (!cfhddata->doMesh) return; if (decoder->lastLensOffsetX != cfhddata->LensOffsetX || decoder->lastLensOffsetY != cfhddata->LensOffsetY || decoder->lastLensOffsetZ != cfhddata->LensOffsetZ || decoder->lastLensOffsetR != cfhddata->LensOffsetR || decoder->lastLensZoom != cfhddata->LensZoom || decoder->lastLensFishFOV != cfhddata->LensFishFOV || decoder->lastLensGoPro != cfhddata->lensGoPro || decoder->lastLensSphere != cfhddata->lensSphere || decoder->lastLensFill != cfhddata->lensFill || decoder->lastLensStyleSel != cfhddata->lensStyleSel || memcmp(decoder->lastLensCustomSRC, cfhddata->lensCustomSRC, sizeof(cfhddata->lensCustomSRC)) || 
		// (continuation of the mesh-rebuild condition in WarpFrame)
		memcmp(decoder->lastLensCustomDST, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST)) )
	{
		if (decoder->mesh)
			geomesh_destroy(decoder->mesh);

		width = decoder->frame.width;
		height = decoder->frame.height;

		// Pick mesh density, source lens model and angular ranges from the
		// frame's aspect ratio.
		if (approx_equal(width, height * 2)) // approx. 2:1
		{
			float outputaspect = 16.0f/9.0f;
			srcLens = EQUIRECT;
			sensorcrop = 1.00623f; // Fixes the slight calculation error difference between 16x9 with a 4x3, and 16x9 within a 2x1 image.
			if (cfhddata->lensCustomSRC[1])
			{
				outputaspect = cfhddata->lensCustomSRC[0] / cfhddata->lensCustomSRC[1];
				if (outputaspect >= 1.0f && outputaspect <= 3.0f)
				{
					//float sourceratio = (float)width / (float)height;
					if (approx_equal_float(outputaspect, 4.0f / 3.0f))
						sensorcrop = sqrtf((float)(width*width + height*height)) / sqrtf((float)((width * 2 / 3)*(width * 2 / 3) + (height*height)));
					if (approx_equal_float(outputaspect, 16.0f / 9.0f)) // 0.88;
						sensorcrop = 1.00623f; // Fixes the slight calculation error difference between 16x9 with a 4x3, and 16x9 within a 2x1 image.
				}
			}
			if (width >= 2496)
				decoder->mesh = geomesh_create(199, 99);
			else if (width >= 1272)
				decoder->mesh = geomesh_create(99, 49);
			else
				decoder->mesh = geomesh_create(49, 25);
			phi = cfhddata->LensOffsetX * DEG2RAD(720.0f); // +-180deg HFOV for 2:1
			theta = cfhddata->LensOffsetY * DEG2RAD(720.0f); // +-180deg VFOV for 2:1
			rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
		}
		else if (approx_equal(width * 3, height * 4)) // approx. 4:3
		{
			srcLens = HERO4;
			sensorcrop = 1.0;
			if (width > 2880) // UHD
				decoder->mesh = geomesh_create(159, 119);
			else if (width >= 1920) //HD/2.7K
				decoder->mesh = geomesh_create(79, 59);
			else
				decoder->mesh = geomesh_create(39, 29);
			phi = cfhddata->LensOffsetX * DEG2RAD(120.0f); // +-60deg HFOV for 16:9
			theta = cfhddata->LensOffsetY * DEG2RAD(98.0f); // +-49deg VFOV for 16:9
			rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
		}
		else //if(approx_equal(width*9,height*16)) // approx. 16:9
		{
			srcLens = HERO4;
			sensorcrop = sqrtf(1920 * 1920 + 1080 * 1080) / sqrtf(2000 * 2000 + 1500 * 1500); // 3840x2160 from 4000x3000
			if (width > 2880) // UHD
				decoder->mesh = geomesh_create(159, 119);
			else if (width >= 1920) //HD/2.7K
				decoder->mesh = geomesh_create(79, 59);
			else
				decoder->mesh = geomesh_create(39, 29);
			phi = cfhddata->LensOffsetX * DEG2RAD(120.0f); // +-60.1deg HFOV for 16:9
			theta = cfhddata->LensOffsetY * DEG2RAD(70.0f); // +-34.75deg VFOV for 16:9
			rho = (cfhddata->LensOffsetZ - 1.0f)*4.0f* DEG2RAD(360.0f); // +-360deg
		}

		// Initialize the mesh for the output pixel format (same source and
		// destination geometry; backgroundfill selects edge-fill behavior).
		if ((output_format & 0x7fffffff) == COLOR_FORMAT_YUYV)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_YUY2, width, height, pitch, WARPLIB_FORMAT_YUY2, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_RGB32)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_32BGRA, width, height, pitch, WARPLIB_FORMAT_32BGRA, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_W13A)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_W13A, width, height, pitch, WARPLIB_FORMAT_W13A, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_WP13)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_WP13, width, height, pitch, WARPLIB_FORMAT_WP13, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_RG48)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_RG48, width, height, pitch, WARPLIB_FORMAT_RG48, backgroundfill);
		else if ((output_format & 0x7fffffff) == COLOR_FORMAT_BGRA64)
			status |= geomesh_init(decoder->mesh, width, height, pitch, WARPLIB_FORMAT_64ARGB, width, height, pitch, WARPLIB_FORMAT_64ARGB, backgroundfill);
		else
			assert(0);

		if (cfhddata->lensSphere == 1)
		{
			if (cfhddata->lensGoPro != 2) // not outputting EQUIRECT
			{
				if (cfhddata->LensOffsetR != 0.0)
				{
					//float angle = 360.0 * asinf(cfhddata->LensOffsetR * 1.7777777777) / (2.0 * 3.14159);
					float angle = 360.0f * cfhddata->LensOffsetR * cfhddata->LensOffsetR * 2.1f;//asinf(cfhddata->LensOffsetR * 1.7777777777) / (2.0 * 3.14159);
					if (cfhddata->LensOffsetR < 0.0)
						angle = -angle;
					geomesh_transform_rotate(decoder->mesh, angle);
				}
				if (cfhddata->LensZoom != 1.0)
					geomesh_transform_scale(decoder->mesh, cfhddata->LensZoom, cfhddata->LensZoom);

				if (cfhddata->LensFishFOV != 0.0) // DeFish
				{
					float fov = cfhddata->LensFishFOV;// *180.0;
					// Clamp to just under +-90 degrees.
					if (fov > 89.9f) fov = 89.9f;
					if (fov < -89.9f) fov = -89.9f;
					if (fov)
						status |= geomesh_transform_defish(decoder->mesh, fov);
				}
			}

			// Repoint from the source lens model to the selected output model.
			switch (cfhddata->lensGoPro)
			{
			case 0:
				geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, RECTILINEAR);
				break;
			case 1:
				geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, HERO4);
				break;
			case 2:
				geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, srcLens, EQUIRECT);
				break;
			case 4:
				geomesh_set_custom_lens(decoder->mesh, cfhddata->lensCustomSRC, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST));
				if (srcLens == EQUIRECT)
					geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, EQUIRECT, CUSTOM_LENS);
				else
					geomesh_transform_repoint_src_to_dst(decoder->mesh, sensorcrop, phi, theta, rho, CUSTOM_LENS, CUSTOM_LENS);
				break;
			}
		}
		else // old boring geometry
		{
			if (cfhddata->LensZoom != 1.0)
				geomesh_transform_scale(decoder->mesh, cfhddata->LensZoom, cfhddata->LensZoom);

			// basic orthographic moves
			if (cfhddata->LensOffsetX != 0.0 || cfhddata->LensOffsetY != 0.0)
				geomesh_transform_pan(decoder->mesh, cfhddata->LensOffsetX*(float)width, -cfhddata->LensOffsetY*(float)height);
			if (cfhddata->LensOffsetR != 0.0)
			{
				float angle = 360.0f * asinf(cfhddata->LensOffsetR * 1.7777777777f) / (2.0f * 3.14159f);
				geomesh_transform_rotate(decoder->mesh, angle);
			}
			if (cfhddata->lensGoPro == 0) //Rectilear
				status |= geomesh_transform_gopro_to_rectilinear(decoder->mesh, sensorcrop);

			//status |= geomesh_fisheye_gopro_adjustmesh(mesh, &correction_mode, WARPLIB_ALGORITHM_PRESERVE_EVERYTHING,//WARPLIB_ALGORITHM_BEST_FIT,
			//	width, height, product, model, lens_type, fov, (int)decoder->frame.resolution);
		}

		geomesh_alloc_cache(decoder->mesh); // required for JOB_TYPE_WARP_CACHE

		if (status == WARPLIB_SUCCESS)
		{
			// Allocate the intermediate warp destination buffer on first use.
			if (decoder->lens_correct_buffer == NULL)
			{
#if _ALLOCATOR
				decoder->lens_correct_buffer = (int *)Alloc(decoder->allocator, pitch * height);
#else
				decoder->lens_correct_buffer = (int *)MEMORY_ALLOC(pitch * height);
#endif
			}
		}
		else
		{
			return;
		}

		/* need resources?
		{
			if(decoder->tools == NULL)
			{
#if _ALLOCATOR
				decoder->tools = (ToolsHandle *)Alloc(decoder->allocator, sizeof(ToolsHandle));
#else
				decoder->tools = (ToolsHandle *)MEMORY_ALLOC(sizeof(ToolsHandle));
#endif
				if(decoder->tools)
				{
					memset(decoder->tools, 0, sizeof(ToolsHandle));
				}
				else
				{
					return;
				}
			}
		}
		*/

#if _THREADED && 1
		{
			WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
			int workunits = decoder->frame.height;

#if _DELAY_THREAD_START
			if (decoder->worker_thread.pool.thread_count == 0)
			{
				CreateLock(&decoder->worker_thread.lock);

				// Initialize the pool of transform worker threads
				ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16, WorkerThreadProc, decoder);
			}
#endif
			{
				// Post a message to the mailbox: precompute the warp cache
				// in 16-row chunks across the worker pool.
				mailbox->data = decoder->mesh;
				mailbox->output = output;
				mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
				mailbox->line_max = decoder->frame.height;
				mailbox->chunk_size = 16;
				workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
				mailbox->jobType = JOB_TYPE_WARP_CACHE;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
			}
		}
#endif

		//decoder->frame.output_format = output_format;

		// Remember the parameters the mesh was built with so the next frame
		// can reuse it unchanged.
		decoder->lastLensOffsetX = cfhddata->LensOffsetX;
		decoder->lastLensOffsetY = cfhddata->LensOffsetY;
		decoder->lastLensOffsetZ = cfhddata->LensOffsetZ;
		decoder->lastLensOffsetR = cfhddata->LensOffsetR;
		decoder->lastLensZoom = cfhddata->LensZoom;
		decoder->lastLensFishFOV = cfhddata->LensFishFOV;
		decoder->lastLensGoPro = cfhddata->lensGoPro;
		decoder->lastLensSphere = cfhddata->lensSphere;
		decoder->lastLensFill = cfhddata->lensFill;
		decoder->lastLensStyleSel = cfhddata->lensStyleSel;
		memcpy(decoder->lastLensCustomSRC, cfhddata->lensCustomSRC, sizeof(cfhddata->lensCustomSRC));
		memcpy(decoder->lastLensCustomDST, cfhddata->lensCustomDST, sizeof(cfhddata->lensCustomDST));
	}

#if _THREADED && 1
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int workunits = decoder->frame.height;

		// Apply the warp: output -> lens_correct_buffer, 16-row chunks.
		mailbox->data = decoder->mesh;
		mailbox->output = output;
		mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
		mailbox->line_max = decoder->frame.height;
		mailbox->chunk_size = 16;
		workunits = (mailbox->line_max + mailbox->chunk_size-1)/mailbox->chunk_size;
		mailbox->jobType = JOB_TYPE_WARP;

		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

		if(backgroundfill) // may need to blur the filled in areas
		{
			// Vertical blur pass over the filled regions, in place on the
			// lens-correct buffer; work is chunked by columns (frame width).
			mailbox->data = decoder->mesh;
			mailbox->output = (uint8_t *)decoder->lens_correct_buffer;
			mailbox->local_output = (uint8_t *)decoder->lens_correct_buffer;
			mailbox->line_max = decoder->frame.width;
			mailbox->chunk_size = 16;
			mailbox->pitch = pitch;
			workunits = (mailbox->line_max + mailbox->chunk_size-1)/mailbox->chunk_size;
			mailbox->jobType = JOB_TYPE_WARP_BLURV;

			// Set the work count to the number of rows to process
			// (continuation of the background-fill blur pass in WarpFrame)
			ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
			// Start the transform worker threads
			ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
			// Wait for all of the worker threads to finish
			ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
		}
	}
#else // not threading
	{
		//geomesh_cache_init_bilinear(decoder->mesh); //bad
		geomesh_cache_init_bilinear_range(decoder->mesh, 0, decoder->frame.height); //good
		geomesh_apply_bilinear(decoder->mesh, (unsigned char *)output, (unsigned char *)decoder->lens_correct_buffer, 0, decoder->frame.height);
	}
#endif

	// Copy the warped result back over the caller's frame buffer.
	memcpy(output, decoder->lens_correct_buffer, pitch * decoder->frame.height);

	/*
	if(lens_correct_buffer)
#if _ALLOCATOR
		Free(decoder->allocator, lens_correct_buffer);
#else
		MEMORY_ALIGNED_FREE(lens_correct_buffer);
#endif
	geomesh_destroy(mesh);
	*/
}

// Blank out the frame regions outside the active lens rectangle defined by
// the normalized LensXmin/Xmax/Ymin/Ymax coordinates in cfhddata.  RGB
// formats are zero-filled; packed YUV formats are filled with the format's
// black luma/chroma byte pair (fillA/fillB).
// NOTE(review): local `width` is computed but never used here, and the
// COLOR_FORMAT_YU64 case sets bitsize=16 yet only a bitsize==8 fill loop
// exists below, so YU64 frames appear to be left unmasked -- confirm whether
// a 16-bit fill pass is missing.
void MaskFrame(DECODER *decoder, uint8_t *output, int pitch, int output_format)
{
	int x, y, width, height;
	int minY, maxY;
	int minX, maxX;
	CFHDDATA *cfhddata = &decoder->cfhddata;
	uint8_t *line = output;
	uint32_t fillA = 0;
	uint32_t fillB = 0;
	int bitsize = 8;

	if (!cfhddata->doMesh) return;

	width = decoder->frame.width;
	height = decoder->frame.height;

	// All-zero or full-frame rectangles mean "no masking".
	if (decoder->cfhddata.LensYmin == 0.0 && decoder->cfhddata.LensXmin == 0.0 && decoder->cfhddata.LensYmax == 0.0 && decoder->cfhddata.LensXmax == 0.0)
		return;
	if (decoder->cfhddata.LensYmin == 0.0 && decoder->cfhddata.LensXmin == 0.0 && decoder->cfhddata.LensYmax == 1.0 && decoder->cfhddata.LensXmax == 1.0)
		return;

	// Horizontal bounds are scaled in bytes of pitch and rounded down to a
	// 4-byte boundary (0xfffc mask).
	minY = (int)(decoder->cfhddata.LensYmin*(float)height);
	maxY = (int)(decoder->cfhddata.LensYmax*(float)height);
	minX = 0xfffc & (int)(decoder->cfhddata.LensXmin*(float)pitch);
	maxX = 0xfffc & (int)(decoder->cfhddata.LensXmax*(float)pitch);

	if (FORMATRGB(output_format))
	{
		line = output;
		// Top rows
		for (y = 0; y < minY; y++)
		{
			memset(line, 0, abs(pitch));
			line += pitch;
		}
		// Left and Right edges of middle rows
		if (maxX - minX != pitch)
		{
			for (; y < maxY; y++)
			{
				memset(line, 0, minX);
				memset(line + maxX, 0, pitch - maxX);
				line += pitch;
			}
		}
		// Bottom rows
		y = maxY;
		line = output + y*pitch;
		for (; y < height; y++)
		{
			memset(line, 0, abs(pitch));
			line += pitch;
		}
	}
	else
	{
		// Choose the byte pair that represents black for the packed YUV
		// format (luma 0x10, chroma 0x80, ordered per format).
		switch (output_format & 0x7fffffff)
		{
		case COLOR_FORMAT_YVYU:
		case COLOR_FORMAT_YUYV:
			fillA = 0x10;
			fillB = 0x80;
			break;
		case COLOR_FORMAT_UYVY:
		case COLOR_FORMAT_2VUY:
			fillA = 0x80;
			fillB = 0x10;
			break;
		case COLOR_FORMAT_YU64:
			fillA = 0x8000;
			fillB = 0x1000;
			bitsize = 16;
			break;
		}
	}

	if (bitsize == 8)
	{
		line = output;
		// Top rows
		for (y = 0; y < minY; y++)
		{
			for (x = 0; x < pitch; x += 2)
			{
				line[x] = fillA;
				line[x + 1] = fillB;
			}
			line += pitch;
		}
		// Left and Right edges of middle rows
		if (maxX - minX != pitch)
		{
			for (; y < maxY; y++)
			{
				for (x = 0; x < minX; x += 2)
				{
					line[x] = fillA;
					line[x + 1] = fillB;
				}
				for (x = maxX; x < pitch; x += 2)
				{
					line[x] = fillA;
					line[x + 1] = fillB;
				}
				line += pitch;
			}
		}
		// Bottom rows
		y = maxY;
		line = output + y*pitch;
		for (; y < height; y++)
		{
			for (x = 0; x < pitch; x += 2)
			{
				line[x] = fillA;
				line[x + 1] = fillB;
			}
			line += pitch;
		}
	}
}
#endif //#if WARPSTUFF

// Convert the decoder's local (intermediate) frame buffer into the caller's
// output buffer, applying any 3D/orientation/framing processing on the way.
// A negative channel_offset indicates swapped stereo channels; vertical flips
// are handled by negating local_pitch when the internal and output formats
// disagree on orientation.
void ConvertLocalToOutput(DECODER *decoder, uint8_t *output, int pitch, int output_format, uint8_t *local_output, int local_pitch, int channel_offset)
{
	uint8_t *local_output_double = local_output;
	//Frame_Region emptyFrameMask = {0};

	if(decoder->StereoBuffer)
		local_output_double = local_output = (uint8_t *)decoder->StereoBuffer;

	if(channel_offset < 0) // channel swapped
	{
		channel_offset = -channel_offset;
	}

	// If the internal and output formats disagree on vertical orientation,
	// walk the local buffer bottom-up by negating the pitch.
	if(INVERTEDFORMAT(decoder->frame.format) != INVERTEDFORMAT(output_format))
	{
		local_output += local_pitch*(decoder->frame.height-1);
		if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC)
			local_output_double += local_pitch*(decoder->frame.height*decoder->channel_decodes-1);
		else
			local_output_double = local_output;
		local_pitch = -local_pitch;
	}

	if(FLIPCOLORS(output_format) || output_format & 0x80000000)
	{
		decoder->cfhddata.InvertOffset = 1;
	}
	else
	{
		decoder->cfhddata.InvertOffset = 0;
	}
	// (continuation of ConvertLocalToOutput)
	decoder->frame.format = output_format;
	//decoder->frame.colorspace = COLOR_SPACE_CG_601;

#if _THREADED
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int workunits;

#if _DELAY_THREAD_START
		if(decoder->worker_thread.pool.thread_count == 0)
		{
			CreateLock(&decoder->worker_thread.lock);

			// Initialize the pool of transform worker threads
			ThreadPoolCreate(&decoder->worker_thread.pool,
							decoder->thread_cntrl.capabilities >> 16/*cpus*/,
							WorkerThreadProc,
							decoder);
		}
#endif

		// Run the vertical 3D pass only when some orientation or framing
		// parameter is active.
		if( ((decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION) &&
			(decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
			decoder->cfhddata.channel[1].FrameKeyStone ||
			decoder->cfhddata.channel[1].VerticalOffset ||
			decoder->cfhddata.channel[1].RotationOffset ||
			decoder->cfhddata.channel[1].FrameTilt ||
			decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0 ||
			decoder->cfhddata.channel[2].FrameKeyStone ||
			decoder->cfhddata.channel[2].VerticalOffset ||
			decoder->cfhddata.channel[2].RotationOffset ||
			decoder->cfhddata.channel[2].FrameTilt)) ||
			((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
			(decoder->cfhddata.FrameOffsetY ||
			decoder->cfhddata.FrameOffsetR || // decoder->cfhddata.FrameOffsetX ||
			decoder->cfhddata.FrameHScale != 1.0 ||
			decoder->cfhddata.FrameHDynamic != 1.0 ||
			decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
			decoder->cfhddata.channel[2].FrameZoom != 1.0) ))
		{
			//int x;
			int xbytes, xstep;
			//uint8_t *base = local_output;
			int width, height, chunk_size;
			int fine_vertical = 0;

			width = decoder->frame.width;
			height = decoder->frame.height;

			// xbytes = row length in bytes; xstep = column-strip width the
			// vertical pass processes per work item.
			switch(decoder->StereoBufferFormat)
			{
			case DECODED_FORMAT_RGB32:
				xbytes = width*4;
				xstep = 16;
				break;
			case DECODED_FORMAT_RGB24:
				xbytes = width*3;
				xstep = 16;
				break;
			case DECODED_FORMAT_YUYV:
				xbytes = width*2;
				xstep = 16;
				break;
			case DECODED_FORMAT_W13A:
			case DECODED_FORMAT_RG64:
				xbytes = width*8;
				xstep = 32;
				break;
			case DECODED_FORMAT_WP13:
			case DECODED_FORMAT_RG48:
				xbytes = width*6;
				xstep = 32;
				break;
			default:
				assert(0);
				break;
			}

			// Without rotation/keystone the work can be chunked coarsely;
			// otherwise use per-strip chunks, and for larger rotations drop
			// to single-pixel strips (fine_vertical) for accuracy.
			if(!(decoder->cfhddata.process_path_flags & (PROCESSING_ORIENTATION|PROCESSING_FRAMING)) ||
				(decoder->cfhddata.channel[1].RotationOffset == 0.0 &&
				decoder->cfhddata.channel[1].FrameKeyStone == 0.0 &&
				decoder->cfhddata.channel[2].RotationOffset == 0.0 &&
				decoder->cfhddata.channel[2].FrameKeyStone == 0.0 &&
				decoder->cfhddata.FrameOffsetR == 0.0))
			{
				chunk_size = 8;
			}
			else
			{
				chunk_size = 1;

				if((fabs(decoder->cfhddata.channel[1].RotationOffset) + fabs(decoder->cfhddata.channel[1].FrameKeyStone*0.2) + fabs(decoder->cfhddata.FrameOffsetR)) > 0.015 ||
					(fabs(decoder->cfhddata.channel[2].RotationOffset) + fabs(decoder->cfhddata.channel[2].FrameKeyStone*0.2) + fabs(decoder->cfhddata.FrameOffsetR)) > 0.015)
				{
					switch(decoder->StereoBufferFormat)
					{
					case DECODED_FORMAT_RGB32:
						xstep = 4;
						break;
					case DECODED_FORMAT_RGB24:
						xstep = 3;
						break;
					case DECODED_FORMAT_YUYV:
						xstep = 4;
						break;
					case DECODED_FORMAT_W13A:
					case DECODED_FORMAT_RG64:
						xstep = 8;
						break;
					case DECODED_FORMAT_WP13:
					case DECODED_FORMAT_RG48:
					default:
						xstep = 6;
						break;
					}
					fine_vertical = 1;
				}
			}

			// Interlaced full-resolution YUV 4:2:2: run the vertical pass on
			// each field separately at doubled pitch.
			if(	decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422 &&
				(decoder->frame.resolution == DECODED_RESOLUTION_FULL ||
				decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) &&
				decoder->codec.progressive == false)
			{
				int interlaced_pitch = local_pitch * 2;
				uint8_t *field2_output = local_output + local_pitch;

				// Post a message to the mailbox (first field)
				mailbox->local_output = local_output;
				mailbox->local_pitch = interlaced_pitch;
				mailbox->channel_offset = channel_offset;
				memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
				mailbox->info.height >>= 1;
				mailbox->line_max = (xbytes + xstep-1)/xstep;
				mailbox->chunk_size = chunk_size;
				mailbox->fine_vertical = fine_vertical;
				mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical

				workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

				// Post a message to the mailbox (second field)
				mailbox->local_output = field2_output;
				mailbox->local_pitch = interlaced_pitch;
				mailbox->channel_offset = channel_offset;
				memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
				mailbox->info.height >>= 1;
				mailbox->chunk_size = chunk_size;
				mailbox->line_max = (xbytes + xstep-1)/xstep;
				mailbox->fine_vertical = fine_vertical;
				mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical

				workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
			}
			else
			{
				//TODO Lens corect here.
				//call JOB_TYPE_VERTICAL_3D then (or lens correction equivalent.)
				// JOB_TYPE_HORIZONTAL_3D
				//before doing any offset and rotation corrections.

				if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) //HACK //DAN20110129
					width /= 2;

				// Post a message to the mailbox (progressive path)
				mailbox->local_output = local_output;
				mailbox->local_pitch = local_pitch;
				mailbox->channel_offset = channel_offset;
				memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
				mailbox->chunk_size = chunk_size;
				mailbox->line_max = (xbytes + xstep-1)/xstep;
				mailbox->fine_vertical = fine_vertical;
				mailbox->jobType = JOB_TYPE_VERTICAL_3D; // 3d work -- vertical

				workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;

				// Set the work count to the number of rows to process
				ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
				// Start the transform worker threads
				ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
				// Wait for all of the worker threads to finish
				ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
			}
		}

		// Post a message to the mailbox: horizontal 3D pass plus flips,
		// writing the final output buffer.  A worker may request a follow-up
		// vertical sharpen pass by setting decoder->doVerticalFilter.
		mailbox->output = output;
		mailbox->pitch = pitch;
		mailbox->local_output = local_output;
		mailbox->local_pitch = local_pitch;
		mailbox->channel_offset = channel_offset;
		memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
		mailbox->chunk_size = 16;
		mailbox->line_max = decoder->frame.height;
		if(decoder->channel_mix_half_res == 1)
			mailbox->line_max *= 2;
		workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
		decoder->doVerticalFilter = 0;
		mailbox->jobType = JOB_TYPE_HORIZONAL_3D; // 3d work && horizontal and vertical flips

		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

		if(decoder->doVerticalFilter)
		{
			// Post a message to the mailbox: vertical sharpen pass.
			mailbox->output = output;
			mailbox->pitch = pitch;
			mailbox->local_output = local_output_double;
			mailbox->local_pitch = local_pitch;
			mailbox->channel_offset = channel_offset;
			memcpy(&mailbox->info, &decoder->frame, sizeof(FRAME_INFO));
			mailbox->chunk_size = 16;
			mailbox->line_max = decoder->frame.height;
			if(decoder->channel_decodes == 2 && decoder->channel_blend_type == 0)
				mailbox->line_max *= 2;
			if(decoder->channel_mix_half_res == 1)
				mailbox->line_max *= 2;
			workunits = (mailbox->line_max + mailbox->chunk_size - 1) / mailbox->chunk_size;
			mailbox->jobType = JOB_TYPE_SHARPEN; // 3d work && horizontal and vertical flips

			// Set the work count to the number of rows to process
			ThreadPoolSetWorkCount(&decoder->worker_thread.pool, workunits);
			// Start the transform worker threads
			ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);
			// Wait for all of the worker threads to finish
			ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
		}
	}
#else
	{
		int y,width, height;
		uint8_t scratch[4096*16];
		int scratchremain = 4096*16;
		int ymin = 0, ymax;

		width = decoder->frame.width;
		height = decoder->frame.height;
		ymax = height;

		// Restrict processing to the active frame-mask rows, if any.
		if((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
			memcmp(&decoder->cfhddata.channel[0].FrameMask, &emptyFrameMask, 32))
		{
			ymin = (float)height * decoder->cfhddata.channel[0].FrameMask.topLftY;
			ymax = (float)height * decoder->cfhddata.channel[0].FrameMask.botLftY;
		}

		if( ((decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION) &&
			(decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
			decoder->cfhddata.channel[1].FrameKeyStone ||
			decoder->cfhddata.channel[1].VerticalOffset ||
			decoder->cfhddata.channel[1].RotationOffset ||
			decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0 ||
			decoder->cfhddata.channel[2].FrameKeyStone ||
			decoder->cfhddata.channel[2].VerticalOffset ||
			decoder->cfhddata.channel[2].RotationOffset)) ||
			((decoder->cfhddata.process_path_flags & PROCESSING_FRAMING) &&
			(decoder->cfhddata.FrameOffsetY ||
			decoder->cfhddata.FrameOffsetR ||
			decoder->cfhddata.FrameOffsetX ||
			decoder->cfhddata.FrameHScale != 1.0 ||
			decoder->cfhddata.FrameHDynamic != 1.0 ||
			decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
			decoder->cfhddata.channel[2].FrameZoom != 1.0))
		{
			int x,xbytes, xstep;
			uint8_t *base = local_output;
			float voffsetstep;
			float voffset = decoder->cfhddata.channel[1].VerticalOffset;
			float roffset = decoder->cfhddata.channel[1].RotationOffset;
			float voffset1, voffset2;
			float voffsetstep1, voffsetstep2;
			int channel_flip = decoder->cfhddata.channel_flip;
			int aspectx,aspecty;
			float aspectfix;

			GetDisplayAspectRatio(decoder, &aspectx, &aspecty);
			aspectfix = (float)(aspectx*aspectx) / (float)(aspecty*aspecty);

			if(!(decoder->cfhddata.process_path_flags & PROCESSING_ORIENTATION))
			{
				voffset = roffset = 0;
			}
			if(!(decoder->cfhddata.process_path_flags & PROCESSING_IMAGEFLIPS))
			{
				channel_flip = 0;
			}
			if(decoder->cfhddata.process_path_flags & PROCESSING_FRAMING)
				voffset += decoder->cfhddata.FrameOffsetY;

			if(decoder->cfhddata.InvertOffset)
			{
				voffset = -voffset;
				roffset = -roffset;
			}

			switch(decoder->StereoBufferFormat)
			{
			case DECODED_FORMAT_RGB32:
				xbytes = width*4;
				xstep = 16;
				break;
			case DECODED_FORMAT_RGB24:
				xbytes = width*3;
				xstep = 16;
				break;
			case DECODED_FORMAT_YUYV:
				xbytes = width*2;
				xstep = 16;
				break;
			case DECODED_FORMAT_WP13:
			case DECODED_FORMAT_RG48:
			default:
				xbytes = width*6;
				xstep = 32;
				break;
			}

			//DAN20100923 -- simplied
			//voffset += roffset * (float)(width*width) / (float)(height*height) * 0.5;
			//voffsetstep = -roffset * (float)(width*width) / (float)(height*height) / (float)(xbytes/xstep);
			voffset += roffset * aspectfix * 0.5;
			voffsetstep = -roffset * aspectfix / (float)(xbytes/xstep);

			// No rotation: one shift for the whole row.
			if(roffset == 0.0)
				xstep = xbytes;

			voffset1 = voffset2 = voffset;
			voffsetstep1 = voffsetstep2 = voffsetstep;

			// Mirror the per-channel offsets for flipped stereo channels.
			if(channel_flip & 0xf)
			{
				if(channel_flip & 2)
				{
					voffset1 = -voffset1;
					voffsetstep1 = -voffsetstep1;
				}
				if(channel_flip & 8)
				{
					voffset2 = -voffset2;
					voffsetstep2 = -voffsetstep2;
				}
		// Tail of the enclosing 3D post-processing function (its head is above this
		// excerpt).  Flip bits 1/4 reverse the per-column rotation ramp for the
		// left (1) and right (4) eyes respectively.
		if(channel_flip & 1)
		{
			voffset1 += voffsetstep1*(xbytes/xstep);
			voffsetstep1 = -voffsetstep1;
		}
		if(channel_flip & 4)
		{
			voffset2 += voffsetstep2*(xbytes/xstep);
			voffsetstep2 = -voffsetstep2;
		}
	}

	// Walk the row in xstep-byte column strips, applying a per-strip vertical
	// shift that ramps across the frame (simulated rotation/vertical offset).
	for(x=0; x<xbytes; x+=xstep)
	{
		if(decoder->channel_decodes == 1 && decoder->channel_current == 1) // Right only
		{
			RGB48VerticalShift(decoder, base, (unsigned short *)scratch, xstep, height, local_pitch, -voffset2);
		}
		else
		{
			RGB48VerticalShift(decoder, base, (unsigned short *)scratch, xstep, height, local_pitch, voffset1);
		}

		if(decoder->channel_decodes == 2)
		{
			// Second (right-eye) image is stacked channel_offset bytes below the first.
			uint8_t *bptr = base + channel_offset;
			RGB48VerticalShift(decoder, bptr, (unsigned short *)scratch, xstep, height, local_pitch, -voffset2);
		}

		base += xstep;
		voffset1 += voffsetstep1;
		voffset2 += voffsetstep2;
	}
	}

	if(decoder->channel_mix_half_res == 1)
		height *= 2;

	if(ymin)
	{
		memset(local_output, 0, abs(local_pitch)); // zero one line;
	}

	// Rows outside [ymin, ymax) are fed a zero pitch so ProcessLine3D repeats the
	// (zeroed) edge line; rows inside use the real pitch.
	for(y=0; y<ymin; y++)
	{
		ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, 0, channel_offset, y, 0);
	}
	for(; y<ymax; y++)
	{
		ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, local_pitch, channel_offset, y, 0);
	}
	for(; y<height; y++)
	{
		ProcessLine3D(decoder, scratch, scratchremain, output, pitch, local_output, 0, channel_offset, y, 0);
	}
}
#endif
}

// Decode a sample from the input bitstream into the output frame buffer.
//
// Top-level entry point for decoding one compressed sample.  Responsibilities,
// in order: optionally treat the buffer as an uncompressed "image development
// only" sample; pre-parse the sample header for the encoded format; realign the
// input buffer to a 4-byte boundary if needed; merge/initialize the CFHDDATA
// metadata block; select which stereo channel(s) to decode and the 3D blend
// mode; decide whether decoding must go through an intermediate local buffer;
// build the BYR4 linear-restore LUT on demand; allocate the stereo buffer;
// spin up (or reuse) a parallel decoder for two-channel decodes; dispatch to
// DecodeSampleGroup/DecodeSampleFrame/DecodeSampleIntraFrame per sample type;
// and finally convert the local buffer to the caller's output format.
//
// Parameters:
//   decoder     - decoder state; many fields are mutated (frame dims/format
//                 are temporarily altered and restored near the end — see the
//                 "HACK" blocks).
//   input       - bitstream positioned at the sample; advanced by decoding and
//                 by SkipVideoChannel for stereo channel selection.
//   output      - caller's destination frame buffer (may be consulted for NULL
//                 to suppress burn-ins/conversion).
//   pitch       - caller's row pitch in bytes.
//   colorparams - color conversion parameters, passed through to the per-type
//                 decode routines.
//   cfhddata    - optional in/out metadata; when valid it seeds decoder->cfhddata,
//                 and on exit the (possibly updated) decoder copy is written back
//                 so P-frames can see e.g. the Bayer format.
//
// Returns true on success, false on bitstream or decode failure.
bool DecodeSample(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch,
				  ColorParam *colorparams, CFHDDATA *cfhddata)
{
	//CODEC_ERROR error = CODEC_ERROR_OKAY;

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	//CODEC_STATE *codec = &decoder->codec;
	//int subband_wavelet_index[] = {5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 1, 1, 1, 0, 0, 0};

	int channel_decodes = 1; // 3D Work: number of eye channels to decode (1 or 2)
	int channel_offset = 0;  // byte offset between the two decoded channels
	int channel_mask = 0;    // which channel(s) requested: 1=left, 2=right, 3=both
	int channel_current = 0;
	//int wavelet_index;
	bool result = true;

	uint8_t *local_output = output;
	uint8_t *local_buffer = NULL;
	int local_pitch = pitch;
	int internal_format = decoder->frame.format;
	int output_format = decoder->frame.output_format;
	bool use_local_buffer = false;	// decode into StereoBuffer, convert at the end
	DECODER *local_decoder = decoder;
	//Frame_Region emptyFrameMask = {0};
	Frame_Region emptyFrameMask = FRAME_REGION_INITIALIZER;
	int orig_width = decoder->frame.width;
	int orig_height = decoder->frame.height;

	decoder->local_output = local_output; // used for NV12 decodes.
	decoder->sample_uncompressed = 0; // set if a uncompressed sample is found.
	decoder->image_dev_only = 0;

	if(decoder->flags & (1<<3)) // This is an image development only decode.
	{
		// The "sample" is an uncompressed baseband image; only color/image
		// development processing will be applied.
		decoder->sample_uncompressed = 1;
		decoder->image_dev_only = 1;
		decoder->codec.encoded_format = ENCODED_FORMAT_RGB_444;
		decoder->codec.unique_framenumber = 0; //What should this be?
		decoder->frame.white_point = 16; // how do we pass this in?
		decoder->uncompressed_chunk = (uint32_t *)input->lpCurrentBuffer;

		switch(output_format & 0x7fffffff)
		{
		case COLOR_FORMAT_RGB24:
			decoder->uncompressed_size = orig_width * orig_height * 3;
			break;
		case COLOR_FORMAT_RGB32:
			decoder->uncompressed_size = orig_width * orig_height * 4;
			break;
		case COLOR_FORMAT_RG48:
		case COLOR_FORMAT_WP13:
			decoder->uncompressed_size = orig_width * orig_height * 6;
			break;
		default:
			decoder->uncompressed_size = orig_width * orig_height * 6;
			assert(0);
			break;
		}
	}

	decoder->frame.alpha_Companded = 0; // reset this state.
	if(decoder->parallelDecoder)
		decoder->parallelDecoder->sample_uncompressed = 0;

	decoder->error = CODEC_ERROR_OKAY;
	input->error = BITSTREAM_ERROR_OKAY;

	// first time through encoded_format is not initized.
	if(input->nWordsUsed > 4096 && decoder->image_dev_only == 0) // an I-frame is needed
	{
		// Pre-parse the sample header (on a private bitstream copy) so the
		// encoded format and uncompressed flag are known before decoding.
		SAMPLE_HEADER header;
		BITSTREAM input2;

		InitBitstreamBuffer(&input2, input->lpCurrentWord, input->nWordsUsed, BITSTREAM_ACCESS_READ);

		memset(&header, 0, sizeof(SAMPLE_HEADER));
		header.find_lowpass_bands = 2; // help finding the uncompressed flag

		if(ParseSampleHeader(&input2, &header))
		{
			decoder->codec.encoded_format = header.encoded_format;
			decoder->sample_uncompressed = header.hdr_uncompressed;
			if(decoder->parallelDecoder)
				decoder->parallelDecoder->sample_uncompressed = header.hdr_uncompressed;
		}
	}

	// The decoder requires 4-byte aligned input; if the caller's buffer is
	// misaligned, copy it into a persistent aligned buffer (grown on demand).
	if((uintptr_t)input->lpCurrentBuffer & 0x3)
	{
		if(decoder->aligned_sample_buffer == NULL)
		{
#if _ALLOCATOR
			ALLOCATOR *allocator = decoder->allocator;
			decoder->aligned_sample_buffer = (uint8_t *)AllocAligned(allocator, (size_t)input->dwBlockLength, 16);
#else
			decoder->aligned_sample_buffer = (uint8_t *)MEMORY_ALIGNED_ALLOC(input->dwBlockLength, 16);
#endif
			memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength);
			decoder->aligned_sample_buffer_size = input->dwBlockLength;
		}
		else
		{
			if ((size_t)input->dwBlockLength <= decoder->aligned_sample_buffer_size)
			{
				// Existing aligned buffer is large enough; reuse it.
				memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength);
			}
			else
			{
				// Too small: free and reallocate at the new size.
#if _ALLOCATOR
				ALLOCATOR *allocator = decoder->allocator;
				FreeAligned(decoder->allocator, decoder->aligned_sample_buffer);
				decoder->aligned_sample_buffer = (uint8_t *)AllocAligned(allocator, input->dwBlockLength, 16);
#else
				MEMORY_ALIGNED_FREE(decoder->aligned_sample_buffer);
				decoder->aligned_sample_buffer = (uint8_t *)MEMORY_ALIGNED_ALLOC(input->dwBlockLength, 16);
#endif
				// NOTE(review): allocation result is not checked before memcpy —
				// a failed AllocAligned here would dereference NULL.
				memcpy(decoder->aligned_sample_buffer, input->lpCurrentBuffer, input->dwBlockLength);
				decoder->aligned_sample_buffer_size = input->dwBlockLength;
			}
		}
		input->lpCurrentBuffer = decoder->aligned_sample_buffer;
		input->lpCurrentWord = decoder->aligned_sample_buffer;
	}

#if 0	// Test for missaligning the image data
	if(((int)input->lpCurrentBuffer&3) == 0)
	{
		int i;
		uint8_t *ptr = (uint8_t *)input->lpCurrentBuffer;
		int missaligned = 1; //2 or 3
		for(i=input->dwBlockLength-1; i>=0; i--)
			ptr[i+missaligned] = ptr[missaligned];
		input->lpCurrentBuffer = (uint8_t *)&ptr[missaligned];
		input->lpCurrentWord = (uint8_t *)&ptr[missaligned];
	}
#endif

	//HACK
	// Unfortunately I need color matrix data deep within the codec for RT playback.
	if(cfhddata && cfhddata->MagicNumber == CFHDDATA_MAGIC_NUMBER) // valid input
	{
		if(decoder->cfhddata.MagicNumber != CFHDDATA_MAGIC_NUMBER)
		{
			// First time: seed decoder->cfhddata from the caller's copy,
			// clamped to the structure size this build knows about.
			//int size = cfhddata->size;
			size_t size = cfhddata->size;
			memset(&decoder->cfhddata, 0, sizeof(CFHDDATA));
			if (size > sizeof(CFHDDATA)) {
				// Limit the size to the known structure
				size = sizeof(CFHDDATA);
			}
			memcpy(&decoder->cfhddata, cfhddata, size);
		}
	}
	else
	{
		unsigned short value;

		if(decoder->cfhddata.MagicNumber != CFHDDATA_MAGIC_NUMBER || decoder->cfhddata.size != sizeof(CFHDDATA))
		{
			// No valid metadata supplied: initialize defaults, inferring the
			// subtype/channel count from the encoded input format tag.
			memset(&decoder->cfhddata, 0, sizeof(CFHDDATA));
			decoder->cfhddata.MagicNumber = CFHDDATA_MAGIC_NUMBER;
			decoder->cfhddata.size = sizeof(CFHDDATA);

			if(decoder->image_dev_only) // For baseband image only corrections, initize the decoder with defaults
			{
				decoder->cfhddata.cfhd_subtype = 2; //RGB
				decoder->cfhddata.num_channels = 3;
			}
			else if(GetTuplet(input->lpCurrentBuffer, input->nWordsUsed, CODEC_TAG_INPUT_FORMAT, &value))
			{
				if(value == COLOR_FORMAT_RG48)
				{
					decoder->cfhddata.cfhd_subtype = 2; //RGB
					decoder->cfhddata.num_channels = 3;
				}
				else if(value == COLOR_FORMAT_RG64)
				{
					decoder->cfhddata.cfhd_subtype = 3; //RGBA
					decoder->cfhddata.num_channels = 4;
				}
				else if(value > COLOR_FORMAT_BAYER && value < COLOR_FORMAT_BAYER_END)
				{
					unsigned int format = BAYER_FORMAT_RED_GRN;
					decoder->cfhddata.cfhd_subtype = 1; //BAYER
					decoder->cfhddata.bayer_format = format; // default to Red-Grn
					decoder->cfhddata.version = CFHDDATA_VERSION;
				}
			}
		}
	}

	// Apply per-sample metadata overrides from the bitstream.
	OverrideCFHDDATA(decoder, input->lpCurrentBuffer, input->nWordsUsed);

	if(decoder->image_dev_only) // HACK we need to support 3D also.
		decoder->source_channels = 1;
	else
		decoder->source_channels = decoder->real_channels = SkipVideoChannel(decoder, input, 0);

	// -------------------------------------------------------------------
	// Stereo / multi-channel selection: decide which channel(s) to decode
	// and how to blend them, from MSChannel_type_value or its override.
	// Packing (presumed from the shifts): bits 0-7 = channel mask,
	// bits 8-15 = blend type, bits 16-31 = swapped flags.
	// -------------------------------------------------------------------
	if(!decoder->basic_only && (decoder->cfhddata.MSChannel_type_value || decoder->cfhddata.MSCTV_Override))
	{
		//int channels = 0;
		int channel_blend_type = BLEND_NONE;
		int channel_swapped_flags = 0;

		if(decoder->cfhddata.MSCTV_Override)
		{
			channel_mask = decoder->cfhddata.MSCTV_Override&0xff;
			channel_blend_type = ((decoder->cfhddata.MSCTV_Override>>8) & 0xff);
			channel_swapped_flags = ((decoder->cfhddata.MSCTV_Override>>16) & 0xffff);
		}
		else
		{
			channel_mask = decoder->cfhddata.MSChannel_type_value&0xff;
			channel_blend_type = ((decoder->cfhddata.MSChannel_type_value>>8) & 0xff);
			channel_swapped_flags = ((decoder->cfhddata.MSChannel_type_value>>16) & 0xffff);
		}

		// Blending only applies when both channels are requested.
		if(channel_mask != 3)
		{
			channel_blend_type = BLEND_NONE;
			channel_swapped_flags = 0;
		}

		//if(channels >= 2) // even "mono" files need to be displayed as Stereo if a 3D mode is selected //DAN20090302
		{
			if(channel_mask == 1 && decoder->source_channels >= 2) // Decode Left only
			{
				if(decoder->cfhddata.FramingFlags & 2) // channel swap
				{
					SkipVideoChannel(decoder, input, 2); // 3D work
				}
			}
			else if(channel_mask == 2 && decoder->source_channels >= 2) // Decode Right only
			{
				if(decoder->cfhddata.FramingFlags & 2) // channel swap
				{
					SkipVideoChannel(decoder, input, 1); // 3D work
				}
				else
				{
					//assume second channel decode
					SkipVideoChannel(decoder, input, 2); // 3D work
				}
				channel_current = 1;
				channel_decodes = 1;
				channel_blend_type = BLEND_NONE;
				channel_swapped_flags = 0;
			}
			else if(channel_mask == 2 && decoder->source_channels <= 1) // Decode 2D as Right channel
			{
				channel_current = 1;
				channel_decodes = 1;
				channel_blend_type = BLEND_NONE;
				channel_swapped_flags = 0;
			}
			else if((channel_mask&3) == 3) // A+B 3d work
			{
				channel_decodes = 2;
				decoder->channel_mix_half_res = 0;

				if(channel_blend_type != BLEND_NONE)
				{
					// Blended 3D output is composed in 8-bit RGB; pick RGB32
					// when an alpha channel must survive, RGB24 otherwise.
					if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
					{
						//if(decoder->frame.format == DECODED_FORMAT_W13A)
						//{
						//	decoder->frame.format = internal_format = DECODED_FORMAT_W13A;
						//}
						//else
						//{
						//	decoder->frame.format = internal_format = DECODED_FORMAT_RG64;
						//}
						decoder->frame.format = internal_format = DECODED_FORMAT_RGB32;
						local_pitch = decoder->frame.width * 4;
					}
					else
					{
						decoder->frame.format = internal_format = DECODED_FORMAT_RGB24;
						local_pitch = decoder->frame.width * 3; //RGB24
					}
					/*
					if(decoder->frame.resolution == DECODED_RESOLUTION_FULL &&
						(output_format == DECODED_FORMAT_YUYV || output_format == DECODED_FORMAT_UYVY))
					{
						if( channel_blend_type == BLEND_FREEVIEW ||
							((channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
							channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC ||
							channel_blend_type == BLEND_LINE_INTERLEAVED) && decoder->frame.width > 1280))
						{
							decoder->frame.resolution = DECODED_RESOLUTION_HALF;
							decoder->channel_mix_half_res = 1;
							decoder->frame.width /= 2;
							decoder->frame.height /= 2;
							local_pitch = (decoder->frame.width) * 3; //RGB24
						}
					}
					*/
				}

				/*
				if(channel_blend_type == BLEND_STEREO_YUY2inRGBA) //YUY2 in RGBA
				{
					decoder->frame.format = internal_format = DECODED_FORMAT_YUYV;
					local_pitch = decoder->frame.width * 2; //YUY2
					channel_offset = local_pitch * (decoder->frame.height);
					use_local_buffer = true;
				}*/

				/* DAN20120316 FLAG3D_HALFRES broken
				if(decoder->frame.resolution == DECODED_RESOLUTION_FULL &&
					channel_swapped_flags & FLAG3D_HALFRES && output_format != DECODED_FORMAT_W13A)
				{
					decoder->frame.resolution = DECODED_RESOLUTION_HALF;
					decoder->channel_mix_half_res = 1;
					decoder->frame.width /= 2;
					decoder->frame.height /= 2;
					local_pitch /= 2;
				}
				*/

				if( decoder->frame.resolution == DECODED_RESOLUTION_FULL &&
					(channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC || channel_blend_type == BLEND_FREEVIEW))
				{
					if(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER)
					{
						if(decoder->sample_uncompressed)
						{
							decoder->frame.resolution = DECODED_RESOLUTION_HALF;
							decoder->channel_mix_half_res = 1;
							decoder->frame.width /= 2;
							decoder->frame.height /= 2;
							local_pitch /= 2;
						}
						else
						{
							if(decoder->preformatted_3D_type > BLEND_NONE)
							{
								// leave as is.
							}
							else if(FORMAT8BIT(output_format))
							{
								decoder->frame.resolution = DECODED_RESOLUTION_HALF_HORIZONTAL;
								decoder->frame.width /= 2;
								local_pitch /= 2;
							}
						}
					}
					else
					{
						if(FORMAT8BIT(output_format))
							decoder->frame.resolution = DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER;
					}

					//TODO
					// NOTE(review): 'uncompressed' is computed but never used.
					int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed;
				}

				// Choose the byte offset between the two decoded channels
				// (and whether composition needs an intermediate buffer).
				if(channel_blend_type >= BLEND_STACKED_ANAMORPHIC && channel_blend_type < BLEND_ANAGLYPH_RC)// stacked, side-by-side, fields, Onion, YUY2
				{
					channel_offset = local_pitch * (decoder->frame.height);
				}
				else if(channel_blend_type >= BLEND_ANAGLYPH_RC)
				{
					/*
					if(channel_blend_type & 1 && channel_blend_type <= 21) // B&W Anaglyph
					{
						//B&W using YUYV
						decoder->frame.format = internal_format = DECODED_FORMAT_YUYV;
						local_pitch = decoder->frame.width * 2; //YUY2
					}*/
					channel_offset = local_pitch * (decoder->frame.height);
					use_local_buffer = true;
				}
				else if(channel_blend_type == BLEND_NONE) // double high
				{
					channel_offset = pitch * decoder->frame.height;
				}
				else
				{
					channel_blend_type = BLEND_STACKED_ANAMORPHIC;
					channel_offset = pitch * (decoder->frame.height/2);
				}

				// fields, stacked, etc, only works on full or half res.
				if (channel_blend_type > BLEND_NONE && channel_blend_type <= BLEND_LINE_INTERLEAVED &&
					decoder->frame.resolution == DECODED_RESOLUTION_LOWPASS_ONLY) //thumnbail.
				{
					channel_decodes = 1;
					channel_blend_type = BLEND_NONE;
					channel_swapped_flags = 0;
				}

				// Bayer output formats cannot be blended.
				if (channel_blend_type != BLEND_NONE &&
					(output_format == DECODED_FORMAT_BYR1 ||
					 output_format == DECODED_FORMAT_BYR2 ||
					 output_format == DECODED_FORMAT_BYR3 ||
					 output_format == DECODED_FORMAT_BYR4 ))
				{
					channel_decodes = 1;
					channel_blend_type = BLEND_NONE;
					channel_swapped_flags = 0;
				}
			}
		}

		decoder->channel_decodes = channel_decodes;
		decoder->channel_blend_type = channel_blend_type;
		decoder->channel_swapped_flags = channel_swapped_flags;
	}
	else
	{
		decoder->channel_decodes = channel_decodes = 1;
		decoder->channel_blend_type = BLEND_NONE;
		decoder->channel_swapped_flags = 0;
	}

	if(cfhddata) // So the P-frames can know the bayerformat
	{
		// Write the (possibly updated) metadata back to the caller,
		// clamped to the known structure size.
		//int size = cfhddata->size;
		size_t size = cfhddata->size;
		if (size > sizeof(CFHDDATA)) {
			size = sizeof(CFHDDATA);
		}
		memcpy(cfhddata, &decoder->cfhddata, size);
	}

	{
		// -------------------------------------------------------------------
		// Resolve which processing stages are active from process_path_flags
		// and its mask, then decide whether any active stage forces decoding
		// through an intermediate high-precision local buffer.
		// -------------------------------------------------------------------
		bool doOrientation = true;
		bool doFraming = true;
		bool doBurins = true;
		bool doImageflips = true;
		bool doGhostBust = false;
		bool doPrimaries = true;
		int process_path_flags = decoder->cfhddata.process_path_flags;
		int process_path_flags_mask = decoder->cfhddata.process_path_flags_mask;

		if(decoder->basic_only)
		{
			doOrientation = false;
			doFraming = false;
			doBurins = false;
			doImageflips = false;
			doPrimaries = false;
		}
		else
		{
			if(decoder->cfhddata.process_path_flags_mask)
			{
				//DAN20101007 --
				if(process_path_flags == 0)
					decoder->cfhddata.process_path_flags = process_path_flags = decoder->cfhddata.process_path_flags_mask;

				process_path_flags &= decoder->cfhddata.process_path_flags_mask;

				if(process_path_flags_mask & PROCESSING_ACTIVE2)
				{
					if(!(process_path_flags_mask & PROCESSING_ORIENTATION))
						doOrientation = false;
					if(!(process_path_flags_mask & PROCESSING_FRAMING))
						doFraming = false;
					if(!(process_path_flags_mask & PROCESSING_BURNINS))
						doBurins = false;
					if(!(process_path_flags_mask & PROCESSING_IMAGEFLIPS))
						doImageflips = false;
				}
				if(!(process_path_flags_mask & PROCESSING_COLORMATRIX))
					doPrimaries = false;
			}

			if(process_path_flags & PROCESSING_ACTIVE2)
			{
				if(!(process_path_flags & PROCESSING_ORIENTATION))
					doOrientation = false;
				if(!(process_path_flags & PROCESSING_FRAMING))
					doFraming = false;
				if(!(process_path_flags & PROCESSING_BURNINS))
					doBurins = false;
				if(!(process_path_flags & PROCESSING_IMAGEFLIPS))
					doImageflips = false;
				if(!(process_path_flags & PROCESSING_COLORMATRIX))
					doPrimaries = false;
			}
		}

		if(doOrientation)
			process_path_flags |= PROCESSING_ORIENTATION;
		if(doFraming)
			process_path_flags |= PROCESSING_FRAMING;
		if(doBurins)
			process_path_flags |= PROCESSING_BURNINS;
		if(doImageflips)
			process_path_flags |= PROCESSING_IMAGEFLIPS;
		if(doPrimaries)
			process_path_flags |= PROCESSING_COLORMATRIX;

		if(decoder->channel_swapped_flags & FLAG3D_GHOSTBUST)
		{
			if(decoder->ghost_bust_left || decoder->ghost_bust_right)
			{
				doGhostBust = true;
			}
		}

		decoder->cfhddata.process_path_flags = process_path_flags;

		// Does any active processing stage (orientation offsets, blur/sharpen,
		// vignette, framing, ghost-busting, flips, preformatted 3D, non-multiple
		// -of-8 width, uncompressed samples, or mesh warping) require an
		// intermediate buffer?
		if((!decoder->basic_only && (doOrientation && (
			decoder->cfhddata.channel[0].FloatingWindowMaskL ||
			decoder->cfhddata.channel[0].FloatingWindowMaskR ||
			decoder->cfhddata.channel[0].FrameKeyStone ||
			decoder->cfhddata.channel[0].FrameTilt ||
			decoder->cfhddata.channel[0].HorizontalOffset ||
			decoder->cfhddata.channel[0].VerticalOffset ||
			decoder->cfhddata.channel[0].RotationOffset ||
			decoder->cfhddata.channel[1].FloatingWindowMaskL ||
			decoder->cfhddata.channel[1].FloatingWindowMaskR ||
			decoder->cfhddata.channel[1].FrameKeyStone ||
			decoder->cfhddata.channel[1].FrameTilt ||
			decoder->cfhddata.channel[1].HorizontalOffset ||
			decoder->cfhddata.channel[1].VerticalOffset ||
			decoder->cfhddata.channel[1].RotationOffset ||
			decoder->cfhddata.channel[0].FrameAutoZoom * decoder->cfhddata.channel[1].FrameDiffZoom != 1.0 ||
			decoder->cfhddata.channel[2].FloatingWindowMaskL ||
			decoder->cfhddata.channel[2].FloatingWindowMaskR ||
			decoder->cfhddata.channel[2].FrameKeyStone ||
			decoder->cfhddata.channel[2].FrameTilt ||
			decoder->cfhddata.channel[2].HorizontalOffset ||
			decoder->cfhddata.channel[2].VerticalOffset ||
			decoder->cfhddata.channel[2].RotationOffset ||
			decoder->cfhddata.channel[0].FrameAutoZoom / decoder->cfhddata.channel[2].FrameDiffZoom != 1.0)))
			|| (doPrimaries && (
			decoder->cfhddata.channel[0].user_blur_sharpen != 0.0 ||
			decoder->cfhddata.channel[1].user_blur_sharpen != 0.0 ||
			decoder->cfhddata.channel[2].user_blur_sharpen != 0.0))
			|| (doFraming && (
			decoder->cfhddata.channel[0].user_vignette_start != 0.0 ||
			decoder->cfhddata.channel[1].user_vignette_start != 0.0 ||
			decoder->cfhddata.channel[2].user_vignette_start != 0.0))
			|| (doFraming && (
			memcmp(&decoder->cfhddata.channel[0].FrameMask, &emptyFrameMask, 32) ||
			decoder->cfhddata.FrameOffsetX ||
			decoder->cfhddata.FrameOffsetY ||
			decoder->cfhddata.FrameOffsetR ||
			decoder->cfhddata.FrameHScale != 1.0 ||
			decoder->cfhddata.FrameHDynamic != 1.0 ||
			decoder->cfhddata.channel[1].FrameZoom != 1.0 ||
			decoder->cfhddata.channel[2].FrameZoom != 1.0))
			|| (doGhostBust && (decoder->channel_blend_type == BLEND_NONE) && (channel_decodes == 2))
			|| (doImageflips && decoder->cfhddata.channel_flip)
			|| (decoder->preformatted_3D_type == BLEND_STACKED_ANAMORPHIC)
			|| (decoder->preformatted_3D_type == BLEND_SIDEBYSIDE_ANAMORPHIC)
			|| (decoder->channel_blend_type && decoder->frame.resolution == DECODED_RESOLUTION_QUARTER) || // 3D mode generally don't work in quarter res -- this prevents crashes.
			( ((decoder->frame.width+7)/8)*8 != decoder->frame.width ||
			  (channel_decodes > 1 && decoder->channel_blend_type != BLEND_NONE) ||
			  decoder->sample_uncompressed)
			|| (decoder->cfhddata.doMesh)
			)
		{
			if( output_format == DECODED_FORMAT_BYR1 ||
				output_format == DECODED_FORMAT_BYR2 ||
				output_format == DECODED_FORMAT_BYR3 ||
				output_format == DECODED_FORMAT_BYR4 )
			{
				// no manipulation should be applied
			}
			else
			{
				// Decode into a local 13-bit white point buffer (width rounded
				// up to a multiple of 8) so processing can run at full precision.
				use_local_buffer = true;
				local_pitch = ((decoder->frame.width+7)/8)*8 * 6; //RGB48

				if(decoder->image_dev_only)
				{
					decoder->frame.white_point = 13;
					decoder->frame.format = internal_format = DECODED_FORMAT_WP13;
				}
				else if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				{
					decoder->frame.white_point = 13;
					decoder->frame.format = internal_format = DECODED_FORMAT_W13A;
					local_pitch = ((decoder->frame.width+7)/8)*8 * 8;
				}
				else
				{
					decoder->frame.white_point = 13;
					decoder->frame.format = internal_format = DECODED_FORMAT_WP13;
				}

				if( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL ||
					decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER)
				{
					local_pitch *= 2; // need horizontal room to make 3D side by side frame
				}

				/*
				if(output_format == DECODED_FORMAT_WP13 || output_format == DECODED_FORMAT_W13A)
				{
					// preserve HDR
					decoder->frame.format = internal_format = output_format;//DECODED_FORMAT_WP13; // HDR output
					if(output_format == DECODED_FORMAT_W13A)
						local_pitch = decoder->frame.width * 8;
				}
				else
				{
					if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
					{
						decoder->frame.format = internal_format = DECODED_FORMAT_RG64;
						local_pitch = decoder->frame.width * 8;
					}
					else
					{
						decoder->frame.format = internal_format = DECODED_FORMAT_RG48;
					}
				}*/

				channel_offset = local_pitch * (decoder->frame.height);
			}
		}
	}

	// Build the 14-bit -> 16-bit linear-restore LUT for BYR4 output, decoding
	// the encode curve (type in the high 16 bits; base packed below) lazily on
	// first use.
	if(output_format == DECODED_FORMAT_BYR4 && decoder->cfhddata.encode_curve_preset == 0)
	{
		if(decoder->BYR4LinearRestore == NULL)
		{
			int j,val;
			int encode_curve_type = decoder->cfhddata.encode_curve >> 16;
			//int encode_curve_neg = encode_curve_type & CURVE_TYPE_NEGATIVE;
			float encode_curvebase;

			if(encode_curve_type) //1 or 2
			{
				if(encode_curve_type & CURVE_TYPE_EXTENDED)
					encode_curvebase = (float)(decoder->cfhddata.encode_curve & 0xffff); // use all 16-bits for larger log bases
				else
					encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff);
			}
			else
			{
				// No curve specified: default to log base 90.
				encode_curve_type = CURVE_TYPE_LOG;
				encode_curvebase = 90.0;
			}

#if _ALLOCATOR
			decoder->BYR4LinearRestore = (unsigned short *)AllocAligned(decoder->allocator,16384*2, 16);
#else
			decoder->BYR4LinearRestore = (unsigned short *)MEMORY_ALIGNED_ALLOC(16384*2, 16);
#endif
			// NOTE(review): allocation result is not checked before the table
			// fill below — a failed allocation would dereference NULL.
			for(j=0; j<16384; j++) //0 to 1
			{
				switch(encode_curve_type & CURVE_TYPE_MASK)
				{
				case CURVE_TYPE_LOG:
					val = (int)(CURVE_LOG2LIN((float)j/16384.0f, (float)encode_curvebase) * 65535.0f);
					break;
				case CURVE_TYPE_GAMMA:
					val = (int)(CURVE_GAM2LIN((float)j/16384.0f, (float)encode_curvebase) * 65535.0f);
					break;
				case CURVE_TYPE_CINEON:
					val = (int)(CURVE_CINEON2LIN((float)j/16384.0f, (float)encode_curvebase) * 65535.0f);
					break;
				case CURVE_TYPE_CINE985:
					val = (int)(CURVE_CINE9852LIN((float)j/16384.0f, (float)encode_curvebase) * 65535.0f);
					break;
				case CURVE_TYPE_PARA:
					val = (int)(CURVE_PARA2LIN((float)j/16384.0f, (int)((decoder->cfhddata.encode_curve >> 8) & 0xff), (int)(decoder->cfhddata.encode_curve & 0xff)) * 65535.0f);
					break;
				case CURVE_TYPE_CSTYLE:
					val = (int)(CURVE_CSTYLE2LIN((float)j/16384.0f, (int)((decoder->cfhddata.encode_curve >> 8) & 0xff)) * 65535.0f);
					break;
				case CURVE_TYPE_SLOG:
					val = (int)(CURVE_SLOG2LIN((float)j/16384.0f) * 65535.0f);
					break;
				case CURVE_TYPE_LOGC:
					val = (int)(CURVE_LOGC2LIN((float)j/16384.0f) * 65535.0f);
					break;
				case CURVE_TYPE_LINEAR:
				default:
					val = j;
					break;
				}

				// Clamp to the 16-bit output range.
				if(val < 0) val = 0;
				if(val > 65535) val = 65535;

				decoder->BYR4LinearRestore[j] = val;
			}
		}
	}

	//DAN20120319 - removed
	/*if(decoder->channel_mix_half_res) //decoding half but scaling to double the output size
	{
		local_pitch *= 2;
		channel_offset = local_pitch * (decoder->frame.height*2);
	}*/

	if(use_local_buffer == true) // need buffer for anaglyph and other 3D presentation formats
	{
		// Size the stereo buffer for however many stacked channel images will
		// be written into it.
		int stereoframesize = channel_offset * channel_decodes/*stacked frames*/;

		if(decoder->source_channels == 1 && decoder->preformatted_3D_type == BLEND_NONE)
			stereoframesize = channel_offset;
		if(channel_decodes == 1 && decoder->preformatted_3D_type != BLEND_NONE)
			stereoframesize = channel_offset * 2;
		if(channel_decodes == 2 && decoder->source_channels == 1 && decoder->channel_blend_type != BLEND_NONE)
			stereoframesize = channel_offset * 2;

		if(decoder->StereoBuffer==NULL || decoder->StereoBufferSize < stereoframesize)
		{
#if _ALLOCATOR
			if(decoder->StereoBuffer)
			{
				FreeAligned(decoder->allocator, decoder->StereoBuffer);
				decoder->StereoBuffer = NULL;
			}
			decoder->StereoBuffer = (PIXEL16U *)AllocAligned(decoder->allocator, stereoframesize+256, 16); //DAN20130517 add 256, as 2.7K half we are write off the buffers end for zoom, don't know why yet.
#else
			if(decoder->StereoBuffer)
			{
				MEMORY_ALIGNED_FREE(decoder->StereoBuffer);
				decoder->StereoBuffer = NULL;
			}
			decoder->StereoBuffer = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(stereoframesize+256, 16); //DAN20130517 add 256, as 2.7K half we are write off the buffers end for zoom, don't know why yet.
#endif
			assert(decoder->StereoBuffer != NULL);
			if (! (decoder->StereoBuffer != NULL))
			{
				// NOTE(review): this function returns bool, but here it returns
				// CODEC_ERROR_MEMORY_ALLOC — a nonzero error code converts to
				// true, i.e. failure is reported as success. Probably should be
				// `return false;` (with decoder->error set) — verify against
				// the CODEC_ERROR enum and all callers before changing.
				return CODEC_ERROR_MEMORY_ALLOC;
			}
			decoder->StereoBufferSize = stereoframesize;
		}

		decoder->StereoBufferFormat = internal_format;
		local_buffer = (uint8_t *)decoder->StereoBuffer;
		local_output = local_buffer;
	}

	DecodeEntropyInit(decoder);

	//swapped -- Maybe useful for double height decodes.
	/* if(channel_decodes == 2 && channel_swapped_flags & FLAG3D_SWAPPED)
	{
		local_output += channel_offset;
		channel_offset = -channel_offset;
	}*/

	decoder->use_local_buffer = use_local_buffer ? 1 : 0;

	// Lazily create the parallel decoder used for the second eye of a
	// two-channel decode; it always decodes at full resolution of the
	// (reconstructed) encoded dimensions.
	if(channel_decodes == 2 && decoder->parallelDecoder == NULL && decoder->source_channels > 1)
	{
		int encoded_width = decoder->frame.width;
		int encoded_height = decoder->frame.height;

		if (decoder->frame.resolution == DECODED_RESOLUTION_HALF)
		{
			// Compute the encoded dimensions from the frame dimensions
			encoded_width *= 2;
			encoded_height *= 2;
		}
		else if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER)
		{
			// Compute the encoded dimensions from the frame dimensions
			encoded_width *= 4;
			encoded_height *= 4;
		}
		else if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
		{
			// Compute the encoded dimensions from the frame dimensions
			encoded_width *= 2;
		}
		else if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_VERTICAL)
		{
			// Compute the encoded dimensions from the frame dimensions
			encoded_height *= 2;
		}

#if _ALLOCATOR
		decoder->parallelDecoder = (DECODER *)Alloc(decoder->allocator, sizeof(DECODER));
		if(decoder->parallelDecoder)
		{
			memset(decoder->parallelDecoder, 0, sizeof(DECODER));
			DecodeInit(decoder->allocator, decoder->parallelDecoder, encoded_width, encoded_height, internal_format, DECODED_RESOLUTION_FULL, NULL);
		}
#else
		decoder->parallelDecoder = (DECODER *)MEMORY_ALLOC(sizeof(DECODER));
		if(decoder->parallelDecoder)
		{
			memset(decoder->parallelDecoder, 0, sizeof(DECODER));
			decoder->parallelDecoder->thread_cntrl = decoder->thread_cntrl;
			DecodeInit(decoder->parallelDecoder, encoded_width, encoded_height, internal_format, DECODED_RESOLUTION_FULL, NULL);
		}
#endif
	}

	// Using the parallel decoder?
	if (decoder->parallelDecoder)
	{
		// Initialize the parallel decoder with parameters from the regular decoder
		memcpy(&decoder->parallelDecoder->cfhddata, &decoder->cfhddata, sizeof(CFHDDATA));
		memcpy(decoder->parallelDecoder->licensekey,decoder->licensekey, 16);
		DecodeEntropyInit(decoder->parallelDecoder);
		DecodeOverrides(decoder->parallelDecoder, decoder->overrideData, decoder->overrideSize);

		decoder->parallelDecoder->channel_decodes = decoder->channel_decodes;
		decoder->parallelDecoder->channel_blend_type = decoder->channel_blend_type;
		decoder->parallelDecoder->flags = decoder->flags;
		decoder->parallelDecoder->frame = decoder->frame;
		decoder->parallelDecoder->use_local_buffer = use_local_buffer ? 1 : 0;
		decoder->parallelDecoder->codec.encoded_format = decoder->codec.encoded_format;

		if(decoder->parallelDecoder->decoder_thread.pool.thread_count == 0)
		{
			CreateLock(&decoder->parallelDecoder->decoder_thread.lock);

			// Initialize the pool of transform worker threads
			ThreadPoolCreate(&decoder->parallelDecoder->decoder_thread.pool,
							 1, //
							 ParallelThreadProc,
							 decoder->parallelDecoder);
		}
	}

	if(channel_decodes == 2 && decoder->real_channels > 1 &&
	   decoder->parallelDecoder && decoder->parallelDecoder->decoder_thread.pool.thread_count)
	{
		// -----------------------------------------------------------------
		// Threaded path: decode the second (right-eye) stream on the
		// parallel decoder's worker thread while this thread decodes the
		// first channel.
		// -----------------------------------------------------------------

		// Second stream as a thread.
		BITSTREAM second_input = *input;

		if(decoder->cfhddata.FramingFlags & 2 && decoder->source_channels >= 2) // channel swap
		{
			BITSTREAM leftEye_input = *input;
			SkipVideoChannel(decoder, &leftEye_input, 2); // 3D work
			*input = leftEye_input;
			SkipVideoChannel(decoder, &second_input, 1); // 3D work
		}
		else
			SkipVideoChannel(decoder, &second_input, 2); // 3D work

		decoder->channel_current = 0;
		decoder->parallelDecoder->channel_current = 1;

		// Instead of reading the metadata databases again, use the ones in the main decoder
		OverrideCFHDDATAUsingParent(decoder->parallelDecoder, decoder, input->lpCurrentBuffer, input->nWordsUsed);
		// DAN20110404 Use left (first) eye metadata for both eyes (just in case right GUID is bad.)
		// OverrideCFHDDATA(decoder->parallelDecoder, input->lpCurrentBuffer, input->nWordsUsed);
		//OverrideCFHDDATA(decoder->parallelDecoder, second_input.lpCurrentWord, second_input.nWordsUsed);

		// Hack, this gets lost
		decoder->parallelDecoder->cfhddata.split_CC_position = decoder->cfhddata.split_CC_position;

#if (_THREADED && _GRAPHICS)
		if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output)
		{
			if(decoder->cfhddata.BurninFlags & 3) // overlays / tools
			{
				DrawStartThreaded(decoder);
			}
		}
#endif

		// Post a message to the mailbox
		decoder->parallelDecoder->decoder_thread.input = &second_input;
		if(use_local_buffer == false && (decoder->frame.format == DECODED_FORMAT_RGB32 || decoder->frame.format == DECODED_FORMAT_RGB24))
		{
			decoder->parallelDecoder->decoder_thread.output = local_output;
			local_output += channel_offset;
		}
		else
		{
			decoder->parallelDecoder->decoder_thread.output = local_output + channel_offset;
		}
		decoder->parallelDecoder->decoder_thread.pitch = local_pitch;
		decoder->parallelDecoder->decoder_thread.colorparams = colorparams;

		// Set the work count to the number of rows to process
		ThreadPoolSetWorkCount(&decoder->parallelDecoder->decoder_thread.pool, 1);

		// Start the transform worker threads
		ThreadPoolSendMessage(&decoder->parallelDecoder->decoder_thread.pool, THREAD_MESSAGE_START);

		// do the first channel
		{
			TAGVALUE segment;
			int sample_type;

#if _THREADED
			decoder->entropy_worker_new.next_queue_num = 0;
			decoder->entropy_worker_new.threads_used = 0;
#endif

			// Get the type of sample
			segment = GetTagValue(input);
			//assert(segment.tuple.tag == CODEC_TAG_SAMPLE);
			if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE))
			{
				decoder->error = CODEC_ERROR_BITSTREAM;
				STOP(tk_decompress);
				return false;
			}
			sample_type = segment.tuple.value;

			switch (sample_type)
			{
			case SAMPLE_TYPE_GROUP:		// Group of frames (decode the first frame)
				result = DecodeSampleGroup(decoder, input, local_output, local_pitch, colorparams);
				break;

			case SAMPLE_TYPE_FRAME:		// Decode the second or later frame in a group
				result = DecodeSampleFrame(decoder, input, local_output, local_pitch, colorparams);
				break;

			case SAMPLE_TYPE_IFRAME:	// Decode a sample that represents an isolated frame
				result = DecodeSampleIntraFrame(decoder, input, local_output, local_pitch, colorparams);
				break;

			case SAMPLE_TYPE_SEQUENCE_HEADER:
				// The video sequence header is ignored
				result = true;
				break;

			default:
				// Need to fill the output frame
				//error = CODEC_ERROR_SAMPLE_TYPE;
				result = false;
			}
		}

		// Wait for all of the worker threads to finish
		ThreadPoolWaitAllDone(&decoder->parallelDecoder->decoder_thread.pool);
	}
	else
	{
		// -----------------------------------------------------------------
		// Serial path: decode each requested channel in turn, switching to
		// the parallel decoder's state (if any) for the second channel.
		// -----------------------------------------------------------------
		while(channel_decodes > 0)
		{
			TAGVALUE segment;
			int sample_type;

			local_decoder->channel_current = channel_current++;

			//OverrideCFHDDATA(local_decoder, input->lpCurrentBuffer, input->nWordsUsed);

#if (_THREADED && _GRAPHICS)
			if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output)
			{
				if(decoder->cfhddata.BurninFlags & 3) //overlays / tools
				{
					DrawStartThreaded(decoder);
				}
			}
#endif

#if _THREADED
			local_decoder->entropy_worker_new.next_queue_num = 0;
			local_decoder->entropy_worker_new.threads_used = 0;
#endif

			if(decoder->image_dev_only)
			{
				result = DecodeSampleIntraFrame(local_decoder, input, local_output, local_pitch, colorparams);
			}
			else
			{
				// Get the type of sample
				segment = GetTagValue(input);
				//assert(segment.tuple.tag == CODEC_TAG_SAMPLE);
				if (!IsValidSegment(input, segment, CODEC_TAG_SAMPLE))
				{
					local_decoder->error = CODEC_ERROR_BITSTREAM;
					STOP(tk_decompress);
					return false;
				}
				sample_type = segment.tuple.value;

				switch (sample_type)
				{
				case SAMPLE_TYPE_GROUP:		// Group of frames (decode the first frame)
					result = DecodeSampleGroup(local_decoder, input, local_output, local_pitch, colorparams);
					break;

				case SAMPLE_TYPE_FRAME:		// Decode the second or later frame in a group
					result = DecodeSampleFrame(local_decoder, input, local_output, local_pitch, colorparams);
					break;

				case SAMPLE_TYPE_IFRAME:	// Decode a sample that represents an isolated frame
					result = DecodeSampleIntraFrame(local_decoder, input, local_output, local_pitch, colorparams);
					break;

				case SAMPLE_TYPE_SEQUENCE_HEADER:
					// The video sequence header is ignored
					result = true;
					break;

				default:
					// Need to fill the output frame
					//error = CODEC_ERROR_SAMPLE_TYPE;
					result = false;
				}
			}

			if(ConvertPreformatted3D(decoder, use_local_buffer, internal_format, channel_mask, local_output, local_pitch, &channel_offset))
			{
				// Sample carried a preformatted 3D frame; no second decode needed.
				channel_decodes = 0;
			}
			else
			{
				channel_decodes--;
				local_output += channel_offset;
				if(decoder->parallelDecoder)
				{
					local_decoder = decoder->parallelDecoder;
				}
			}
		}
	}

	if(use_local_buffer && output)
	{
		// Convert the intermediate stereo buffer into the caller's format.
		decoder->use_local_buffer = 0;
#if WARPSTUFF
		WarpFrame(decoder, local_buffer, local_pitch, decoder->StereoBufferFormat);
		MaskFrame(decoder, local_buffer, local_pitch, decoder->StereoBufferFormat);
#endif
		ConvertLocalToOutput(decoder, output, pitch, output_format, local_buffer, local_pitch, abs(channel_offset));
	}
	else
	{
#if WARPSTUFF
		WarpFrame(decoder, output, pitch, output_format);
		MaskFrame(decoder, output, pitch, output_format);
#endif
	}

	// Undo the temporary half-resolution adjustments made above so the
	// decoder's frame state is back to the caller-visible dimensions.
	if(decoder->channel_mix_half_res) //HACK
	{
		decoder->frame.resolution = DECODED_RESOLUTION_FULL;
		decoder->frame.width *= 2;
		decoder->frame.height *= 2;
		decoder->channel_mix_half_res = 0;
	}
	if( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) //HACK
	{
		decoder->frame.resolution = DECODED_RESOLUTION_FULL;
		decoder->frame.width *= 2;
	}
	if( decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) //HACK
	{
		decoder->frame.resolution = DECODED_RESOLUTION_FULL;
	}

#if _GRAPHICS
	if(decoder->cfhddata.process_path_flags & PROCESSING_BURNINS && output)
	{
		PaintFrame(decoder, output, pitch, output_format);
	}
#endif

	STOP(tk_decompress);

	// Return indication of whether decoding succeeded or failed
	return result;
}


// Decode a sample that encoded a group of frames (return the first frame)
bool DecodeSampleGroup(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
	CODEC_ERROR error
= CODEC_ERROR_OKAY;

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int32_t frame_size = decoder->frame.height * pitch;
	int resolution = decoder->frame.resolution;
	bool result = true;

	// Tables that map each subband number to the wavelet that holds it and to
	// the band index within that wavelet (group/fieldplus transform layout)
	static int subband_wavelet_index[] = {5, 5, 5, 5, 4, 4, 4, 3, 3, 3, 3, 1, 1, 1, 0, 0, 0};
	static int subband_band_index[] = {0, 1, 2, 3, 1, 2, 3, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3};
	int num_subbands = sizeof(subband_wavelet_index)/sizeof(subband_wavelet_index[0]);

#if (0 && DEBUG)
	// Force quarter resolution decoding for debug that feature
	resolution = DECODED_RESOLUTION_QUARTER;
#endif

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Decoding sample group\n");
	}
#endif

	START(tk_decoding);

	// Initialize the codec state
	InitCodecState(&decoder->codec);

	// Allocate the transform data structure for the group of frames
	AllocDecoderGroup(decoder);

	// Initialize the tables for decoding the wavelet transforms
	InitWaveletDecoding(decoder, subband_wavelet_index, subband_band_index, num_subbands);

	// Clear the flags in the wavelet transforms
	ClearTransformFlags(decoder);

	// Process the tag value pairs until an encoded subband is found
	for (;;)
	{
		TAGVALUE segment;

		// Read the next tag value pair from the bitstream
		//segment = GetTagValue(input);
		segment = GetSegment(input);
		assert(input->error == BITSTREAM_ERROR_OKAY);
		if (input->error != BITSTREAM_ERROR_OKAY) {
			decoder->error = CODEC_ERROR_BITSTREAM;
			result = false;
			break;
		}

		// Update the codec state with the information in the tag value pair
		{
			TAGWORD tag = segment.tuple.tag;
			TAGWORD value = segment.tuple.value;

			// Use the tag value pair to update the codec state
			error = UpdateCodecState(decoder, input, codec, tag, value);
			//assert(error == CODEC_ERROR_OKAY);
			if (error != CODEC_ERROR_OKAY) {
				decoder->error = error;
				result = false;
				break;
				//NOTE: Consider moving the error code into the codec state
			}
		}

		// Check whether the group has been decoded
		if (codec->sample_done) break;

		// Skip the rest of the current channel?
		if (CanSkipChannel(decoder, resolution))
		{
			if(codec->channel == 3 &&
				(decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY))
			{
				int channel = codec->channel;
				uint32_t channel_size = codec->channel_size[channel];
				uint8_t *position = codec->channel_position + channel_size;

				// Advance the bitstream to the next channel
				SetBitstreamPosition(input, position);

				// Reset the decoded subband flags (otherwise this code will be executed again)
				codec->decoded_subband_flags = 0;

				// YUYV/UYVY output does not need the fourth (alpha) channel
				codec->num_channels = 3;

				goto decoding_complete;
			}
			else if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
			{
				int channel = codec->channel;
				uint32_t channel_size = codec->channel_size[channel];
				uint8_t *position = codec->channel_position + channel_size;

				// Advance the bitstream to the next channel
				SetBitstreamPosition(input, position);

				// Reset the decoded subband flags (otherwise this code will be executed again)
				codec->decoded_subband_flags = 0;
			}
			else
			{
				// Compute the bitstream position after the current channel
				int channel = codec->channel;
				uint32_t channel_size = codec->channel_size[channel];
				uint8_t* position = codec->channel_position + channel_size;

				// Get the temporal wavelet
				int temporal_index = 2;
				TRANSFORM* transform = decoder->transform[channel];
				IMAGE* wavelet = transform->wavelet[temporal_index];
				if (wavelet == NULL) {
					decoder->error = CODEC_ERROR_BAD_FRAME;
					result = false;
					break;
				}

#if (0 && DEBUG)
				if (IsBandValid(wavelet, HIGHPASS_BAND))
				{
					int static count = 0;
					if (count < 20)
					{
						char label[_MAX_PATH];
						sprintf(label, "Temporal-decode-%d-", count);
						DumpBandPGM(label, wavelet, HIGHPASS_BAND, NULL);
					}
					count++;
				}
#endif

#if _THREADED_DECODER
				// Ready to invert this wavelet to get the lowpass band in the lower wavelet?
				//if (DecodedBandsValid(wavelet, temporal_index))
				if (resolution != DECODED_RESOLUTION_QUARTER ||
					(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER))
#else
				// Have all bands in the temporal wavelet been decoded?
				//if (wavelet && BANDS_ALL_VALID(wavelet))
				if (AllBandsValid(wavelet))
#endif
				{
					//PIXEL *buffer = (PIXEL *)decoder->buffer;
					//size_t buffer_size = decoder->buffer_size;
					int precision = codec->precision;

#if (0 && DEBUG)
					if (logfile) {
						fprintf(logfile, "Reconstructing the lowpass bands in the first level wavelets\n");
					}
#endif

#if _THREADED_DECODER
					// Add the temporal inverse transform to the processing queue
					if(decoder->entropy_worker_new.pool.thread_count)
					{
						ReconstructWaveletBand(decoder, transform, channel, wavelet, temporal_index,
							precision, &decoder->scratch, 1);
						QueueThreadedTransform(decoder, channel, temporal_index);
					}
					else
#endif
					{
						// Reconstruct the lowpass bands in the first level wavelets
						//ReconstructWaveletBand(transform, channel, wavelet, temporal_index, precision, buffer, buffer_size);
						ReconstructWaveletBand(decoder, transform, channel, wavelet, temporal_index,
							precision, &decoder->scratch, 0 );
					}

					// Advance the bitstream to the next channel
					SetBitstreamPosition(input, position);

					// Reset the decoded subband flags (otherwise this code will be executed again)
					codec->decoded_subband_flags = 0;

					// Note that the subband flags are also reset when the channel header is decoded
				}

				// Was the wavelet created?
				// NOTE(review): unreachable — wavelet == NULL already breaks out of
				// the loop in the check above, so this branch can never be taken
				else if (wavelet == NULL)
				{
					// The temporal wavelet is not created during quarter resolution decoding

					// Advance the bitstream to the next channel
					SetBitstreamPosition(input, position);

					// Reset the decoded subband flags (otherwise this code will be executed again)
					codec->decoded_subband_flags = 0;
				}
				//TODO: Improve quarter resolution decoding so that the wavelet is created?
			}
		}
	}

decoding_complete:
	STOP(tk_decoding);

#if (0 && DEBUG)
	if (logfile)
	{
		char label[_MAX_PATH];
		int channel;

		for (channel = 0; channel < codec->num_channels; channel++)
		{
			TRANSFORM *transform = decoder->transform[channel];
			IMAGE *wavelet = transform->wavelet[2];
			uint8_t *data = (uint8_t *)wavelet->band[HIGHPASS_BAND];
			int height = wavelet->height;
			int pitch = wavelet->pitch;
			int size = height * pitch;
			int band;

			for (band = 0; band < wavelet->num_bands; band++)
			{
				sprintf(label, "Temporal channel: %d, band: %d", channel, band);
				DumpBandStatistics(label, wavelet, band, logfile);
#if 0
				sprintf(label, "Temporal-channel%d-band%d-", channel, band);
				DumpBandPGM(label, wavelet, band, NULL);
#endif
			}

			assert(size > 0);
			ZeroMemory(data, size);
		}
	}
#endif

	if (result)
	{
		// Two frames have been decoded
		decoder->gop_length = 2;
		decoder->frame_count += 2;

#if (1 && DEBUG)
		if (logfile) {
			fprintf(logfile, "DecodeSampleGroup, decoder: 0x%p, GOP length: %d\n",
				decoder, decoder->gop_length);
		}
#endif

		// Return the first frame in the group
		if (!decoder->no_output)
		{
#if 0
			// Decoding to quarter frame resolution at full frame rate?
			if (resolution == DECODED_RESOLUTION_QUARTER)
			{
				int num_channels = codec->num_channels;
				FRAME_INFO *info = &decoder->frame;
				char *buffer = decoder->buffer;
				size_t buffer_size = decoder->buffer_size;
				uint8_t *frame1 = output;
				uint8_t *frame2 = decoder->output2;
				assert(frame2 != NULL);

				// Reconstruct two frames at quarter resolution
				ReconstructQuarterFrame(decoder, num_channels, frame1, frame2,
					pitch, info, buffer, buffer_size);
			}
			else
#endif
			// Finish computing the output frame
			ReconstructSampleFrameToBuffer(decoder, 0, output, pitch);
		}

		if (decoder->error != CODEC_ERROR_OKAY) {
			result = false;
		}

#if TIMING
		// Increment the count of bytes that have been decoded
		decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
	}

	if (!result)
	{
		// Check that the frame can be cleared
		assert(frame_size > 0);
		if (frame_size > 0) {
			// Zero the frame
			memset(output, 0, frame_size);
		}
	}

	return result;
}

// Decode a sample that represents the second frame in a group.
// Reads tag value pairs up to the end of the frame header (CODEC_TAG_FRAME_INDEX),
// then reconstructs the second frame in the group, falling back to the first
// frame when the group holds only one frame.
bool DecodeSampleFrame(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int32_t frame_size = decoder->frame.height * pitch;
	bool result = true;

	START(tk_decoding);

	// Decode the tag value pairs in the frame sample
	for (;;)
	{
		TAGWORD tag;
		TAGWORD value;

		// Read the next tag value pair from the bitstream
		//TAGVALUE segment = GetTagValue(input);
		TAGVALUE segment = GetSegment(input);
		//assert(input->error == BITSTREAM_ERROR_OKAY);
		if (input->error != BITSTREAM_ERROR_OKAY) {
			decoder->error = CODEC_ERROR_BITSTREAM;
			result = false;
			break;
		}

		// Update the codec state with the information in the tag value pair
		tag = segment.tuple.tag;
		value = segment.tuple.value;

		// Use the tag value pair to update the codec state
		error = UpdateCodecState(decoder, input, codec, tag, value);
		assert(error == CODEC_ERROR_OKAY);
		if (error != CODEC_ERROR_OKAY) {
			decoder->error = error;
			result = false;
			break;
		}

		// End of the frame header?
		if (tag == CODEC_TAG_FRAME_INDEX) break;
	}

	STOP(tk_decoding);

#if (1 && DEBUG)
	if (logfile) {
		fprintf(logfile, "DecodeSampleFrame, decoder: 0x%p, GOP length: %d\n",
			decoder, decoder->gop_length);
	}
#endif

	if (result)
	{
		// Return the second frame in the group
		// assert(decoder->gop_length >= 2);
		if (decoder->gop_length >= 2)
		{
			int frame_index = 1;

			// Display the second frame in the group
			ReconstructSampleFrameToBuffer(decoder, frame_index, output, pitch);
			if (decoder->error != CODEC_ERROR_OKAY) {
				result = false;
			}
		}
		else if (decoder->gop_length > 0)
		{
			int frame_index = 0;

			// Display the first frame in the group
			ReconstructSampleFrameToBuffer(decoder, frame_index, output, pitch);
			if (decoder->error != CODEC_ERROR_OKAY) {
				result = false;
			}
		}

#if TIMING
		// Increment the count of bytes that have been decoded
		decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
	}

	if (!result)
	{
		// Frame type that is not handled

		// Check that the frame can be cleared
		assert(frame_size > 0);
		if (frame_size > 0) {
			// Zero the frame
			memset(output, 0, frame_size);
		}
	}

	return result;
}

// Decode a sample that encodes an intra frame
bool DecodeSampleIntraFrame(DECODER *decoder, BITSTREAM *input, uint8_t *output, int pitch, ColorParam *colorparams)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int32_t frame_size = decoder->frame.height * pitch;
	int resolution = decoder->frame.resolution;
	bool result = true;
	int skipchan = 0;

	// Tables that map each subband number to the wavelet that holds it and to
	// the band index within that wavelet (intra/spatial transform layout)
	static int subband_wavelet_index[] = {2, 2, 2, 2, 1, 1, 1, 0, 0, 0};
	static int subband_band_index[] = {0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3};
	int num_subbands = sizeof(subband_wavelet_index)/sizeof(subband_wavelet_index[0]);

	START(tk_decoding);

	if(decoder->image_dev_only)
		goto decoding_completeI;

	// Initialize the codec state
	InitCodecState(&decoder->codec);

	// Allocate the transform data
	// structure for the group of frames
	AllocDecoderGroup(decoder);

	// Initialize the tables for decoding the wavelet transforms
	InitWaveletDecoding(decoder, subband_wavelet_index, subband_band_index, num_subbands);

	// Clear the flags in the wavelet transforms
	ClearTransformFlags(decoder);

	//Force V210 output for debugging ***DEBUG***
	//decoder->frame.format = DECODED_FORMAT_V210;

	// Process the tag value pairs until an encoded subband is found
	for (;;)
	{
		TAGVALUE segment;

		// Read the next tag value pair from the bitstream
		segment = GetSegment(input);
		//assert(input->error == BITSTREAM_ERROR_OKAY);
		if (input->error != BITSTREAM_ERROR_OKAY) {
			decoder->error = CODEC_ERROR_BITSTREAM;
			result = false;
			break;
		}

		// Update the codec state with the information in the tag value pair
		{
			TAGWORD tag = segment.tuple.tag;
			TAGWORD value = segment.tuple.value;

			// Use the tag value pair to update the codec state
			error = UpdateCodecState(decoder, input, codec, tag, value);
			//assert(error == CODEC_ERROR_OKAY);
			if (error != CODEC_ERROR_OKAY) {
				decoder->error = error;
				result = false;
				break;
				//NOTE: Consider moving the error code into the codec state
			}
		}

		// Check whether the group has been decoded
		if (codec->sample_done) {
			break;
		}

		// Skip the rest of the current channel?
		if (CanSkipChannel(decoder, resolution))
		{
			// Guard against a corrupted sample that keeps skipping without progress
			skipchan++;
			if(skipchan > 5)
			{
				decoder->error = CODEC_ERROR_BAD_FRAME;
				result = false;
				break;
			}

			if(codec->channel == 3 &&
				(decoder->frame.format == DECODED_FORMAT_YUYV || decoder->frame.format == DECODED_FORMAT_UYVY))
			{
				int channel = codec->channel;
				uint32_t channel_size = codec->channel_size[channel];
				uint8_t *position = codec->channel_position + channel_size;

				// Advance the bitstream to the next channel
				SetBitstreamPosition(input, position);

				// Reset the decoded subband flags (otherwise this code will be executed again)
				codec->decoded_subband_flags = 0;

				// YUYV/UYVY output does not need the fourth (alpha) channel
				codec->num_channels = 3;

				goto decoding_completeI;
			}
			else if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY)
			{
				int channel = codec->channel;
				uint32_t channel_size = codec->channel_size[channel];
				uint8_t *position = codec->channel_position + channel_size;

				// Advance the bitstream to the next channel
				SetBitstreamPosition(input, position);

				// Reset the decoded subband flags (otherwise this code will be executed again)
				codec->decoded_subband_flags = 0;
			}
			else
			{
				// Compute the bitstream position after the current channel
				int channel = codec->channel;
				uint32_t channel_size = codec->channel_size[channel];
				if (channel_size == 0)
				{
					decoder->error = CODEC_ERROR_BAD_FRAME;
					result = false;
					break;
				}
				uint8_t *position = codec->channel_position + channel_size;

				// Get the highest wavelet in the pyramid
				int wavelet_index = 2;
				TRANSFORM *transform = decoder->transform[channel];
				IMAGE *wavelet = transform->wavelet[wavelet_index];
				if (wavelet == NULL)
				{
					decoder->error = CODEC_ERROR_BAD_FRAME;
					result = false;
					break;
				}

#if _THREADED_DECODER
				// Ready to invert this wavelet to get the lowpass band in the lower wavelet?
				//if (DecodedBandsValid(wavelet, temporal_index))
				if (resolution != DECODED_RESOLUTION_QUARTER ||
					(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER))
#else
				// Have all bands in the wavelet been decoded?
				if (AllBandsValid(wavelet))
#endif
				{
					//PIXEL *buffer = (PIXEL *)decoder->buffer;
					//size_t buffer_size = decoder->buffer_size;
					int precision = codec->precision;

#if (0 && DEBUG)
					if (logfile)
					{
						char label[_MAX_PATH];
						int band;

						sprintf(label, "Channel: %d, index: %d", channel, wavelet_index);
						DumpImageStatistics(label, wavelet, logfile);
#if 1
						for (band = 1; band < wavelet->num_bands; band++)
						{
							sprintf(label, "Channel: %d, index: %d, band: %d", channel, wavelet_index, band);
							DumpBandStatistics(label, wavelet, band, logfile);
						}
#endif
					}
#endif

// NOTE(review): '0 & DEBUG' uses bitwise '&' (likely a typo for '&&');
// either way the expression is 0, so the block stays disabled
#if (0 & DEBUG)
					if (logfile) {
						fprintf(logfile, "Reconstructing the lowpass bands in the first level wavelets\n");
					}
#endif

#if _THREADED_DECODER
					// Add the inverse spatial transform to the processing queue
					if(decoder->entropy_worker_new.pool.thread_count)
					{
						ReconstructWaveletBand(decoder, transform, channel, wavelet, wavelet_index,
							precision, &decoder->scratch, 1);
						QueueThreadedTransform(decoder, channel, wavelet_index);
					}
					else
#endif
					{
						// Reconstruct the lowpass bands in the first level wavelets
						//ReconstructWaveletBand(transform, channel, wavelet, temporal_index, precision, buffer, buffer_size);
						ReconstructWaveletBand(decoder, transform, channel, wavelet, wavelet_index,
							precision, &decoder->scratch, 0);
					}

					// Advance the bitstream to the next channel
					SetBitstreamPosition(input, position);

					// Reset the decoded subband flags (otherwise this code will be executed again)
					codec->decoded_subband_flags = 0;

					// Note that the subband flags are also reset when the channel header is decoded
				}

				// Was the wavelet created?
				//else if (wavelet == NULL)
				else
				{
					// The wavelet may not have been created during quarter resolution decoding

					// The wavelet should have been created if all bands are valid
					assert(wavelet != NULL);

					// Advance the bitstream to the next channel
					SetBitstreamPosition(input, position);

					// Reset the decoded subband flags (otherwise this code will be executed again)
					codec->decoded_subband_flags = 0;
				}
				//TODO: Improve quarter resolution decoding so that the wavelet is created?
			}
		}
	}

decoding_completeI:
	STOP(tk_decoding);

	if (result)
	{
		// One frame has been decoded
		decoder->gop_length = 1;
		decoder->frame_count += 1;

#if (0 && DEBUG)
		if (logfile) {
			fprintf(logfile, "DecodeSampleIntraFrame, decoder: 0x%p, GOP length: %d\n",
				decoder, decoder->gop_length);
		}
#endif

		// Return the first frame (the only frame that was decoded)
		if (!decoder->no_output)
		{
			int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed;

			if ( !uncompressed &&
				resolution == DECODED_RESOLUTION_QUARTER &&
				(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
			{
				//CODEC_STATE *codec = &decoder->codec;
				TRANSFORM **transform_array = decoder->transform;
				int num_channels = codec->num_channels;
				//int progressive = codec->progressive;
				FRAME_INFO *info = &decoder->frame;
				int precision = codec->precision;

#if _THREADED_DECODER
				// Wait until the transform thread has finished all pending transforms
				WaitForTransformThread(decoder);
#endif

				ConvertQuarterFrameToBuffer(decoder, transform_array, num_channels,
					output, pitch, info, precision);
			}
			else
			{
				// Finish computing the output frame
				ReconstructSampleFrameToBuffer(decoder, 0, output, pitch);
			}
		}

		if (decoder->error != CODEC_ERROR_OKAY) {
			result = false;
		}

#if TIMING
		// Increment the count of bytes that have been decoded
		decode_byte_count += (COUNTER)BitstreamByteCount(input);
#endif
	}

	if (!result)
	{
		// Check that the frame can be cleared
		assert(frame_size > 0);
		if (frame_size > 0) {
			// Zero the frame
			memset(output, 0,
			frame_size);
		}
	}

	return result;
}

// Decode a sample channel header.
// Advances the codec state to the next channel, validates the channel number
// from the bitstream, and carries transform settings over from the previous
// channel's transform.
bool DecodeSampleChannelHeader(DECODER *decoder, BITSTREAM *input)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_ERROR error = CODEC_ERROR_OKAY;
	CODEC_STATE *codec = &decoder->codec;
	int channel = codec->channel;
	CHANNEL_HEADER header;
	TRANSFORM *transform = decoder->transform[channel];
	TRANSFORM *next_transform;

	// Advance to the next channel
	channel++;

	// Get the next transform for decoded information
	//TRANSFORM *next_transform = AllocGroupTransform(group, channel);

	// Decode the rest of the channel header
	error = DecodeChannelHeader(input, &header, SAMPLE_TYPE_CHANNEL);
	//assert(error == CODEC_ERROR_OKAY);
	decoder->error = error;
	if (error != CODEC_ERROR_OKAY) return false;

	// The decoder is not able to skip channels
	//assert(header.channel == channel);
	if (header.channel != channel) {
		decoder->error = CODEC_ERROR_BAD_FRAME;
		return false;
	}

	// Initialize the next transform using the previous one
	next_transform = decoder->transform[channel];
	InitChannelTransform(next_transform, transform);

	// Update the channel
	codec->channel = channel;

	// Reset the subband counter
	codec->band.subband = 0;

	// Reset the decoded subband flags
	codec->decoded_subband_flags = 0;

	// Loop back to decode the next channel
	//transform = next_transform;

	return true;
}

// Decode the coefficients in a subband.
// Dispatches on the subband number: 255 is an empty (temporal highpass) band,
// 1..CODEC_MAX_SUBBANDS-1 are highpass bands, and 0 is the lowpass band.
// After a successful decode, queues or applies the inverse wavelet transform
// once all bands of the containing wavelet are available.
bool DecodeSampleSubband(DECODER *decoder, BITSTREAM *input, int subband)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int channel = codec->channel;
	TRANSFORM *transform = decoder->transform[channel];
	int *subband_wavelet_index = decoder->subband_wavelet_index;

	// Used for quarter resolution and threaded decoding
	int transform_type = transform->type;

	// Wavelet parameters
	int width;
	int height;
	int level;
	int type;
	int band;
	int threading = 1;

	// Wavelet containing the band to decode
	int index;
	IMAGE *wavelet = NULL;

	bool result;

	// Entropy threading is disabled for subbands 7-10 of the fieldplus transform
	if(subband >= 7 && subband <= 10 && transform_type == TRANSFORM_TYPE_FIELDPLUS)
		threading = 0;

	// Update the transform data structure from the codec state
	UpdateCodecTransform(transform, codec);

	// Is this an empty band?
	if (subband == 255)
	{
		// Decode an empty band

		// This wavelet is the temporal wavelet
		index = 2;
		wavelet = transform->wavelet[index];

		// Get the wavelet parameters decoded from the bitstream
		width = codec->band.width;
		height = codec->band.height;
		level = codec->highpass.wavelet_level;
		type = codec->highpass.wavelet_type;
		band = codec->band.number;

		// The empty band should be the highpass band in a temporal wavelet
		//assert(type == WAVELET_TYPE_TEMPORAL && band == 1);
		if (!(type == WAVELET_TYPE_TEMPORAL && band == 1))
		{
			decoder->error = CODEC_ERROR_BAD_FRAME;
			return false;
		}

#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
		// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
		wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
		wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
		// Save this wavelet in the transform data structure
		transform->wavelet[index] = wavelet;
#endif

		// Set the wavelet parameters
		wavelet->pixel_type[band] = PIXEL_TYPE_16S;
		wavelet->num_bands = 2;

		result = DecodeSampleEmptyBand(decoder, input, wavelet, band);

		// Set the subband number for the next band expected in the bitstream
		codec->band.subband = 11;
	}
	// Is this a highpass band?
	else if (subband > 0 && subband < CODEC_MAX_SUBBANDS)
	{
		// Decode a highpass band

		// Get the wavelet that contains this subband
		index = subband_wavelet_index[subband];
		wavelet = transform->wavelet[index];

		// Get the wavelet parameters decoded from the bitstream
		width = codec->band.width;
		height = codec->band.height;
		level = codec->highpass.wavelet_level;
		type = codec->highpass.wavelet_type;
		band = codec->band.number;

#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
		// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
		wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
		wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
		// Save this wavelet in the transform data structure
		transform->wavelet[index] = wavelet;
#endif

		result = DecodeSampleHighPassBand(decoder, input, wavelet, band, threading);
		if (result) {
			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandStartedFlags(decoder, wavelet, band);
		}

		// Reset the default encoding method
		codec->band.encoding = BAND_ENCODING_RUNLENGTHS;

		// Set the subband number for the next band expected in the bitstream
		codec->band.subband = subband + 1;
	}
	else
	{
		// Decode a lowpass band

		// Get the wavelet that contains this subband
		index = subband_wavelet_index[0];
		wavelet = transform->wavelet[index];

		// Get the wavelet parameters decoded from the bitstream
		width = codec->lowpass.width;
		height = codec->lowpass.height;
		level = codec->lowpass.level;
		type = codec->first_wavelet;
		//band = codec->band.number;
		band = 0;

#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		wavelet = GetWaveletThreadSafe(decoder, transform, index, width, height, level, type);
#else
		// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
		wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
		wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
		// Save this wavelet in the transform data structure
		transform->wavelet[index] = wavelet;
#endif

		// The lowpass data is always stored in wavelet band zero
		assert(band == 0);

		// The lowpass band must be subband zero
		assert(subband == 0);

		result = DecodeSampleLowPassBand(decoder, input, wavelet);
		if (result) {
			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, wavelet, band);
		}

		// Set the subband number for the next band expected in the bitstream
		codec->band.subband = subband + 1;
	}

	// Was the subband successfully decoded?
	if (result)
	{
		// The transform will set the band valid flag if this is the temporal wavelet
		//if (index != 2)

		// Record that this subband has been decoded successfully
		if (0 <= subband && subband <= CODEC_MAX_SUBBAND)
			codec->decoded_subband_flags |= DECODED_SUBBAND_MASK(subband);

#if (0 && DEBUG)
		if (logfile) {
			fprintf(logfile, "Decoded subband: %d, wavelet: %d, channel: %d\n",
				subband, index, channel);
		}
#endif
	}

#if _THREADED_DECODER
	// Ready to queue a threaded transform to invert this wavelet?
	if (BANDS_ALL_STARTED(wavelet))
	{
		// Are frames being decoded to quarter resolution?
		if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER &&
			(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
		{
			// Smallest spatial wavelet above the lowpass temporal band (fieldplus transform)
			int highest_index = 5;

			if (transform_type == TRANSFORM_TYPE_SPATIAL)
			{
				// Smallest wavelet in the spatial transform
				highest_index = 2;
			}

			// Only the smallest spatial wavelet must be reconstructed
			if (index != highest_index) {
				return result;
			}

			//TODO: Can we improve on the current scheme for quarter resolution decoding?
		}

		if ((transform->type == TRANSFORM_TYPE_SPATIAL && index > 0) || index >= 2)
		{
			if(decoder->entropy_worker_new.pool.thread_count && threading)
			{
				ReconstructWaveletBand(decoder, transform, codec->channel, wavelet, index,
					codec->precision, &decoder->scratch, 1);

				// Add the inverse wavelet transform to the processing queue
				QueueThreadedTransform(decoder, codec->channel, index);
			}
			else
			{
				// Apply the inverse wavelet transform to reconstruct the lower level wavelet
				ReconstructWaveletBand(decoder, transform, codec->channel, wavelet, index,
					codec->precision, &decoder->scratch, 0);
			}
		}
	}
#else
	// Ready to invert this wavelet to get the lowpass band in the lower wavelet?
	if (BANDS_ALL_VALID(wavelet))
	{
		int channel = codec->channel;
		//PIXEL *buffer = (PIXEL *)decoder->buffer;
		//size_t buffer_size = decoder->buffer_size;
		int precision = codec->precision;

#if (0 && DEBUG)
		if (logfile)
		{
			char label[_MAX_PATH];
			int band;

			sprintf(label, "Channel: %d, index: %d", channel, index);
			DumpImageStatistics(label, wavelet, logfile);
#if 1
			for (band = 1; band < wavelet->num_bands; band++)
			{
				sprintf(label, "Channel: %d, index: %d, band: %d", channel, index, band);
				DumpBandStatistics(label, wavelet, band, logfile);
			}
#endif
		}
#endif

		// Are frames being decoded to quarter resolution?
		if (decoder->frame.resolution == DECODED_RESOLUTION_QUARTER &&
			(decoder->codec.encoded_format != ENCODED_FORMAT_BAYER))
		{
			// Smallest spatial wavelet above the lowpass temporal band (fieldplus transform)
			int highest_index = 5;

			if (transform_type == TRANSFORM_TYPE_SPATIAL)
			{
				// Smallest wavelet in the spatial transform
				highest_index = 2;
			}

			// Only the smallest spatial wavelet must be reconstructed
			if (index != highest_index) {
				return result;
			}

			//TODO: Can we improve on the current scheme for quarter resolution decoding?
		}

		// Apply the inverse wavelet transform to reconstruct the lower level wavelet
		ReconstructWaveletBand(decoder, transform, channel, wavelet, index,
			precision, &decoder->scratch, 0);
	}
#endif

	return result;
}

// Decode the coefficients in a lowpass band.
// Three decode paths: a fast path for packed 16-bit coefficients (including a
// solid-color shortcut), a packed 8-bit path, and a general GetBits path for
// other bit depths. The per-precision channel offsets compensate for rounding
// introduced by the encoder.
bool DecodeSampleLowPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int channel = codec->channel;
	bool result = true;

	int lowpass_width;		// Lowpass band dimensions
	int lowpass_height;
	int lowpass_pitch;
	PIXEL *pLowPassRow;		// Pointer into the lowpass band

	//int wavelet_width;	// Dimensions of the wavelet image
	//int wavelet_height;

	int bits_per_pixel;
	int quantization;
	int offset;
	//int pixel_divisor = (1 << (2 * codec->lowpass.level));
	int row, column;

	int32_t solid_color = -1;

	const int gain = 128;
	const int colorshift = 0;
	// int channelgain[4];

	//int waterrow=19, watercol=214;
	//int cspace = decoder->frame.colorspace;

	// Lowpass image dimensions may be smaller than the wavelet dimensions
	// because the encoder may have transmitted an image without the border
	lowpass_width = codec->lowpass.width;
	lowpass_height = codec->lowpass.height;
	lowpass_pitch = wavelet->pitch/sizeof(PIXEL);
	pLowPassRow = wavelet->band[0];

	// Get the parameters for quantization performed by the encoder
	quantization = codec->lowpass.quantization;
	offset = codec->lowpass.pixel_offset;
	bits_per_pixel = codec->lowpass.bits_per_pixel;

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Decode lowpass subband\n");
	}
#endif

	if (bits_per_pixel == 16 && stream->nBitsFree == BITSTREAM_BUFFER_SIZE && !(lowpass_width&1))
	{
		int32_t *lpCurrentLong = (int32_t *)stream->lpCurrentWord;
		//int signval = 0;
		//int channel3stats = 0;
		int channeloffset = 0;

		if(decoder->codec.precision == 8)
		{
			channeloffset = (codec->num_frames==2 ? 64 : 32);
		}
		else if(decoder->codec.precision == 10)
		{
			switch(decoder->frame.format)
			{
			case DECODED_FORMAT_YU64:
			case DECODED_FORMAT_YR16:
			case DECODED_FORMAT_V210:
				channeloffset = codec->num_frames==2 ? 14 : 4;//DAN20090601, recal I-frame DAN20110301
				break;
			default:
				channeloffset = codec->num_frames==2 ? 48 : 24;//DAN20090601
			}

			if(decoder->sample_uncompressed) //DAN20110301 was testing the GOP length for this (why?)
				channeloffset = 0; //DAN20100822 -- Prevent offset between uncompressed V210 and compressed frames
		}
		else if(decoder->codec.precision == 12)
		{
			switch(decoder->frame.format)
			{
			case DECODED_FORMAT_RGB24:
			case DECODED_FORMAT_RGB24_INVERTED:
			case DECODED_FORMAT_RGB32:
			case DECODED_FORMAT_RGB32_INVERTED:
				channeloffset = 8; //DAN200906010
				break;

			// 16-bit precision:
			case DECODED_FORMAT_RG48:
			case DECODED_FORMAT_RG64:
			case DECODED_FORMAT_B64A:
			case DECODED_FORMAT_WP13:
			case DECODED_FORMAT_W13A:
				channeloffset = 0;
				break;

			case DECODED_FORMAT_RG30:
			case DECODED_FORMAT_R210:
			case DECODED_FORMAT_DPX0:
			case DECODED_FORMAT_AR10:
			case DECODED_FORMAT_AB10:
				channeloffset = 6; //DAN200906010 //DAN20100822 -- prefect for uncompressed to compressed.
				break;

			default:
				channeloffset = 0;
				break;
			}
		}

		if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) //DAN20090728 -- Prevent offset between uncompressed and compressed RAW frames
			channeloffset = 0;

#define DUMPLL 0
#if (_DEBUG && DUMPLL)
		FILE *fp;
		if(channel == 0)
		{
			static int inc = 1;
			char name[256];
			sprintf(name,"C:\\Cedoc\\LLdec%03d.pgm", inc++);
			fp = fopen(name,"w");
			fprintf(fp, "P2\n# CREATOR: DAN\n%d %d\n255\n", lowpass_width, lowpass_height);
		}
#endif

#if LOSSLESS
		channeloffset = 0; //LOSSLESS
#endif

		// A sentinel word followed by the band dimensions marks a solid-color band
		//if(lpCurrentLong[0] == 0xffffffff)
		if(lpCurrentLong[0] == (int32_t)UINT32_MAX)
		{
			if(SwapInt32BtoN(lpCurrentLong[2]) == (uint32_t)lowpass_width)
			{
				if(SwapInt32BtoN(lpCurrentLong[3]) == (uint32_t)lowpass_height)
				{
					solid_color = SwapInt32BtoN(lpCurrentLong[1]);
					solid_color |= (solid_color<<16);
					lpCurrentLong += 4;
				}
			}
		}

		// Decode each row in the lowpass image
		for (row = 0; row < lowpass_height; row++)
		{
			int pixels;

			// Start at the first column
			column = 0;

			// Process the rest of the row
			{
				for (; column < lowpass_width; column++)
				{
					int pixel_value;
					//int i;

					// Perform inverse quantization
					// (each 32-bit word holds two 16-bit pixels: high half first)
					if(column & 1)
					{
						pixel_value = pixels;
					}
					else
					{
						//pixels = _bswap(*(lpCurrentLong++));
						if(solid_color == -1)
							pixels = SwapInt32BtoN(*(lpCurrentLong++));
						else
							pixels = solid_color;
						pixel_value = (pixels>>16);
						pixels <<= 16;
						pixels >>= 16;
					}

					// Store the pixel in the lowpass band of the wavelet
					pixel_value += channeloffset;
				//	pixel_value -= 64;
				//	pixel_value += ((rand() & 0x7fff) - 0x4000);
				//	if(pixel_value < 0) pixel_value = 0;
					if(pixel_value > 0x7fff) pixel_value = 0x7fff;
					pLowPassRow[column] = pixel_value;

#if (_DEBUG && DUMPLL)
					if(channel==0 && fp)
						fprintf(fp, "%d\n", pixel_value>>7);
#endif
				}
			}

			// Advance to the next row in the lowpass image
			pLowPassRow += lowpass_pitch;
		}

#if (_DEBUG && DUMPLL)
		if(channel == 0 && fp)
			fclose(fp);
#endif

#if ERROR_TOLERANT
		// Update the count of bytes used
		stream->nWordsUsed -= (int)(((intptr_t)lpCurrentLong - (intptr_t)stream->lpCurrentWord));
#endif
		// Update the bitstream
		stream->lpCurrentWord = (uint8_t *)lpCurrentLong;
	}
	else if (bits_per_pixel == 8 && stream->nBitsFree == BITSTREAM_BUFFER_SIZE)
	{
		uint8_t *lpCurrentByte = (uint8_t *)stream->lpCurrentWord;
		//int signval = 0;

		// Decode each row in the lowpass image
		for (row = 0; row < lowpass_height; row++)
		{
			// Start at the first column
			column = 0;

			// Process the rest of the row
			for (; column < lowpass_width; column++)
			{
				int pixel_value = *(lpCurrentByte++);

				// Perform inverse quantization
#if _ENCODE_CHROMA_ZERO
				if (channel == 0)
					pixel_value = (quantization * pixel_value) + offset;
				else
					pixel_value = (pixel_value - offset) * quantization;
#else
				pixel_value = (quantization * pixel_value) + offset;// + colorshift;
#endif
				pixel_value -= 128 * quantization;
				pixel_value *= gain;
				pixel_value >>= 7;
				pixel_value += 128 * quantization;
				pixel_value += colorshift;

				// Store the pixel in the lowpass band of the wavelet
				// Multiply by 16 to turn 8-bit into the new 16-bit format
				pLowPassRow[column] = pixel_value * 16;
			}

			// Advance to the next row in the lowpass image
			pLowPassRow += lowpass_pitch;
		}

#if ERROR_TOLERANT
		// Update the count of bytes used
		stream->nWordsUsed -= (int)(((intptr_t)lpCurrentByte - (intptr_t)stream->lpCurrentWord));
#endif
		// Update the bitstream
		stream->lpCurrentWord = (uint8_t *)lpCurrentByte;
	}
	else
	{
		int channeloffset = 0;

		if(decoder->codec.precision == 8)
		{
			channeloffset = (codec->num_frames==2 ? 64 : 32);
		}
		else if(decoder->codec.precision == 10)
		{
			channeloffset = (codec->num_frames==2 ? 10 : 5);
		}
		else if(decoder->codec.precision == 12)
		{
		//	channeloffset = (codec->num_frames==2 ? 4 : 2); // Seems to result in less shift using the viper images
		}
		//DAN20050923 no longer trying to compensate for YUV to RGB issues.

		if(decoder->frame.format == DECODED_FORMAT_RGB24 || decoder->frame.format == DECODED_FORMAT_RGB32)
		{
			if(decoder->codec.precision == 8)
			{
				switch(channel)
				{
				case 0: channeloffset += 8; break; // fixed rounding error introduced by YUV->RGB
				case 1: channeloffset += 16; break;
				case 2: channeloffset += 10; break;
				}
			}
			else if(decoder->codec.precision == 10)
			{
				switch(channel)
				{
				case 0: channeloffset += -8; break; // fixed rounding error introduced by YUV->RGB
				case 1: channeloffset += -4; break;
				case 2: channeloffset += -4; break;
				}
			}
			else if(decoder->codec.precision == 12)
			{
				switch(channel)
				{
				case 0: channeloffset += 0; break; // fixed rounding error introduced by YUV->RGB
				case 1: channeloffset += 0; break;
				case 2: channeloffset += 0; break;
				}
			}
		}

		if(bits_per_pixel != 16)
			channeloffset = 0;

		for (row = 0; row < lowpass_height; row++)
		{
			for (column = 0; column < lowpass_width; column++)
			{
				int pixel_value = GetBits(stream, bits_per_pixel);

				// Perform inverse quantization
#if _ENCODE_CHROMA_ZERO
				if (channel == 0)
					pixel_value = (quantization * pixel_value) + offset;
				else
					pixel_value = (pixel_value - offset) * quantization;
#else
				pixel_value = (quantization * pixel_value) + offset;// + colorshift;
#endif

				// Store the pixel in the lowpass band of the wavelet
				pLowPassRow[column] = SATURATE(pixel_value + channeloffset); // DAN20050926 added chromaoffet to match the normal path -- this code will be used for SD (720) encodes
			}

			stream->nWordsUsed -= lowpass_width*(bits_per_pixel>>3);

			// Advance to the next row in the lowpass image
			pLowPassRow += lowpass_pitch;
		}
	}

	// Set the wavelet scale factor
	wavelet->scale[0] = quantization;

	// Align the bitstream to the next tag value pair
	AlignBitsTag(stream);

	// Return indication of lowpass decoding success
	return result;
}

// Decode the coefficients in a highpass band
bool DecodeSampleHighPassBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band, int threading)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile
= decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	//int channel = codec->channel;
	//int subband = codec->band.subband;
	//int index = codec->highpass.wavelet_number;

	int width;
	int height;
	int quantization;

	// The encoder may not have used variable-length coding
	int method = codec->band.encoding;

	bool result = true;

	// Check that the band index is in range
	//assert(0 <= band && band <= codec->max_subband);
	if (!(0 <= band && band <= codec->max_subband)) {
		// Out-of-range band index: treat the sample as corrupt
		decoder->error = CODEC_ERROR_BAD_FRAME;
		return false;
	}

	// Encoded coefficients start on a tag boundary
	AlignBitsTag(stream);

#if (0 && DEBUG)
	// Dump the band header to the logfile
	if (logfile) {
		fprintf(logfile,
				"Band header marker: 0x%04X, subband: %d, width: %d, height: %d, encoding: %d\n",
				header->marker, header->subband, header->width, header->height, header->encoding);
	}
#endif

	// Copy the scale factors used by the encoder into the wavelet band
	// (Zero means that the encoder did not supply this parameter)
	if (codec->band.scale > 0) {
		wavelet->scale[band] = codec->band.scale;
	}

	// Get the quantization factor that was used to encode the band coefficients
	quantization = codec->band.quantization;

	// Copy the quantization into the wavelet
	wavelet->quantization[band] = quantization;

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Decode highpass subband: %d, quantization: %d\n", subband, quantization);
	}
#endif

	// Get the highpass band dimensions
	width = codec->band.width;
	height = codec->band.height;

	// Is this a special band for the temporal high pass thumbnail?
	if (method == BAND_ENCODING_LOSSLESS)
	{
		//lossless temporal subband //DAN20060701
		result = DecodeBand16sLossless(decoder, stream, wavelet, band, width, height);
		assert(result);
		if (result) {
			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, wavelet, band);
		}
	}
	else if (method == BAND_ENCODING_16BIT)
	{
		//lossless temporal subband //DAN20060701
		result = DecodeBand16s(decoder, stream, wavelet, band, width, height);
		assert(result);
		if (result) {
			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, wavelet, band);
		}
	}
	else
	{
		// Must use the runlength encoding method
		//assert(codec->band.encoding == BAND_ENCODING_RUNLENGTHS);
		if (codec->band.encoding != BAND_ENCODING_RUNLENGTHS) {
			// Unknown band encoding method: treat the sample as corrupt
			decoder->error = CODEC_ERROR_BAD_FRAME;
			return false;
		}

#if 0
		// This code attempts to not decode various subbands for 1/4 res decodes.
		// Unforuntately playback would stop after 5 seonds with this code (but not in debug mode.)
		if (subband >= 4 && subband <= 6)
		{
			TAGVALUE segment;
			AlignBitsTag(stream);
			do {
				segment = GetTagValue(stream);
			} while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER);
			stream->lpCurrentWord -= 4;
			stream->nWordsUsed += 4;
		}
		else
#elif 0
		// Is this subband required for decoding the frame?
if (CanSkipSubband(decoder, subband))
		{
			// Skip past the end of this subband
			SkipSubband(stream);
		}
#endif
		// Decode this subband
		result = DecodeFastRunsFSM16s(decoder, stream, wavelet, band, width, height, threading);
	}

	// Return failure if a problem was encountered while reading the band coefficients
	if (!result) return result;

	// The encoded band coefficients end on a bitstream word boundary
	// to avoid interference with the marker for the coefficient band trailer
	AlignBits(stream);

	// Decode the band trailer
	error = DecodeBandTrailer(stream, NULL);
	decoder->error = error;
	//assert(error == CODEC_ERROR_OKAY);
	if (error != CODEC_ERROR_OKAY) {
#if (0 && DEBUG)
		if (logfile) {
			fprintf(logfile, "Error in band %d trailer: %d\n", band, error);
		}
#endif
		return false;
	}

	return result;
}

// Decode an empty band.
// No coefficients are present in the bitstream for an empty band; this routine
// only records the scale and quantization parameters in the wavelet and then
// consumes the band trailer.  Returns false (decoder->error set) on a bad trailer.
bool DecodeSampleEmptyBand(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int quantization;

	// Check that the band is in range
	assert(0 <= band && band <= CODEC_MAX_HIGHBANDS);

	// Check that the highpass band is 16 bits
	assert(wavelet->pixel_type[1] == PIXEL_TYPE_16S);

#if (0 && DEBUG)
	//TODO: Change format string to handle 64-bit pointers
	if (logfile) {
		fprintf(logfile, "Start decoding an empty band, stream: 0x%p\n", stream->lpCurrentWord);
	}
#endif

	// Encoded coefficients must start on a word boundary
	AlignBits(stream);

	// Copy the scale factors used by the encoder into the wavelet band
	// (Zero means that the encoder did not supply the parameter)
	if (codec->band.scale > 0)
		wavelet->scale[band] = codec->band.scale;

	// Set the quantization used to encode the band coefficients
	quantization = codec->band.quantization;
	wavelet->quantization[band] = quantization;

#if (0 && DEBUG)
	if (logfile) {
		DumpBits(stream, logfile);
	}
#endif

	// Decode the band trailer
	error = DecodeBandTrailer(stream, NULL);
	decoder->error = error;
	assert(error == CODEC_ERROR_OKAY);
	if (error != CODEC_ERROR_OKAY) {
#if (0 && DEBUG)
		if (logfile) {
			fprintf(logfile, "Error in band: %d, error: %d\n", band, error);
		}
#endif
		return false;
	}

	// The encoded band coefficients end on a bitstream word boundary
	// to avoid interference with the marker for the coefficient band trailer
	AlignBits(stream);

#if (0 && DEBUG)
	// Dump the band trailer to the logfile
	if (logfile) {
		fprintf(logfile, "Band trailer marker: 0x%04X\n", trailer->marker);
	}
#endif

#if (0 && DEBUG)
	if (logfile) {
		//TODO: Change format string to handle 64-bit pointers
		fprintf(logfile, "End decode empty band, stream: 0x%X\n", stream->lpCurrentWord);
	}
#endif

	return true;
}

// Decode a band that was stored as uncompressed 16-bit coefficients
// (BAND_ENCODING_16BIT), applying the inverse quantization as the values
// are copied into the wavelet band.
bool DecodeBand16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height)
{
	PIXEL *rowptr = wavelet->band[band_index];
	int pitch = wavelet->pitch;
	int row,dequant = wavelet->quantization[band_index];

	// Convert the pitch from bytes to pixels
	pitch /= sizeof(PIXEL);

	//BAND_ENCODING_16BIT
	if(dequant == 1)
	{
		// No dequantization needed; copy the coefficients directly
		for (row = 0; row < height; row++)
		{
			int column;
#if 0
			for (column = 0; column < width; column++)
			{
				int value = GetWord16s(stream);
				rowptr[column] = value;
			}
#else	// Mild speedup (2.5% overall half-res decode improvement.)
char *sptr = (char *)stream->lpCurrentWord;
			char *dptr = (char *)rowptr;
			for (column = 0; column < width; column++)
			{
				// Swap the two bytes of each 16-bit coefficient as it is copied
				*(dptr+1) = *sptr++;
				*dptr = *sptr++;
				dptr+=2;
			}
			// Advance the bitstream past the words consumed by this row
			stream->lpCurrentWord += width*2;
			stream->nWordsUsed += width*2;	// NOTE(review): other code paths decrement nWordsUsed when lpCurrentWord advances -- verify the sign here
#endif
			rowptr += pitch;
		}
	}
	else
	{
		// Dequantize each coefficient as it is read from the bitstream
		for (row = 0; row < height; row++)
		{
			int column;
			for (column = 0; column < width; column++)
			{
				int value = GetWord16s(stream);
				rowptr[column] = value*dequant;
			}
			rowptr += pitch;
		}
	}

#if (0 && DEBUG)
	{
		int static count = 0;
		if (count < 20)
		{
			char label[_MAX_PATH];
			sprintf(label, "Hightemp-decode-%d-", count);
			DumpBandPGM(label, wavelet, band_index, NULL);
		}
		count++;
	}
#endif

	return true;
}

// Decode a losslessly encoded band (BAND_ENCODING_LOSSLESS) using the
// finite state machine decoder, then apply the dequantization in place.
// Returns false (decoder->error = CODEC_ERROR_RUN_DECODE) on any failure.
bool DecodeBand16sLossless(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height)
{
	//CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	int result = true;
	int quant = wavelet->quantization[band_index];

	// Get the pointer to the finite state machine
	FSM *fsm = &decoder->fsm[decoder->codec.active_codebook];
	int size;
	PIXEL *rowptr;
	//int row = 0;
	int pitch;
	//CODEC_STATE *codec = &decoder->codec;
	//int channel = codec->channel;
	//int subband = codec->band.subband;
	//int num_subbands = codec->num_subbands;
	//int pixel_type = wavelet->pixel_type[band_index];
	//int difference_coding = decoder->codec.difference_coding;
	//int localquant = 1;
	//int threading = 0;

	decoder->codec.active_codebook = 0; // reset CODEC state
	decoder->codec.difference_coding = 0; //reset state for next subband

	// Must have a valid wavelet
	assert(wavelet != NULL);
	if (! (wavelet != NULL)) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	//Must have a valid FSM
	assert(fsm != NULL);
	if (! (fsm != NULL)) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	// All rows are treated as one int32_t row that covers the entire band
	size = fsm->table.num_states;
	assert(size > 0);
	if (size == 0) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	// Check if the band is intended for 8-bit pixels
	assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_16S);

	rowptr = (PIXEL *)wavelet->band[band_index];
	pitch = wavelet->pitch;
	assert(rowptr != NULL && pitch != 0);
	if (! (rowptr != NULL && pitch != 0)) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	DeQuantFSM(fsm, 1); // can't use this to dequant as we split the coefficients into high and low bytes.

	// Run the two-pass FSM decoder over the whole band
	if (!DecodeBandFSM16sNoGap2Pass(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, quant))
	{
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	// Apply the dequantization in place (pitch is in bytes, hence pitch/2 PIXEL steps)
	if(quant)
	{
		int x,y;
		PIXEL *line = rowptr;

		if(quant == 32)
		{
			// Fast path: multiply by 32 with a shift
			for(y=0;y<height;y++)
			{
				for(x=0;x<width;x++)
				{
					line[x] <<= 5;
				}
				line += pitch/2;
			}
		}
		else
		{
			for(y=0;y<height;y++)
			{
				for(x=0;x<width;x++)
				{
					line[x] *= quant;
				}
				line += pitch/2;
			}
		}
	}

/*	if(once <= 60)
	{
		char name[200];
		FILE *fp;
		sprintf(name,"C:/Cedoc/DUMP/Decoder/dump%02d.raw", once);
		fp = fopen(name,"wb");
		fwrite(rowptr,width*height,1,fp);
		fclose(fp);
		once++;
	}*/

	assert(result == true);
	if (! (result == true)) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	return true;
}

// Invert the wavelet to reconstruct the lower wavelet in the transform.
// Dispatches on the transform type and wavelet index; when allocations_only
// is nonzero only the destination wavelets are (re)allocated and no inverse
// transform is performed.
void ReconstructWaveletBand(DECODER *decoder, TRANSFORM *transform, int channel, IMAGE *wavelet,
							int index, int precision, const SCRATCH *scratch, int allocations_only)
{
	int transform_type = transform->type;
	int width = wavelet->width;
	int height = wavelet->height;
	int level = wavelet->level;
	PIXEL *buffer = (PIXEL *)scratch->free_ptr;
	size_t buffer_size = scratch->free_size;

	// Is the current wavelet a spatial wavelet?
if (transform_type == TRANSFORM_TYPE_SPATIAL && index > 0)
	{
		// Reconstruct the lowpass band in the lower wavelet
		int lowpass_index = index - 1;
		IMAGE *lowpass = transform->wavelet[lowpass_index];
		int lowpass_width = 2 * width;
		int lowpass_height = 2 * height;
		int lowpass_level = level - 1;
		int lowpass_type = (lowpass_index == 0) ? WAVELET_TYPE_FRAME : WAVELET_TYPE_SPATIAL;
		//const int prescale = 1;
		const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
		int prescale = transform->prescale[index];

#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		lowpass = GetWaveletThreadSafe(decoder, transform, lowpass_index,
									   lowpass_width, lowpass_height,
									   lowpass_level, lowpass_type);
#else
		// Allocate the wavelet if not already allocated
#if _ALLOCATOR
		lowpass = ReallocWaveletEx(decoder->allocator, lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#else
		lowpass = ReallocWaveletEx(lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#endif
		transform->wavelet[lowpass_index] = lowpass;
#endif

		// Check that the lowpass band has not already been reconstructed
		//assert((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0);

		if(!allocations_only)
		{
			// Check that all of the wavelet bands have been decoded
			//assert(BANDS_ALL_VALID(wavelet));
			if (!BANDS_ALL_VALID(wavelet)) {
				decoder->error = CODEC_ERROR_BAD_FRAME;
				return;
			}

			// Has this wavelet already been reconstructed?
			if ((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0)
			{
				// Perform the inverse spatial transform before decoding the next wavelet
				STOP(tk_decoding);
				START(tk_inverse);
				//TransformInverseSpatialQuantLowpass(wavelet, lowpass, buffer, buffer_size, prescale, inverse_prescale);
				TransformInverseSpatialQuantLowpass(wavelet, lowpass, scratch, prescale, inverse_prescale);
				STOP(tk_inverse);
				START(tk_decoding);

				// Call thread safe routine to update the band valid flags
				UpdateWaveletBandValidFlags(decoder, lowpass, 0);

#if TIMING
				// Increment the count of spatial transforms performed during decoding
				spatial_decoding_count++;
#endif
			}
		}
	}
	// Is the current wavelet a spatial wavelet above the temporal lowpass band?
	else if (index > 3)
	{
		// Reconstruct the lowpass band in the lower wavelet
		const int temporal_wavelet_index = 2;
		int lowpass_index = (index > 4) ? index - 1 : index - 2;
		IMAGE *lowpass = transform->wavelet[lowpass_index];
		int lowpass_width = 2 * width;
		int lowpass_height = 2 * height;
		int lowpass_level = level - 1;
		int lowpass_type = ((lowpass_index == temporal_wavelet_index) ?
							WAVELET_TYPE_TEMPORAL : WAVELET_TYPE_SPATIAL);
		//const int prescale = 2;
		const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
		int prescale = transform->prescale[index];

#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		lowpass = GetWaveletThreadSafe(decoder, transform, lowpass_index,
									   lowpass_width, lowpass_height,
									   lowpass_level, lowpass_type);
#else
		// Allocate the wavelet if not already allocated
#if _ALLOCATOR
		lowpass = ReallocWaveletEx(decoder->allocator, lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#else
		lowpass = ReallocWaveletEx(lowpass, lowpass_width, lowpass_height, lowpass_level, lowpass_type);
#endif
		transform->wavelet[lowpass_index] = lowpass;
#endif

		if(!allocations_only)
		{
			// Check that the lowpass band has not already been reconstructed
			assert((lowpass->band_valid_flags & BAND_VALID_MASK(0)) == 0);

			// Check that all of the wavelet bands have been decoded
			//assert(BANDS_ALL_VALID(wavelet));
			if (!BANDS_ALL_VALID(wavelet)) {
				decoder->error = CODEC_ERROR_BAD_FRAME;
				return;
			}

			// Perform the inverse spatial transform before decoding the next wavelet
			STOP(tk_decoding);
			START(tk_inverse);
			//TransformInverseSpatialQuantLowpass(wavelet, lowpass, buffer, buffer_size, prescale, inverse_prescale);
			TransformInverseSpatialQuantLowpass(wavelet, lowpass, scratch, prescale, inverse_prescale);
			STOP(tk_inverse);
			START(tk_decoding);

			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, lowpass, 0);

#if TIMING
			// Increment the count of spatial transforms performed during decoding
			spatial_decoding_count++;
#endif
		}
	}
	// Is the current wavelet the spatial wavelet above the temporal highpass band?
else if (index == 3)
	{
		// Reconstruct the highpass band in the temporal wavelet
		const int temporal_wavelet_index = 2;

		int highpass_index = index - 1;
		IMAGE *highpass = transform->wavelet[highpass_index];
		int highpass_width = 2 * width;
		int highpass_height = 2 * height;
		int highpass_level = level - 1;
		int highpass_type = ((highpass_index == temporal_wavelet_index) ?
							 WAVELET_TYPE_TEMPORAL : WAVELET_TYPE_SPATIAL);
		const bool inverse_prescale = (precision >= CODEC_PRECISION_10BIT);
		int prescale = inverse_prescale ? transform->prescale[index] : 0;

#if _THREADED_DECODER
		// Allocate (or reallocate) the wavelet with thread safety
		highpass = GetWaveletThreadSafe(decoder, transform, highpass_index,
										highpass_width, highpass_height,
										highpass_level, highpass_type);
#else
		// Allocate the wavelet if not already allocated
#if _ALLOCATOR
		highpass = ReallocWaveletEx(decoder->allocator, highpass , highpass_width, highpass_height, highpass_level, highpass_type);
#else
		highpass = ReallocWaveletEx(highpass , highpass_width, highpass_height, highpass_level, highpass_type);
#endif
		transform->wavelet[highpass_index] = highpass;
#endif

		if(!allocations_only)
		{
			// Check that the highpass band has not already been reconstructed
			assert((highpass->band_valid_flags & BAND_VALID_MASK(1)) == 0);

			// Check that all of the wavelet bands have been decoded
			//assert(BANDS_ALL_VALID(wavelet));
			if (!BANDS_ALL_VALID(wavelet)) {
				decoder->error = CODEC_ERROR_BAD_FRAME;
				return;
			}

			// Perform the inverse spatial transform before decoding the next wavelet
			STOP(tk_decoding);
			START(tk_inverse);
			TransformInverseSpatialQuantHighpass(wavelet, highpass, buffer, buffer_size, prescale);
			STOP(tk_inverse);
			START(tk_decoding);

			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, highpass, 1);

#if TIMING
			// Increment the count of spatial transforms performed during decoding
			spatial_decoding_count++;
#endif
		}
	}
	// Is the current wavelet the temporal wavelet?
	else if (index == 2)
	{
		// Get the temporal wavelet
		IMAGE *temporal = wavelet;

		// Set the frame wavelet parameters
		int frame_level = 1;
		int frame_type = WAVELET_TYPE_FRAME;

		// Get the two frame wavelets
		IMAGE *frame[2];
		frame[0] = transform->wavelet[0];
		frame[1] = transform->wavelet[1];

		// Check that the temporal wavelet is valid
		//assert(temporal->num_bands == 2 && temporal->wavelet_type == WAVELET_TYPE_TEMPORAL);
		if (!(temporal->num_bands == 2 && temporal->wavelet_type == WAVELET_TYPE_TEMPORAL)) {
			decoder->error = CODEC_ERROR_BAD_FRAME;
			return;
		}

#if _THREADED_DECODER
		// Allocate (or reallocate) the frame wavelets with thread safety
		frame[0] = GetWaveletThreadSafe(decoder, transform, 0, width, height, frame_level, frame_type);
		frame[1] = GetWaveletThreadSafe(decoder, transform, 1, width, height, frame_level, frame_type);
#else
		// Allocate the frame wavelets if not already allocated
#if _ALLOCATOR
		frame[0] = ReallocWaveletEx(decoder->allocator, frame[0], width, height, frame_level, frame_type);
		frame[1] = ReallocWaveletEx(decoder->allocator, frame[1], width, height, frame_level, frame_type);
#else
		frame[0] = ReallocWaveletEx(frame[0], width, height, frame_level, frame_type);
		frame[1] = ReallocWaveletEx(frame[1], width, height, frame_level, frame_type);
#endif
		transform->wavelet[0] = frame[0];
		transform->wavelet[1] = frame[1];
#endif

#if (0 && DEBUG)
		if (logfile) {
			fprintf(logfile, "Before inverse temporal transform");
			DumpArray16s("Temporal Lowpass", temporal->band[0], temporal->width, temporal->height, temporal->pitch, logfile);
			DumpArray16s("Temporal Highpass", temporal->band[1], temporal->width, temporal->height, temporal->pitch, logfile);
		}
#endif

		if(!allocations_only)
		{
			// Check that the lowpass bands have not already been reconstructed
			assert((frame[0]->band_valid_flags & BAND_VALID_MASK(0)) == 0);
			assert((frame[1]->band_valid_flags & BAND_VALID_MASK(0)) == 0);

			// Check that all of the wavelet bands have been decoded
			assert(BANDS_ALL_VALID(temporal));

			// Invert the temporal transform between the frame wavelets
			STOP(tk_decoding);
			START(tk_inverse);
			TransformInverseTemporalQuant(temporal, frame[0], frame[1], buffer, buffer_size, precision);
			STOP(tk_inverse);
			START(tk_decoding);

#if (0 && DEBUG)
			if (logfile) {
				IMAGE *wavelet = quad[0];
				fprintf(logfile, "After inverse temporal transform\n");
				DumpArray16s("Temporal Lowpass", temporal->band[0], temporal->width, temporal->height, temporal->pitch, logfile);
				DumpArray16s("Temporal Highpass", temporal->band[1], temporal->width, temporal->height, temporal->pitch, logfile);
				DumpArray16s("First frame wavelet, band 0", wavelet->band[0], wavelet->width, wavelet->height, wavelet->pitch, logfile);
			}
#endif

			// Call thread safe routine to update the band valid flags
			UpdateWaveletBandValidFlags(decoder, frame[0], 0);
			UpdateWaveletBandValidFlags(decoder, frame[1], 0);

#if TIMING
			// Increment the number of temporal transforms performed outside of decoding
			temporal_decoding_count++;
#endif
		}
	}
}

// Compute the dimensions of the output buffer.
// Selects the wavelet that supplies the decoded frame for the requested
// resolution and returns its scaled dimensions through the output pointers
// (both are cleared to zero first so early exits return a defined result).
void ComputeOutputDimensions(DECODER *decoder, int frame, int *decoded_width_out, int *decoded_height_out)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	FRAME_INFO *info = &decoder->frame;
	//int progressive = codec->progressive;
	TRANSFORM **transform_array = decoder->transform;
	//IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS];
	IMAGE *wavelet = NULL;
	int wavelet_width;
	int wavelet_height;
	int decoded_width;
	int decoded_height;
	int resolution = info->resolution;
	//int chroma_offset = decoder->codec.chroma_offset;
	int decoded_scale = 0;

	if (decoded_width_out == NULL || decoded_height_out == NULL) {
		return;
	}

	// Clear the return values in case this routine terminates early
	*decoded_width_out = 0;
	*decoded_height_out = 0;

	// Get the decoding scale
	switch(resolution)
	{
	case DECODED_RESOLUTION_FULL:
	case DECODED_RESOLUTION_HALF_HORIZONTAL:
//assert(AllTransformBandsValid(transform_array, num_channels, frame)); if (!AllTransformBandsValid(transform_array, num_channels, frame)) { decoder->error = CODEC_ERROR_BAD_FRAME; return; } decoded_scale = 2; wavelet = transform_array[0]->wavelet[0]; break; case DECODED_RESOLUTION_HALF: //assert(AllTransformBandsValid(transform_array, num_channels, frame)); if (!AllTransformBandsValid(transform_array, num_channels, frame)) { decoder->error = CODEC_ERROR_BAD_FRAME; return; } decoded_scale = 1; wavelet = transform_array[0]->wavelet[0]; break; case DECODED_RESOLUTION_QUARTER: if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { //assert(AllTransformBandsValid(transform_array, num_channels, frame)); if (!AllTransformBandsValid(transform_array, num_channels, frame)) { decoder->error = CODEC_ERROR_BAD_FRAME; return; } decoded_scale = 1; wavelet = transform_array[0]->wavelet[0]; } else { decoded_scale = 1; wavelet = transform_array[0]->wavelet[3]; } break; case DECODED_RESOLUTION_LOWPASS_ONLY: decoded_scale = 1; wavelet = transform_array[0]->wavelet[5]; if(wavelet == NULL) // there Intra Frame compressed wavelet = transform_array[0]->wavelet[2]; break; default: assert(0); break; } // Get the decoded frame dimensions assert(wavelet != NULL); wavelet_width = wavelet->width; wavelet_height = wavelet->height; if(resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) decoded_width = wavelet_width; else decoded_width = decoded_scale * wavelet_width; decoded_height = decoded_scale * wavelet_height; #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decoded scale: %d, decoded width: %d, wavelet width: %d\n", decoded_scale, decoded_width, wavelet_width); } #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decoded width: %d, height: %d, frame width: %d, height: %d, output pitch: %d\n", decoded_width, decoded_height, info->width, info->height, pitch); } #endif // Return the decoded width and height *decoded_width_out = decoded_width; *decoded_height_out = decoded_height; } 
#define DEBUG_ROW16U 0 void ReconstructSampleFrameToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch) { FRAME_INFO local_info; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif FRAME_INFO *info = &local_info; CODEC_STATE *codec = &decoder->codec; int num_channels = codec->num_channels; int progressive = codec->progressive; TRANSFORM **transform_array = decoder->transform; IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS]; IMAGE *wavelet; int wavelet_width; int wavelet_height; int decoded_width; int decoded_height; int resolution = decoder->frame.resolution; int chroma_offset = decoder->codec.chroma_offset; int uncompressed = decoder->uncompressed_chunk && decoder->uncompressed_size && decoder->sample_uncompressed; //TODO: Change this routine to return the codec error code CODEC_ERROR error = CODEC_ERROR_OKAY; //if(decoder->cfhddata.calibration) // LoadTweak(); //TODO: Change this routine to return an error code if (decoder == NULL) { return; } decoder->gop_frame_num = frame; #if _THREADED_DECODER // Wait until the transform thread has finished all pending transforms WaitForTransformThread(decoder); #endif //return; // copy frame info in a changable local structure memcpy(info, &decoder->frame, sizeof(FRAME_INFO)); // Use the old code for reconstructing the frame #if (0 && DEBUG) // Force quarter resolution decoding for debugging that feature resolution = DECODED_RESOLUTION_QUARTER; #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Inverting last wavelet, frame: %d\n", frame); } #endif // The decoder can decode a video sample without returning a frame if (output == NULL || pitch == 0) return; #if (1 && DEBUG_ROW16U) // Force decoding to 16-bit pixels for debugging info->format = DECODED_FORMAT_YR16; #endif #if 0 if (info->format == DECODED_FORMAT_YR16) { // Force interlaced or progressive decoding for debugging //progressive = false; progressive = true; } #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decoder flags: 0x%p\n", 
decoder->flags); } #endif // Does this frame have to be reconstructed? if ((decoder->flags & DECODER_FLAGS_RENDER) == 0) { #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decoder discarding frame: %d\n", frame); } #endif return; } // Check that the requested frame is within the limits of the group of frames assert(0 <= frame && frame < decoder->gop_length); // Check that the frame resolution is valid assert(IsValidFrameResolution(resolution)); if (!IsValidFrameResolution(resolution)) { decoder->error = CODEC_ERROR_RESOLUTION; return; } #if (0 && TIMING) //(0 && DEBUG) // Override progressive flag read from the bitstream for debugging //progressive = 0; // Use the inverse frame transform progressive = 1; // Use the inverse spatial transform #endif // Build the 3D LUTs if needed ComputeCube(decoder); //HACK DAN20110131 -- some formats will not directly decode so need to use the AM route { if( decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422 && resolution == DECODED_RESOLUTION_HALF) { if( decoder->frame.format == COLOR_FORMAT_R408 || decoder->frame.format == COLOR_FORMAT_V408) { decoder->use_active_metadata_decoder = true; decoder->apply_color_active_metadata = true; } } if( decoder->frame.format == COLOR_FORMAT_NV12) { decoder->use_active_metadata_decoder = true; decoder->apply_color_active_metadata = true; // TODO, make it work with this. 
} if (decoder->codec.progressive == false && decoder->frame.format == COLOR_FORMAT_RGB24) { decoder->use_active_metadata_decoder = true; decoder->apply_color_active_metadata = true; } } // Get the decoding scale if(!uncompressed) { switch(resolution) { case DECODED_RESOLUTION_FULL: case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER: //assert(AllTransformBandsValid(transform_array, num_channels, frame)); if (!AllTransformBandsValid(transform_array, num_channels, frame)) { decoder->error = CODEC_ERROR_BAD_FRAME; return; } wavelet = transform_array[0]->wavelet[0]; // Get the decoded frame dimensions //assert(wavelet != NULL); if (wavelet == NULL) { decoder->error = CODEC_ERROR_RESOLUTION; return; } wavelet_width = wavelet->width; wavelet_height = wavelet->height; decoded_width = 2 * wavelet_width; decoded_height = 2 * wavelet_height; break; case DECODED_RESOLUTION_HALF: //assert(AllTransformBandsValid(transform_array, num_channels, frame)); if (!AllTransformBandsValid(transform_array, num_channels, frame)) { decoder->error = CODEC_ERROR_BAD_FRAME; return; } wavelet = transform_array[0]->wavelet[0]; // Get the decoded frame dimensions //assert(wavelet != NULL); if (wavelet == NULL) { decoder->error = CODEC_ERROR_RESOLUTION; return; } wavelet_width = wavelet->width; wavelet_height = wavelet->height; decoded_width = wavelet_width; decoded_height = wavelet_height; break; case DECODED_RESOLUTION_HALF_HORIZONTAL: //assert(AllTransformBandsValid(transform_array, num_channels, frame)); if (!AllTransformBandsValid(transform_array, num_channels, frame)) { decoder->error = CODEC_ERROR_BAD_FRAME; return; } wavelet = transform_array[0]->wavelet[0]; // Get the decoded frame dimensions //assert(wavelet != NULL); if (wavelet == NULL) { decoder->error = CODEC_ERROR_RESOLUTION; return; } wavelet_width = wavelet->width; wavelet_height = wavelet->height; decoded_width = wavelet_width; decoded_height = 2 * wavelet_height; break; case DECODED_RESOLUTION_QUARTER: 
if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { //assert(AllTransformBandsValid(transform_array, num_channels, frame)); if (!AllTransformBandsValid(transform_array, num_channels, frame)) { decoder->error = CODEC_ERROR_BAD_FRAME; return; } wavelet = transform_array[0]->wavelet[0]; } else { wavelet = transform_array[0]->wavelet[3]; } // Get the decoded frame dimensions //assert(wavelet != NULL); if(wavelet == NULL) { decoder->error = CODEC_ERROR_RESOLUTION; return; } wavelet_width = wavelet->width; wavelet_height = wavelet->height; decoded_width = wavelet_width; decoded_height = wavelet_height; break; case DECODED_RESOLUTION_LOWPASS_ONLY: wavelet = transform_array[0]->wavelet[5]; if(wavelet == NULL) // there Intra Frame compressed wavelet = transform_array[0]->wavelet[2]; // Get the decoded frame dimensions //assert(wavelet != NULL); if (wavelet == NULL) { decoder->error = CODEC_ERROR_RESOLUTION; return; } wavelet_width = wavelet->width; wavelet_height = wavelet->height; decoded_width = wavelet_width; decoded_height = wavelet_height; break; default: assert(0); break; } } else { if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { decoded_width = info->width/2; decoded_height = info->height/2; } else { decoded_width = info->width; decoded_height = info->height; } } if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { if(resolution == DECODED_RESOLUTION_FULL) { if(decoded_width*2 == info->width) { info->width /= 2; info->height /= 2; info->resolution = resolution = DECODED_RESOLUTION_FULL_DEBAYER; } } else if(resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) { if(decoded_width*2 == info->width) { info->width /= 2; info->height /= 2; } } else if(resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) { if(decoded_width*2 == info->width) { info->height /= 2; info->resolution = resolution = DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER; } } else if(decoder->frame.format == DECODED_FORMAT_BYR2 || decoder->frame.format == DECODED_FORMAT_BYR4) { 
if(decoded_width*2 == info->width) { info->width /= 2; info->height /= 2; info->resolution = resolution = DECODED_RESOLUTION_HALF_NODEBAYER; } } else { if(resolution == DECODED_RESOLUTION_HALF) { if(decoded_width*2 == info->width) { decoded_width *= 2; decoded_height *= 2; info->resolution = resolution = DECODED_RESOLUTION_FULL; } } else if(resolution == DECODED_RESOLUTION_QUARTER) { if(uncompressed) { decoded_width *= 2; decoded_height *= 2; info->resolution = resolution = DECODED_RESOLUTION_QUARTER_NODEBAYER_SCALED; } else { if(decoded_width == info->width) { info->resolution = resolution = DECODED_RESOLUTION_HALF; } } } } } if(uncompressed) { // Call the appropriate routine for the encoded format switch (decoder->codec.encoded_format) { case ENCODED_FORMAT_YUVA_4444: // Four planes of YUVA 4:4:4:4 // Not implemented assert(0); error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; case ENCODED_FORMAT_BAYER: // Bayer encoded data // Add new code here for the final steps in decoding the Bayer format error = UncompressedSampleFrameBayerToBuffer(decoder, info, frame, output, pitch); break; case ENCODED_FORMAT_YUV_422: // Original encoding scheme for YUV 4:2:2 (always v210) error = UncompressedSampleFrameYUVToBuffer(decoder, info, frame, output, pitch);//CODEC_ERROR_UNSUPPORTED_FORMAT; break; case ENCODED_FORMAT_RGB_444: // Original encoding scheme for RGB 444 (always DPX0) error = UncompressedSampleFrameRGBToBuffer(decoder, info, frame, output, pitch);//CODEC_ERROR_UNSUPPORTED_FORMAT; break; default: // Fall through into the old code for reconstructing frames error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; } } else { if (decoder->codec.num_channels < 3 || decoder->codec.num_channels > 4) { decoder->error = CODEC_ERROR_BAD_FRAME; return; } // Call the appropriate routine for the encoded format switch (decoder->codec.encoded_format) { case ENCODED_FORMAT_RGB_444: // channels = decoder->codec.num_channels; planes of RGB 4:4:4 case ENCODED_FORMAT_RGBA_4444: // Four planes of 
ARGB 4:4:4:4 error = ReconstructSampleFrameRGB444ToBuffer(decoder, frame, output, pitch); break; case ENCODED_FORMAT_YUVA_4444: // Four planes of YUVA 4:4:4:4 // Not implemented assert(0); //error = ReconstructSampleFrameYUVA4444ToBuffer(decoder, frame, output, pitch); break; case ENCODED_FORMAT_BAYER: // Bayer encoded data // Add new code here for the final steps in decoding the Bayer format error = ReconstructSampleFrameBayerToBuffer(decoder, info, frame, output, pitch); break; case ENCODED_FORMAT_YUV_422: // Original encoding scheme for YUV 4:2:2 // Add new code here for the final steps in decoding the original YUV 4:2:2 format error = ReconstructSampleFrameYUV422ToBuffer(decoder, frame, output, pitch); break; default: // Fall through into the old code for reconstructing frames error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; } } // Was the newer code able to successfully reconstruct the frame? if (error != CODEC_ERROR_UNSUPPORTED_FORMAT) { // Save the codec error code in the decoder state and return decoder->error = error; return; } #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decoded scale: %d, decoded width: %d, wavelet width: %d\n", decoded_scale, decoded_width, wavelet_width); } #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decoded width: %d, height: %d, frame width: %d, height: %d, output pitch: %d\n", decoded_width, decoded_height, info->width, info->height, pitch); } #endif #if (0 && DEBUG) if (logfile) { IMAGE *wavelet = transform[0]->wavelet[frame]; int band = 0; fprintf(logfile, "Luminance wavelet, frame: %d, band: %d\n", frame, band); DumpArray16s("Lowpass Band", wavelet->band[band], wavelet->width, wavelet->height, wavelet->pitch, logfile); } #endif // Check that the requested frame is large enough to hold the decoded frame #if (0 && DEBUG) //if (! 
(info->width >= decoded_width)) { if (logfile) { //fprintf(logfile, "Requested frame not large enough to hold decoded frame: %d < %d\n", info->width, decoded_width); fprintf(logfile, "Output frame width: %d, decoded frame width: %d\n", info->width, decoded_width); } } #endif assert(info->width >= decoded_width); assert((info->height+7)/8 >= (decoded_height+7)/8); if (!(info->width >= decoded_width && (info->height+7)/8 >= (decoded_height+7)/8)) { decoder->error = CODEC_ERROR_FRAMESIZE; return; } #if (0 && DEBUG) if (logfile) { //SUBIMAGE subimage = SUBIMAGE_UPPER_LEFT(16, 16); SUBIMAGE subimage = SUBIMAGE_UPPER_RIGHT(16, 16); // Adjust the subimage to be at the middle of the right border //subimage.row += wavelet_height/2 - 8; DumpBand("SIF Image", wavelet, 0, &subimage, logfile); } #endif START(tk_inverse); if (resolution == DECODED_RESOLUTION_QUARTER) { int precision = codec->precision; // Reconstruct the frame to quarter resolution ReconstructQuarterFrame(decoder, num_channels, frame, output, pitch, info, &decoder->scratch, precision); } else // Was the first transform a frame transform (used for interlaced frames)? if (!progressive) { // Can the inverse frame transform and output byte packing be done in one pass? 
if ((resolution == DECODED_RESOLUTION_FULL) && (info->format == DECODED_FORMAT_YUYV || info->format == DECODED_FORMAT_UYVY)) { // Apply the inverse frame transform and pack the results into the output buffer int precision = codec->precision; #if (0 && DEBUG) DumpWaveletBandsPGM(wavelet, frame, num_channels); #endif #if _INTERLACED_WORKER_THREADS StartInterlaceWorkerThreads(decoder); //TODO: support new threading // Send the upper and lower rows of the transforms to the worker threads TransformInverseFrameThreadedToYUV(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision); #else // Transform the wavelets for each channel to the output image (not threaded) TransformInverseFrameToYUV(transform_array, frame, num_channels, output, pitch, info, &decoder->scratch, chroma_offset, precision); #endif } //#if BUILD_PROSPECT else if (resolution == DECODED_RESOLUTION_FULL && info->format == DECODED_FORMAT_YR16) { // Apply the inverse frame transform and output rows of luma and chroma //DWORD dwThreadID1; //DWORD dwThreadID2; //HANDLE thread1; //HANDLE thread2; int precision = codec->precision; #if _INTERLACED_WORKER_THREADS StartInterlaceWorkerThreads(decoder); //TODO: support new threading // Send the upper and lower rows of the transforms to the worker threads TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels, (PIXEL16U *)output, pitch, info, chroma_offset, precision); #else // Transform the wavelets for each channel to the output image (not threaded) TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels, (PIXEL16U *)output, pitch, info, &decoder->scratch, chroma_offset, precision); #endif } //#endif else { // Reconstruct the frame as separate planes and combine the planes into a packed output image int channel; if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY) { int scale = 13; for (channel = 0; channel < num_channels; channel++) { lowpass_images[channel] = transform_array[channel]->wavelet[5]; 
if(lowpass_images[channel] == NULL) // therefore IntreFrame compressed. { scale = 12; lowpass_images[channel] = transform_array[channel]->wavelet[2]; } } STOP(tk_inverse); CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset, scale, decoder->codec.encoded_format, decoder->frame.white_point); START(tk_inverse); } else // In SIF resolution, no need to reconstruct the bottom-level wavelet transforms // Just copy the lowpass images directly into output frame if (resolution == DECODED_RESOLUTION_HALF) { int precision = codec->precision; for (channel = 0; channel < num_channels; channel++) { lowpass_images[channel] = transform_array[channel]->wavelet[frame]; } STOP(tk_inverse); CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset, precision, decoder->codec.encoded_format, decoder->frame.white_point); START(tk_inverse); } // In full resolution, reconstruct the frame wavelet and // convert the YUYV output to the specified color format else { int precision = codec->precision; TransformInverseFrameToBuffer(transform_array, frame, num_channels, output, pitch, info, &decoder->scratch, chroma_offset, precision); } } } else // The first transform was a spatial transform (used for progressive frames) { // Can the inverse frame transform and output byte packing be done in one pass? 
if ((resolution == DECODED_RESOLUTION_FULL) && (info->format == DECODED_FORMAT_YUYV || info->format == DECODED_FORMAT_UYVY) && // Output YUV decoder->thread_cntrl.capabilities & _CPU_FEATURE_SSE2) { int precision = codec->precision; //DWORD dwThreadID1; //DWORD dwThreadID2; //HANDLE thread1; //HANDLE thread2; // Apply the inverse frame transform and pack the results into the output buffer #if _THREADED if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { uint8_t *pixoutput = output; if(decoder->use_active_metadata_decoder) //WIP { TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, pixoutput, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sBayerThruLUT); } else { TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, pixoutput, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sToBayerYUV); } } else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sRGB2YUV); } else { TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sToYUV); } #else //TODO : Accelerated BAYER for single thread decoding. 
assert(0); // Transform the wavelets for each channel to the output image (not threaded) //TransformInverseSpatialToYUV(decoder, transform_array, frame, num_channels, output, pitch, info, // &decoder->scratch, chroma_offset, precision); #endif } else if ((resolution == DECODED_RESOLUTION_FULL) && decoder->codec.encoded_format == ENCODED_FORMAT_BAYER && (info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32) && // Output RGB decoder->thread_cntrl.capabilities & _CPU_FEATURE_SSE2 && decoder->use_active_metadata_decoder) { int precision = codec->precision; //DWORD dwThreadID1; //DWORD dwThreadID2; //HANDLE thread1; //HANDLE thread2; // Apply the inverse frame transform and pack the results into the output buffer #if _THREADED { uint8_t *pixoutput = output; if(info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32) { pixoutput += (info->height-1)*pitch; pitch = -pitch; } TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, pixoutput, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sBayerThruLUT); } #endif } //#if BUILD_PROSPECT else if (resolution == DECODED_RESOLUTION_FULL && info->format == DECODED_FORMAT_YR16) { // Apply the inverse frame transform and output rows of luma and chroma int precision = codec->precision; #if _THREADED TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels, (uint8_t *)output, pitch, info, chroma_offset, precision); #else // Transform the wavelets for each channel to the output image (not threaded) TransformInverseSpatialToRow16u(transform_array, frame, num_channels, (PIXEL16U *)output, pitch, info, &decoder->scratch, chroma_offset, precision); #endif } //#endif else { // Reconstruct the frame as separate planes and combine the planes into a packed output image int channel; if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY) { //int precision = codec->precision; int scale = 13; //DAN20081203 -- fix for 444 decodes in AE32-bit float 
decoder->frame.white_point = 16; //decoder->frame.signed_pixels = 0; for (channel = 0; channel < num_channels; channel++) { lowpass_images[channel] = transform_array[channel]->wavelet[5]; if(lowpass_images[channel] == NULL) // therefore IntreFrame compressed. { scale = 12; lowpass_images[channel] = transform_array[channel]->wavelet[2]; } } STOP(tk_inverse); CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset, scale, decoder->codec.encoded_format, decoder->frame.white_point); START(tk_inverse); } else // In SIF resolution, no need to reconstruct the bottom-level wavelet transforms // Just copy the lowpass images directly into output frame if (resolution == DECODED_RESOLUTION_HALF || resolution == DECODED_RESOLUTION_HALF_NODEBAYER)// || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) { int precision = codec->precision; for (channel = 0; channel < num_channels; channel++) { lowpass_images[channel] = transform_array[channel]->wavelet[frame]; #if (0 && DEBUG) if (logfile) { char label[_MAX_PATH]; char *format = decoded_format_string[info->format]; sprintf(label, "Output, channel: %d, format: %s", channel, format); DumpImageStatistics(label, lowpass_images[channel], logfile); } #endif } STOP(tk_inverse); #if 1 //|| BAYER_SUPPORT if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { #if _THREADED WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; // Set the work count to the number of rows to process 
ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); #else //unsigned short scanline[4096*3],*sptr; //unsigned short scanline2[4096*3],*sptr2; unsigned short *scanline,*sptr; unsigned short *scanline2,*sptr2; char *buffer = decoder->scratch.free_ptr; size_t buffer_size = decoder->scratch.free_size; IMAGE *g_image = lowpass_images[0]; IMAGE *rg_image = lowpass_images[1]; IMAGE *bg_image = lowpass_images[2]; IMAGE *gd_image = lowpass_images[3]; uint8_t *outyuv,*line = output; PIXEL *bayer_line, *bayerptr; PIXEL *G,*RG,*BG,*GD; int x,y; int bayer_pitch = info->width*4; int format = info->format; bool inverted = false; int maxbound = 4095; //10-bit source int midpoint = 32768>>3; int shift = 4; if(precision == 12) { maxbound = 16383; midpoint = 32768>>1; shift = 2; } if(buffer_size < info->width * 2 * 3 * 2) assert(0); // not enough memory if (format == DECODED_FORMAT_RGB24 || format == DECODED_FORMAT_RGB32) { inverted = true; line += (info->height-1)*pitch; pitch = -pitch; } scanline = (unsigned short *)buffer; buffer += info->width * 2 * 3; scanline2 = (unsigned short *)buffer; G = g_image->band[0]; RG = rg_image->band[0]; BG = bg_image->band[0]; for(y=0; y<info->height; y++) { uint8_t *newline = line; PIXEL *newG=G,*newRG=RG,*newBG=BG; PIXEL *gptr,*rgptr,*bgptr,*gdptr; int r,g,b,rg,bg,y1,y2,u,v; int r1,g1,b1; int i; newline += pitch*y; newG += y * (g_image->pitch / sizeof(PIXEL)); newRG += y * (rg_image->pitch / sizeof(PIXEL)); newBG += y * (bg_image->pitch / sizeof(PIXEL)); gptr = newG; rgptr = newRG; bgptr = newBG; sptr = scanline; for(x=0; x<info->width; x++) { g = (*gptr++); if(g > maxbound) g = maxbound; rg = (*rgptr++); bg = (*bgptr++); r = (rg<<1) - midpoint + g; b = (bg<<1) - midpoint + g; if(r > maxbound) r = maxbound; if(b > 
maxbound) b = maxbound; if(r < 0) r = 0; if(g < 0) g = 0; if(b < 0) b = 0; *sptr++ = r<<shift; *sptr++ = g<<shift; *sptr++ = b<<shift; } { int flags = 0; int whitebitdepth = 16; sptr = scanline; if(decoder->apply_color_active_metadata) sptr = ApplyActiveMetaData(decoder, info->width, 1, y, scanline, scanline2, info->format, &whitebitdepth, &flags); ConvertLinesToOutput(decoder, info->width, 1, sptr, newline, y, pitch, info->format, whitebitdepth, flags); } } #endif } else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { IMAGE *g_image = lowpass_images[0]; IMAGE *rg_image = lowpass_images[1]; IMAGE *bg_image = lowpass_images[2]; uint8_t *line = output; unsigned char *rgb8; PIXEL *G,*RG,*BG; int x,y; G = g_image->band[0]; RG = rg_image->band[0]; BG = bg_image->band[0]; if(info->format == DECODED_FORMAT_RGB32) { line = output; line += (info->height-1) * pitch; for(y=0; y<info->height; y++) { PIXEL *gptr,*rgptr,*bgptr; int r,g,b; int i,noisearray[32]; for(i=0; i<32; i++) { noisearray[i] = (rand() & 63); } gptr = G; rgptr = RG; bgptr = BG; rgb8 = (unsigned char *)line; for(x=0; x<info->width; x++) { int rnd = noisearray[x&31]; g = ((*gptr++) + rnd) >> 6; r = ((*rgptr++) + rnd) >> 6; b = ((*bgptr++) + rnd) >> 6; if(r < 0) r=0; if(r > 255) r=255; if(g < 0) g=0; if(g > 255) g=255; if(b < 0) b=0; if(b > 255) b=255; *rgb8++ = b; *rgb8++ = g; *rgb8++ = r; *rgb8++ = 255; } line -= pitch; G += g_image->pitch / sizeof(PIXEL); RG += rg_image->pitch / sizeof(PIXEL); BG += bg_image->pitch / sizeof(PIXEL); } } else if(info->format == DECODED_FORMAT_RGB24) { line = output; line += (info->height-1) * pitch; for(y=0; y<info->height; y++) { PIXEL *gptr,*rgptr,*bgptr; int r,g,b; int i,noisearray[32]; for(i=0; i<32; i++) { noisearray[i] = (rand() & 63); } gptr = G; rgptr = RG; bgptr = BG; rgb8 = (unsigned char *)line; for(x=0; x<info->width; x++) { int rnd = noisearray[x&31]; g = ((*gptr++) + rnd) >> 6; r = 
((*rgptr++) + rnd) >> 6; b = ((*bgptr++) + rnd) >> 6; if(r < 0) r=0; if(r > 255) r=255; if(g < 0) g=0; if(g > 255) g=255; if(b < 0) b=0; if(b > 255) b=255; *rgb8++ = b; *rgb8++ = g; *rgb8++ = r; } line -= pitch; G += g_image->pitch / sizeof(PIXEL); RG += rg_image->pitch / sizeof(PIXEL); BG += bg_image->pitch / sizeof(PIXEL); } } } else #endif { CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset, precision, decoder->codec.encoded_format, decoder->frame.white_point); } START(tk_inverse); #if (0 && DEBUG) if (logfile) { char label[_MAX_PATH]; int width = info->width; int height = info->height; sprintf(label, "Output"); DumpBufferStatistics(label, output, width, height, pitch, logfile); } #endif } // In full resolution, reconstruct the frame wavelet and // convert the YUYV output to the specified color format else { // Handle inversion of the output image in this routine FRAME_INFO info2; int format; bool inverted = false; int precision = codec->precision; memcpy(&info2, info, sizeof(FRAME_INFO)); format = info2.format; if (format == DECODED_FORMAT_RGB24) { format = DECODED_FORMAT_RGB24_INVERTED; info2.format = format; inverted = true; } else if (format == DECODED_FORMAT_RGB32) { format = DECODED_FORMAT_RGB32_INVERTED; info2.format = format; inverted = true; } // Have the output location and pitch been inverted? if (inverted && pitch > 0) { int height = info->height; if(resolution == DECODED_RESOLUTION_FULL_DEBAYER) height *= 2; output += (height - 1) * pitch; // Start at the bottom row pitch = NEG(pitch); // Negate the pitch to go up } //#if BUILD_PROSPECT // Output the frame in V210 foramt? 
if( (format == DECODED_FORMAT_V210 || format == DECODED_FORMAT_YU64) && decoder->codec.encoded_format != ENCODED_FORMAT_BAYER ) { //char *buffer = decoder->buffer; //size_t buffer_size = decoder->buffer_size; int precision = codec->precision; // The output buffer is an array of 10-bit pixels packed into double words #if 0 TransformInverseSpatialToV210(transform_array, frame, num_channels, output, pitch, &info2, buffer, buffer_size, chroma_offset, decoder->codec.precision); #else TransformInverseSpatialToV210(transform_array, frame, num_channels, output, pitch, &info2, &decoder->scratch, chroma_offset, precision); #endif } else //#endif // Decoding a full resolution progressive frame to a Bayer output format? if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { //char *buffer = decoder->buffer; //size_t buffer_size = decoder->buffer_size; int precision = codec->precision; // PIXEL16U *RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*sizeof(PIXEL), 16); if(decoder->RawBayer16 == NULL) { #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; size_t size = info->width*decoded_height*4*sizeof(PIXEL); decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, size, 16); #else decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*sizeof(PIXEL), 16); #endif decoder->RawBayerSize = info->width*decoded_height*4*sizeof(PIXEL); } //TODO: Replace this memory allocation with a scratch buffer allocation //#ifdef SHARPENING if(decoder->RGBFilterBuffer16 == NULL) { #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; size_t size = info->width*decoded_height*4*3*sizeof(PIXEL); decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, size, 16); #else decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*decoded_height*4*3*sizeof(PIXEL), 16); #endif decoder->RGBFilterBufferSize = info->width*decoded_height*4*3*sizeof(PIXEL); } //#endif if(decoder->RawBayer16 == NULL || 
decoder->RGBFilterBuffer16 == NULL) { decoder->error = CODEC_ERROR_MEMORY_ALLOC; return; } if(decoder->RawBayer16) { uint8_t *line; PIXEL16U *bayer_line, *bayerptr, *outA16, *outB16; PIXEL16U *G,*RG,*BG,*GD; int x,y; int bayer_pitch = info->width*4; //float scale = 256.0; //int matrix_non_unity = 0; //int wb_non_unity = 0; //float curve2lin[2048]; //float lin2curve[2048+512+2]; #if 0 static float rgb2yuv[3][4] = { {0.183f, 0.614f, 0.062f, 16.0f/256.0f}, {-0.101f,-0.338f, 0.439f, 0.5f}, {0.439f,-0.399f,-0.040f, 0.5f} }; float mtrx[3][4] = { {1.0f, 0, 0, 0}, {0, 1.0f, 0, 0}, {0, 0, 1.0f, 0} }; float whitebalance[3] = { 1.0f, 1.0f, 1.0f }; #endif #if 0 // Matrix disabled as it can only be correct handled by the 3D LUT due to the required linear conversions /* if(decoder->cfhddata.MagicNumber == CFHDDATA_MAGIC_NUMBER && decoder->cfhddata.version >= 2) { float fval = 0.0; int i; for(i=0; i<12; i++) { mtrx[i>>2][i&3] = fval = decoder->cfhddata.colormatrix[i>>2][i&3]; if((i>>2) == (i&3)) { if(fval != 1.0) { matrix_non_unity = 1; } } else { if(fval != 0.0) { matrix_non_unity = 1; } } } // not active as VFW isn't yet support the 3D LUTs if(decoder->cfhddata.version >= 5) { int j; float encode_curvebase = 90.0; float decode_curvebase = 90.0; int encode_curve_type = decoder->cfhddata.encode_curve >> 16; int decode_curve_type = decoder->cfhddata.decode_curve >> 16; if(decoder->cfhddata.user_white_balance[0] > 0.0) { wb_non_unity = 1; whitebalance[0] = decoder->cfhddata.user_white_balance[0]; whitebalance[1] = (decoder->cfhddata.user_white_balance[1]+decoder->cfhddata.user_white_balance[2])/2.0; whitebalance[2] = decoder->cfhddata.user_white_balance[3]; } if(encode_curve_type) //1 or 2 encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff); else { encode_curve_type = 1; encode_curvebase = 90.0; } if(decode_curve_type) //1 or 2 decode_curvebase = (float)((decoder->cfhddata.decode_curve >> 8) & 0xff) / 
(float)(decoder->cfhddata.decode_curve & 0xff); else { decode_curve_type = 1; decode_curvebase = 90.0; } for(j=0; j<2048; j++) { if(encode_curve_type == 1) curve2lin[j] = CURVE_LOG2LIN((float)j/2047.0,encode_curvebase); else curve2lin[j] = CURVE_GAM2LIN((float)j/2047.0,encode_curvebase); } for(j=-512; j<=2048; j++) // -1 to +4 { if(encode_curve_type == CURVE_TYPE_LOG) lin2curve[j+512] = CURVE_LIN2LOG((float)j/512.0,encode_curvebase); else lin2curve[j+512] = CURVE_LIN2GAM((float)j/512.0,encode_curvebase); } } }*/ #endif #if _THREADED TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels, (uint8_t *)decoder->RawBayer16, bayer_pitch*sizeof(PIXEL), info, chroma_offset, precision); #else // Decode that last transform to rows of Bayer data (one row per channel) TransformInverseSpatialToRow16u(transform_array, frame, num_channels, decoder->RawBayer16, bayer_pitch*sizeof(PIXEL), info, &decoder->scratch, chroma_offset, precision); #endif if(resolution == DECODED_RESOLUTION_FULL_DEBAYER && (info->format < DECODED_FORMAT_BYR1 || info->format > DECODED_FORMAT_BYR4)) { #if _THREADED //DemosaicRAW WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); #else 
assert(0) // old code disabled /* int bayer_format = decoder->cfhddata.bayer_format; unsigned char *outA8, *outB8; unsigned short *lineStartA16, *lineStartB16; unsigned short *lineA16, *lineB16; // int stats1=0, stats2=0, statsd=0; // double dstats1=0, dstats2=0, dstatsd=0; line = output; bayer_line = decoder->RawBayer16; for(y=0; y<info->height+DEMOSAIC_DELAYLINES; y++) { bayer_line = decoder->RawBayer16; bayer_line += bayer_pitch * y; if(y<info->height) { ColorDifference2Bayer(info->width, bayer_line, bayer_pitch, bayer_format); } if(y>=3+DEMOSAIC_DELAYLINES && y<info->height-3+DEMOSAIC_DELAYLINES) //middle scanline { unsigned short *delayptr = decoder->RawBayer16; delayptr += bayer_pitch * (y-DEMOSAIC_DELAYLINES); BayerRippleFilter(info->width, delayptr, bayer_pitch, bayer_format, decoder->RawBayer16); } if(y>=DEMOSAIC_DELAYLINES) { int delay_y = y - DEMOSAIC_DELAYLINES; unsigned short *sptr, scanline[8192*3]; outA8 = line; line += pitch; outB8 = line; line += pitch; sptr = scanline; DebayerLine(info->width*2, info->height*2, delay_y*2, decoder->RawBayer16, bayer_format, sptr, sharpening); for(x=0; x<info->width*2; x++) { outA8[2] = *sptr++>>8; outA8[1] = *sptr++>>8; outA8[0] = *sptr++>>8; outA8+=3; } for(x=0; x<info->width*2; x++) { outB8[2] = *sptr++>>8; outB8[1] = *sptr++>>8; outB8[0] = *sptr++>>8; outB8+=3; } } }*/ #endif // _THREADED } else if(format == DECODED_FORMAT_BYR2 || format == DECODED_FORMAT_BYR4) { #if _THREADED WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; // Set the 
work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); #else assert(0) // old code disabled /* { int bayer_format = decoder->cfhddata.bayer_format; // int stats1=0, stats2=0, statsd=0; // double dstats1=0, dstats2=0, dstatsd=0; line = output; bayer_line = decoder->RawBayer16; for(y=0; y<info->height; y++) { outA16 = (PIXEL16U *)line; line += pitch; outB16 = (PIXEL16U *)line; line += pitch; bayerptr = bayer_line; G = bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; GD = BG + bayer_pitch/4; for(x=0; x<info->width; x++) { int r,g,b,rg,bg,gd,g1,g2,y1,y2,u,v,dither; g = (*G++); rg = (*RG++); bg = (*BG++); gd = (*GD++) - 32768; r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; g1 = g + gd; g2 = g - gd; //TODO: Is there a DC offset to gd (causes a check in output ) // stats1+=g1; // stats2+=g2; // statsd+=gd; if(r < 0) r = 0; if(g1 < 0) g1 = 0; if(g2 < 0) g2 = 0; if(b < 0) b = 0; if(r > 0xffff) r = 0xffff; if(g1 > 0xffff) g1 = 0xffff; if(g2 > 0xffff) g2 = 0xffff; if(b > 0xffff) b = 0xffff; switch(bayer_format) { case BAYER_FORMAT_RED_GRN: //Red-grn phase *outA16++ = r; *outA16++ = g1; *outB16++ = g2; *outB16++ = b; break; case BAYER_FORMAT_GRN_RED:// grn-red *outA16++ = g1; *outA16++ = r; *outB16++ = b; *outB16++ = g2; break; case BAYER_FORMAT_GRN_BLU: *outA16++ = g1; *outA16++ = b; *outB16++ = r; *outB16++ = g2; break; case BAYER_FORMAT_BLU_GRN: *outA16++ = b; *outA16++ = g1; *outB16++ = g2; *outB16++ = r; break; } } bayer_line += bayer_pitch; } if(decoder->flags & DECODER_FLAGS_HIGH_QUALITY) { int bayer_format = decoder->cfhddata.bayer_format; for(y=2; y<info->height-3; y++) { int offset = pitch>>1; line = output; //0 line += pitch * y * 2; // If on a red line, move to a blue line 
if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_RED_GRN) line -= pitch; { int offset = pitch>>1; outA16 = (PIXEL16U *)line; outA16++; //g //for BAYER_FORMAT_RED_GRN input outA16++; //b outA16++; //g outA16++; //b //point to green pixel with *outA16 if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_GRN_BLU) outA16++; for(x=2; x<info->width-2; x++) { int mn,mx,g; int range = 8*256; //1<<11 int shift = 11; int delta; int alpha; g = *outA16; // lines below do not need to be tested for a corrected value mn = mx = outA16[offset+1]; if(mn > outA16[offset-1]) mn = outA16[offset-1]; if(mx < outA16[offset-1]) mx = outA16[offset-1]; if((outA16[-offset-1] & 1)==0) { if(mn > outA16[-offset-1]) mn = outA16[-offset-1]; if(mx < outA16[-offset-1]) mx = outA16[-offset-1]; } if((outA16[-offset+1] & 1)==0) { if(mn > outA16[-offset+1]) mn = outA16[-offset+1]; if(mx < outA16[-offset+1]) mx = outA16[-offset+1]; } delta = mx - mn; if(delta < range && ((mn-range < g && g < mn) || (mx+range > g && g > mx))) { int gmn,gmx; gmn = gmx = g; if((outA16[-2*offset-2] & 1)==0) { if(gmn > outA16[-2*offset-2]) gmn = outA16[-2*offset-2]; if(gmx < outA16[-2*offset-2]) gmx = outA16[-2*offset-2]; } if((outA16[-2*offset] & 1)==0) { if(gmn > outA16[-2*offset]) gmn = outA16[-2*offset]; if(gmx < outA16[-2*offset]) gmx = outA16[-2*offset]; } if((outA16[-2*offset+2] & 1)==0) { if(gmn > outA16[-2*offset+2]) gmn = outA16[-2*offset+2]; if(gmx < outA16[-2*offset+2]) gmx = outA16[-2*offset+2]; } if((outA16[-2] & 1)==0) { if(gmn > outA16[-2]) gmn = outA16[-2]; if(gmx < outA16[-2]) gmx = outA16[-2]; } // lines below do not need to be tested for a corrected value if(gmn > outA16[2*offset-2]) gmn = outA16[2*offset-2]; if(gmx < outA16[2*offset-2]) gmx = outA16[2*offset-2]; if(gmn > outA16[2*offset]) gmn = outA16[2*offset]; if(gmx < outA16[2*offset]) gmx = outA16[2*offset]; if(gmn > outA16[2*offset+2]) gmn = outA16[2*offset+2]; if(gmx < outA16[2*offset+2]) gmx = 
outA16[2*offset+2]; if(gmn > outA16[2]) gmn = outA16[2]; if(gmx < outA16[2]) gmx = outA16[2]; if((gmx - gmn) < range) { alpha = range;//delta; if(g > mx) { alpha *= (g-mx); //max range alpha >>= shift; } else // g < mn { alpha *= (mn-g); //max range alpha >>= shift; } alpha *= alpha; alpha >>= shift; // avg = (outA16[-offset-1] + outA16[offset-1] + outA16[-offset+1] + outA16[offset+1] + 2) >> 2; // *outA16 = avg; //good // *outA16 = mn; //spotty if( (abs(outA16[offset] - outA16[-offset]) < range) && ((abs(outA16[1] - outA16[-1]) < range))) { int val = (alpha*g + (range - alpha)*((mn+mx)>>1))>>shift; if(val > 0xffff) val = 0xffff; if(val < 0) val = 0; val |= 1; *outA16 = val; // *outA16 = ((mn+mx)>>1) | 1; // like avg but less compute } } } outA16++; //g outA16++; //b } } } } }*/ #endif } // Pack the rows of Bayer data (full resolution progressive) into BYR3 format? else if (format == DECODED_FORMAT_BYR3) { PIXEL16U *outR, *outG1, *outG2, *outB; // int stats1=0, stats2=0, statsd=0; // double dstats1=0, dstats2=0, dstatsd=0; // #pragma omp parallel for for(y=0; y<info->height; y++) { uint8_t *line = output; PIXEL *bayerptr = (PIXEL *)decoder->RawBayer16; line += pitch*2*y; bayerptr += bayer_pitch * y; outR = (PIXEL16U *)line; outG1 = outR + (pitch/4); outG2 = outR + (pitch/4)*2; outB = outR + (pitch/4)*3; G = (PIXEL16U *)bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; GD = BG + bayer_pitch/4; // Pack the rows of Bayer components into the BYR3 pattern #if (1 && XMMOPT) { __m128i *G_128 = (__m128i *)G; __m128i *RG_128 = (__m128i *)RG; __m128i *BG_128 = (__m128i *)BG; __m128i *GD_128 = (__m128i *)GD; __m128i *outR_128 = (__m128i *)outR; __m128i *outG1_128 = (__m128i *)outG1; __m128i *outG2_128 = (__m128i *)outG2; __m128i *outB_128 = (__m128i *)outB; __m128i limiter = _mm_set1_epi16(0x7fff - 0x3ff); __m128i midpoint1 = _mm_set1_epi16(32768>>6); __m128i midpoint2 = _mm_set1_epi16(32768>>5); int column_step = 8; int post_column = (info->width) - ((info->width) % 
column_step); for (x=0; x < post_column; x += column_step) { __m128i r_128; __m128i g1_128; __m128i g2_128; __m128i b_128; __m128i g_128; __m128i rg_128; __m128i bg_128; __m128i gd_128; g_128 = _mm_load_si128(G_128++); rg_128 = _mm_load_si128(RG_128++); bg_128 = _mm_load_si128(BG_128++); gd_128 = _mm_load_si128(GD_128++); g_128 = _mm_srli_epi16(g_128, 6); rg_128 = _mm_srli_epi16(rg_128, 5); bg_128 = _mm_srli_epi16(bg_128, 5); gd_128 = _mm_srli_epi16(gd_128, 6); gd_128 = _mm_subs_epi16(gd_128, midpoint1); rg_128 = _mm_subs_epi16(rg_128, midpoint2); bg_128 = _mm_subs_epi16(bg_128, midpoint2); r_128 = _mm_adds_epi16(rg_128, g_128); b_128 = _mm_adds_epi16(bg_128, g_128); g1_128 = _mm_adds_epi16(g_128, gd_128); g2_128 = _mm_subs_epi16(g_128, gd_128); r_128 = _mm_adds_epi16(r_128, limiter); r_128 = _mm_subs_epu16(r_128, limiter); g1_128 = _mm_adds_epi16(g1_128, limiter); g1_128 = _mm_subs_epu16(g1_128, limiter); g2_128 = _mm_adds_epi16(g2_128, limiter); g2_128 = _mm_subs_epu16(g2_128, limiter); b_128 = _mm_adds_epi16(b_128, limiter); b_128 = _mm_subs_epu16(b_128, limiter); _mm_store_si128(outR_128++, r_128); _mm_store_si128(outG1_128++, g1_128); _mm_store_si128(outG2_128++, g2_128); _mm_store_si128(outB_128++, b_128); } G = (PIXEL16U *)G_128; RG = (PIXEL16U *)RG_128; BG = (PIXEL16U *)BG_128; GD = (PIXEL16U *)GD_128; outR = (PIXEL16U *)outR_128; outG1 = (PIXEL16U *)outG1_128; outG2 = (PIXEL16U *)outG2_128; outB = (PIXEL16U *)outB_128; } #endif for(; x<info->width; x++) { int r,g,b,rg,bg,gd,g1,g2; g = (*G++); rg = (*RG++); bg = (*BG++); gd = (*GD++) - 32768; r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; g1 = g + gd; g2 = g - gd; //TODO: Is there a DC offset to gd (causes a check in output ) if(r < 0) r = 0; if(g1 < 0) g1 = 0; if(g2 < 0) g2 = 0; if(b < 0) b = 0; if(r > 0xffff) r = 0xffff; if(g1 > 0xffff) g1 = 0xffff; if(g2 > 0xffff) g2 = 0xffff; if(b > 0xffff) b = 0xffff; //Red-grn phase *outR++ = r>>6; *outG1++ = g1>>6; *outG2++ = g2>>6; *outB++ = b>>6; } } } // 
Pack the rows of Bayer data (full resolution progressive) into BYR4 format? else if (format == DECODED_FORMAT_BYR4) { int bayer_format = decoder->cfhddata.bayer_format; line = output; bayer_line = decoder->RawBayer16; for(y=0; y<info->height; y++) { outA16 = (PIXEL16U *)line; line += pitch; outB16 = (PIXEL16U *)line; line += pitch; bayerptr = bayer_line; G = bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; GD = BG + bayer_pitch/4; for(x=0; x<info->width; x++) { //int r,g,b,rg,bg,gd,g1,g2,y1,y2,u,v,dither; int32_t r, g, b, rg, bg, gd, g1, g2; // The output of the inverse transform is unsigned 16-bit integers const int midpoint = 32768; g = (*G++); rg = (*RG++); bg = (*BG++); gd = (*GD++) - midpoint; r = ((rg - midpoint)<<1) + g; b = ((bg - midpoint)<<1) + g; g1 = g + gd; g2 = g - gd; r = SATURATE_16U(r); g1 = SATURATE_16U(g1); g2 = SATURATE_16U(g2); b = SATURATE_16U(b); // stats1+=g1; // stats2+=g2; // statsd+=gd; switch(bayer_format) { case BAYER_FORMAT_RED_GRN: //Red-grn phase *outA16++ = r; *outA16++ = g1; *outB16++ = g2; *outB16++ = b; break; case BAYER_FORMAT_GRN_RED:// grn-red *outA16++ = g1; *outA16++ = r; *outB16++ = b; *outB16++ = g2; break; case BAYER_FORMAT_GRN_BLU: *outA16++ = g1; *outA16++ = b; *outB16++ = r; *outB16++ = g2; break; case BAYER_FORMAT_BLU_GRN: *outA16++ = b; *outA16++ = g1; *outB16++ = g2; *outB16++ = r; break; default: // Unsupported Bayer format assert(0); *outA16++ = 0; *outA16++ = 0; *outB16++ = 0; *outB16++ = 0; break; } } bayer_line += bayer_pitch; } if(decoder->flags & DECODER_FLAGS_HIGH_QUALITY) { for(y=2; y<info->height-3; y++) { //int offset = pitch>>1; line = output; //0 line += pitch * y * 2; // If on a red line, move to a blue line if(bayer_format == BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_RED_GRN) line -= pitch; { int offset = pitch>>1; outA16 = (PIXEL16U *)line; outA16++; //g //for BAYER_FORMAT_RED_GRN input outA16++; //b outA16++; //g outA16++; //b //point to green pixel with *outA16 if(bayer_format 
== BAYER_FORMAT_GRN_RED || bayer_format == BAYER_FORMAT_GRN_BLU) outA16++; for(x=2; x<info->width-2; x++) { int mn,mx,g; int range = 8*256; //1<<11 int shift = 11; int delta; int alpha; g = *outA16; // lines below do not need to be tested for a corrected value mn = mx = outA16[offset+1]; if(mn > outA16[offset-1]) mn = outA16[offset-1]; if(mx < outA16[offset-1]) mx = outA16[offset-1]; if((outA16[-offset-1] & 1)==0) { if(mn > outA16[-offset-1]) mn = outA16[-offset-1]; if(mx < outA16[-offset-1]) mx = outA16[-offset-1]; } if((outA16[-offset+1] & 1)==0) { if(mn > outA16[-offset+1]) mn = outA16[-offset+1]; if(mx < outA16[-offset+1]) mx = outA16[-offset+1]; } delta = mx - mn; if(delta < range && ((mn-range < g && g < mn) || (mx+range > g && g > mx))) { int gmn,gmx; gmn = gmx = g; if((outA16[-2*offset-2] & 1)==0) { if(gmn > outA16[-2*offset-2]) gmn = outA16[-2*offset-2]; if(gmx < outA16[-2*offset-2]) gmx = outA16[-2*offset-2]; } if((outA16[-2*offset] & 1)==0) { if(gmn > outA16[-2*offset]) gmn = outA16[-2*offset]; if(gmx < outA16[-2*offset]) gmx = outA16[-2*offset]; } if((outA16[-2*offset+2] & 1)==0) { if(gmn > outA16[-2*offset+2]) gmn = outA16[-2*offset+2]; if(gmx < outA16[-2*offset+2]) gmx = outA16[-2*offset+2]; } if((outA16[-2] & 1)==0) { if(gmn > outA16[-2]) gmn = outA16[-2]; if(gmx < outA16[-2]) gmx = outA16[-2]; } // lines below do not need to be tested for a corrected value if(gmn > outA16[2*offset-2]) gmn = outA16[2*offset-2]; if(gmx < outA16[2*offset-2]) gmx = outA16[2*offset-2]; if(gmn > outA16[2*offset]) gmn = outA16[2*offset]; if(gmx < outA16[2*offset]) gmx = outA16[2*offset]; if(gmn > outA16[2*offset+2]) gmn = outA16[2*offset+2]; if(gmx < outA16[2*offset+2]) gmx = outA16[2*offset+2]; if(gmn > outA16[2]) gmn = outA16[2]; if(gmx < outA16[2]) gmx = outA16[2]; if((gmx - gmn) < range) { alpha = range;//delta; if(g > mx) { alpha *= (g-mx); //max range alpha >>= shift; } else // g < mn { alpha *= (mn-g); //max range alpha >>= shift; } alpha *= alpha; alpha >>= shift; 
// avg = (outA16[-offset-1] + outA16[offset-1] + outA16[-offset+1] + outA16[offset+1] + 2) >> 2; // *outA16 = avg; //good // *outA16 = mn; //spotty if( (abs(outA16[offset] - outA16[-offset]) < range) && ((abs(outA16[1] - outA16[-1]) < range))) { int val = (alpha*g + (range - alpha)*((mn+mx)>>1))>>shift; if(val > 0xffff) val = 0xffff; if(val < 0) val = 0; val |= 1; *outA16 = val; // *outA16 = ((mn+mx)>>1) | 1; // like avg but less compute } } } outA16++; //g outA16++; //b } } } } // Linear restore { unsigned short *buff = (unsigned short *)output; //static int pos = 0; for(y=0; y<info->height*2; y++) { for(x=0; x<info->width*2; x++) { float val = (float)buff[y*info->width*2 + x]/65535.0f; float encode_curvebase = 90.0; int encode_curve_type = CURVE_TYPE_LOG; int encode_curve_neg; if((decoder->cfhddata.encode_curve)>>16) //1 or 2 { encode_curve_type = (decoder->cfhddata.encode_curve)>>16; if(encode_curve_type & CURVE_TYPE_EXTENDED) encode_curvebase = (float)(decoder->cfhddata.encode_curve & 0xffff); // use all 16-bits for larger log bases else encode_curvebase = (float)((decoder->cfhddata.encode_curve >> 8) & 0xff) / (float)(decoder->cfhddata.encode_curve & 0xff); } if(encode_curvebase == 1.0 && encode_curve_type <= CURVE_TYPE_LINEAR) encode_curve_type = CURVE_TYPE_LINEAR; encode_curve_neg = encode_curve_type & CURVE_TYPE_NEGATIVE; switch(encode_curve_type & CURVE_TYPE_MASK) { case CURVE_TYPE_LOG: val = CURVE_LOG2LIN(val,encode_curvebase); break; case CURVE_TYPE_GAMMA: val = CURVE_GAM2LIN(val,encode_curvebase); break; case CURVE_TYPE_CINEON: val = CURVE_CINEON2LIN(val,encode_curvebase); break; case CURVE_TYPE_CINE985: val = CURVE_CINE9852LIN(val,encode_curvebase); break; case CURVE_TYPE_PARA: val = CURVE_PARA2LIN(val,(int)((decoder->cfhddata.encode_curve >> 8) & 0xff), (int)(decoder->cfhddata.encode_curve & 0xff)); break; case CURVE_TYPE_CSTYLE: val = CURVE_CSTYLE2LIN((float)val,(int)((decoder->cfhddata.encode_curve >> 8) & 0xff)); break; case CURVE_TYPE_SLOG: val = 
CURVE_SLOG2LIN((float)val); break; case CURVE_TYPE_LOGC: val = CURVE_LOGC2LIN((float)val); break; case CURVE_TYPE_LINEAR: default: break; } buff[y*info->width*2 + x] = (int)(val*4095.0); } } } } else { #if _THREADED WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); #else //unsigned short scanline[8192*3],*sptr; //unsigned short scanline2[8192*3],*sptr2; unsigned short *scanline,*sptr; unsigned short *scanline2,*sptr2; char *buffer = decoder->scratch.free_ptr; size_t buffer_size = decoder->scratch.free_size; uint8_t *outyuv,*line = output; PIXEL *bayerptr; int x,y; if(buffer_size < info->width * 2 * 3 * 2) assert(0); // not enough memory scanline = (unsigned short *)buffer; buffer += info->width * 2 * 3; scanline2 = (unsigned short *)buffer; line = output; bayer_line = decoder->RawBayer16; for(y=0; y<info->height; y++) { int r,g,b,rg,bg,y1,y2,u,v; int r1,g1,b1; int i; __m128i gggggggg,ggggggg2,rgrgrgrg,bgbgbgbg; __m128i rrrrrrrr,bbbbbbbb; __m128i mid8192 = _mm_set1_epi16(8192); __m128i mid16384 = _mm_set1_epi16(16384); __m128i mid32768 = _mm_set1_epi16(32768); __m128i overflowprotectRGB_epi16 = _mm_set1_epi16(0x7fff-0x3fff); int sse2width = info->width & 0xfff8; 
bayerptr = bayer_line; G = bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; GD = BG + bayer_pitch/4; sptr = scanline; x = 0; for(; x<sse2width; x+=8) { gggggggg = _mm_loadu_si128((__m128i *)G); G+=8; rgrgrgrg = _mm_loadu_si128((__m128i *)RG); RG+=8; bgbgbgbg = _mm_loadu_si128((__m128i *)BG); BG+=8; ggggggg2 = _mm_srli_epi16(gggggggg, 2);// 0-16383 14bit unsigned rgrgrgrg = _mm_srli_epi16(rgrgrgrg, 2);// 14bit unsigned bgbgbgbg = _mm_srli_epi16(bgbgbgbg, 2);// 14bit unsigned rrrrrrrr = _mm_subs_epi16(rgrgrgrg, mid8192);// -8191 to 8191 14bit signed rrrrrrrr = _mm_slli_epi16(rrrrrrrr, 1); // -16382 to 16382 15bit signed rrrrrrrr = _mm_adds_epi16(rrrrrrrr, ggggggg2); // -16382 to 32767 bbbbbbbb = _mm_subs_epi16(bgbgbgbg, mid8192);// -8191 to 8191 14bit signed bbbbbbbb = _mm_slli_epi16(bbbbbbbb, 1); // -16382 to 16382 15bit signed bbbbbbbb = _mm_adds_epi16(bbbbbbbb, ggggggg2); // -16382 to 32767 //limit to 0 to 16383 rrrrrrrr = _mm_adds_epi16(rrrrrrrr, overflowprotectRGB_epi16); rrrrrrrr = _mm_subs_epu16(rrrrrrrr, overflowprotectRGB_epi16); //limit to 0 to 16383 bbbbbbbb = _mm_adds_epi16(bbbbbbbb, overflowprotectRGB_epi16); bbbbbbbb = _mm_subs_epu16(bbbbbbbb, overflowprotectRGB_epi16); rrrrrrrr = _mm_slli_epi16(rrrrrrrr, 2); // restore to 0 to 65535 bbbbbbbb = _mm_slli_epi16(bbbbbbbb, 2); // restore to 0 to 65535 *sptr++ = _mm_extract_epi16(rrrrrrrr, 0); *sptr++ = _mm_extract_epi16(gggggggg, 0); *sptr++ = _mm_extract_epi16(bbbbbbbb, 0); *sptr++ = _mm_extract_epi16(rrrrrrrr, 1); *sptr++ = _mm_extract_epi16(gggggggg, 1); *sptr++ = _mm_extract_epi16(bbbbbbbb, 1); *sptr++ = _mm_extract_epi16(rrrrrrrr, 2); *sptr++ = _mm_extract_epi16(gggggggg, 2); *sptr++ = _mm_extract_epi16(bbbbbbbb, 2); *sptr++ = _mm_extract_epi16(rrrrrrrr, 3); *sptr++ = _mm_extract_epi16(gggggggg, 3); *sptr++ = _mm_extract_epi16(bbbbbbbb, 3); *sptr++ = _mm_extract_epi16(rrrrrrrr, 4); *sptr++ = _mm_extract_epi16(gggggggg, 4); *sptr++ = _mm_extract_epi16(bbbbbbbb, 4); *sptr++ = 
_mm_extract_epi16(rrrrrrrr, 5); *sptr++ = _mm_extract_epi16(gggggggg, 5); *sptr++ = _mm_extract_epi16(bbbbbbbb, 5); *sptr++ = _mm_extract_epi16(rrrrrrrr, 6); *sptr++ = _mm_extract_epi16(gggggggg, 6); *sptr++ = _mm_extract_epi16(bbbbbbbb, 6); *sptr++ = _mm_extract_epi16(rrrrrrrr, 7); *sptr++ = _mm_extract_epi16(gggggggg, 7); *sptr++ = _mm_extract_epi16(bbbbbbbb, 7); } for(; x<info->width; x++) { g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; if(r < 0) r = 0; if(r > 0xffff) r = 0xffff; if(g < 0) g = 0; if(g > 0xffff) g = 0xffff; if(b < 0) b = 0; if(b > 0xffff) b = 0xffff; *sptr++ = r; *sptr++ = g; *sptr++ = b; } { int flags = 0; int whitebitdepth = 16; sptr = scanline; if(decoder->apply_color_active_metadata) sptr = ApplyActiveMetaData(decoder, info->width, 1, y, scanline, scanline2, info->format, &whitebitdepth, &flags); ConvertLinesToOutput(decoder, info->width, 1, sptr, line, pitch, info->format, whitebitdepth, flags); } line += pitch; bayer_line += bayer_pitch; } #endif } /* // switch to using the ApplyActiveMetaData() and ConvertLinesToOutput() calls - DAN20071201 // Pack the rows of Bayer data (full resolution progressive) into BYR2 format? 
else if (format == DECODED_FORMAT_YUYV) { line = output; bayer_line = decoder->RawBayer16; scale = 256.0; y_rmult = ((rgb2yuv[0][0]) * scale); y_gmult = ((rgb2yuv[0][1]) * scale); y_bmult = ((rgb2yuv[0][2]) * scale); y_offset= ((rgb2yuv[0][3]) * scale); u_rmult = ((rgb2yuv[1][0]) * scale); u_gmult = ((rgb2yuv[1][1]) * scale); u_bmult = ((rgb2yuv[1][2]) * scale); u_offset= ((rgb2yuv[1][3]) * scale); v_rmult = ((rgb2yuv[2][0]) * scale); v_gmult = ((rgb2yuv[2][1]) * scale); v_bmult = ((rgb2yuv[2][2]) * scale); v_offset= ((rgb2yuv[2][3]) * scale); r_rmult= (mtrx[0][0] * scale * whitebalance[0]); r_gmult= (mtrx[0][1] * scale * whitebalance[1]); r_bmult= (mtrx[0][2] * scale * whitebalance[2]); r_offset= (mtrx[0][3] * scale); g_rmult= (mtrx[1][0] * scale * whitebalance[0]); g_gmult= (mtrx[1][1] * scale * whitebalance[1]); g_bmult= (mtrx[1][2] * scale * whitebalance[2]); g_offset= (mtrx[1][3] * scale); b_rmult= (mtrx[2][0] * scale * whitebalance[0]); b_gmult= (mtrx[2][1] * scale * whitebalance[1]); b_bmult= (mtrx[2][2] * scale * whitebalance[2]); b_offset= (mtrx[2][3] * scale); for(y=0; y<info->height; y++) { outyuv = line; bayerptr = bayer_line; G = bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; for(x=0; x<info->width; x+=2) { int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v,dither; g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; // dither = (rand() & 65535)<<1; if(matrix_non_unity) { //TODO : need on convert to linear first. r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>8); g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>8); b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>8); //TODO : need on convert back to log/display curve. 
if(r1 < 0) r1 = 0; if(r1 > 65535) r1 = 65535; if(g1 < 0) g1 = 0; if(g1 > 65535) g1 = 65535; if(b1 < 0) b1 = 0; if(b1 > 65535) b1 = 65535; } else { r1 = r; g1 = g; b1 = b; } y1= ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16; u = (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16; v = ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16; g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; // dither = (rand() & 65535)<<1; if(matrix_non_unity) { //TODO : need on convert to linear first. r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>8); g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>8); b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>8); //TODO : need on convert back to log/display curve. if(r1 < 0) r1 = 0; if(r1 > 65535) r1 = 65535; if(g1 < 0) g1 = 0; if(g1 > 65535) g1 = 65535; if(b1 < 0) b1 = 0; if(b1 > 65535) b1 = 65535; } else { r1 = r; g1 = g; b1 = b; } y2 = ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16; u += (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16; v += ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16; u >>= 1; v >>= 1; y1 += y_offset; y2 += y_offset; u += u_offset; v += v_offset; if(y1 < 0) y1 = 0; if(y1 > 255) y1 = 255; if(y2 < 0) y2 = 0; if(y2 > 255) y2 = 255; if(u < 0) u = 0; if(u > 255) u = 255; if(v < 0) v = 0; if(v > 255) v = 255; *outyuv++ = y1; *outyuv++ = u; *outyuv++ = y2; *outyuv++ = v; } line += pitch; bayer_line += bayer_pitch; } } else if (format == DECODED_FORMAT_YU64) { int shift = 14; PIXEL16U *outyuv64; line = output; bayer_line = decoder->RawBayer16; scale = 16384.0; //_mm_empty(); // Clear the mmx register state y_rmult = ((rgb2yuv[0][0]) * scale); y_gmult = ((rgb2yuv[0][1]) * scale); y_bmult = ((rgb2yuv[0][2]) * scale); y_offset= ((rgb2yuv[0][3]) * scale * 4.0); u_rmult = ((rgb2yuv[1][0]) * scale); u_gmult = ((rgb2yuv[1][1]) * scale); u_bmult = ((rgb2yuv[1][2]) * scale); u_offset= ((rgb2yuv[1][3]) * 
scale * 4.0); v_rmult = ((rgb2yuv[2][0]) * scale); v_gmult = ((rgb2yuv[2][1]) * scale); v_bmult = ((rgb2yuv[2][2]) * scale); v_offset= ((rgb2yuv[2][3]) * scale * 4.0); scale = 4096.0; r_rmult= (mtrx[0][0] * scale * whitebalance[0]); r_gmult= (mtrx[0][1] * scale * whitebalance[1]); r_bmult= (mtrx[0][2] * scale * whitebalance[2]); r_offset= (mtrx[0][3] * scale); g_rmult= (mtrx[1][0] * scale * whitebalance[0]); g_gmult= (mtrx[1][1] * scale * whitebalance[1]); g_bmult= (mtrx[1][2] * scale * whitebalance[2]); g_offset= (mtrx[1][3] * scale); b_rmult= (mtrx[2][0] * scale * whitebalance[0]); b_gmult= (mtrx[2][1] * scale * whitebalance[1]); b_bmult= (mtrx[2][2] * scale * whitebalance[2]); b_offset= (mtrx[2][3] * scale); y_offset += 26; u_offset += 26; v_offset += 26; for(y=0; y<info->height; y++) { outyuv64 = (PIXEL16U *)line; bayerptr = bayer_line; G = bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; for(x=0; x<info->width; x+=2) { int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v,dither; g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; // dither = (rand() & 65535)<<1; if(matrix_non_unity) { //TODO : need on convert to linear first. r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>12); g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>12); b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>12); //TODO : need on convert back to log/display curve. if(r1 < 0) r1 = 0; if(r1 > 65535) r1 = 65535; if(g1 < 0) g1 = 0; if(g1 > 65535) g1 = 65535; if(b1 < 0) b1 = 0; if(b1 > 65535) b1 = 65535; } else { r1 = r; g1 = g; b1 = b; } y1= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset; u = (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift); v = (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift); g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; // dither = (rand() & 65535)<<1; if(matrix_non_unity) { //TODO : need on convert to linear first. 
r1= (( r_rmult * r + r_gmult * g + r_bmult * b + r_offset)>>12); g1= (( g_rmult * r + g_gmult * g + g_bmult * b + g_offset)>>12); b1= (( b_rmult * r + b_gmult * g + b_bmult * b + b_offset)>>12); //TODO : need on convert back to log/display curve. if(r1 < 0) r1 = 0; if(r1 > 65535) r1 = 65535; if(g1 < 0) g1 = 0; if(g1 > 65535) g1 = 65535; if(b1 < 0) b1 = 0; if(b1 > 65535) b1 = 65535; } else { r1 = r; g1 = g; b1 = b; } y2= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset; u+= (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift); v+= (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift); u >>= 1; v >>= 1; u += u_offset; v += v_offset; if(y1 < 0) y1 = 0; if(y1 > 65535) y1 = 65535; if(y2 < 0) y2 = 0; if(y2 > 65535) y2 = 65535; if(u < 0) u = 0; if(u > 65535) u = 65535; if(v < 0) v = 0; if(v > 65535) v = 65535; *outyuv64++ = y1; *outyuv64++ = v; *outyuv64++ = y2; *outyuv64++ = u; } line += pitch; bayer_line += bayer_pitch; } } else //RGBs { line = output; bayer_line = decoder->RawBayer16; scale = 256.0; r_rmult = (mtrx[0][0]) * scale * whitebalance[0]; r_gmult = (mtrx[0][1]) * scale * whitebalance[1]; r_bmult = (mtrx[0][2]) * scale * whitebalance[2]; r_offset= (mtrx[0][3]) * scale; g_rmult = (mtrx[1][0]) * scale * whitebalance[0]; g_gmult = (mtrx[1][1]) * scale * whitebalance[1]; g_bmult = (mtrx[1][2]) * scale * whitebalance[2]; g_offset= (mtrx[1][3]) * scale; b_rmult = (mtrx[2][0]) * scale * whitebalance[0]; b_gmult = (mtrx[2][1]) * scale * whitebalance[1]; b_bmult = (mtrx[2][2]) * scale * whitebalance[2]; b_offset= (mtrx[2][3]) * scale; for(y=0; y<info->height; y++) { int i,noisearray[32]; outyuv = line; bayerptr = bayer_line; G = bayerptr; RG = G + bayer_pitch/4; BG = RG + bayer_pitch/4; GD = RG + bayer_pitch/4; for(i=0; i<32; i++) { noisearray[i] = (rand() & 127); } if(info->format == DECODED_FORMAT_RGB32) { for(x=0; x<info->width; x++) { int R1,G1,B1; int rnd = noisearray[x&31]; // *ptr++ = *bayerptr++ >> 8; // *ptr++ = 0x80; // *ptr++ = 
*bayerptr++ >> 8; // *ptr++ = 0x80; int r,g,b,g1,g2,gdiff,y1,y2,u,v; // g = (g1+g2)>>1; // *g_row_ptr++ = g; // *rg_row_ptr++ = (r-g+256)>>1; // *bg_row_ptr++ = (b-g+256)>>1; // *gdiff_row_ptr++ = (g1-g2+256)>>1; g = ((*G++)>>1); r = ((*RG++ + 64)>>0)-(256<<7)+g; b = ((*BG++ + 64)>>0)-(256<<7)+g; // gdiff = ((*GD++ + 64)>>7)-256+g; if(matrix_non_unity) { //TODO : need on convert to linear first. R1 = ((r*r_rmult + g*r_gmult + b*r_bmult + r_offset)>>8) + rnd; G1 = ((r*g_rmult + g*g_gmult + b*g_bmult + g_offset)>>8) + rnd; B1 = ((r*b_rmult + g*b_gmult + b*b_bmult + b_offset)>>8) + rnd; //TODO : need on convert back to log/display curve. } else { R1 = r + rnd; G1 = g + rnd; B1 = b + rnd; } R1 >>= 7; G1 >>= 7; B1 >>= 7; if(R1 < 0) R1 = 0; if(R1 > 255) R1 = 255; if(G1 < 0) G1 = 0; if(G1 > 255) G1 = 255; if(B1 < 0) B1 = 0; if(B1 > 255) B1 = 255; *outyuv++ = B1; *outyuv++ = G1; *outyuv++ = R1; *outyuv++ = 255; } } else { for(x=0; x<info->width; x++) { int R1,G1,B1; int rnd = noisearray[x&31]; // *ptr++ = *bayerptr++ >> 8; // *ptr++ = 0x80; // *ptr++ = *bayerptr++ >> 8; // *ptr++ = 0x80; int r,g,b,g1,g2,gdiff,y1,y2,u,v; //g = (g1+g2)>>1; // *g_row_ptr++ = g; // *rg_row_ptr++ = (r-g+256)>>1; // *bg_row_ptr++ = (b-g+256)>>1; // *gdiff_row_ptr++ = (g1-g2+256)>>1; g = ((*G++)>>1); r = ((*RG++ + 64)>>0)-(256<<7)+g; b = ((*BG++ + 64)>>0)-(256<<7)+g; // gdiff = ((*GD++ + 64)>>7)-256+g; if(matrix_non_unity) { //TODO: Need to convert to linear first. R1 = ((r*r_rmult + g*r_gmult + b*r_bmult + r_offset)>>8) + rnd; G1 = ((r*g_rmult + g*g_gmult + b*g_bmult + g_offset)>>8) + rnd; B1 = ((r*b_rmult + g*b_gmult + b*b_bmult + b_offset)>>8) + rnd; //TODO: Need to convert back to log/display curve. 
} else { R1 = r + rnd; G1 = g + rnd; B1 = b + rnd; } R1 >>= 7; G1 >>= 7; B1 >>= 7; if(R1 < 0) R1 = 0; if(R1 > 255) R1 = 255; if(G1 < 0) G1 = 0; if(G1 > 255) G1 = 255; if(B1 < 0) B1 = 0; if(B1 > 255) B1 = 255; *outyuv++ = B1; *outyuv++ = G1; *outyuv++ = R1; } } line += pitch; bayer_line += bayer_pitch; } } */ //MEMORY_ALIGNED_FREE(RawBayer16); } } else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { int precision = codec->precision; if(decoder->RawBayer16 == NULL) { #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; size_t size = info->width*info->height*num_channels*sizeof(PIXEL); decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, size, 16); #else decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(info->width*info->height*num_channels*sizeof(PIXEL), 16); #endif decoder->RawBayerSize = info->width*info->height*num_channels*sizeof(PIXEL); } //#ifdef SHARPENING if(decoder->RGBFilterBuffer16 == NULL) { int frame_size = info->width*decoded_height*4*3*sizeof(PIXEL); if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format)) frame_size = info->width*decoded_height*4*4*sizeof(PIXEL); #if _ALLOCATOR { ALLOCATOR *allocator = decoder->allocator; decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, frame_size, 16); } #else decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16); #endif decoder->RGBFilterBufferSize = frame_size; } //#endif if(decoder->RawBayer16 == NULL || decoder->RGBFilterBuffer16 == NULL) { decoder->error = CODEC_ERROR_MEMORY_ALLOC; return; } //TODO: Replace this memory allocation with a scratch buffer allocation if(decoder->RawBayer16) { uint8_t *outyuv,*line, *source_line; PIXEL16U *bayerptr; PIXEL16U *G,*RG,*BG; int x,y; int src_pitch = info->width*num_channels*sizeof(PIXEL); int y_rmult,y_gmult,y_bmult,y_offset;//shift=8; int u_rmult,u_gmult,u_bmult,u_offset; int 
v_rmult,v_gmult,v_bmult,v_offset; float scale = 256.0; //int matrix_non_unity = 0; //int wb_non_unity = 0; //float curve2lin[2048]; //float lin2curve[2048+512+2]; static float rgb2yuv[3][4] = { {0.183f, 0.614f, 0.062f, 16.0f/256.0f}, {-0.101f,-0.338f, 0.439f, 0.5f}, {0.439f,-0.399f,-0.040f, 0.5} }; #if _THREADED TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels, (uint8_t *)decoder->RawBayer16, src_pitch, info, chroma_offset, precision); #else TransformInverseSpatialToRow16u(transform_array, frame, num_channels, decoder->RawBayer16, src_pitch, info, &decoder->scratch, chroma_offset, precision); #endif if (format == DECODED_FORMAT_YUYV) { line = output; source_line = (unsigned char *)decoder->RawBayer16; scale = 256.0; y_rmult = (int)((rgb2yuv[0][0])); y_gmult = (int)((rgb2yuv[0][1])); y_bmult = (int)((rgb2yuv[0][2])); y_offset= (int)((rgb2yuv[0][3])); u_rmult = (int)((rgb2yuv[1][0])); u_gmult = (int)((rgb2yuv[1][1])); u_bmult = (int)((rgb2yuv[1][2])); u_offset= (int)((rgb2yuv[1][3])); v_rmult = (int)((rgb2yuv[2][0])); v_gmult = (int)((rgb2yuv[2][1])); v_bmult = (int)((rgb2yuv[2][2])); v_offset= (int)((rgb2yuv[2][3])); for(y=0; y<info->height; y++) { outyuv = line; bayerptr = (PIXEL16U *)source_line; G = bayerptr; RG = G + src_pitch/(2*num_channels); BG = RG + src_pitch/(2*num_channels); for(x=0; x<info->width; x+=2) { int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v; g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; r1 = r; g1 = g; b1 = b; y1= ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16; u = (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16; v = ( v_rmult * r1 - v_gmult * g1 - v_bmult * b1 + 32768)>>16; g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; r1 = r; g1 = g; b1 = b; y2 = ( y_rmult * r1 + y_gmult * g1 + y_bmult * b1 + 32768)>>16; u += (-u_rmult * r1 - u_gmult * g1 + u_bmult * b1 + 32768)>>16; v += ( v_rmult * r1 - v_gmult * g1 - 
v_bmult * b1 + 32768)>>16; u >>= 1; v >>= 1; y1 += y_offset; y2 += y_offset; u += u_offset; v += v_offset; if(y1 < 0) y1 = 0; if(y1 > 255) y1 = 255; if(y2 < 0) y2 = 0; if(y2 > 255) y2 = 255; if(u < 0) u = 0; if(u > 255) u = 255; if(v < 0) v = 0; if(v > 255) v = 255; *outyuv++ = y1; *outyuv++ = u; *outyuv++ = y2; *outyuv++ = v; } line += pitch; source_line += src_pitch; } } else if (format == DECODED_FORMAT_YU64) { int shift = 14; PIXEL16U *outyuv64; line = output; source_line = (unsigned char *)decoder->RawBayer16; scale = 16384.0; y_rmult = (int)((rgb2yuv[0][0]) * scale); y_gmult = (int)((rgb2yuv[0][1]) * scale); y_bmult = (int)((rgb2yuv[0][2]) * scale); y_offset= (int)((rgb2yuv[0][3]) * scale * 4.0f); u_rmult = (int)((rgb2yuv[1][0]) * scale); u_gmult = (int)((rgb2yuv[1][1]) * scale); u_bmult = (int)((rgb2yuv[1][2]) * scale); u_offset= (int)((rgb2yuv[1][3]) * scale * 4.0f); v_rmult = (int)((rgb2yuv[2][0]) * scale); v_gmult = (int)((rgb2yuv[2][1]) * scale); v_bmult = (int)((rgb2yuv[2][2]) * scale); v_offset= (int)((rgb2yuv[2][3]) * scale * 4.0f); scale = 4096.0; y_offset += 26; u_offset += 26; v_offset += 26; for(y=0; y<info->height; y++) { outyuv64 = (PIXEL16U *)line; bayerptr = (PIXEL16U *)source_line; G = bayerptr; RG = G + src_pitch/(2*num_channels); BG = RG + src_pitch/(2*num_channels); for(x=0; x<info->width; x+=2) { int r,g,b,r1,g1,b1,rg,bg,y1,y2,u,v; g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; r1 = r; g1 = g; b1 = b; y1= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset; u = (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift); v = (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift); g = (*G++); rg = (*RG++); bg = (*BG++); r = ((rg - 32768)<<1) + g; b = ((bg - 32768)<<1) + g; r1 = r; g1 = g; b1 = b; y2= (( y_rmult * r1 + y_gmult * g1 + y_bmult * b1)>>shift) + y_offset; u+= (( u_rmult * r1 + u_gmult * g1 + u_bmult * b1)>>shift); v+= (( v_rmult * r1 + v_gmult * g1 + v_bmult * b1)>>shift); 
u >>= 1; v >>= 1; u += u_offset; v += v_offset; if(y1 < 0) y1 = 0; if(y1 > 65535) y1 = 65535; if(y2 < 0) y2 = 0; if(y2 > 65535) y2 = 65535; if(u < 0) u = 0; if(u > 65535) u = 65535; if(v < 0) v = 0; if(v > 65535) v = 65535; *outyuv64++ = y1; *outyuv64++ = v; *outyuv64++ = y2; *outyuv64++ = u; } line += pitch; source_line += src_pitch; } } else //RGBs { line = output; source_line = (unsigned char *)decoder->RawBayer16; for(y=0; y<info->height; y++) { int i,noisearray[32]; unsigned short *rgb16 = (unsigned short *)line; outyuv = line; bayerptr = (PIXEL16U *)source_line; G = bayerptr; RG = G + src_pitch/(2*num_channels); BG = RG + src_pitch/(2*num_channels); for(i=0; i<32; i++) { noisearray[i] = (rand() & 255); } if(info->format == DECODED_FORMAT_RGB32) { for(x=0; x<info->width; x++) { int R1,G1,B1; int rnd = noisearray[x&31]; #if 0 G1 = (*G++) + rnd; R1 = ((*RG++<<1) - (128<<9)) + G1; B1 = ((*BG++<<1) - (128<<9)) + G1; #else G1 = (*G++) + rnd; R1 = (*RG++) + rnd; B1 = (*BG++) + rnd; #endif R1 >>= 8; G1 >>= 8; B1 >>= 8; if(R1 < 0) R1 = 0; if(R1 > 255) R1 = 255; if(G1 < 0) G1 = 0; if(G1 > 255) G1 = 255; if(B1 < 0) B1 = 0; if(B1 > 255) B1 = 255; *outyuv++ = B1; *outyuv++ = G1; *outyuv++ = R1; *outyuv++ = 255; } } else if(info->format == DECODED_FORMAT_RGB24) { for(x=0; x<info->width; x++) { int R1,G1,B1; int rnd = noisearray[x&31]; #if 0 G1 = (*G++) + rnd; R1 = ((*RG++<<1) - (128<<9)) + G1; B1 = ((*BG++<<1) - (128<<9)) + G1; #else G1 = (*G++) + rnd; R1 = (*RG++) + rnd; B1 = (*BG++) + rnd; #endif R1 >>= 8; G1 >>= 8; B1 >>= 8; if(R1 < 0) R1 = 0; if(R1 > 255) R1 = 255; if(G1 < 0) G1 = 0; if(G1 > 255) G1 = 255; if(B1 < 0) B1 = 0; if(B1 > 255) B1 = 255; *outyuv++ = B1; *outyuv++ = G1; *outyuv++ = R1; } } else if(info->format == DECODED_FORMAT_RG48) { for(x=0; x<info->width; x++) { int R1,G1,B1; G1 = (*G++); R1 = (*RG++); B1 = (*BG++); *rgb16++ = R1; *rgb16++ = G1; *rgb16++ = B1; } } line += pitch; source_line += src_pitch; } } //MEMORY_ALIGNED_FREE(RawBayer16); } } else // 
Output the frame in one of the RGB 8-bit formats { //char *buffer = decoder->buffer; //size_t buffer_size = decoder->buffer_size; // Invert the bottom wavelet and convert the output to the requested color format #if _THREADED TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sYUVtoRGB); #else TransformInverseSpatialToBuffer(decoder, transform_array, frame, num_channels, output, pitch, &info2, &decoder->scratch, chroma_offset, precision); #endif } } } #if TIMING // Count the number of progressive frames that were decoded progressive_decode_count++; #endif } STOP(tk_inverse); #ifdef ADOBE_MEMORY_FUNCTIONS if((decoder->RawBayer16 && decoder->RawBayerSize > 2048*1152*2) || (decoder->RGBFilterBuffer16 && decoder->RGBFilterBufferSize > 2048*1152*2)) { #if _ALLOCATOR if(decoder->RawBayer16) { FreeAligned(decoder->allocator, decoder->RawBayer16); decoder->RawBayer16 = NULL; decoder->RawBayerSize = NULL; } if(decoder->RGBFilterBuffer16) { FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16); decoder->RGBFilterBuffer16 = NULL; decoder->RGBFilterBufferSize = NULL; } #else if(decoder->RawBayer16) { MEMORY_ALIGNED_FREE(decoder->RawBayer16); decoder->RawBayer16 = NULL; decoder->RawBayerSize = NULL; } if(decoder->RGBFilterBuffer16) { MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16); decoder->RGBFilterBuffer16 = NULL; decoder->RGBFilterBufferSize = NULL; } #endif } #endif #if (0 && DEBUG) if (logfile) { //uint8_t *subimage = output; uint8_t *subimage = output + (2 * info->width) - 16; DumpArray8u("YUV Image", subimage, 16, 16, pitch, logfile); } #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Exit ReconstructFrameToBuffer\n"); } #endif #if (0 && DEBUG && _WIN32) _CrtCheckMemory(); #endif } #if 0 // Reconstruct the frame to quarter resolution at full frame rate void ReconstructQuarterFrame(DECODER *decoder, int num_channels, uint8_t *frame1, uint8_t *frame2, int 
output_pitch, FRAME_INFO *info, char *buffer, size_t buffer_size) { TRANSFORM **transform_array = decoder->transform; int output_width = info->width; int output_height = info->height; PIXEL *low_row_ptr[CODEC_MAX_CHANNELS]; PIXEL *high_row_ptr[CODEC_MAX_CHANNELS]; PIXEL *out1_row_ptr[CODEC_MAX_CHANNELS]; PIXEL *out2_row_ptr[CODEC_MAX_CHANNELS]; PIXEL *bufptr = (PIXEL *)buffer; uint8_t *output_row_ptr = output; int low_pitch[CODEC_MAX_CHANNELS]; int high_pitch[CODEC_MAX_CHANNELS]; int channel; int row; // Check that there is enough space for the intermediate results from each channel assert(output_width * sizeof(PIXEL) < buffer_size); // Get pointers into the wavelets for each channel for (channel = 0; channel < num_channels; channel++) { // Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet IMAGE *low_wavelet = transform_array[channel]->wavelet[3]; IMAGE *high_wavelet = transform_array[channel]->wavelet[2]; // Get the pointers to the first row in each lowpass band low_row_ptr[channel] = low_wavelet->band[0]; high_row_ptr[channel] = high_wavelet->band[0]; low_pitch[channel] = low_wavelet->pitch / sizeof(PIXEL); high_pitch[channel] = high_wavelet->pitch / sizeof(PIXEL); // Allocate space for one row of results for this channel channel_row_ptr[channel] = bufptr; bufptr += low_wavelet->width; } for (row = 0; row < output_height; row++) { char *bufptr = buffer; for (channel = 0; channel < num_channels; channel++) { // Invert the temporal transform at quarter resolution InvertTemporalQuarterRow16s(low_row_ptr[channel], high_row_ptr[channel], channel_row_ptr[channel]); // Advance to the next row in each band for the temporal transform low_row_ptr[channel] += low_pitch[channel]; high_row_ptr[channel] += high_pitch[channel]; } // Pack the intermediate results into the output row ConvertUnpacked16sRowToPacked8u(channel_row_ptr, num_channels, output_row_ptr, output_width); // Advance the output row pointer output_row_ptr += output_pitch; } 
} #else // Reconstruct the frame to quarter resolution at full frame rate void ReconstructQuarterFrame(DECODER *decoder, int num_channels, int frame_index, uint8_t *output, int output_pitch, FRAME_INFO *info, const SCRATCH *scratch, int precision) { #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif TRANSFORM **transform_array = decoder->transform; int output_width = info->width; int output_height = info->height; PIXEL *low_row_ptr[CODEC_MAX_CHANNELS]; PIXEL *high_row_ptr[CODEC_MAX_CHANNELS]; uint8_t *output_row_ptr = output; int low_pitch[CODEC_MAX_CHANNELS]; int high_pitch[CODEC_MAX_CHANNELS]; int channel; int row; // Value used for filling the fourth channel in ARGB output int alpha = 255; int format = COLORFORMAT(info); int color_space = COLORSPACE(info); int decoded_format = DECODEDFORMAT(info); //bool inverted = false; // The pixels are descaled in the inverse temporal transform //const int descale = 0; // Shift the intermediate results to 16-bit pixels const int shift_yu64 = 8; // Push the scratch space state to allocate a new section char *buffer = scratch->free_ptr; #if DEBUG size_t buffer_size = scratch->free_size; #endif // Initialize a pointer for allocating space in the buffer PIXEL *bufptr = (PIXEL *)buffer; // Array of pointers to the start of each channel in the intermediate results PIXEL *channel_row_ptr[CODEC_MAX_CHANNELS]; // Check that there is enough space for the intermediate results from each channel #if DEBUG assert(output_width * sizeof(PIXEL) < buffer_size); #endif ComputeCube(decoder); // Get pointers into the wavelets for each channel for (channel = 0; channel < num_channels; channel++) { // Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet IMAGE *low_wavelet = transform_array[channel]->wavelet[4]; IMAGE *high_wavelet = transform_array[channel]->wavelet[3]; // Get the pointers to the first row in each lowpass band low_row_ptr[channel] = low_wavelet->band[0]; high_row_ptr[channel] = 
high_wavelet->band[0]; low_pitch[channel] = low_wavelet->pitch / sizeof(PIXEL); high_pitch[channel] = high_wavelet->pitch / sizeof(PIXEL); // Force the row of intermediate results to be properly aligned bufptr = (PIXEL *)ALIGN16(bufptr); // Allocate space for one row of results for this channel channel_row_ptr[channel] = bufptr; bufptr += low_wavelet->width; // Check that the row of intermediate results is properly aligned assert(ISALIGNED16(channel_row_ptr[channel])); } // Invert the image if required switch (decoded_format) { case DECODED_FORMAT_RGB24: case DECODED_FORMAT_RGB32: output_row_ptr += (output_height - 1) * output_pitch; output_pitch = NEG(output_pitch); } //HACK: Seems to work, I don't know why. //DAN20070304 if (precision == 12) precision = 8; // Apply the inverse temporal transform to the lowpass and highpass rows for (row = 0; row < output_height; row++) { // Most of the color conversion routines use zero descaling int descale = 0; //char *bufptr = buffer; for (channel = 0; channel < num_channels; channel++) { if (frame_index == 0) { // Invert the temporal transform at quarter resolution to get the even row InvertTemporalQuarterEvenRow16s(low_row_ptr[channel], high_row_ptr[channel], channel_row_ptr[channel], output_width, precision); } else { assert(frame_index == 1); // Invert the temporal transform at quarter resolution to get the odd row InvertTemporalQuarterOddRow16s(low_row_ptr[channel], high_row_ptr[channel], channel_row_ptr[channel], output_width, precision); } // Advance to the next row in each band for the temporal transform low_row_ptr[channel] += low_pitch[channel]; high_row_ptr[channel] += high_pitch[channel]; } if(decoder->use_active_metadata_decoder) { uint8_t *channeldata[TRANSFORM_MAX_CHANNELS]; // used in quarter res decodes int channelpitch[TRANSFORM_MAX_CHANNELS]; // used in quarter res decodes int i; FRAME_INFO info2; memcpy(&info2, info, sizeof(FRAME_INFO)); info2.height = 1; for(i=0;i<num_channels;i++) { channeldata[i] = 
(uint8_t *)channel_row_ptr[i]; channelpitch[i] = 0; } #if 1 { __m128i *Y = (__m128i *)channeldata[0]; __m128i *U = (__m128i *)channeldata[1]; __m128i *V = (__m128i *)channeldata[2]; __m128i v; int x; __m128i rgb_limit_epi16 = _mm_set1_epi16(0x7fff - 0x0fff); for(x=0;x<info->width;x+=8) { v = _mm_load_si128(Y); v = _mm_adds_epi16(v, rgb_limit_epi16); v = _mm_subs_epu16(v, rgb_limit_epi16); v = _mm_slli_epi16(v, 4); _mm_store_si128(Y++, v); } for(x=0;x<info->width/2;x+=8) { v = _mm_load_si128(U); v = _mm_adds_epi16(v, rgb_limit_epi16); v = _mm_subs_epu16(v, rgb_limit_epi16); v = _mm_slli_epi16(v, 4); _mm_store_si128(U++, v); } for(x=0;x<info->width/2;x+=8) { v = _mm_load_si128(V); v = _mm_adds_epi16(v, rgb_limit_epi16); v = _mm_subs_epu16(v, rgb_limit_epi16); v = _mm_slli_epi16(v, 4); _mm_store_si128(V++, v); } } #else //non SSE2 for(x=0;x<info->width*2;x++) { int val = *gptr++; if(val < 0) val = 0; if(val > 4095) val = 4095; val <<= 4; *src++ = val; } src = scanline2; #endif Row16uQuarter2OutputFormat(decoder, &info2, 0, output_row_ptr, output_pitch, decoder->gop_frame_num/*0 frame*/, scratch->free_ptr, scratch->free_size, false, channeldata, channelpitch); } else { //DAN20081203 -- fix for 444 decodes in AE32-bit float decoder->frame.white_point = 16; //decoder->frame.signed_pixels = 0; // Convert the rows of luma and chroma into the output format switch(format) { case COLOR_FORMAT_YUYV: case COLOR_FORMAT_UYVY: // Pack the intermediate results into the output row if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER) { assert(0);//need quarter res BAYER To YUV decoder } else if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { // assert(0);//need quarter res RGB To YUV decoder ConvertRGB2YUV( channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2], output_width, output_width, output_width, output_row_ptr, output_pitch, info->width, 1, 10, info->colorspace, format); } else { 
ConvertUnpacked16sRowToPacked8u(channel_row_ptr, num_channels, output_row_ptr, output_width, format); } break; case COLOR_FORMAT_RGB24: if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { ConvertRGB48toRGB24( channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2], output_width, output_width, output_width, output_row_ptr, output_pitch, info->width, 1, 10, 0); } else { // Convert the intermediate results into a row of RGB24 ConvertUnpacked16sRowToRGB24(channel_row_ptr, num_channels, output_row_ptr, output_width, descale, format, color_space); } break; case COLOR_FORMAT_RGB32: if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { ConvertRGBA48toRGB32(channel_row_ptr[1], channel_row_ptr[0], channel_row_ptr[2], NULL, output_width, output_row_ptr, output_pitch, info->width, 1, 10, 0, 3/*only 3 chhanel not 4 for alpha*/); } else { // Convert the intermediate results into a row of RGBA32 ConvertUnpacked16sRowToRGB32(channel_row_ptr, num_channels, output_row_ptr, output_width, descale, format, color_space, alpha); } break; case COLOR_FORMAT_YU64: case COLOR_FORMAT_V210: // Convert the intermediate results into a row of YU64 ConvertUnpacked16sRowToYU64(channel_row_ptr, num_channels, output_row_ptr, output_width, shift_yu64, precision, format); break; case COLOR_FORMAT_B64A: if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { // Convert the intermediate results into a row of RGBA with 16 bits per component descale = 2; ConvertUnpacked16sRowToB64A(channel_row_ptr, num_channels, output_row_ptr, output_width, descale, precision); } else { ConvertUnpackedYUV16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width, descale, precision, COLOR_FORMAT_B64A, color_space); } break; case COLOR_FORMAT_R210: case COLOR_FORMAT_DPX0: case 
COLOR_FORMAT_RG30: case COLOR_FORMAT_AR10: case COLOR_FORMAT_AB10: if((decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { // Convert the intermediate results into a row of RGBA with 16 bits per component descale = 2; ConvertUnpacked16sRowToRGB30(channel_row_ptr, num_channels, output_row_ptr, output_width, descale, precision, format, color_space); } else { ConvertUnpackedYUV16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width, descale, precision, format, color_space); } break; case COLOR_FORMAT_RG48: // Convert the intermediate results into a row of RGBA with 16 bits per component descale = 2; ConvertUnpacked16sRowToRGB48(channel_row_ptr, num_channels, output_row_ptr, output_width, descale, precision); break; case COLOR_FORMAT_RG64: // Convert the intermediate results into a row of RGBA with 16 bits per component descale = 2; ConvertUnpacked16sRowToRGBA64(channel_row_ptr, num_channels, output_row_ptr, output_width, descale, precision); break; default: #if (1 && DEBUG) if (logfile) { fprintf(logfile, "ReconstructQuarterFrame bad color format: %d\n", format); } #endif assert(0); break; } } // Advance the output row pointer output_row_ptr += output_pitch; } } #endif #if 0 // Copy the quarter resolution lowpass channels from the spatial transform void CopyQuarterFrameToBuffer(TRANSFORM **transform_array, int num_channels, uint8_t *output, int output_pitch, FRAME_INFO *info, int precision) { int output_width = info->width; int output_height = info->height; PIXEL *input_row_ptr[CODEC_MAX_CHANNELS]; uint8_t *output_row_ptr = output; int input_pitch[CODEC_MAX_CHANNELS]; int channel; int row; // Get pointers into the wavelets for each channel for (channel = 0; channel < num_channels; channel++) { // Get the lowpass bands from the two wavelets for the two halves of the temporal wavelet IMAGE *wavelet = transform_array[channel]->wavelet[1]; // Get the pointers to the first row in each 
lowpass band input_row_ptr[channel] = wavelet->band[0]; input_pitch[channel] = wavelet->pitch / sizeof(PIXEL); } for (row = 0; row < output_height; row++) { // Descale and pack the pixels in each output row CopyQuarterRowToBuffer(input_row_ptr, num_channels, output_row_ptr, output_width, precision); // Advance the input row pointers for (channel = 0; channel < num_channels; channel++) { input_row_ptr[channel] += input_pitch[channel]; } // Advance the output row pointer output_row_ptr += output_pitch; } } #endif // Convert the quarter resolution lowpass channels to the specified output format void ConvertQuarterFrameToBuffer(DECODER *decoder, TRANSFORM **transform_array, int num_channels, uint8_t *output, int output_pitch, FRAME_INFO *info, int precision) { int output_width = info->width; int output_height = info->height; PIXEL *input_row_ptr[CODEC_MAX_CHANNELS]; uint8_t *output_row_ptr = output; int input_pitch[CODEC_MAX_CHANNELS]; int channel; int row; // Value used for filling the fourth channel in ARGB output int alpha = 255; int format = COLORFORMAT(info); int color_space = COLORSPACE(info); int decoded_format = DECODEDFORMAT(info); //bool inverted = false; // Get pointers into the wavelets for each channel for (channel = 0; channel < num_channels; channel++) { // Get the lowpass bands from the wavelets with quarter resolution const int wavelet_index = 1; IMAGE *wavelet = transform_array[channel]->wavelet[wavelet_index]; // The wavelet should have been reconstructed //assert(wavelet != NULL); if (wavelet == NULL) { decoder->error = CODEC_ERROR_BAD_FRAME; return; } // The lowpass band should be valid //assert((wavelet->band_valid_flags & BAND_VALID_MASK(0)) != 0); if((wavelet->band_valid_flags & BAND_VALID_MASK(0)) == 0) { decoder->error = CODEC_ERROR_BAD_FRAME; return; } // Get the pointers to the first row in each lowpass band input_row_ptr[channel] = wavelet->band[0]; input_pitch[channel] = wavelet->pitch / sizeof(PIXEL); } // Invert the image if required 
switch (decoded_format) { case DECODED_FORMAT_RGB24: case DECODED_FORMAT_RGB32: output_row_ptr += (output_height - 1) * output_pitch; output_pitch = NEG(output_pitch); } ComputeCube(decoder); //HACK DAN20110122 -- some formats will not directly decode so need to use the AM route { if( format == COLOR_FORMAT_YU64 || format == COLOR_FORMAT_V210 || format == COLOR_FORMAT_R408 || format == COLOR_FORMAT_V408) { if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { decoder->use_active_metadata_decoder = true; decoder->apply_color_active_metadata = true; } } } if(decoder->use_active_metadata_decoder) { #if _THREADED { WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output_row_ptr; mailbox->pitch = output_pitch; mailbox->framenum = 0; for(channel = 0; channel < num_channels; channel++) { mailbox->channeldata[channel] = (uint8_t *)input_row_ptr[channel]; mailbox->channelpitch[channel] = input_pitch[channel]*sizeof(PIXEL); } memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; decoder->RGBFilterBufferPhase = 1; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); decoder->RGBFilterBufferPhase = 0; } #endif } else { //DAN20081203 -- fix for 444 decodes in AE32-bit float decoder->frame.white_point = 16; //decoder->frame.signed_pixels 
= 0; // Convert each row to the specified output format for (row = 0; row < output_height; row++) { // Right shift for converting lowpass coefficients to pixels int descale = 4; switch(format & 0x7fffffff) { case COLOR_FORMAT_YUYV: case COLOR_FORMAT_UYVY: if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { // assert(0);//need quarter res RGB To YUV decoder ConvertRGB2YUV( input_row_ptr[1], input_row_ptr[0], input_row_ptr[2], output_width, output_width, output_width, output_row_ptr, output_pitch, info->width, 1, 14, info->colorspace, format); } else { // Descale and pack the pixels in each output row CopyQuarterRowToBuffer(input_row_ptr, num_channels, output_row_ptr, output_width, precision, format); } break; case COLOR_FORMAT_RGB24: if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { ConvertRGB48toRGB24(input_row_ptr[1], input_row_ptr[0], input_row_ptr[2], output_width, output_width, output_width, output_row_ptr, output_pitch, info->width, 1, 14, 0); } else { // Convert the intermediate results into a row of RGB24 ConvertUnpacked16sRowToRGB24(input_row_ptr, num_channels, output_row_ptr, output_width, descale, format, color_space); } break; case COLOR_FORMAT_RGB32: case COLOR_FORMAT_RGB32_INVERTED: if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || (decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { ConvertRGBA48toRGB32( input_row_ptr[1], input_row_ptr[0], input_row_ptr[2], input_row_ptr[3], output_width, output_row_ptr, output_pitch, info->width, 1, 14, 0, num_channels); } else { // Convert the intermediate results into a row of RGBA32 ConvertUnpacked16sRowToRGB32(input_row_ptr, num_channels, output_row_ptr, output_width, descale, format, color_space, alpha); } break; case COLOR_FORMAT_YU64: case COLOR_FORMAT_V210: if( (decoder->codec.encoded_format == ENCODED_FORMAT_RGB_444) || 
(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444)) { //TODO RGB to YUV Quarter RES DAN20110120 - handle above with HACK DAN20110122 // } else { // Convert the intermediate results into a row of YU64 ConvertUnpacked16sRowToYU64(input_row_ptr, num_channels, output_row_ptr, output_width, descale, precision, format); } break; case COLOR_FORMAT_B64A: // Convert the intermediate results to a row of ARGB with 16 bits per pixel descale = 2; ConvertUnpacked16sRowToB64A(input_row_ptr, num_channels, output_row_ptr, output_width, descale, precision); break; case COLOR_FORMAT_R210: case COLOR_FORMAT_DPX0: case COLOR_FORMAT_RG30: case COLOR_FORMAT_AR10: case COLOR_FORMAT_AB10: // Convert the intermediate results to a row of ARGB with 16 bits per pixel descale = 2; ConvertUnpacked16sRowToRGB30(input_row_ptr, num_channels, output_row_ptr, output_width, descale, precision, format, color_space); break; case COLOR_FORMAT_RG48: // Convert the intermediate results into a row of RGBA with 16 bits per component descale = 2; ConvertUnpacked16sRowToRGB48(input_row_ptr, num_channels, output_row_ptr, output_width, descale, precision); break; case COLOR_FORMAT_RG64: // Convert the intermediate results into a row of RGBA with 16 bits per component descale = 2; ConvertUnpacked16sRowToRGBA64(input_row_ptr, num_channels, output_row_ptr, output_width, descale, precision); break; default: assert(0); break; } // Advance the input row pointers for (channel = 0; channel < num_channels; channel++) { input_row_ptr[channel] += input_pitch[channel]; } // Advance the output row pointer output_row_ptr += output_pitch; } } } // Release all resources allocated by the decoder void DecodeRelease(DECODER *decoder, TRANSFORM *transform[], int num_transforms) { #if _TIMING && 0 FILE *logfile = decoder->logfile; uint32_t frame_count = decoder->frame_count; if (logfile != NULL && frame_count > 0)\ { #ifdef _WIN32 PrintStatistics(logfile, frame_count, NULL, TIMING_CSV_FILENAME); #else 
PrintStatistics(logfile, frame_count, NULL, NULL); #endif } #endif // Free the data structures allocated for decoding ClearDecoder(decoder); } void DecodeForceMetadataRefresh(DECODER *decoder) { CFHDDATA *cfhddata = &decoder->cfhddata; cfhddata->force_metadata_refresh = true; if (decoder->parallelDecoder) { cfhddata = &decoder->parallelDecoder->cfhddata; cfhddata->force_metadata_refresh = true; } } void SetDecoderFlags(DECODER *decoder, uint32_t flags) { #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif // Set the decoder flags decoder->flags = flags; #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Decoder flags: 0x%p\n", decoder->flags); } #endif } void SetDecoderFormat(DECODER *decoder, int width, int height, int format, int resolution) { // Need to modify the codec to use the decoding format decoder->frame.width = width; decoder->frame.height = height; if(format == DECODED_FORMAT_WP13) { decoder->frame.output_format = format; //decoder->frame.format = DECODED_FORMAT_RG48; //TODO Why is this needed with W13A work natively. 
	// (continued) SetDecoderFormat: WP13 output keeps the native format and a 13-bit white point
	decoder->frame.format = format;
	//decoder->frame.signed_pixels = 1;
	decoder->frame.white_point = 13;
	}
	else if(format == DECODED_FORMAT_W13A)
	{
		decoder->frame.output_format = format;
		// decoder->frame.format = DECODED_FORMAT_W13A; // TODO eventually this might be DECODED_FORMAT_RG64
		decoder->frame.format = format;
		//decoder->frame.signed_pixels = 1;
		decoder->frame.white_point = 13;
	}
	else
	{
		// All other decoded formats use the full 16-bit white point
		decoder->frame.output_format = format;
		decoder->frame.format = format;
		//decoder->frame.signed_pixels = 0;
		decoder->frame.white_point = 16;
	}

	decoder->frame.resolution = resolution;
	decoder->frame.pixel_size = PixelSize(decoder->frame.format);
}

// Determine the SIMD features and the usable processor count and record them
// in decoder->thread_cntrl.capabilities: the low 16 bits hold the CPU feature
// flags and the high 16 bits hold the processor count (consumers extract it
// with capabilities >> 16).  The count is clamped by an explicit limit or by
// the number of processors enabled in the affinity mask.
void SetDecoderCapabilities(DECODER *decoder)
{
	int processor_count;
#ifdef _WIN32
	int limit_cpus = 32;
#else
	int limit_cpus = 32; // AJA spins off too many
#endif

	// Set the capabilities that are most likely supported by the Intel Mac
	decoder->thread_cntrl.capabilities = (_CPU_FEATURE_MMX | _CPU_FEATURE_SSE | _CPU_FEATURE_SSE2);

	if (decoder->thread_cntrl.limit)
	{
		// An explicit thread limit overrides the default
		limit_cpus = decoder->thread_cntrl.limit;
	}
	else if (decoder->thread_cntrl.affinity)
	{
		// Count the processors enabled in the affinity mask
		int i;
		const int max_cpu_count = 32;
		limit_cpus = 0;
		for (i = 0; i < max_cpu_count; i++)
		{
			// NOTE(review): (1<<31) left-shifts into the sign bit of a signed
			// int when i == 31 (undefined behavior); consider (1U << i) — TODO confirm
			if (decoder->thread_cntrl.affinity & (1<<i))
			{
				limit_cpus++;
			}
		}
	}

	// Set the number of processors
	processor_count = GetProcessorCount();
	if(processor_count > limit_cpus)
		processor_count = limit_cpus;

#if (0 && DEBUG)
	// Set the number of processors (for debugging)
	//processor_count = 8;
	processor_count = 1;
	fprintf(stderr, "Limit processors to %d\n", processor_count);
#endif

	// Pack the processor count into the high half of the capabilities word
	decoder->thread_cntrl.capabilities |= (processor_count << 16);
}

// Return the capabilities word computed by SetDecoderCapabilities
int GetDecoderCapabilities(DECODER *decoder)
{
	return decoder->thread_cntrl.capabilities;
}

// Set the decoder color space flags; returns true if the flags were accepted
bool SetDecoderColorFlags(DECODER *decoder, uint32_t color_flags)
{
	if (/*MIN_DECODED_COLOR_SPACE <= color_flags && */color_flags <= MAX_DECODED_COLOR_SPACE)
	{
		decoder->frame.colorspace = color_flags;

		// Indicate that the color flags were set as specified
		return true;
	}

	// The specified color flags were not valid
	return false;
}

// Compute the resolution corresponding to the specified combination of input and output dimensions
int DecodedResolution(int input_width, int input_height, int output_width, int output_height)
{
	int decoded_width;
	int decoded_height;

	// Output height can be negative for inverted RGB
	output_height = abs(output_height);

	if (output_width == input_width && output_height == input_height)
	{
		return DECODED_RESOLUTION_FULL;
	}

	// Compute the dimensions for half resolution decoding
	decoded_width = input_width / 2;
	decoded_height = input_height / 2;

	// Do the output dimensions correspond to half resolution decoding?
	if (output_width == decoded_width && output_height == decoded_height)
	{
		return DECODED_RESOLUTION_HALF;
	}

	// Compute the dimensions for quarter resolution decoding
	decoded_width /= 2;
	decoded_height /= 2;

	// Do the output dimensions correspond to quarter resolution decoding?
	if (output_width == decoded_width && output_height == decoded_height)
	{
		return DECODED_RESOLUTION_QUARTER;
	}

	return DECODED_RESOLUTION_UNSUPPORTED;
}

// Compute the decoded resolution that is closest to the output dimensions
int DecodedScale(int input_width, int input_height, int output_width, int output_height)
{
	int decoded_width = input_width;
	int decoded_height = input_height;

	// Table mapping the number of halvings to the resolution code
	static int decodedResolution[] = {
		DECODED_RESOLUTION_FULL,
		DECODED_RESOLUTION_HALF,
		DECODED_RESOLUTION_QUARTER
	};

	int reduction = 0;
	int max_reduction = 2;

	// Output height can be negative for inverted RGB
	output_height = abs(output_height);

#if 1
	// Always decode to the next larger size
	while (decoded_width > output_width && decoded_height > output_height && reduction < max_reduction)
	{
		// Decode to a frame size that is larger than the output image
		int reduced_width = decoded_width / 2;
		int reduced_height = decoded_height / 2;

		if (reduced_width >= output_width && reduced_height >= output_height)
		{
			decoded_width = reduced_width;
			decoded_height = reduced_height;
reduction++; } else { break; } } #else while (decoded_width*4 > output_width*5 && decoded_height*4 > output_height*5 && reduction < max_reduction) { #if 0 // Decode to a frame size that is larger than the output image int reduced_width = decoded_width / 2; int reduced_height = decoded_height / 2; if (reduced_width >= output_width && reduced_height >= output_height) { decoded_width = reduced_width; decoded_height = reduced_height; reduction++; } else { break; } #else // Better to scale up a smaller image than scale down a larger image decoded_width /= 2; decoded_height /= 2; reduction++; #endif } #endif // Check that the decoded resolution is valid assert(0 <= reduction && reduction <= max_reduction); return decodedResolution[reduction]; } void ComputeDecodedDimensions(int encoded_width, int encoded_height, int decoded_resolution, int *decoded_width_out, int *decoded_height_out) { switch (decoded_resolution) { default: assert(0); case DECODED_RESOLUTION_FULL: *decoded_width_out = encoded_width; *decoded_height_out = encoded_height; break; case DECODED_RESOLUTION_HALF: *decoded_width_out = encoded_width / 2; *decoded_height_out = encoded_height / 2; break; case DECODED_RESOLUTION_QUARTER: *decoded_width_out = encoded_width / 4; *decoded_height_out = encoded_height / 4; break; case DECODED_RESOLUTION_LOWPASS_ONLY: //TODO: Check that the lowpass dimensions are correct *decoded_width_out = encoded_width / 8; *decoded_height_out = encoded_height / 8; break; } } // Return true if the specified resolution is supported bool IsDecodedResolution(int resolution) { if (resolution == DECODED_RESOLUTION_QUARTER) { return true; } return (resolution == DECODED_RESOLUTION_FULL || resolution == DECODED_RESOLUTION_HALF); } // Return true if the encoded sample is a key frame bool IsSampleKeyFrame(uint8_t *sample, size_t size) { bool key_frame_flag = false; // Search the first twenty tags for the sample type const int num_tags = 20; int i; BITSTREAM bitstream; 
InitBitstreamBuffer(&bitstream, sample, size, BITSTREAM_ACCESS_READ); for (i = 0; i < num_tags && size > 0; i++, size -= sizeof(TAGVALUE)) { TAGVALUE segment = GetSegment(&bitstream); if (segment.tuple.tag == CODEC_TAG_SAMPLE) { switch (segment.tuple.value) { case SAMPLE_TYPE_GROUP: case SAMPLE_TYPE_FIRST: case SAMPLE_TYPE_IFRAME: key_frame_flag = true; break; case SAMPLE_TYPE_SEQUENCE_HEADER: case SAMPLE_TYPE_FRAME: case SAMPLE_TYPE_SECOND: case SAMPLE_TYPE_PFRAME: default: key_frame_flag = false; break; case SAMPLE_TYPE_GROUP_TRAILER: case SAMPLE_TYPE_NONE: case SAMPLE_TYPE_ERROR: case SAMPLE_TYPE_CHANNEL: assert(0); // Unexpected situation key_frame_flag = false; // Report the sample as a non-key frame break; } break; // Found the sample type } } return key_frame_flag; } // Return the number of the more recent decoded frame uint32_t DecodedFrameNumber(DECODER *decoder) { CODEC_STATE *codec = &decoder->codec; if (decoder == NULL) return 0; return codec->frame_number; } /***** Start of the new code for the finite state machine (FSM) decoder *****/ #if _PROCESSOR_DISPATCH __declspec(cpu_dispatch(Pentium_4,Generic)) static inline void ZeroHighPassRow(PIXEL *rowptr, int length) { // Stub routine for processor specific dispatch } #endif #if _PROCESSOR_GENERIC #if _PROCESSOR_DISPATCH __declspec(cpu_specific(Generic)) #endif // This version assumes that the row is a multiple of 8 bytes static inline void ZeroHighPassRow(PIXEL *rowptr, int length) { int count; // Check that the row starts on a 16-byte boundary //assert(ISALIGNED(rowptr, 16)); // Check that the row length (in bytes) is a multiple of 8 byte blocks assert(ISALIGNED(length, 8)); // Convert the length from pixels to 8-byte blocks count = (length >> 3); // This code assumes that at least one 8-byte block will be zeroed assert(count > 0); __asm { pxor mm0, mm0 // Zero a 16 byte register mov eax, rowptr // Load the pointer to the memory block mov ebx, count // Load the count of 8-byte blocks loop: movq [eax], 
mm0 // Write 8 bytes of zeros add eax, 8 // Advance to the next 8 byte block sub ebx, 1 // Decrement the number of blocks jg loop } //_mm_empty(); } #endif #if _PROCESSOR_PENTIUM_4 #if _PROCESSOR_DISPATCH __declspec(cpu_specific(Pentium_4)) #endif #ifndef _WIN64 // This version assumes that the row is a multiple of 16 bytes static inline void ZeroHighPassRow(PIXEL *rowptr, int length) { int count; // Check that the row starts on a 16-byte boundary assert(ISALIGNED(rowptr, 16)); // Check that the row length (in bytes) is a multiple of 16 byte blocks assert(ISALIGNED(length, 16)); // Convert the length from pixels to 16-byte blocks count = (length >> 4); // This code assumes that at least one 16-byte block will be zeroed assert(count > 0); #if 1 //DANREMOVE memset(rowptr, 0, length); #else __asm { pxor xmm0, xmm0 // Zero a 16 byte register mov eax, rowptr // Load the pointer to the memory block mov ebx, count // Load the count of 16-byte blocks loop: movdqa [eax], xmm0 // Write 16 bytes of zeros add eax, 16 // Advance to the next 16 byte block sub ebx, 1 // Decrement the number of blocks jg loop } #endif } #else // This version assumes that the row is a multiple of 16 bytes static inline void ZeroHighPassRow(PIXEL *rowptr, int length) { // Check that the row starts on a 16-byte boundary assert(ISALIGNED(rowptr, 16)); // Check that the row length (in bytes) is a multiple of 16 byte blocks assert(ISALIGNED(length, 16)); memset(rowptr, 0, length); } #endif #endif #if (0 && _DEBUG) // Functions for the finite state machine decoder (debug version) static FSMENTRY *GetFSMTableEntry(FSM *fsm, int index) { // Return the address of the next table entry in the finite state machine return &fsm->next_state[index]; } static void ResetFSM(FSM *fsm) { // Reset the state to the beginning of the finite state machine entries fsm->next_state = fsm->entries; } static void UpdateFSM(FSM *fsm, int next) { // Change the state pointer to the next block of table entries fsm->next_state = 
fsm->entries + (next << FSM_INDEX_SIZE); } #else // Macros for the finite state machine decoder #if _INDIVIDUAL_LUT #define GetFSMTableEntry(fsm, index) (FSMENTRY *)fsm->next_state+index #define ResetFSM(fsm) fsm->next_state = fsm->table.entries[0] #define UpdateFSM(fsm, next) fsm->next_state = fsm->table.entries[next] #define GetFSMTableEntryIndividual(fsm, index) (FSMENTRY *)fsm->table.entries_ind[(fsm->next_state_index << FSM_INDEX_SIZE) | index] #define ResetFSMIndividual(fsm) fsm->next_state_index = 0 #define UpdateFSMIndividual(fsm, next) fsm->next_state_index = next #else #define GetFSMTableEntry(fsm, index) (FSMENTRY *)fsm->next_state+index #define ResetFSM(fsm) fsm->next_state = fsm->table.entries #define UpdateFSM(fsm, next) fsm->next_state = fsm->table.entries+((int)next << FSM_INDEX_SIZE) #endif #endif #if _DEBUG static void DebugOutputFSMEntry(FSM *fsm, int index, FSMENTRY *entry) { int pre_skip = (entry->pre_post_skip & 0xFFF); int post_skip = (entry->pre_post_skip >> 12); // Remove companding int value0 = entry->value0 / 32; int value1 = entry->value1 / 32; // Convert the index to start at the beginning of the table index += (int)(fsm->next_state - fsm->table.entries[0]); } static void DebugOutputFSMEntryFast(FSM *fsm, int index, FSMENTRYFAST *entry) { int pre_skip = (entry->pre_post_skip & 0xFFF); int post_skip = (entry->pre_post_skip >> 12); // Remove companding int value0 = (entry->values >> 16) / 32; int value1 = (entry->values & 0xFFFF) / 32; // Convert the index to start at the beginning of the table index += (int)(fsm->next_state - fsm->table.entries[0]); } static void DebugOutputFSM(FSM *fsm) { int num_entries = FSM_INDEX_ENTRIES; int i; for (i = 0; i < num_entries; i++) { FSMENTRY *entry = &fsm->table.entries[0][i]; int pre_skip = (entry->pre_post_skip & 0xFFF); int post_skip = (entry->pre_post_skip >> 12); } } static void PrintFSMEntry(FSM *fsm, int index, FSMENTRY *entry, FILE *logfile) { int pre_skip = (entry->pre_post_skip & 0xFFF); int 
	// (continued) PrintFSMEntry: log one decoded FSM entry as CSV
	post_skip = (entry->pre_post_skip >> 12);

	// Remove companding
	int value0 = entry->value0 / 32;
	int value1 = entry->value1 / 32;

	// Convert the index to start at the beginning of the table
	index += (int)(fsm->next_state - fsm->table.entries[0]);

	if (logfile)
	{
		fprintf(logfile, "%d, %d, %d, %d, %d\n", index, value0, value1, pre_skip, post_skip);
	}
}

// Log one FSM entry in the packed (fast) representation, in the same
// comma-separated form as PrintFSMEntry; both values share one 32-bit word
static void PrintFSMEntryFast(FSM *fsm, int index, FSMENTRYFAST *entry, FILE *logfile)
{
	int pre_skip = (entry->pre_post_skip & 0xFFF);
	int post_skip = (entry->pre_post_skip >> 12);

	// Remove companding (value0 in the high half, value1 in the low half)
	int value0 = (entry->values >> 16) / 32;
	int value1 = (entry->values & 0xFFFF) / 32;

	// Convert the index to start at the beginning of the table
	index += (int)(fsm->next_state - fsm->table.entries[0]);

	if (logfile)
	{
		fprintf(logfile, "%d, %d, %d, %d, %d\n", index, value0, value1, pre_skip, post_skip);
	}
}

#endif

// Read the next byte from the bitstream and advance the stream pointer
static inline int GetFastByte(BITSTREAM *stream)
{
	// Inline of the third case of GetByte
	uint8_t *lpCurrentWord = stream->lpCurrentWord;

	// Get the next byte from the bitstream
	int byte = (uint32_t )(*(lpCurrentWord++));

	// Update the state of the bitstream
	stream->lpCurrentWord = lpCurrentWord;

#if ERROR_TOLERANT
	// Update the count of bytes used
	stream->nWordsUsed--;
#endif

	// Check that the high bits are zero
	assert((byte & ~BITMASK(8)) == 0);

	return byte;
}

#if 0
// Read the next big-endian 16-bit word from the bitstream (currently unused)
static inline int GetFastShort(BITSTREAM *stream)
{
	// Adaptation of the code in GetByte
	uint8_t *lpCurrentWord = stream->lpCurrentWord;

	// Get the next byte from the bitstream
	int byte = (uint32_t )(lpCurrentWord[0]);
	int word = (byte << 8) | (uint32_t )(lpCurrentWord[1]);

	// Update the state of the bitstream
	stream->lpCurrentWord = lpCurrentWord+2;

	// Check that the high bits are zero
	assert((word & ~BITMASK(16)) == 0);

	return word;
}
#endif

// Must declare the byte swap function even though it is an intrinsic
//int _bswap(int);

#if 0
// Read the next big-endian 32-bit word from the bitstream (currently unused)
static inline int GetFastLong(BITSTREAM *stream)
{
	uint32_t *lpCurrentWord = (uint32_t *)stream->lpCurrentWord;

	int
word = *(lpCurrentWord)++; //word = _bswap(word); word = SwapInt32BtoN(word); stream->lpCurrentWord = (uint8_t *)lpCurrentWord; return word; } #endif #if 0 //DAN20041030 not used // Decode a subband using FSM. One byte is read from the bitstream each time and decoded in two steps // Original version that does not use a separate buffer for decoding bool DecodeBandFSM(FSM *fsm, BITSTREAM *stream, PIXEL *image, int width, int height, int pitch, int quantization) { int index, byte; FSMENTRY *entry; PIXEL *rowptr = image; int column = 0; int32_t value; size_t bytes_row_size = width * sizeof(PIXEL); PIXEL *maxptr; int length = width * sizeof(PIXEL); //ROI roi = {width, 1}; // This version of Huffman decoder assumes that one byte // is processed as two 4-bit chunks assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2); assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); // Convert the pitch to units of pixels pitch /= sizeof(PIXEL); // Compute the address of the row after the last row in the band maxptr = rowptr + height * pitch; // Round up the row length (in bytes) to a multiple of 16 bytes length = ALIGN16(length); #if (0 && DEBUG) zerorow_count = 0; #endif ZeroHighPassRow(rowptr, length); // Decode runs and magnitude values until the band end trailer is decoded for (;;) { // Read a byte from the bitstream byte = GetFastByte(stream); // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); // Return when the entire band is decoded if (entry->value0 == BAND_END_TRAILER) { // Zero out the whole subband from here on rowptr += pitch; while(rowptr < maxptr) { ZeroHighPassRow(rowptr, length); rowptr += pitch; } ResetFSM(fsm); return true; } // set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // If no magnitude value is decoded if (entry->value0 == 0) { column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of 
zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); } } // If there is only one decoded magnitude value else if(entry->value1 == 0) { // Undo quantization and scaling value = quantization * entry->value0; column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); } // Fill in the decoded magnitude // Check the column before storing the value //assert(index < width); assert(0 <= column && column < width); // Store the saturated value at the position found in the scan rowptr[column] = SATURATE(value); column += entry->post_skip; // Did the scan go beyond the end of the row? 
if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); } } // If there are two decoded magnitude values else { // Check the column before storing values assert(0 <= column && column < width); if(column < width-1) { value = quantization * entry->value0; rowptr[column++] = SATURATE(value); value = quantization * entry->value1; rowptr[column++] = SATURATE(value); } else { value = quantization * entry->value0; rowptr[column] = SATURATE(value); value = quantization * entry->value1; rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); column = 0; rowptr[column++] = SATURATE(value); } } // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); // Return if the subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { // Zero out the whole subband from here on rowptr += pitch; while(rowptr < maxptr) { ZeroHighPassRow(rowptr, length); rowptr += pitch; } ResetFSM(fsm); return true; } // set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // If no magnitude value is decoded if (entry->value0 == 0) { column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? 
while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); } } // If there is only one decoded magnitude value else if (entry->value1 == 0) { // Undo quantization and scaling int32_t value = quantization * entry->value0; column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); } // Fill in the decoded magnitude // Check the column before storing the value //assert(index < width); assert(0 <= column && column < width); // Store the saturated value at the position found in the scan rowptr[column] = SATURATE(value); column += entry->post_skip; // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); } } // If there are two decoded magnitude values else { // Check the column before storing values assert(0 <= column && column < width); if(column < width-1) { value = quantization * entry->value0; rowptr[column++] = SATURATE(value); value = quantization * entry->value1; rowptr[column++] = SATURATE(value); } else { value = quantization * entry->value0; rowptr[column] = SATURATE(value); value = quantization * entry->value1; rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, length); column = 0; rowptr[column++] = SATURATE(value); } } } } #endif // Decode a subband of highpass coefficients using a finite state machine. // One byte is read from the bitstream each time and decoded in two steps. 
// New version that uses a buffer aligned to the cache for decoding. #if 0 static inline void ZeroHighPassBuffer(PIXEL *ptrCacheLines, int numCacheLines) { // This routine assume that the cache line size is 64 bytes assert(_CACHE_LINE_SIZE == 64); // This routine assumes that the input pointer is aligned to a cache line assert(ISALIGNED(ptrCacheLines, _CACHE_LINE_SIZE)); // This routine assumes that at least one cache line will be written assert(numCacheLines > 0); #if __GNUC__ memset(ptrCacheLines, 0, numCacheLines * _CACHE_LINE_SIZE); #else __asm { pxor xmm0, xmm0 // Zero a 16 byte register mov eax, ptrCacheLines // Load the pointer to the memory block mov ebx, numCacheLines // Load the count of the number of cache lines loop: movdqa [eax], xmm0 // Write 64 bytes of zeros using aligned stores movdqa [eax+16], xmm0 movdqa [eax+32], xmm0 movdqa [eax+48], xmm0 add eax, 64 // Advance to the next cache line sub ebx, 1 // Decrement the number of cache lines jg loop } #endif // The routine returns the pointer to the cache line after zeroing the block } #endif #if 0 static inline void CopyRowBuffer(char *rowptr, PIXEL *buffer, int length) { // Note that the length is in units of bytes (not pixels) int count; // Number of 16-byte blocks to copy // Check that the row length is an integer multiple of 16-byte blocks assert(ISALIGNED(length, 16)); // Convert the row length to the number of 16-byte blocks to copy count = length >> 4; // This routine assumes that at least one 16 byte block will be copied assert(count > 0); #if __GNUC__ // Use standard memory copy memcpy(rowptr, buffer, length); #else // Copy a multiple of 16 byte blocks __asm { mov eax, rowptr // Load the pointer to the destination mov ebx, buffer // Load the pointer to the source mov ecx, count // Load the number of 16-byte blocks to copy loop: movdqa xmm0, [ebx] // Load 16 bytes from the source movntdq [eax], xmm0 // Copy 16 bytes to the destination add eax, 16 // Advance to the group of 16 bytes add ebx, 16 
sub ecx, 1 // Decrement the number of blocks to copy jg loop } #endif } #endif // DecodeBandFSMBuffered is no longer used #if 0 //dan20041030 not used bool DecodeBandFSMBuffered(FSM *fsm, BITSTREAM *stream, PIXEL *image, int width, int height, int pitch, int quantization, char *decoding_buffer, size_t decoding_buffer_size) { char *rowptr = (char *)image; // Pointer to current row char *maxptr = rowptr + height * pitch; // Address of row after the last row FSMENTRY *entry; int index; int byte; int column = 0; int32_t value; size_t row_size; size_t cache_row_size; // Size of a row in bytes int cache_line_count; // Size of the buffer in cache lines PIXEL *buffer; // Pixel pointer to the buffer int length; // Length of row in bytes // Check that the processing size allows two chunks per byte assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2); // The bitstream buffer should be empty assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); // Compute the number of cache lines used in the buffer row_size = width * sizeof(PIXEL); cache_row_size = ALIGN(row_size, _CACHE_LINE_SIZE); cache_line_count = (cache_row_size >> _CACHE_LINE_SHIFT); // Check that the buffer is large enough assert(decoding_buffer != NULL && decoding_buffer_size >= cache_row_size); // Check that the buffer starts on a cache line boundary assert(ISALIGNED(decoding_buffer, _CACHE_LINE_SIZE)); // This routine assumes that the rows are contiguous and the pitch is a multiple of 16 bytes length = pitch; assert(length == ALIGN(row_size, 16)); // Cast the buffer pointer for pixel access buffer = (PIXEL *)decoding_buffer; // Zero the decoding buffer ZeroHighPassBuffer(buffer, cache_line_count); // Decode runs and magnitude values until the band end trailer is decoded for (;;) { // Read a byte from the bitstream byte = GetFastByte(stream); // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); // Return when the entire band is 
decoded if (entry->value0 == BAND_END_TRAILER) { // Copy the buffer to the row if not already beyond the band if (rowptr < maxptr) CopyRowBuffer(rowptr, buffer, length); // Advance to the next row rowptr += pitch; // Zero the remaining rows in the subband while (rowptr < maxptr) { ZeroHighPassRow((PIXEL *)rowptr, length); rowptr += pitch; } // Reset the finite state machine to the root node in the Huffman tree ResetFSM(fsm); // Return indication that the band was fully decoded return true; } // Set the finite state machine to the next state in the Huffman tree UpdateFSM(fsm, entry->next_state); // No magnitude values decoded? if (entry->value0 == 0) { // No magnitudes decoded so just advance the column pointer column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row assert(rowptr < maxptr); CopyRowBuffer(rowptr, buffer, length); rowptr += pitch; // Zero the decoding buffer if there are more rows to process if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count); } } // Only one magnitude value decoded? else if (entry->value1 == 0) { // Process the magnitude value that was decoded // Undo quantization and scaling value = quantization * entry->value0; // Advance to the column where the value should be placed column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? 
while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row assert(rowptr < maxptr); CopyRowBuffer(rowptr, buffer, length); rowptr += pitch; // Zero the decoding buffer if there are more rows to process if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count); } // Fill in the decoded magnitude // Check the column before storing the value assert(0 <= column && column < width); // Store the saturated value at the position found in the scan buffer[column] = SATURATE(value); column += entry->post_skip; // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row assert(rowptr < maxptr); CopyRowBuffer(rowptr, buffer, length); rowptr += pitch; // Zero the decoding buffer if there are more rows to process if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count); } } else // Two magnitude values were decoded { // Check the column before storing values assert(0 <= column && column < width); if (column < width - 1) { // Dequantize and store the first value value = quantization * entry->value0; buffer[column++] = SATURATE(value); // Dequantize and store the second value value = quantization * entry->value1; buffer[column++] = SATURATE(value); } else { // Dequantize and store the first value in the current row value = quantization * entry->value0; buffer[column] = SATURATE(value); // Dequantize the second value value = quantization * entry->value1; // Advance to the next row assert(rowptr < maxptr); CopyRowBuffer(rowptr, buffer, length); rowptr += pitch; // Zero the decoding buffer if there are more rows to process if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count); // Reset the column to the beginning of the row column = 0; // Store the second value in the new row buffer[column++] = SATURATE(value); } } // Decode the second 4-bit chunk index = byte & FSM_INDEX_MASK; // Index into the 
lookup table at that state entry = GetFSMTableEntry(fsm, index); // Return if the subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { // Copy the buffer to the row if not already beyond the band if (rowptr < maxptr) CopyRowBuffer(rowptr, buffer, length); // Advance to the next row rowptr += pitch; // Zero the remaining rows in the subband while (rowptr < maxptr) { ZeroHighPassRow((PIXEL *)rowptr, length); rowptr += pitch; } // Reset the finite state machine to the root node in the Huffman tree ResetFSM(fsm); // Return indication that the band was fully decoded return true; } // Set the finite state machine to the next state in the Huffman tree UpdateFSM(fsm, (int)entry->next_state); // If no magnitude value is decoded if (entry->value0 == 0) { column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row assert(rowptr < maxptr); CopyRowBuffer(rowptr, buffer, length); rowptr += pitch; // Zero the decoding buffer if there are more rows to process if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count); } } // If there is only one decoded magnitude value else if (entry->value1 == 0) { // Undo quantization and scaling int32_t value = quantization * entry->value0; column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? 
while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row assert(rowptr < maxptr); CopyRowBuffer(rowptr, buffer, length); rowptr += pitch; // Zero the decoding buffer if there are more rows to process if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count); } // Fill in the decoded magnitude // Check the column before storing the value //assert(index < width); assert(0 <= column && column < width); // Store the saturated value at the position found in the scan buffer[column] = SATURATE(value); column += entry->post_skip; // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row assert(rowptr < maxptr); CopyRowBuffer(rowptr, buffer, length); rowptr += pitch; // Zero the decoding buffer if there are more rows to process if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count); } } // If there are two decoded magnitude values else { // Check the column before storing values assert(0 <= column && column < width); if (column < width-1) { value = quantization * entry->value0; buffer[column++] = SATURATE(value); value = quantization * entry->value1; buffer[column++] = SATURATE(value); } else { value = quantization * entry->value0; buffer[column] = SATURATE(value); value = quantization * entry->value1; // Advance to the next row assert(rowptr < maxptr); CopyRowBuffer(rowptr, buffer, length); rowptr += pitch; // Zero the decoding buffer if there are more rows to process if (rowptr < maxptr) ZeroHighPassBuffer(buffer, cache_line_count); // Reset the column to the beginning of the row column = 0; buffer[column++] = SATURATE(value); } } } } #endif #if 0 //dan20041030 not used // Decode a subband using FSM, combine the two results decoded from one byte bool DecodeBandFSMCombined(FSM *fsm, BITSTREAM *stream, PIXEL *image, int width, int height, int pitch, int quantization) { int index, skip; uint8_t 
byte; FSMENTRY *entry1, *entry2; PIXEL *rowptr = image; int row = 0, column = 0; int32_t value,bytes_row_size = width*sizeof(PIXEL); PIXEL *maxptr = rowptr + height*pitch; // This Huffman decoder assumes each byte is processed as two 4-bit chunks assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2); ZeroHighPassRow(rowptr, width); // Double check that the bitstream buffer is empty assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); // Decode runs and magnitude values until the band end trailer is decoded for (;;) { // Read a byte from the bitstream //byte = GetBits(stream, BITSTREAM_WORD_SIZE); #if 0 byte = GetByte(stream); if (stream->error != BITSTREAM_ERROR_OKAY) { stream->error = VLC_ERROR_NOTFOUND; return false; } #else // Inline of the third case of GetByte uint8_t *lpCurrentWord = stream->lpCurrentWord; // Get the next byte from the bitstream byte = (uint32_t )(*(lpCurrentWord++)); // Update the state of the bitstream stream->lpCurrentWord = lpCurrentWord; // Check that the high bits are zero assert((byte & ~BITMASK(8)) == 0); #endif // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; entry1 = GetFSMTableEntry(fsm, index); UpdateFSM(fsm, entry1->next_state); // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); entry2 = GetFSMTableEntry(fsm, index); UpdateFSM(fsm, entry2->next_state); // Return when the subband is completely decoded if(entry1->value0 == BAND_END_TRAILER || entry2->value0 == BAND_END_TRAILER) { ResetFSM(fsm); return true; } // If no magnitude value is decoded at the first step if (entry1->value0 == 0) { // If no magnitude is decoded at the second step if(entry2->value0 == 0) { column += entry1->pre_skip+entry2->pre_skip; // Did the scan go beyond the end of the row? 
while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } } // If one magnitude is decoded at the second step else if(entry2->value1 == 0) { // Skip to the non-zero position column += entry1->pre_skip+entry2->pre_skip; // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } // Fill in the decoded magnitude // Undo quantization and scaling value = quantization * entry2->value0; // Check the column before storing the value //assert(index < width); assert(0 <= column && column < width); // Store the saturated value rowptr[column] = SATURATE(value); column += entry2->post_skip; // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } } // If two magnitudes are decoded at the second step else { column += entry1->pre_skip; // Did the scan go beyond the end of the row? 
while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } // Check the column before storing values assert(0 <= column && column < width); if(column < width-1) { value = quantization * entry2->value0; rowptr[column++] = SATURATE(value); value = quantization * entry2->value1; rowptr[column++] = SATURATE(value); } else { value = quantization * entry2->value0; rowptr[column] = SATURATE(value); value = quantization * entry2->value1; rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); column = 0; rowptr[column++] = SATURATE(value); } } } // If only one magnitude is decoded at the first step else if(entry1->value1 == 0) { // Undo quantization and scaling value = quantization * entry1->value0; column += entry1->pre_skip; // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } // Fill in the decoded magnitude // Check the column before storing the value //assert(index < width); assert(0 <= column && column < width); // Store the saturated value at the position found in the scan rowptr[column] = SATURATE(value); // If no magnitude is decoded at the second step if(entry2->value0 == 0) { column += entry1->post_skip+entry2->pre_skip; // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } } // If one magnitude is decoded at the second step else if (entry2->value1 == 0) { // Undo quantization and scaling value = quantization * entry2->value0; column += entry1->post_skip+entry2->pre_skip; // Did the scan go beyond the end of the row? 
while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } // Fill in the decoded magnitude // Check the column before storing the value assert(0 <= column && column < width); // Store the saturated value at the position found in the scan rowptr[column] = SATURATE(value); column += entry2->post_skip; // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } } // If two magnitudes are decoded at the second step else { column += entry1->post_skip; // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } // Check the column before storing values assert(0 <= column && column < width); if(column < width-1) { value = quantization * entry2->value0; rowptr[column++] = SATURATE(value); value = quantization * entry2->value1; rowptr[column++] = SATURATE(value); } else { value = quantization * entry2->value0; rowptr[column] = SATURATE(value); value = quantization * entry2->value1; rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); column = 0; rowptr[column++] = SATURATE(value); } } } // If two magnitudes are decoded at the first step else { // Check the column before storing values assert(0 <= column && column < width); if(column < width-1) { value = quantization * entry1->value0; rowptr[column++] = SATURATE(value); value = quantization * entry1->value1; rowptr[column++] = SATURATE(value); } else { value = quantization * entry1->value0; rowptr[column] = SATURATE(value); value = quantization * entry1->value1; rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); column = 0; 
rowptr[column++] = SATURATE(value); } // If two magnitudes are decoded at the first step // then at most one more magnitude can be decoded at the second step assert(entry2->value1 == 0); // If no magnitude is decoded at the second step if(entry2->value0 == 0) { column += entry2->pre_skip; // entry2->pre_skip <=4 must be true // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow(rowptr, width); } } // If one magnitude is decoded at the second step else { column += entry2->pre_skip; // must be a small zero run // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if (rowptr < maxptr) ZeroHighPassRow(rowptr, width); } // Fill in the decoded magnitude // Undo quantization and scaling value = quantization * entry2->value0; // Check the column before storing the value assert(0 <= column && column < width); // Store the saturated value at the position found in the scan rowptr[column] = SATURATE(value); column += entry2->post_skip; // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if (rowptr < maxptr) ZeroHighPassRow(rowptr, width); } } } } } #endif #if 0 //dan20041030 not used // Decode a subband using FSM. 
One byte is read from the bitstream each time and decoded in two steps // Original version that does not use a separate buffer for decoding bool DecodeBandFSM8s(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch) { int index, byte; FSMENTRY *entry; PIXEL8S *rowptr = image; int column = 0; int32_t value; PIXEL8S *maxptr; int length = width * sizeof(PIXEL8S); //ROI roi = {width, 1}; // This version of Huffman decoder assumes that one byte // is processed as two 4-bit chunks assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2); assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); // Convert the pitch to units of pixels pitch /= sizeof(PIXEL8S); // Compute the address of the row after the last row in the band maxptr = rowptr + height * pitch; // Round up the row length (in bytes) to a multiple of 16 bytes length = ALIGN16(length); ZeroHighPassRow((PIXEL *)rowptr, length); // Decode runs and magnitude values until the band end trailer is decoded for (;;) { // Read a byte from the bitstream byte = GetFastByte(stream); // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); // Return when the entire band is decoded if (entry->value0 == BAND_END_TRAILER) { // Zero out the whole subband from here on rowptr += pitch; while(rowptr < maxptr) { ZeroHighPassRow((PIXEL *)rowptr, length); rowptr += pitch; } ResetFSM(fsm); return true; } // set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // If no magnitude value is decoded if (entry->value0 == 0) { column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? 
while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length); } } // If there is only one decoded magnitude value else if(entry->value1 == 0) { value = entry->value0; column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length); } // Fill in the decoded magnitude // Check the column before storing the value assert(0 <= column && column < width); // Store the saturated value at the position found in the scan rowptr[column] = SATURATE8S(value); column += entry->post_skip; // Did the scan go beyond the end of the row? if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length); } } // If there are two decoded magnitude values else { // Check the column before storing values assert(0 <= column && column < width); if(column < width-1) { value = entry->value0; rowptr[column++] = SATURATE8S(value); value = entry->value1; rowptr[column++] = SATURATE8S(value); } else { value = entry->value0; rowptr[column] = SATURATE8S(value); value = entry->value1; rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length); column = 0; rowptr[column++] = SATURATE8S(value); } } // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); // Return if the subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { // Zero out the whole subband from here on rowptr += 
pitch; while(rowptr < maxptr) { ZeroHighPassRow((PIXEL *)rowptr, length); rowptr += pitch; } ResetFSM(fsm); return true; } // Set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // If no magnitude value is decoded if (entry->value0 == 0) { column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length); } } // If there is only one decoded magnitude value else if (entry->value1 == 0) { value = entry->value0; column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length); } // Fill in the decoded magnitude // Check the column before storing the value assert(0 <= column && column < width); // Store the saturated value at the position found in the scan rowptr[column] = SATURATE8S(value); column += entry->post_skip; // Did the scan go beyond the end of the row? 
if (column >= width) { // Compute the starting column for the next row column -= width; // Advance to the next row rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length); } } // If there are two decoded magnitude values else { // Check the column before storing values assert(0 <= column && column < width); if(column < width-1) { value = entry->value0; rowptr[column++] = SATURATE8S(value); value = entry->value1; rowptr[column++] = SATURATE8S(value); } else { value = entry->value0; rowptr[column] = SATURATE8S(value); value = entry->value1; rowptr += pitch; if(rowptr < maxptr) ZeroHighPassRow((PIXEL *)rowptr, length); column = 0; rowptr[column++] = SATURATE8S(value); } } } } #endif // same as DecodeBandFSM8sNoGap but output to 16bit data bool DecodeBandFSM16sNoGap2Pass(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, int quant) { int index, byte; FSMENTRY *entry; PIXEL *rowptr = (PIXEL *)image; PIXEL16S *bandendptr; int value; #if ERROR_TOLERANT uint8_t *startCurrentWord = stream->lpCurrentWord; int32_t startWordsUsed = stream->nWordsUsed; #endif #if _FSMBUFFER __declspec(align(32)) FSMENTRY buffer; #endif if (image == NULL) { return false; } // Reset the decoder ResetFSM(fsm); pitch /= sizeof(PIXEL16S); // Zero out the entire subband ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S)); // This Huffman decoder assumes each byte is processed as two 4-bit chunks assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE); assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); bandendptr = rowptr + height * pitch; #if 0 // test for errors. 
{
	// Fault-injection test hook (disabled by the surrounding #if 0)
	if((rand() % 10) == 1) stream->lpCurrentWord[rand()%50] ^= 1;
}
#endif

// Decode runs and magnitude values until the entire band is decoded
// (first pass: stores the low-order part of each coefficient)
#if ERROR_TOLERANT
while((intptr_t)bandendptr - (intptr_t)rowptr >= 0)
#else
for (;;)
#endif
{
	// Read a byte from the bitstream
#if ERROR_TOLERANT
	if(stream->nWordsUsed)
	{
		byte = GetFastByte(stream);
	}
	else
	{
		break;
	}
#else
	byte = GetFastByte(stream);
#endif

	// Decode the first 4-bit chunk
	index = byte >> FSM_INDEX_SIZE;

	// Index into the lookup table at that state
	entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
	// Copy the table entry into an aligned local buffer before use
	memcpy(&buffer, entry, sizeof(FSMENTRY));
	entry = &buffer;
#endif

	// Return if the subband is decoded completely
	if (entry->value0 == BAND_END_TRAILER)
	{
		assert(rowptr <= bandendptr);
		ResetFSM(fsm);
		goto SecondPass;
	}

	// Set the pointer to the next state
	UpdateFSM(fsm, (int)entry->next_state);

	// Skip the decoded zero runs (pre-skip packed in the low 12 bits)
	rowptr = &rowptr[entry->pre_post_skip & 0xfff];

	// Write down the first decoded magnitude
	value = entry->value0;
	rowptr[0] = value;//SATURATE(value);

	// Write down the second decoded magnitude
	value = entry->value1;
	rowptr[1] = value;//SATURATE(value);

	// Skip the appropriate distance (post-skip packed in the upper bits)
	rowptr = &rowptr[entry->pre_post_skip >> 12];

	// decode the second 4-bit chunk
	index = byte & ((1<<FSM_INDEX_SIZE)-1);

	// Index into the lookup table at that state
	entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
	memcpy(&buffer, entry, sizeof(FSMENTRY));
	entry = &buffer;
#endif

	// Return if the subband is decoded completely
	if (entry->value0 == BAND_END_TRAILER)
	{
		assert(rowptr <= bandendptr);
		ResetFSM(fsm);
		goto SecondPass;
	}

	// set the pointer to the next state
	UpdateFSM(fsm, (int)entry->next_state);

	// Skip the decoded zero runs
	rowptr = &rowptr[entry->pre_post_skip & 0xfff];

	// Write down the first decoded magnitude
	value = entry->value0;
	rowptr[0] = value;//SATURATE(value);

	// Write down the second decoded magnitude
	value = entry->value1;
	rowptr[1] = value;//SATURATE(value);

	// Skip the decoded zero runs
	rowptr = &rowptr[entry->pre_post_skip >> 12];
}

SecondPass:

// Rewind the output pointer to the start of the band for the second pass.
// NOTE(review): rowptr was declared as PIXEL * but is reset here through a
// PIXEL16S * cast — presumably PIXEL is a 16-bit type in this build; confirm.
rowptr = (PIXEL16S *)image;

// Realign the bitstream and step past the band header for the second pass
AlignBits(stream);
AlignBitsTag(stream);
stream->lpCurrentWord += 4;
stream->nWordsUsed -= 4;

// Decode runs and magnitude values until the entire band is decoded
// (second pass: ORs each decoded value, shifted up 8 bits, into place)
#if ERROR_TOLERANT
while((intptr_t)bandendptr - (intptr_t)rowptr >= 0)
#else
for (;;)
#endif
{
	// Read a byte from the bitstream
#if ERROR_TOLERANT
	if(stream->nWordsUsed)
	{
		byte = GetFastByte(stream);
	}
	else
	{
		break;
	}
#else
	byte = GetFastByte(stream);
#endif

	// Decode the first 4-bit chunk
	index = byte >> FSM_INDEX_SIZE;

	// Index into the lookup table at that state
	entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
	memcpy(&buffer, entry, sizeof(FSMENTRY));
	entry = &buffer;
#endif

	// Return if the subband is decoded completely
	if (entry->value0 == BAND_END_TRAILER)
	{
		assert(rowptr <= bandendptr);
		ResetFSM(fsm);
		return true;
	}

	// Set the pointer to the next state
	UpdateFSM(fsm, (int)entry->next_state);

	// Skip the decoded zero runs
	rowptr = &rowptr[entry->pre_post_skip & 0xfff];

	// Write down the first decoded magnitude (merged into the high byte)
	value = entry->value0;
	rowptr[0] |= value << 8;

	// Write down the second decoded magnitude (merged into the high byte)
	value = entry->value1;
	rowptr[1] |= value << 8;

	// Skip the appropriate distance
	rowptr = &rowptr[entry->pre_post_skip >> 12];

	// decode the second 4-bit chunk
	index = byte & ((1<<FSM_INDEX_SIZE)-1);

	// Index into the lookup table at that state
	entry = GetFSMTableEntry(fsm, index);

#if _FSMBUFFER
	memcpy(&buffer, entry, sizeof(FSMENTRY));
	entry = &buffer;
#endif

	// Return if the subband is decoded completely
	if (entry->value0 == BAND_END_TRAILER)
	{
		assert(rowptr <= bandendptr);
		ResetFSM(fsm);
		return true;
	}

	// set the pointer to the next state
	UpdateFSM(fsm, (int)entry->next_state);

	// Skip the decoded zero runs
	rowptr = &rowptr[entry->pre_post_skip & 0xfff];

	// Write down the first decoded magnitude (merged into the high byte)
	value = entry->value0;
	rowptr[0] |= value << 8;

	// Write down the second decoded magnitude (merged into the high byte)
	value = entry->value1;
rowptr[1] |= value << 8; // Skip the decoded zero runs rowptr = &rowptr[entry->pre_post_skip >> 12]; } #if ERROR_TOLERANT // Reset the decoder ResetFSM(fsm); // Backup the bitstream to the beginning of the band stream->lpCurrentWord = startCurrentWord; stream->nWordsUsed = startWordsUsed; #if 0 AlignBitsTag(stream); // Read the debugging marker { TAGVALUE segment; do { segment = GetTagValue(stream); } while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER); stream->lpCurrentWord -= 4; stream->nWordsUsed += 4; } #else SkipSubband(stream); #endif #endif return true; } // Same as DecodeBandFSM8sNoGap but output to 16bit data #if _DEBUG bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, FILE *logfile) #else bool DecodeBandFSM16sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch) #endif { int index, byte; FSMENTRY *entry; FSMENTRYFAST *entryfast; PIXEL16S *rowptr = image; PIXEL16S *bandendptr; PIXEL16S *fastendptr; int32_t value; uint8_t *startCurrentWord = stream->lpCurrentWord; uint8_t *CurrentWord = stream->lpCurrentWord; int32_t startWordsUsed = stream->nWordsUsed; ptrdiff_t offset; #if _FSMBUFFER __declspec(align(32)) FSMENTRY buffer; #endif #if (0 && DEBUG) DebugOutputBitstreamPosition(stream); DebugOutputBitstreamBytes(stream, 16); #endif // Reset the decoder ResetFSM(fsm); if (fsm->InitizedRestore != 1 && fsm->InitizedRestore != 0) return false; #if (0 && DEBUG) DebugOutputFSM(fsm); #endif pitch /= sizeof(PIXEL16S); // Zero out the entire subband ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S)); //memset(rowptr, 0, pitch*height*sizeof(PIXEL16S)); // This Huffman decoder assumes each byte is processed as two 4-bit chunks assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE); assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); bandendptr = rowptr + height * pitch; #if 0 // test for errors. 
{ if((rand() % 10) == 1) stream->lpCurrentWord[rand()%50] ^= 1; } #endif fastendptr = bandendptr; fastendptr -= 644; // two 320 zero runs with 4 zeros after is the maximum step size per loop. // Decode runs and magnitude values until the entire band is decoded while(rowptr < fastendptr) { // Read a byte from the bitstream byte = *CurrentWord++; // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state entryfast = (FSMENTRYFAST *)GetFSMTableEntry(fsm, index); #if (0 && DEBUG) //DebugOutputFSMEntryFast(fsm, index, entryfast); PrintFSMEntryFast(fsm, index, entryfast, logfile); #endif // Set the pointer to the next state UpdateFSM(fsm, (int)entryfast->next_state); // Skip the decoded zero runs rowptr = &rowptr[entryfast->pre_post_skip & 0x1ff]; // Write down the first decoded magnitude *((uint32_t *)rowptr) = entryfast->values; // Skip the appropriate distance rowptr = &rowptr[(entryfast->pre_post_skip >> 12) & 0x7]; // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); // Index into the lookup table at that state entryfast = (FSMENTRYFAST *)GetFSMTableEntry(fsm, index); #if (0 && DEBUG) //DebugOutputFSMEntryFast(fsm, index, entryfast); PrintFSMEntryFast(fsm, index, entryfast, logfile); #endif // set the pointer to the next state UpdateFSM(fsm, (int)entryfast->next_state); // Skip the decoded zero runs rowptr = &rowptr[entryfast->pre_post_skip & 0x1ff]; // Write down the first decoded magnitude *((uint32_t *)rowptr) = entryfast->values; // Skip the decoded zero runs rowptr = &rowptr[(entryfast->pre_post_skip >> 12) & 0x7]; } offset = CurrentWord - startCurrentWord; stream->lpCurrentWord += offset; stream->nWordsUsed -= (int)offset; // Decode runs and magnitude values until the entire band is decoded #if ERROR_TOLERANT while(bandendptr >= rowptr) #else for (;;) #endif { #if (0 && DEBUG) if (!(rowptr < bandendptr)) { return true; } #endif #if (0 && DEBUG) PrintBitstreamPosition(stream, logfile); #endif 
// Read a byte from the bitstream #if ERROR_TOLERANT if(stream->nWordsUsed > 0) { byte = GetFastByte(stream); } else { break; } #else byte = GetFastByte(stream); #endif // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); #if (0 && DEBUG) //DebugOutputFSMEntry(fsm, index, entry); PrintFSMEntry(fsm, index, entry, logfile); #endif #if _FSMBUFFER memcpy(&buffer, entry, sizeof(FSMENTRY)); entry = &buffer; #endif // Return if the subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { assert(rowptr <= bandendptr); ResetFSM(fsm); return true; } // Set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_post_skip & 0x1ff]; // max zero run is 320 pre-skip // Write down the first decoded magnitude if ((value = entry->value0)) { #if ERROR_TOLERANT if (bandendptr > rowptr) #endif rowptr[0] = value;//SATURATE(value); } // Write down the second decoded magnitude if ((value = entry->value1)) { #if ERROR_TOLERANT if (bandendptr > rowptr+1) #endif rowptr[1] = value;//SATURATE(value); } // Skip the appropriate distance rowptr = &rowptr[(entry->pre_post_skip >> 12) & 0x7];// max zero post-skip 4 (nibble of zeros in the FSM) // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); #if (0 && DEBUG) //DebugOutputFSMEntry(fsm, index, entry); PrintFSMEntry(fsm, index, entry, logfile); #endif #if _FSMBUFFER memcpy(&buffer, entry, sizeof(FSMENTRY)); entry = &buffer; #endif // Return if the subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { assert(rowptr <= bandendptr); ResetFSM(fsm); return true; } // set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_post_skip & 0x1ff]; // max zero run is 320 pre-skip // Write 
down the first decoded magnitude if ((value = entry->value0)) { #if ERROR_TOLERANT if (bandendptr > rowptr) #endif rowptr[0] = value;//SATURATE(value); } // Write down the second decoded magnitude if ((value = entry->value1)) { #if ERROR_TOLERANT if (bandendptr > rowptr+1) #endif rowptr[1] = value;//SATURATE(value); } // Skip the decoded zero runs rowptr = &rowptr[(entry->pre_post_skip >> 12) & 0x7];// max zero post-skip 4 (nibble of zeros in the FSM) } #if ERROR_TOLERANT // Reset the decoder ResetFSM(fsm); // Backup the bitstream to the beginning of the band stream->lpCurrentWord = startCurrentWord; stream->nWordsUsed = startWordsUsed; #if 0 AlignBitsTag(stream); // Read the debugging marker { TAGVALUE segment; do { segment = GetTagValue(stream); } while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER); stream->lpCurrentWord -= 4; stream->nWordsUsed += 4; } #else SkipSubband(stream); #endif #endif return true; } bool DecodeBandFSM16sNoGapWithPeaks(FSM *fsm, BITSTREAM *stream, PIXEL16S *image, int width, int height, int pitch, PIXEL *peaks, int level, int quant) { int index, byte; FSMENTRY *entry; PIXEL16S *rowptr = image; PIXEL16S *bandendptr; PIXEL16S *fastendptr; int32_t value; uint8_t *startCurrentWord = stream->lpCurrentWord; uint8_t *CurrentWord = stream->lpCurrentWord; int32_t startWordsUsed = stream->nWordsUsed; #if _FSMBUFFER __declspec(align(32)) FSMENTRY buffer; #endif // Reset the decoder ResetFSM(fsm); //This is been called with non-prequantized FSM if(quant>1) level /= quant; pitch /= sizeof(PIXEL16S); // Zero out the entire subband ZeroHighPassRow((PIXEL *)rowptr, pitch*height*sizeof(PIXEL16S)); // This Huffman decoder assumes each byte is processed as two 4-bit chunks assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE); assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); bandendptr = rowptr + height * pitch; #if 0 // test for errors. 
{ if((rand() % 10) == 1) stream->lpCurrentWord[rand()%50] ^= 1; } #endif fastendptr = bandendptr; fastendptr -= 1000; // Decode runs and magnitude values until the entire band is decoded while(rowptr < fastendptr) { // Read a byte from the bitstream byte = *CurrentWord++; // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); // Set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_post_skip & 0xfff]; // Write down the first decoded magnitude value = entry->value0; if(abs(value) > level) rowptr[0] = *peaks++ / quant; else rowptr[0] = value;//SATURATE(value); value = entry->value1; rowptr[1] = value;//SATURATE(value); // Skip the appropriate distance rowptr = &rowptr[entry->pre_post_skip >> 12]; // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); // set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_post_skip & 0xfff]; // Write down the first decoded magnitude value = entry->value0; if(abs(value) > level) rowptr[0] = *peaks++ / quant; else rowptr[0] = value;//SATURATE(value); value = entry->value1; rowptr[1] = value;//SATURATE(value); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_post_skip >> 12]; } stream->lpCurrentWord += ((intptr_t)CurrentWord - (intptr_t)startCurrentWord); stream->nWordsUsed -= (int)(((intptr_t)CurrentWord - (intptr_t)startCurrentWord)); // Decode runs and magnitude values until the entire band is decoded #if ERROR_TOLERANT while(((intptr_t)bandendptr - (intptr_t)rowptr) >= 0) #else for (;;) #endif { #if (0 && DEBUG) if (!(rowptr < bandendptr)) { return true; } #endif // Read a byte from the bitstream #if ERROR_TOLERANT if(stream->nWordsUsed) { byte = GetFastByte(stream); } else { 
break; } #else byte = GetFastByte(stream); #endif // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); #if _FSMBUFFER memcpy(&buffer, entry, sizeof(FSMENTRY)); entry = &buffer; #endif // Return if the subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { assert(rowptr <= bandendptr); ResetFSM(fsm); return true; } // Set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_post_skip & 0xfff]; // Write down the first decoded magnitude value = entry->value0; if(abs(value) > level) rowptr[0] = *peaks++ / quant; else rowptr[0] = value;//SATURATE(value); // Write down the second decoded magnitude value = entry->value1; rowptr[1] = value;//SATURATE(value); // Skip the appropriate distance rowptr = &rowptr[entry->pre_post_skip >> 12]; // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); #if _FSMBUFFER memcpy(&buffer, entry, sizeof(FSMENTRY)); entry = &buffer; #endif // Return if the subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { assert(rowptr <= bandendptr); ResetFSM(fsm); return true; } // set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_post_skip & 0xfff]; // Write down the first decoded magnitude value = entry->value0; if(abs(value) > level) rowptr[0] = *peaks++ / quant; else rowptr[0] = value;//SATURATE(value); // Write down the second decoded magnitude value = entry->value1; rowptr[1] = value;//SATURATE(value); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_post_skip >> 12]; } #if ERROR_TOLERANT // Reset the decoder ResetFSM(fsm); // Backup the bitstream to the beginning of the band stream->lpCurrentWord = startCurrentWord; stream->nWordsUsed = startWordsUsed; #if 0 
AlignBitsTag(stream); // Read the debugging marker { TAGVALUE segment; do { segment = GetTagValue(stream); } while(segment.tuple.tag != CODEC_TAG_BAND_TRAILER); stream->lpCurrentWord -= 4; stream->nWordsUsed += 4; } #else SkipSubband(stream); #endif #endif return true; } // This version of DecodeBandFSM() assumes that the gap between width and pitch has been coded as // zero runs. Therefore decoded magnitude values can be written down without the need to check // if the end of a row has been reached. Hence the total number of conditionals in DecodeBandFSM // can be significantly reduced. // Decode a subband using FSM. One byte is read from the bitstream each time and decoded in two steps // Original version that does not use a separate buffer for decoding #if !_INDIVIDUAL_ENTRY #if 0 //dan20041030 not used bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch) { int index, byte; FSMENTRY *entry; PIXEL8S *rowptr = image; PIXEL8S *bandendptr; int32_t value; #if _FSMBUFFER __declspec(align(32)) FSMENTRY buffer; #endif pitch /= sizeof(PIXEL8S); // Zero out the entire subband ZeroHighPassRow((PIXEL *)rowptr, pitch*height); // This version of Huffman decoder assumes that one byte // is processed as two 4-bit chunks assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2); assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); bandendptr = rowptr + height * pitch; // Decode runs and magnitude values until the entire band is decoded //while (rowptr < bandendptr) for (;;) { #if (0 && DEBUG) if (!(rowptr < bandendptr)) { return true; } #endif // Check that the decoder has not overrun the output array //assert(rowptr < bandendptr); // Read a byte from the bitstream byte = GetFastByte(stream); // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); #if _FSMBUFFER memcpy(&buffer, entry, sizeof(FSMENTRY)); entry = &buffer; #endif #if 1 // Return if the 
subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { assert(rowptr <= bandendptr); ResetFSM(fsm); return true; } #endif // Set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_skip]; // Write down the first decoded magnitude value = entry->value0; rowptr[0] = SATURATE(value); // Write down the second decoded magnitude value = entry->value1; rowptr[1] = SATURATE(value); // Skip the appropriate distance rowptr = &rowptr[entry->post_skip]; // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); #if _FSMBUFFER memcpy(&buffer, entry, sizeof(FSMENTRY)); entry = &buffer; #endif #if 1 // Return if the subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { assert(rowptr <= bandendptr); ResetFSM(fsm); return true; } #endif // set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_skip]; // Write down the first decoded magnitude value = entry->value0; rowptr[0] = SATURATE(value); // Write down the second decoded magnitude value = entry->value1; rowptr[1] = SATURATE(value); // Skip the decoded zero runs rowptr = &rowptr[entry->post_skip]; } } #endif #elif _SINGLE_FSM_TABLE bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch) { int index, byte, i; FSMENTRY *entry,*firstentry = fsm->table->firstentry; PIXEL8S *rowptr = image; PIXEL8S *bandendptr; int32_t value; pitch /= sizeof(PIXEL8S); // Zero out the entire subband ZeroHighPassRow((PIXEL *)rowptr, pitch*height); // The Huffman decoder assumes each byte is processed as two 4-bit chunks assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE); assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); // Decode runs and magnitude values until the entire band is decoded for (;;) { // Check that the decoder 
has not overrun the output array //assert(rowptr < bandendptr); // Read a byte from the bitstream byte = GetFastByte(stream); // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state i = (fsm->next_state_index << FSM_INDEX_SIZE) | index;//DAN entry = firstentry+i; //DAN // Return if the subband is decoded completely if(entry->value0 == BAND_END_TRAILER) { assert(rowptr <= bandendptr); ResetFSMIndividual(fsm); return true; } // set the pointer to the next state UpdateFSMIndividual(fsm, (entry->next_state)); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_skip]; // Write down the first decoded magnitude value = entry->value0; rowptr[0] = SATURATE(value); // Write down the second decoded magnitude value = entry->value1; rowptr[1] = SATURATE(value); // Skip the appropriate distance rowptr = &rowptr[entry->post_skip]; // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); // Index into the lookup table at that state i = (fsm->next_state_index << FSM_INDEX_SIZE) | index;//DAN entry = firstentry+i; //DAN // Return if the subband is decoded completely if(entry->value0 == BAND_END_TRAILER) { assert(rowptr <= bandendptr); ResetFSMIndividual(fsm); return true; } // set the pointer to the next state UpdateFSMIndividual(fsm, (entry->next_state)); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_skip]; // Write down the first decoded magnitude value = entry->value0; rowptr[0] = SATURATE(value); // Write down the second decoded magnitude value = entry->value1; rowptr[1] = SATURATE(value); // Skip the decoded zero runs rowptr = &rowptr[entry->post_skip]; } } #else bool DecodeBandFSM8sNoGap(FSM *fsm, BITSTREAM *stream, PIXEL8S *image, int width, int height, int pitch) { int index, byte; FSMENTRY *entry; PIXEL8S *rowptr = image; PIXEL8S *bandendptr; int32_t value; #if 1 __declspec(align(4)) FSMENTRY buffer; #endif pitch /= sizeof(PIXEL8S); // zero out the entire subband ZeroHighPassRow((PIXEL 
*)rowptr, pitch*height); // The Huffman decoder assumes each byte is processed as two 4-bit chunks assert(BITSTREAM_WORD_SIZE == 2 * FSM_INDEX_SIZE); assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); bandendptr = rowptr + height * pitch; // Decode runs and magnitude values until the entire band is decoded for (;;) { #if (0 && DEBUG) if (!(rowptr < bandendptr)) { return true; } #endif // Read a byte from the bitstream byte = GetFastByte(stream); // Decode the first 4-bit chunk index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state entry = GetFSMTableEntryIndividual(fsm, index); // Return if the subband is decoded completely if(entry == NULL) { assert(rowptr <= bandendptr); ResetFSMIndividual(fsm); return true; } // Set the pointer to the next state UpdateFSMIndividual(fsm, (entry->next_state)); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_skip]; // Write down the first decoded magnitude value = entry->value0; rowptr[0] = SATURATE(value); // Write down the second decoded magnitude value = entry->value1; rowptr[1] = SATURATE(value); // Skip the appropriate distance rowptr = &rowptr[entry->post_skip]; // decode the second 4-bit chunk index = byte & ((1<<FSM_INDEX_SIZE)-1); // Index into the lookup table at that state entry = GetFSMTableEntryIndividual(fsm, index); // Return if the subband is decoded completely if (entry == NULL) { assert(rowptr <= bandendptr); ResetFSMIndividual(fsm); return true; } // Set the pointer to the next state UpdateFSMIndividual(fsm, (entry->next_state)); // Skip the decoded zero runs rowptr = &rowptr[entry->pre_skip]; // Write down the first decoded magnitude value = entry->value0; rowptr[0] = SATURATE(value); // Write down the second decoded magnitude value = entry->value1; rowptr[1] = SATURATE(value); // Skip the decoded zero runs rowptr = &rowptr[entry->post_skip]; } } #endif // Decode the highpass band coefficients but do not write them out - used in SIF mode bool SkipBandFSM(FSM *fsm, BITSTREAM 
*stream, PIXEL8S *image, int width, int height, int pitch)
{
	int index, byte;
	FSMENTRY *entry;

	// NOTE(review): image, width, and height are unused here -- the band is
	// decoded purely to advance the bitstream and FSM state; nothing is stored.
	pitch /= sizeof(PIXEL8S);

	// The Huffman decoder assumes each byte is processed as two 4-bit chunks
	assert(BITSTREAM_WORD_SIZE == FSM_INDEX_SIZE*2);
	assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE);

	// Decode runs and magnitude values until the entire band is decoded
	for (;;)
	{
		// Read a byte from the bitstream
		byte = GetFastByte(stream);

		// Decode the first 4-bit chunk
		index = byte >> FSM_INDEX_SIZE;

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER) {
			ResetFSM(fsm);
			return true;
		}

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);

		// decode the second 4-bit chunk
		index = byte & ((1<<FSM_INDEX_SIZE)-1);

		// Index into the lookup table at that state
		entry = GetFSMTableEntry(fsm, index);

		// Return if the subband is decoded completely
		if (entry->value0 == BAND_END_TRAILER) {
			ResetFSM(fsm);
			return true;
		}

		// set the pointer to the next state
		UpdateFSM(fsm, (int)entry->next_state);
	}
}

#if _TIMING
extern TIMER tk_fastruns;
#endif

#if 0 //dan20041030 not used
// New version of coefficient runs decoder that uses a finite state machine with a scaling factor
bool DecodeFastRunsFSM8s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;
	FILE *logfile = decoder->logfile;
	int result;

	// Get the pointer to the finite state machine
	FSM *fsm = &decoder->fsm[decoder->codec.active_codebook]; //DAN20041026

	// All rows are treated as one long row that covers the entire band
	int size = fsm->table.num_states;
	PIXEL *rowptr;
	int row = 0;
	int pitch;
	int pixel_type = wavelet->pixel_type[band_index];

	decoder->codec.active_codebook = 0; // reset CODEC state

	// Must have a valid wavelet
	assert(wavelet != NULL);
	if (wavelet == NULL)
		return false;

	//Must have a valid FSM
assert(fsm != NULL); if(fsm == NULL) return false; assert(size > 0); if (size == 0) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } // Check if the band is intended for 8-bit pixels assert(pixel_type == PIXEL_TYPE_8S); START(tk_fastruns); rowptr = (PIXEL *)wavelet->band[band_index]; pitch = wavelet->pitch8s; // Use the 8-bit pitch //pitch = wavelet->pitch; // The finite state machine does not support a marker at the end of rows #if RUNS_ROWEND_MARKER assert(0); #endif // Get one byte from the bitstream and decode 4 bits at a time result = DecodeBandFSM8sNoGap(fsm, stream, (PIXEL8S *)rowptr, width, height, pitch); assert(result == true); if (result != true) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } #if (0 && DEBUG && _WIN32) _CrtCheckMemory(); #endif #if (0 && DEBUG) if (logfile) DumpBand("Band", wavelet, band_index, NULL, logfile); #endif #if (0 && DEBUG) if (logfile) { fprintf(logfile, "DecodeFastRunsFSM8s, band index: %d\n", band_index); DumpWaveletRow(wavelet, band_index, 0, logfile); } #endif end: STOP(tk_fastruns); return true; } #endif #if _DEQUANTIZE_IN_FSM void ReQuantFSM(FSM *fsm, int quant) { int count = 0; int i, j; short *restore = &fsm->restoreFSM[0]; #if !_INDIVIDUAL_ENTRY for (i = 0; i < fsm->table.num_states; i++) { FSMENTRY *entry = fsm->table.entries[i]; for (j = 0; j < (1 << FSM_INDEX_SIZE); j++) { entry[j].value0 = restore[count++]; entry[j].value1 = restore[count++]; } } #else for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++) { FSMENTRY *entry = fsm_table.entries_ind[i]; if(entry) { entry->value0 = restore[count++]; entry->value1 = restore[count++]; } } #endif } void DeQuantFSM(FSM *fsm, int quant) { int i, j; if(fsm->LastQuant > 1 && fsm->LastQuant != quant) { ReQuantFSM(fsm, fsm->LastQuant); } else if(fsm->LastQuant == quant) { return; } if(fsm->InitizedRestore == 0) { short *restore = &fsm->restoreFSM[0]; int count = 0; #if !_INDIVIDUAL_ENTRY for (i = 0; i < fsm->table.num_states; i++) { FSMENTRY 
*entry = fsm->table.entries[i];

			for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
			{
				restore[count++] = entry[j].value0;
				restore[count++] = entry[j].value1;
			}
		}
#else
		for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
		{
			FSMENTRY *entry = fsm->table.entries_ind[i];

			if(entry)
			{
				restore[count++] = entry->value0;
				restore[count++] = entry->value1;
			}
		}
#endif
		fsm->InitizedRestore = 1;
	}

	// Scale the magnitudes in place; entries at or above 0x7ff0 are control
	// codes (band end trailer etc.) and their value0 must not be scaled.
#if !_INDIVIDUAL_ENTRY
	for (i = 0; i < fsm->table.num_states; i++)
	{
		FSMENTRY *entry = fsm->table.entries[i];

		for (j = 0; j < (1 << FSM_INDEX_SIZE); j++)
		{
			if(entry[j].value0 < 0x7ff0) // band end trailer
				entry[j].value0 *= quant;
			entry[j].value1 *= quant;
		}
	}
#else
	for (i = 0; i < (fsm->table.num_states << FSM_INDEX_SIZE); i++)
	{
		FSMENTRY *entry = fsm->table.entries_ind[i];

		if(entry)
		{
			if(entry->value0 < 0x7ff0) // band end trailer etc
				entry->value0 *= quant;
			entry->value1 *= quant;
		}
	}
#endif

	fsm->LastQuant = quant;
}
#endif // _DEQUANTIZE_IN_FSM

// New version of coefficient runs decoder that uses a finite state machine with a scaling factor
//dan 7-11-03
bool DecodeFastRunsFSM16s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index,
						  int width, int height, int threading)
{
	//CODEC_ERROR error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	int result = true;
	int quant = wavelet->quantization[band_index];
	int active_codebook = decoder->codec.active_codebook;

	// Get the pointer to the finite state machine
	FSM *fsm = &decoder->fsm[active_codebook];
	int size;
	PIXEL *rowptr;
	//int row = 0;
	int pitch;
	CODEC_STATE *codec = &decoder->codec;
	//int channel = codec->channel;
	//int subband = codec->band.subband;
	//int num_subbands = codec->num_subbands;
	//int pixel_type = wavelet->pixel_type[band_index];
	int difference_coding = decoder->codec.difference_coding;
	//int localquant = 1;
	int peaklevel = 0;
	//int peaksize = 0;
	PIXEL *peakbase = NULL;

#if (0 && DEBUG)
	// NOTE(review): this disabled debug block references "subband", whose
	// declaration above is commented out -- it would not compile if enabled.
	if (logfile) {
		fprintf(logfile, "Subband: %d, active_codebook: %d, difference_coding: %d\n",
				subband, decoder->codec.active_codebook, difference_coding);
	}
#endif

	decoder->codec.active_codebook = 0; // reset CODEC state
	decoder->codec.difference_coding = 0; //reset state for next subband

	// Must have a valid wavelet
	//assert(wavelet != NULL);
	if (wavelet == NULL)
		return false;

	//Must have a valid FSM
	//assert(fsm != NULL);
	if(fsm == NULL)
		return false;

	if(width==0 || height == 0)
		return false;

	if (fsm->InitizedRestore != 0 && fsm->InitizedRestore != 1)
		return false;

	// All rows are treated as one long row that covers the entire band
	size = fsm->table.num_states;
	//assert(size > 0);
	if (size == 0) {
		decoder->error = CODEC_ERROR_RUN_DECODE;
		return false;
	}

	// Check if the band is intended for 16-bit pixels
	//assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_16S);
	if(wavelet->pixel_type[band_index] != PIXEL_TYPE_16S) {
		decoder->error = CODEC_ERROR_BAD_FRAME;
		return false;
	}

	START(tk_fastruns);

	rowptr = (PIXEL *)wavelet->band[band_index];
	if (rowptr == NULL) {
		decoder->error = CODEC_ERROR_BAD_FRAME;
		return false;
	}

	//pitch = wavelet->pitch8s;	// Use the 8-bit pitch
	pitch = wavelet->pitch;

	peaklevel = codec->peak_table.level;
	peakbase = codec->peak_table.base;

#if _THREADED
	threading = decoder->entropy_worker_new.pool.thread_count > 1 ?
threading : 0; if(threading) { decoder->entropy_worker_new.threads_used = 1; { //int start = stream->nWordsUsed; int end; struct entropy_data_new *data; int next_queue_num = decoder->entropy_worker_new.next_queue_num++; data = &decoder->entropy_worker_new.entropy_data[next_queue_num]; memcpy(&data->stream,stream, sizeof(BITSTREAM)); data->rowptr = rowptr; data->width = width; data->height = height; data->pitch = pitch; data->peaks = peakbase; data->level = peaklevel; data->quant = quant; data->wavelet = wavelet; data->band_index = band_index; data->active_codebook = active_codebook; data->difference_coding = difference_coding; // Start only a particular threadid if(next_queue_num == 0) { ThreadPoolSetWorkCount(&decoder->entropy_worker_new.pool, 1); #if _DELAYED_THREAD_START==0 ThreadPoolSendMessage(&decoder->entropy_worker_new.pool, THREAD_MESSAGE_START); #endif } else { // Set the work count to the number of rows to process ThreadPoolAddWorkCount(&decoder->entropy_worker_new.pool, 1); } { unsigned short tag = *(stream->lpCurrentWord-8) << 8; if(tag == (unsigned short)OPTIONALTAG(CODEC_TAG_SUBBAND_SIZE)) { int chunksize; int value = *(stream->lpCurrentWord-6) << 8; value |= *(stream->lpCurrentWord-5); tag |= *(stream->lpCurrentWord-7); tag = NEG(tag); chunksize = value; chunksize &= 0xffff; chunksize += ((tag&0xff)<<16); chunksize *= 4; chunksize -= 8; { uint32_t *ptr = (uint32_t *)stream->lpCurrentWord; ptr += (chunksize>>2); if(*ptr != 0x00003800) // bandend { goto continuesearch; } } stream->lpCurrentWord += chunksize; stream->nWordsUsed -= chunksize; end = stream->nWordsUsed; } else { continuesearch: while(*((uint32_t *)stream->lpCurrentWord) != 0x00003800) // bandend { stream->lpCurrentWord += 4; stream->nWordsUsed -= 4; } end = stream->nWordsUsed; } } } } else #endif // _THREADED { DeQuantFSM(fsm, quant); if (peaklevel && peakbase) { result = DecodeBandFSM16sNoGapWithPeaks(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, peakbase, peaklevel, 1); } else 
{ #if _DEBUG result = DecodeBandFSM16sNoGap(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch, logfile); #else result = DecodeBandFSM16sNoGap(fsm, stream, (PIXEL16S *)rowptr, width, height, pitch); #endif } if(difference_coding) { int x,y; PIXEL *line = rowptr; for(y=0;y<height;y++) { for(x=1;x<width;x++) { line[x] += line[x-1]; } line += pitch/2; } } if (result) { // Call thread safe routine to update the band valid flags UpdateWaveletBandValidFlags(decoder, wavelet, band_index); } } //assert(result == true); if (result != true) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } //end: STOP(tk_fastruns); return true; } bool SkipFastRunsFSM(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height) { //CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif int result; // Get the pointer to the finite state machine FSM *fsm = &decoder->fsm[decoder->codec.active_codebook]; //DAN20041026 // All rows are treated as one long row that covers the entire band int size = fsm->table.num_states; PIXEL *rowptr; //int row = 0; int pitch; //int pixel_type = wavelet->pixel_type[band_index]; decoder->codec.active_codebook = 0; // reset CODEC state // Must have a valid wavelet assert(wavelet != NULL); if (wavelet == NULL) return false; //Must have a valid FSM assert(fsm != NULL); if(fsm == NULL) return false; assert(size > 0); if (size == 0) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } // Check if the band is 8bit/pixel assert(wavelet->pixel_type[band_index] == PIXEL_TYPE_8S); START(tk_fastruns); rowptr = (PIXEL *)wavelet->band[band_index]; pitch = wavelet->pitch8s; // Use the 8-bit pitch // The finite state machine does not support a marker at the end of rows #if RUNS_ROWEND_MARKER assert(0); #endif #if 1 // Get one byte from the bitstream and decode 4 bits at a time result = SkipBandFSM(fsm, stream, (PIXEL8S *)rowptr, width, height, pitch); assert(result == true); if (result != 
true) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } #endif #if (0 && DEBUG && _WIN32) _CrtCheckMemory(); #endif #if (0 && DEBUG) if (logfile) DumpBand("Band", wavelet, band_index, NULL, logfile); #endif //end: STOP(tk_fastruns); return true; } // The third version is also based on the finite state machine decoder with // gaps between rows encoded as zero runs, but dequantization is performed as // the highpass values are read from the bitstream and placed into a row buffer. // The highpass values are not written into the wavelet highpass band. // Eventually this routine will be merged into the routine DecodeTemporalBand8s // since this routine contains code specific to the inverse temporal transform // and DecodeTemporalBand8s has become a shell. #if 0 bool DecodeBandRunsFSM8s(DECODER *decoder, BITSTREAM *stream, IMAGE *wavelet, int band_index, int width, int height, IMAGE *frame0, IMAGE *frame1) { CODEC_ERROR error = CODEC_ERROR_OKAY; FILE *logfile = decoder->logfile; int result; // Get the pointer to the finite state machine FSM *fsm = &decoder->fsm; // All rows are treated as one long row that covers the entire band int size = fsm->table.num_states; PIXEL *lowpass = wavelet->band[0]; int lowpass_pitch = wavelet->pitch; //PIXEL8S *rowptr; int row = 0; int pitch; int row_width; // Width of the encoded row of highpass coefficients PIXEL *even = frame0->band[0]; PIXEL *odd = frame1->band[0]; int even_pitch = frame0->pitch; int odd_pitch = frame1->pitch; int pixel_type = wavelet->pixel_type[band_index]; int quantization = wavelet->quantization[band_index]; PIXEL *buffer; size_t buffer_size; int index, byte; FSMENTRY *entry; int column = 0; int32_t value; int buffer_row_size; PIXEL *highpass; // Check that the wavelet into which the band will be decoded is valid assert(wavelet != NULL); if (wavelet == NULL) return false; // Check that the finite state machine is valid assert(fsm != NULL); if (fsm == NULL) return false; assert(size > 0); if (size == 0) { 
decoder->error = CODEC_ERROR_RUN_DECODE; return false; } // Check that the band was encoded using 8-bit signed coefficients assert(pixel_type == PIXEL_TYPE_8S); pitch = wavelet->pitch8s; // Use the pitch for 8-bit packed rows // Get the buffer for storing one row of dequantized highpass coefficients buffer = (PIXEL *)decoder->buffer; buffer_size = decoder->buffer_size; // The finite state machine does not support a marker at the end of each row assert(RUNS_ROWEND_MARKER == 0); /***** Start of code included from DecodeBandFSM8s() *****/ // Check that one byte can be processes as two 4-bit nibbles assert(BITSTREAM_WORD_SIZE == (2 * FSM_INDEX_SIZE)); // Check that the bitstream buffer is empty assert(stream->nBitsFree == BITSTREAM_BUFFER_SIZE); // Convert the pitch to units of pixels pitch /= sizeof(PIXEL8S); buffer_row_size = pitch * sizeof(PIXEL); lowpass_pitch /= sizeof(PIXEL); even_pitch /= sizeof(PIXEL); odd_pitch /= sizeof(PIXEL); // Compute the address of the row after the last row in the band //maxptr = rowptr + height * pitch; // Round up the row length (in bytes) to a multiple of 16 bytes //row_size = ALIGN16(row_size); // Check that the buffer is large enough to hold one row //assert(buffer_size >= row_size); assert(buffer_size >= buffer_row_size); // Use the buffer for the row or highpass coefficients highpass = buffer; #if 1 // The row spans the allocated width (pitch) of the band in no gap mode row_width = pitch; #else // For debugging row_width = wavelet->encoded_pitch/sizeof(PIXEL8S); #endif // Clear the highpass buffer before decoding the non-zero coefficients ZeroHighPassRow(highpass, buffer_row_size); // Decode zero runs and magnitude values (with appended sign bit) // until the marker for the band end trailer has been decoded for (;;) { // Read a byte from the bitstream byte = GetFastByte(stream); /***** Decode the first 4-bit nibble *****/ // Decode the first 4-bit nibble index = byte >> FSM_INDEX_SIZE; // Index into the lookup table at that state 
entry = GetFSMTableEntry(fsm, index); // Return when the entire band is decoded if (entry->value0 == BAND_END_TRAILER) { // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, highpass); // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Process the rest of the subband ZeroHighPassRow(highpass, buffer_row_size); while (++row < height) { // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; } ResetFSM(fsm); return true; } // set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // If no magnitude value is decoded if (entry->value0 == 0) { column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? 
while (column >= row_width) { // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, highpass); // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Compute the starting column for the next row column -= row_width; // Advance to the next row row++; // Clear the highpass buffer before decoding the non-zero coefficients ZeroHighPassRow(highpass, buffer_row_size); } } // If there is only one decoded magnitude value else if (entry->value1 == 0) { value = entry->value0; column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= row_width) { // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, highpass); // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Compute the starting column for the next row column -= row_width; // Advance to the next row row++; // Clear the highpass buffer before decoding the non-zero coefficients ZeroHighPassRow(highpass, buffer_row_size); } // Fill in the decoded magnitude // Check the column before storing the value assert(0 <= column && column < row_width); // Dequantize the value and store it in the highpass row buffer highpass[column] = quantization * value; column += entry->post_skip; // Did the scan go beyond the end of the row? 
if (column >= row_width) { // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, highpass); // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Compute the starting column for the next row column -= row_width; // Advance to the next row row++; // Clear the highpass buffer before decoding the non-zero coefficients ZeroHighPassRow(highpass, buffer_row_size); } } // If there are two decoded magnitude values else { // Check the column before storing values assert(0 <= column && column < row_width); if (column < (row_width - 1)) { // Store both values in the current row highpass[column++] = quantization * entry->value0; highpass[column++] = quantization * entry->value1; } else { value = entry->value0; highpass[column] = quantization * value; value = entry->value1; // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, highpass); // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Advance to the next row row++; // Clear the highpass buffer before decoding the non-zero coefficients ZeroHighPassRow(highpass, buffer_row_size); column = 0; highpass[column++] = quantization * value; } } /***** Decode the second 4-bit nibble *****/ // Decode the second 4-bit nibble index = byte & FSM_INDEX_MASK; // Index into the lookup table at that state entry = GetFSMTableEntry(fsm, index); // Return if the subband is decoded completely if (entry->value0 == BAND_END_TRAILER) { // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, highpass); // Apply the 
inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Process the rest of the subband ZeroHighPassRow(highpass, buffer_row_size); while (++row < height) { // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; } ResetFSM(fsm); return true; } // Set the pointer to the next state UpdateFSM(fsm, (int)entry->next_state); // If no magnitude value is decoded if (entry->value0 == 0) { column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? while (column >= row_width) { // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, highpass); // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Compute the starting column for the next row column -= row_width; // Advance to the next row row++; // Clear the highpass buffer before decoding the non-zero coefficients ZeroHighPassRow(highpass, buffer_row_size); } } // If there is only one decoded magnitude value else if (entry->value1 == 0) { value = entry->value0; column += entry->pre_skip; // The run length scan can go past the end of the row if the row ends // with a run of zeros and the next row begins with a run of zeros // Did the scan go beyond the end of the row? 
while (column >= row_width) { // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, highpass); // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Compute the starting column for the next row column -= row_width; // Advance to the next row row++; // Clear the highpass buffer before decoding the non-zero coefficients ZeroHighPassRow(highpass, buffer_row_size); } // Fill in the decoded magnitude // Check the column before storing the value //assert(index < width); assert(0 <= column && column < row_width); highpass[column] = quantization * value; column += entry->post_skip; // Did the scan go beyond the end of the row? if (column >= row_width) { // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, highpass); // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Compute the starting column for the next row column -= row_width; // Advance to the next row row++; // Clear the highpass buffer before decoding the non-zero coefficients ZeroHighPassRow(highpass, buffer_row_size); } } // If there are two decoded magnitude values else { // Check the column before storing values assert(0 <= column && column < row_width); if (column < (row_width - 1)) { // Store both highpass values in the current row highpass[column++] = quantization * entry->value0; highpass[column++] = quantization * entry->value1; } else { highpass[column] = quantization * entry->value0; value = entry->value1; // Dequantize the highpass coefficients //DequantizeBandRow(rowptr, width, quantization, 
highpass); // Apply the inverse temporal transform to the current row InvertTemporalRow16s(lowpass, highpass, even, odd, width); // Advance to the next lowpass input row lowpass += lowpass_pitch; // Advance to the next even and odd output rows even += even_pitch; odd += odd_pitch; // Advance to the next row row++; // Clear the highpass buffer before decoding the non-zero coefficients ZeroHighPassRow(highpass, buffer_row_size); column = 0; highpass[column++] = quantization * value; } } } /***** End of the code included from DecodeBandFSM8s() *****/ #if 0 assert(result == true); if (result != true) { decoder->error = CODEC_ERROR_RUN_DECODE; return false; } #endif #if (0 && DEBUG && _WIN32) _CrtCheckMemory(); #endif #if (0 && DEBUG) if (logfile) DumpBand("Band", wavelet, band_index, NULL, logfile); #endif #if 0 end: return true; #endif } #endif /***** End of the code for the finite state machine decoder *****/ #if 1 // The second version applies the horizontal inverse filters row by row, so the // memory access pattern is more efficient. The lowpass and highpass temporal // coefficients for each row are inverted and packed into the output in one pass. 
// Apply the inverse horizontal-temporal transform and pack the output into a buffer.
//
// For each pair of output rows: invert the horizontal transform on the temporal
// lowpass and highpass bands of every channel, then invert the temporal transform
// and pack the result as interleaved YUV into the caller's output buffer.
//
// Parameters:
//   transform     - per-channel transform state; wavelet[frame_index] supplies the bands
//   frame_index   - which frame's wavelet to invert
//   num_channels  - number of color channels (<= TRANSFORM_MAX_CHANNELS)
//   output        - packed output frame buffer (two interlaced fields per iteration)
//   output_pitch  - byte pitch of one output row
//   frame         - output frame dimensions and decoded format
//   scratch       - scratch space used for per-row temporal coefficient buffers
//   chroma_offset - chroma offset passed through to the row inversion routines
//   precision     - CODEC_PRECISION_10BIT or CODEC_PRECISION_8BIT
void TransformInverseFrameToYUV(TRANSFORM *transform[], int frame_index, int num_channels,
                                uint8_t *output, int output_pitch, FRAME_INFO *frame,
                                const SCRATCH *scratch, int chroma_offset, int precision)
{
    // Pointers to the rows in the horizontal wavelet for each channel
    PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
    PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

    // Horizontal wavelet band width and pitch
    int horizontal_width[TRANSFORM_MAX_CHANNELS];
    int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
    int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];

    // Quantization factors
    int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
    int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
    int highlow_quantization[TRANSFORM_MAX_CHANNELS];
    int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

    // Pointers to the rows in the temporal wavelet for each channel
    PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
    PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];

    // Push the scratch space state to allocate a new section
    char *buffer = scratch->free_ptr;
#if DEBUG
    size_t buffer_size = scratch->free_size;
#endif

    // Dimensions of the reconstructed frame
    int frame_width = frame->width;
    int frame_height = frame->height;
    // Each iteration of the row loop emits two output rows (one per field)
    int half_height = frame_height / 2;

    size_t temporal_row_size = frame_width * sizeof(PIXEL);
    int field_pitch = 2 * output_pitch;
    int output_width;
    int channel;
    int row;

    // Round up the temporal row size to an integral number of cache lines
    temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

    // Check that the buffer starts on a cache line boundary
    assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

    // Check that the number of channels is reasonable
    assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

    // Check that the buffer is large enough
#if DEBUG
    assert((2 * num_channels * temporal_row_size) <= buffer_size);
#endif

    // Allocate buffers for a single row of lowpass and highpass temporal coefficients
    // and initialize the arrays of row pointers into the horizontal transform bands
    for (channel = 0; channel < num_channels; channel++)
    {
        IMAGE *wavelet = transform[channel]->wavelet[frame_index];

#if (0 && DEBUG)
        // Debug-only dump of the wavelet bands for the first few frames
        int static count = 0;
        if (count < 20)
        {
            char label[_MAX_PATH];
            int i;

            sprintf(label, "Frame%d-%d-", frame_index, count);
            DumpPGM(label, wavelet, NULL);

            for (i = 1; i < wavelet->num_bands; i++)
            {
                sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
                DumpBandPGM(label, wavelet, i, NULL);
            }
        }
        count++;
#endif

        // Initialize the row pointers into the horizontal bands
        horizontal_lowlow[channel] = wavelet->band[LL_BAND];
        horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
        horizontal_highlow[channel] = wavelet->band[HL_BAND];
        horizontal_highhigh[channel] = wavelet->band[HH_BAND];

        lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
        lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
        highlow_quantization[channel] = wavelet->quantization[HL_BAND];
        highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

        // Compute the pitch in units of pixels
        horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

        // Compute the 8-bit pitch in units of pixels
        horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
        //horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);

        // Remember the width of the horizontal wavelet rows for this channel
        horizontal_width[channel] = wavelet->width;

        //TODO: Need to recode the buffer allocations using the scratch space API

        // Divide the buffer into temporal lowpass and highpass rows
        temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
        temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
    }

    // Process one row at a time from each channel
    for (row = 0; row < half_height; row++)
    {
        // Intermediate line buffer placed after the temporal row buffers
        PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);

        // Invert the horizontal transform applied to the temporal bands in each channel
        for (channel = 0; channel < num_channels; channel++)
        {
            int pitch = horizontal_pitch[channel];
            //int pitch8s = horizontal_pitch8s[channel];

            // Invert the horizontal transform applied to the temporal lowpass row
            InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
                                                  (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
                                                  temporal_lowpass[channel], horizontal_width[channel],
                                                  (PIXEL *)line_buffer);

            // Invert the horizontal transform applied to the temporal highpass row
            //DAN20051004 -- possible reversiblity issue
            //InvertHorizontalRow8sBuffered
            //----------------------- Maybe bad
            InvertHorizontalRow16s8sTo16sBuffered(horizontal_highlow[channel], highlow_quantization[channel],
                                                  (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
                                                  temporal_highpass[channel], horizontal_width[channel],
                                                  (PIXEL *)line_buffer);

            // Advance to the next row in each horizontal band in this channel
            horizontal_lowlow[channel] += pitch;
            horizontal_lowhigh[channel] += pitch;
            horizontal_highlow[channel] += pitch;
            horizontal_highhigh[channel] += pitch;
        }

        // The output width is twice the width of the wavelet bands
        output_width = 2 * horizontal_width[0];

        // Adjust the frame width to fill to the end of each row
        //frame_width = output_pitch / 2;

        if (precision == CODEC_PRECISION_10BIT)
        {
            // Invert the temporal bands from all channels and pack output pixels
            switch (frame->format)
            {
            // Need to reduce the resolution from 10 bits to 8 bits during the inverse
            case DECODED_FORMAT_YUYV:
                InvertInterlacedRow16s10bitToYUV(temporal_lowpass, temporal_highpass, num_channels,
                                                 output, output_pitch, output_width, frame_width,
                                                 chroma_offset);
                break;

            case DECODED_FORMAT_UYVY:
                InvertInterlacedRow16s10bitToUYVY(temporal_lowpass, temporal_highpass, num_channels,
                                                  output, output_pitch, output_width, frame_width,
chroma_offset); break; default: assert(0); break; } } else // Older code for 8-bit precision { int format; assert(precision == CODEC_PRECISION_8BIT); switch (frame->format) { case DECODED_FORMAT_YUYV: format = COLOR_FORMAT_YUYV; break; case DECODED_FORMAT_UYVY: format = COLOR_FORMAT_UYVY; break; } // Invert the temporal bands from all channels and pack output pixels InvertInterlacedRow16sToYUV(temporal_lowpass, temporal_highpass, num_channels, output, output_pitch, output_width, frame_width, chroma_offset, format); } // Advance to the next row in the packed output image output += field_pitch; } } #endif #if _INTERLACED_WORKER_THREADS void TransformInverseFrameSectionToYUV(DECODER *decoder, int thread_index, int frame_index, int num_channels, uint8_t *output, int output_pitch, FRAME_INFO *frame, int chroma_offset, int precision) { FILE *logfile = decoder->logfile; TRANSFORM **transform = decoder->transform; const SCRATCH *scratch = &decoder->scratch; // Pointers to the rows in the horizontal wavelet for each channel PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS]; PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS]; PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS]; PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS]; // Horizontal wavelet band width and pitch int horizontal_width[TRANSFORM_MAX_CHANNELS]; int horizontal_pitch[TRANSFORM_MAX_CHANNELS]; int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS]; // Quantization factors int lowlow_quantization[TRANSFORM_MAX_CHANNELS]; int lowhigh_quantization[TRANSFORM_MAX_CHANNELS]; int highlow_quantization[TRANSFORM_MAX_CHANNELS]; int highhigh_quantization[TRANSFORM_MAX_CHANNELS]; // Pointers to the rows in the temporal wavelet for each channel PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS]; PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS]; // Push the scratch space state to allocate a new section char *buffer = scratch->free_ptr; size_t buffer_size = scratch->free_size; uint8_t *output_row_ptr = output; // Dimensions of the 
    // Dimensions of the reconstructed frame
    int frame_width = frame->width;
    int frame_height = frame->height;
    // Each processed row produces two interlaced output rows
    int half_height = frame_height / 2;

    size_t temporal_row_size = frame_width * sizeof(PIXEL);
    int field_pitch = 2 * output_pitch;
    int output_width;
    int channel;
    int row;

    // Semaphore that hands out row indices to the worker threads
    HANDLE row_semaphore = decoder->interlaced_worker.row_semaphore;
    int return_value;

    // Round up the temporal row size to an integral number of cache lines
    temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

    // Divide the buffer space between the four threads
    buffer_size /= 4;
    buffer += buffer_size * thread_index;

    // Check that the buffer starts on a cache line boundary
    assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

    // Check that the number of channels is reasonable
    assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

    // Check that the buffer is large enough
    assert((2 * num_channels * temporal_row_size) <= buffer_size);

    // Allocate buffers for a single row of lowpass and highpass temporal coefficients
    // and initialize the arrays of row pointers into the horizontal transform bands
    for (channel = 0; channel < num_channels; channel++)
    {
        IMAGE *wavelet = transform[channel]->wavelet[frame_index];

#if (0 && DEBUG)
        // Debug-only dump of the wavelet bands for the first few frames
        int static count = 0;
        if (count < 20)
        {
            char label[_MAX_PATH];
            int i;

            sprintf(label, "Frame%d-%d-", frame_index, count);
            DumpPGM(label, wavelet, NULL);

            for (i = 1; i < wavelet->num_bands; i++)
            {
                sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
                DumpBandPGM(label, wavelet, i, NULL);
            }
        }
        count++;
#endif

        // Initialize the row pointers into the horizontal bands
        horizontal_lowlow[channel] = wavelet->band[LL_BAND];
        horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
        horizontal_highlow[channel] = wavelet->band[HL_BAND];
        horizontal_highhigh[channel] = wavelet->band[HH_BAND];

        lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
        lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
        highlow_quantization[channel] = wavelet->quantization[HL_BAND];
        highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

        // Compute the pitch in units of pixels
        horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

        // Compute the 8-bit pitch in units of pixels
        horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
        //horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);

        // Remember the width of the horizontal wavelet rows for this channel
        horizontal_width[channel] = wavelet->width;

        //TODO: Need to recode the buffer allocations using the scratch space API

        // Divide the buffer into temporal lowpass and highpass rows
        temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
        temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
    }

#if (0 && DEBUG)
    if (logfile)
    {
        fprintf(logfile, "Output buffer: %d (0x%p)\n", output, output);
    }
#endif

    // NOTE(review): commented-out remnants of an older two-thread work split;
    // retained verbatim from the original source.
    /*
    if (thread_index == 0)
    {
        row = 0;
        row_step = 1;
    }
    else if (thread_index == 1)
    {
        row = half_height - 1;
        row_step = -1;

        // Move to the bottom of the transform and process moving up
        for (channel = 0; channel < num_channels; channel++)
        {
            int offset = horizontal_pitch[channel] * (half_height - 1);

            horizontal_lowlow[channel] += offset;
            horizontal_lowhigh[channel] += offset;
            horizontal_highlow[channel] += offset;
            horizontal_highhigh[channel] += offset;

            horizontal_pitch[channel] = NEG(horizontal_pitch[channel]);
            horizontal_pitch8s[channel] = NEG(horizontal_pitch8s[channel]);
        }

        output += field_pitch * (half_height - 1);
        field_pitch = NEG(field_pitch);
    }
    else
    {
        assert(0);	// what about middle threads?
    }

#if (0 && DEBUG)
    if (logfile)
    {
        fprintf(logfile, "Thread index: %d, start row: %d, row step: %d, field_pitch: %d\n",
                thread_index, row, row_step, field_pitch);
    }
#endif
    */

    // Loop until all of the rows have been processed
    for (;;)
    {
        // Wait for one row from each channel to invert the transform
        return_value = WaitForSingleObject(row_semaphore, 0);

        // Determine the index of this worker thread
        if (return_value == WAIT_OBJECT_0)
        {
            // Claim the next row under the worker lock (if it was initialized)
            if (decoder->interlaced_worker.lock_init)
            {
                EnterCriticalSection(&decoder->interlaced_worker.lock);
            }
            row = decoder->interlaced_worker.current_row++;
            if (decoder->interlaced_worker.lock_init)
                LeaveCriticalSection(&decoder->interlaced_worker.lock);

            output_row_ptr = output;
            output_row_ptr += row * 2 * output_pitch;

            // Recompute the band row pointers for the claimed row
            for (channel = 0; channel < num_channels; channel++)
            {
                int pitch = horizontal_pitch[channel];
                IMAGE *wavelet = transform[channel]->wavelet[frame_index];

                horizontal_lowlow[channel] = wavelet->band[LL_BAND];
                horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
                horizontal_highlow[channel] = wavelet->band[HL_BAND];
                horizontal_highhigh[channel] = wavelet->band[HH_BAND];

                horizontal_lowlow[channel] += pitch*row;
                horizontal_lowhigh[channel] += pitch*row;
                horizontal_highlow[channel] += pitch*row;
                horizontal_highhigh[channel] += pitch*row;
            }
        }

        if (return_value == WAIT_OBJECT_0 && 0 <= row && row < half_height)
        {
            //PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);
            PIXEL *line_buffer = (PIXEL *)(buffer + 2 * num_channels * temporal_row_size);

            // assert(0 <= row && row < half_height);
#if (0 && DEBUG)
            if (logfile)
            {
                fprintf(logfile, "Processing row: %d, thread index: %d, output: %d (0x%p)\n",
                        row, thread_index, output_row_ptr);
            }
#endif

            // Invert the horizontal transform applied to the temporal bands in each channel
            for (channel = 0; channel < num_channels; channel++)
            {
                int pitch = horizontal_pitch[channel];
                //int pitch8s = horizontal_pitch8s[channel];

#if (0 && DEBUG)
                // Invert the horizontal transform by duplicating the lowpass pixels
duplicating the lowpass pixels InvertHorizontalRowDuplicated16s(horizontal_lowlow[channel], lowlow_quantization[channel], (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel], temporal_lowpass[channel], horizontal_width[channel], (PIXEL *)line_buffer); #else // Invert the horizontal transform applied to the temporal lowpass row InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel], (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel], temporal_lowpass[channel], horizontal_width[channel], (PIXEL *)line_buffer); #endif // Invert the horizontal transform applied to the temporal highpass row InvertHorizontalRow8sBuffered((PIXEL8S *)horizontal_highlow[channel], highlow_quantization[channel], (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel], temporal_highpass[channel], horizontal_width[channel], (PIXEL *)line_buffer); // Advance to the next row in each horizontal band in this channel //horizontal_lowlow[channel] += pitch; //horizontal_lowhigh[channel] += pitch; //horizontal_highlow[channel] += pitch; //horizontal_highhigh[channel] += pitch; } // The output width is twice the width of the wavelet bands output_width = 2 * horizontal_width[0]; // Adjust the frame width to fill to the end of each row //frame_width = output_pitch / 2; if (precision == CODEC_PRECISION_10BIT) { // Invert the temporal bands from all channels and pack output pixels switch (frame->format) { // Need to reduce the resolution from 10 bits to 8 bits during the inverse case DECODED_FORMAT_YUYV: InvertInterlacedRow16s10bitToYUV(temporal_lowpass, temporal_highpass, num_channels, output_row_ptr, output_pitch, output_width, frame_width, chroma_offset); break; case DECODED_FORMAT_UYVY: InvertInterlacedRow16s10bitToUYVY(temporal_lowpass, temporal_highpass, num_channels, output_row_ptr, output_pitch, output_width, frame_width, chroma_offset); break; default: assert(0); break; } } else // Older code for 8-bit 
precision { int format; assert(precision == CODEC_PRECISION_8BIT); switch (frame->format) { case DECODED_FORMAT_YUYV: format = COLOR_FORMAT_YUYV; break; case DECODED_FORMAT_UYVY: format = COLOR_FORMAT_UYVY; break; } // Invert the temporal bands from all channels and pack output pixels InvertInterlacedRow16sToYUV(temporal_lowpass, temporal_highpass, num_channels, output_row_ptr, output_pitch, output_width, frame_width, chroma_offset, format); } // Advance to the next row in the input transforms //row += row_step; // Advance to the next row in the packed output image //output += field_pitch; } else { // No more rows to process break; } } #if (0 && DEBUG) if (logfile) { fprintf(logfile, "Finished transform, thread index: %d\n", thread_index); } #endif } #endif //#if BUILD_PROSPECT // Apply the inverse horizontal-temporal transform and output rows of luma and chroma #if 0 void TransformInverseFrameToRow16u(TRANSFORM *transform[], int frame_index, int num_channels, PIXEL16U *output, int output_pitch, FRAME_INFO *frame, char *buffer, size_t buffer_size, int chroma_offset, int precision) #else void TransformInverseFrameToRow16u(DECODER *decoder, TRANSFORM *transform[], int frame_index, int num_channels, PIXEL16U *output, int output_pitch, FRAME_INFO *frame, const SCRATCH *scratch, int chroma_offset, int precision) #endif { // Pointers to the rows in the horizontal wavelet for each channel PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS]; PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS]; PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS]; PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS]; // Horizontal wavelet band width and pitch int horizontal_width[TRANSFORM_MAX_CHANNELS]; int horizontal_pitch[TRANSFORM_MAX_CHANNELS]; // Quantization factors int lowlow_quantization[TRANSFORM_MAX_CHANNELS]; int lowhigh_quantization[TRANSFORM_MAX_CHANNELS]; int highlow_quantization[TRANSFORM_MAX_CHANNELS]; int highhigh_quantization[TRANSFORM_MAX_CHANNELS]; // Push the scratch space state 
    // Carve a new section out of the scratch space
    char *buffer = scratch->free_ptr;
#if DEBUG
    size_t buffer_size = scratch->free_size;
#endif

    // Buffers for the rows in the temporal wavelet (reused for each channel)
    PIXEL *temporal_lowpass;
    PIXEL *temporal_highpass;

    // Output row width per channel (luma is full width, chroma is half)
    int output_row_width[TRANSFORM_MAX_CHANNELS];

    // Dimensions of the reconstructed frame
    int frame_width = frame->width;
    int frame_height = frame->height;
    int half_height = frame_height / 2;

    size_t temporal_row_size = frame_width * sizeof(PIXEL);
    int field_pitch = 2 * output_pitch;
    int luma_width = frame_width;
    int chroma_width = luma_width/2;
    int channel;
    int row;

#if (1 && DEBUG_ROW16U)
    PIXEL16U *output_buffer;
#endif

    // This routine should only be called to decode rows of 16-bit luma and chroma
    //assert(frame->format == DECODED_FORMAT_YR16);

    // Round up the temporal row size to an integral number of cache lines
    temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

    // Check that the buffer starts on a cache line boundary
    assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

    // Check that the number of channels is reasonable
    assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

    // Buffer must be large enough for two rows of temporal coefficients (lowpass and highpass)
    // plus the buffer used by the inverse horizontal transform for its intermediate results
#if DEBUG
    assert((2 * temporal_row_size) <= buffer_size);
#endif

    // Allocate buffers for one row of lowpass and highpass temporal coefficients
    temporal_lowpass = (PIXEL *)&buffer[0];
    temporal_highpass = (PIXEL *)&buffer[temporal_row_size];

#if (1 && DEBUG_ROW16U)
    output_buffer = (PIXEL16U *)&buffer[2 * temporal_row_size];
#endif

    // Initialize the arrays of row pointers into the horizontal transform bands
    for (channel = 0; channel < num_channels; channel++)
    {
        IMAGE *wavelet = transform[channel]->wavelet[frame_index];

#if (0 && DEBUG)
        // Debug-only dump of the wavelet bands for the first few frames
        int static count = 0;
        if (count < 20)
        {
            char label[_MAX_PATH];
            int i;

            sprintf(label, "Frame%d-%d-", frame_index, count);
            DumpPGM(label, wavelet, NULL);

            for (i = 1; i < wavelet->num_bands; i++)
            {
                sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
                DumpBandPGM(label, wavelet, i, NULL);
            }
        }
        count++;
#endif

        // Initialize the row pointers into the horizontal bands
        horizontal_lowlow[channel] = wavelet->band[LL_BAND];
        horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
        horizontal_highlow[channel] = wavelet->band[HL_BAND];
        horizontal_highhigh[channel] = wavelet->band[HH_BAND];

        lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
        lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
        highlow_quantization[channel] = wavelet->quantization[HL_BAND];
        highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

        // Compute the pitch in units of pixels
        horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

        // Remember the width of the horizontal wavelet rows for this channel
        horizontal_width[channel] = wavelet->width;

        // Compute the width of each row of output pixels
        output_row_width[channel] = (channel == 0) ? luma_width : chroma_width;
    }

    // Process one row at a time from each channel
    for (row = 0; row < half_height; row++)
    {
#if (1 && DEBUG_ROW16U)
        // Debug path: write the rows of 16-bit pixels to a temporary buffer
        PIXEL16U *output_row_ptr = output_buffer;
        PIXEL16U *planar_output[TRANSFORM_MAX_CHANNELS];
        int planar_pitch[TRANSFORM_MAX_CHANNELS];
        ROI strip = {luma_width, 2};
        uint8_t *yuv_output = (uint8_t *)output;
        uint8_t *output1 = yuv_output;
        uint8_t *output2 = yuv_output + output_pitch;
#else
        PIXEL16U *output_row_ptr = output;
#endif

        // Invert the horizontal transform applied to the temporal bands in each channel
        for (channel = 0; channel < num_channels; channel++)
        {
            int pitch = horizontal_pitch[channel];

            if (decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
            {
                // Invert the horizontal transform applied to the temporal lowpass row
                BypassHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
                                       temporal_lowpass, horizontal_width[channel]);

                // Invert the horizontal transform applied to the temporal highpass row
                BypassHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
                                       temporal_highpass, horizontal_width[channel]);
            }
            else
            {
                // Invert the horizontal transform applied to the temporal lowpass row
                InvertHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
                                       temporal_lowpass, horizontal_width[channel]);

                // Invert the horizontal transform applied to the temporal highpass row
                InvertHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
                                       temporal_highpass, horizontal_width[channel]);
            }

            //***DEBUG***
            //ZeroMemory(temporal_highpass, temporal_row_size);
            //FillPixelMemory(temporal_highpass, temporal_row_size/sizeof(PIXEL), 50);

            // Advance to the next row in each horizontal band in this channel
            horizontal_lowlow[channel] += pitch;
            horizontal_lowhigh[channel] += pitch;
            horizontal_highlow[channel] += pitch;
            horizontal_highhigh[channel] += pitch;

#if (1 && DEBUG_ROW16U)
            // Write the rows of 16-bit pixels to a temporary buffer
            planar_output[channel] =
			output_row_ptr;
		planar_pitch[channel] = output_pitch * sizeof(PIXEL);

		// Invert the temporal transform and output two rows of luma or chroma
		InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
									   planar_output[channel], planar_pitch[channel],
									   output_row_width[channel], frame_width,
									   chroma_offset, precision);

		//if (channel > 0)
		if (0)
		{
			// Debug-only: overwrite both output rows with a constant chroma value
			uint8_t *output3 = (uint8_t *)planar_output[channel];
			uint8_t *output4 = (uint8_t *)output3 + planar_pitch[channel];
			int output_size = output_row_width[channel] * sizeof(PIXEL);
			int fill_value = (128 << 8);
			//ZeroMemory(output3, output_size);
			//ZeroMemory(output4, output_size);
			FillPixelMemory((PIXEL *)output3, output_row_width[channel], fill_value);
			FillPixelMemory((PIXEL *)output4, output_row_width[channel], fill_value);
		}
#else
		// Invert the temporal transform and output two rows of luma or chroma
		InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
									   output_row_ptr, output_pitch,
									   output_row_width[channel], frame_width,
									   chroma_offset, precision);
#endif
		// Advance the output row pointer to the next channel
		output_row_ptr += output_row_width[channel];

		// Check the output row alignment
		assert(ISALIGNED16(output_row_ptr));
		}

		// Advance to the next group of rows in the output image
		// (field_pitch is two output rows since each loop pass emits a row pair)
		output += field_pitch/sizeof(PIXEL16U);
	}
}
//#endif

#if _INTERLACED_WORKER_THREADS

// Worker-thread section of the inverse frame transform that outputs rows of
// 16-bit luma and chroma.  Each worker repeatedly claims the next unprocessed
// row index from decoder->interlaced_worker.current_row (guarded by a critical
// section), inverts the horizontal transform for the temporal lowpass and
// highpass bands of every channel, then inverts the temporal transform to emit
// two interlaced output rows.  Terminates when the row semaphore is exhausted
// or the claimed row index is out of range.
void TransformInverseFrameSectionToRow16u(DECODER *decoder, int thread_index, int frame_index, int num_channels,
										  PIXEL16U *output, int output_pitch, FRAME_INFO *frame,
										  int chroma_offset, int precision)
{
	FILE *logfile = decoder->logfile;
	TRANSFORM **transform = decoder->transform;
	const SCRATCH *scratch = &decoder->scratch;

	// Pointers to the rows in the horizontal wavelet for each channel
	PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

	// Horizontal wavelet band width and pitch
	int horizontal_width[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch[TRANSFORM_MAX_CHANNELS];

	// Quantization factors
	int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
	int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
	int highlow_quantization[TRANSFORM_MAX_CHANNELS];
	int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
	size_t buffer_size = scratch->free_size;

	// Buffers for the rows in the temporal wavelet (reused for each channel)
	PIXEL *temporal_lowpass;
	PIXEL *temporal_highpass;

	int output_row_width[TRANSFORM_MAX_CHANNELS];

	// Dimensions of the reconstructed frame
	int frame_width = frame->width;
	int frame_height = frame->height;
	int half_height = frame_height / 2;

	size_t temporal_row_size = frame_width * sizeof(PIXEL);
	int field_pitch = 2 * output_pitch;
	int luma_width = frame_width;
	int chroma_width = luma_width/2;
	int channel;
	int row;

	HANDLE row_semaphore = decoder->interlaced_worker.row_semaphore;
	int return_value;

#if (1 && DEBUG_ROW16U)
	PIXEL16U *output_buffer;
#endif

	// This routine should only be called to decode rows of 16-bit luma and chroma
	//assert(frame->format == DECODED_FORMAT_YR16);

	// Round up the temporal row size to an integral number of cache lines
	temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

#if 0
	if (thread_index == 1)
	{
		// Skip over the buffer space used by the other thread
		size_t buffer_usage = 2 * temporal_row_size;
		buffer += buffer_usage;
		buffer_size -= buffer_usage;
	}
#else
	// Divide the buffer space between the two threads
	// NOTE(review): the divisor is 4 although the comment says two threads —
	// presumably each thread only needs a quarter of the scratch space; confirm
	// against the scratch-space sizing done by the caller.
	buffer_size /= 4;
	buffer += buffer_size * thread_index;
#endif

	// Check that the buffer starts on a cache line boundary
	assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

	// Check that the number of channels is reasonable
	assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

	// Buffer must be large enough for two rows of temporal coefficients (lowpass and highpass)
	// plus the buffer used by the inverse horizontal transform for its intermediate results
	assert((2 * temporal_row_size) <= buffer_size);

	// Allocate buffers for one row of lowpass and highpass temporal coefficients
	temporal_lowpass = (PIXEL *)&buffer[0];
	temporal_highpass = (PIXEL *)&buffer[temporal_row_size];

#if (1 && DEBUG_ROW16U)
	output_buffer = (PIXEL16U *)&buffer[2 * temporal_row_size];
#endif

	// Initialize the arrays of row pointers into the horizontal transform bands
	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform[channel]->wavelet[frame_index];

#if (0 && DEBUG)
		// Debug-only: dump the first few wavelets to PGM files
		int static count = 0;
		if (count < 20)
		{
			char label[_MAX_PATH];
			int i;
			sprintf(label, "Frame%d-%d-", frame_index, count);
			DumpPGM(label, wavelet, NULL);
			for (i = 1; i < wavelet->num_bands; i++)
			{
				sprintf(label, "Frame-%d-band%d-%d-", frame_index, i, count);
				DumpBandPGM(label, wavelet, i, NULL);
			}
		}
		count++;
#endif

		// Initialize the row pointers into the horizontal bands
		horizontal_lowlow[channel] = wavelet->band[LL_BAND];
		horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
		horizontal_highlow[channel] = wavelet->band[HL_BAND];
		horizontal_highhigh[channel] = wavelet->band[HH_BAND];

		lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
		lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
		highlow_quantization[channel] = wavelet->quantization[HL_BAND];
		highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

		// Compute the pitch in units of pixels
		horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

		// Remember the width of the horizontal wavelet rows for this channel
		horizontal_width[channel] = wavelet->width;

		// Compute the width of each row of output pixels
		// (channel 0 is luma at full width, chroma channels are half width)
		output_row_width[channel] = (channel == 0) ? luma_width : chroma_width;
	}

#if (0 && DEBUG)
	// NOTE(review): disabled debug code — the "%d" conversion is applied to a
	// pointer argument; would need fixing if ever re-enabled.
	if (logfile) {
		fprintf(logfile, "Output buffer: %d (0x%p)\n", output, output);
	}
#endif

/*	Disabled top/bottom partitioning scheme (rows are now claimed dynamically
	from a shared counter instead of splitting the frame between two threads):

	if (thread_index == 0)
	{
		row = 0;
		row_step = 1;
	}
	else if (thread_index == 1)
	{
		row = half_height - 1;
		row_step = -1;

		// Move to the bottom of the transform and process moving up
		for (channel = 0; channel < num_channels; channel++)
		{
			int offset = horizontal_pitch[channel] * (half_height - 1);
			horizontal_lowlow[channel] += offset;
			horizontal_lowhigh[channel] += offset;
			horizontal_highlow[channel] += offset;
			horizontal_highhigh[channel] += offset;
			horizontal_pitch[channel] = NEG(horizontal_pitch[channel]);
			//horizontal_pitch8s[channel] = NEG(horizontal_pitch8s[channel]);
		}

		//output += field_pitch * (half_height - 1);
		output += (frame_height - 1) * output_pitch/sizeof(PIXEL16U);
		output_pitch = NEG(output_pitch);
		field_pitch = NEG(field_pitch);
	}
	else
	{
		assert(0);	// middle threads
	}
*/

#if (0 && DEBUG)
	// NOTE(review): disabled debug code — references row_step, which only
	// exists in the commented-out partitioning scheme above.
	if (logfile) {
		fprintf(logfile, "Thread index: %d, start row: %d, row step: %d, field_pitch: %d\n",
			thread_index, row, row_step, field_pitch);
	}
#endif

	// Loop until all of the rows have been processed
	for (;;)
	{
		PIXEL16U *output_row_ptr;

		// Wait for one row from each channel to invert the transform
		// NOTE(review): the timeout is zero, so this is a poll — when the
		// semaphore count is exhausted the call returns immediately with a
		// non-signaled status and the loop below breaks out.
		return_value = WaitForSingleObject(row_semaphore, 0);

		// Determine the index of this worker thread
		if (return_value == WAIT_OBJECT_0)
		{
			// Claim the next unprocessed row (shared counter guarded by a lock)
			if(decoder->interlaced_worker.lock_init)
			{
				EnterCriticalSection(&decoder->interlaced_worker.lock);
			}
			row = decoder->interlaced_worker.current_row++;
			if(decoder->interlaced_worker.lock_init)
				LeaveCriticalSection(&decoder->interlaced_worker.lock);

			// Locate the pair of output rows for this row index
			output_row_ptr = output;
			output_row_ptr += row * output_pitch;

			// Rebase the band row pointers onto the claimed row in each channel
			for (channel = 0; channel < num_channels; channel++)
			{
				int pitch = horizontal_pitch[channel];
				IMAGE *wavelet = transform[channel]->wavelet[frame_index];

				horizontal_lowlow[channel] = wavelet->band[LL_BAND];
				horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
				horizontal_highlow[channel] = wavelet->band[HL_BAND];
				horizontal_highhigh[channel] = wavelet->band[HH_BAND];

				horizontal_lowlow[channel] += pitch*row;
				horizontal_lowhigh[channel] += pitch*row;
				horizontal_highlow[channel] += pitch*row;
				horizontal_highhigh[channel] += pitch*row;
			}
		}

		if (return_value == WAIT_OBJECT_0 && 0 <= row && row < half_height)
		{
			assert(0 <= row && row < half_height);

			if(decoder->frame.resolution == DECODED_RESOLUTION_FULL)
			{
				// Invert the horizontal transform applied to the temporal bands in each channel
				for (channel = 0; channel < num_channels; channel++)
				{
					int pitch = horizontal_pitch[channel];

					// Invert the horizontal transform applied to the temporal lowpass row
					InvertHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
										   temporal_lowpass, horizontal_width[channel]);

					// Invert the horizontal transform applied to the temporal highpass row
					InvertHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
										   temporal_highpass, horizontal_width[channel]);

					// Invert the temporal transform and output two rows of luma or chroma
					InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
												   output_row_ptr, output_pitch,
												   output_row_width[channel], frame_width,
												   chroma_offset, precision);

					// Advance the output row pointer to the next channel
					output_row_ptr += output_row_width[channel];
				}
			}
			else if(decoder->frame.resolution == DECODED_RESOLUTION_HALF_HORIZONTAL)
			{
				// Half-horizontal resolution: skip the horizontal inverse and
				// pass the band rows through instead
				for (channel = 0; channel < num_channels; channel++)
				{
					int pitch = horizontal_pitch[channel];

					// Bypass the horizontal transform for the temporal lowpass row
					BypassHorizontalRow16s(horizontal_lowlow[channel], horizontal_lowhigh[channel],
										   temporal_lowpass, horizontal_width[channel]);

					// Bypass the horizontal transform for the temporal highpass row
					BypassHorizontalRow16s(horizontal_highlow[channel], horizontal_highhigh[channel],
										   temporal_highpass, horizontal_width[channel]);

					// Invert the temporal transform and output two rows of luma or chroma
					InvertInterlacedRow16sToRow16u(temporal_lowpass, temporal_highpass,
												   output_row_ptr, output_pitch,
												   output_row_width[channel], frame_width,
												   chroma_offset, precision);

					// Advance the output row pointer to the next channel
					output_row_ptr += output_row_width[channel];
				}
			}
		}
		else
		{
			// No more rows to process
			break;
		}
	}

#if (1 && DEBUG)
	if (logfile) {
		fprintf(logfile, "Finished transform, thread index: %d\n", thread_index);
	}
#endif
}
#endif


#if 0
// Disabled thread entry point that unpacks its parameter block and invokes the
// top-field inverse frame transform.
DWORD WINAPI TransformInverseFrameToRow16utopThread(LPVOID param)
{
	// Layout of the parameter block passed by the thread creator
	struct data
	{
		TRANSFORM *transform[3];
		int frame_index;
		int num_channels;
		uint8_t *output;
		int output_pitch;
		FRAME_INFO *info;
		SCRATCH *scratch;
		int chroma_offset;
		int precision;
	} *dptr;

	dptr = (struct data *)param;

	TransformInverseFrameToRow16utop(dptr->transform, dptr->frame_index, dptr->num_channels,
									 (PIXEL16U *)dptr->output, dptr->output_pitch, dptr->info,
									 dptr->scratch, dptr->chroma_offset, dptr->precision);

	return 0;
}

// Disabled thread entry point for the bottom-field inverse frame transform.
DWORD WINAPI TransformInverseFrameToRow16ubottomThread(LPVOID param)
{
	// Layout of the parameter block passed by the thread creator
	struct data
	{
		TRANSFORM *transform[3];
		int frame_index;
		int num_channels;
		uint8_t *output;
		int output_pitch;
		FRAME_INFO *info;
		SCRATCH *scratch;
		int chroma_offset;
		int precision;
	} *dptr;

	dptr = (struct data *)param;

	TransformInverseFrameToRow16ubottom(dptr->transform, dptr->frame_index, dptr->num_channels,
										(PIXEL16U *)dptr->output, dptr->output_pitch, dptr->info,
										dptr->scratch, dptr->chroma_offset, dptr->precision);

	return 0;
}
#endif

extern void fast_srand( int seed );

// Apply the inverse horizontal-temporal transform and pack the output into a buffer
#if 0
void TransformInverseFrameToBuffer(TRANSFORM *transform[], int frame_index, int num_channels,
								   uint8_t *output, int output_pitch, FRAME_INFO *frame,
								   char *buffer, size_t buffer_size,
								   int chroma_offset, int precision)
#else
void TransformInverseFrameToBuffer(TRANSFORM *transform[], int frame_index, int num_channels,
								   uint8_t *output, int output_pitch, FRAME_INFO
								   *frame, const SCRATCH *scratch,
								   int chroma_offset, int precision)
#endif
{
	// Pointers to the rows in the horizontal wavelet for each channel
	PIXEL *horizontal_lowlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_lowhigh[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highlow[TRANSFORM_MAX_CHANNELS];
	PIXEL *horizontal_highhigh[TRANSFORM_MAX_CHANNELS];

	// Horizontal wavelet band width and pitch
	int horizontal_width[TRANSFORM_MAX_CHANNELS];
	int horizontal_pitch[TRANSFORM_MAX_CHANNELS];
	//int horizontal_pitch8s[TRANSFORM_MAX_CHANNELS];

	// Quantization factors
	int lowlow_quantization[TRANSFORM_MAX_CHANNELS];
	int lowhigh_quantization[TRANSFORM_MAX_CHANNELS];
	int highlow_quantization[TRANSFORM_MAX_CHANNELS];
	int highhigh_quantization[TRANSFORM_MAX_CHANNELS];

	// Push the scratch space state to allocate a new section
	char *buffer = scratch->free_ptr;
	size_t buffer_size = scratch->free_size;

	// Pointers to the rows in the temporal wavelet for each channel
	PIXEL *temporal_lowpass[TRANSFORM_MAX_CHANNELS];
	PIXEL *temporal_highpass[TRANSFORM_MAX_CHANNELS];

	// Dimensions of the reconstructed frame
	int frame_width = frame->width;
	int frame_height = frame->height;
	int half_height = frame_height / 2;

	size_t temporal_row_size = frame_width * sizeof(PIXEL);
	size_t temporal_buffer_size = 2 * num_channels * temporal_row_size;

#if DEBUG
	size_t yuv_row_size = frame_width * 2;
#endif
	char *yuv_buffer;
	size_t yuv_buffer_size;

	int field_pitch = 2 * output_pitch;

	int format = frame->format;

	// RGB output is stored bottom-up, so the walk through the output rows is inverted
	bool inverted = (format == DECODED_FORMAT_RGB24 || format == DECODED_FORMAT_RGB32);

	int output_width;
	int channel;
	int row;

	// Round up the temporal row size to an integral number of cache lines
	temporal_row_size = ALIGN(temporal_row_size, _CACHE_LINE_SIZE);

	// Check that the buffer starts on a cache line boundary
	assert(ISALIGNED(buffer, _CACHE_LINE_SIZE));

	// Check that the number of channels is reasonable
	assert(0 < num_channels && num_channels <= TRANSFORM_MAX_CHANNELS);

	// Check that the buffer is large enough
	assert((2 * num_channels * temporal_row_size) <= buffer_size);

	// Allocate buffers for a single row of lowpass and highpass temporal coefficients
	// and initialize the arrays of row pointers into the horizontal transform bands
	for (channel = 0; channel < num_channels; channel++)
	{
		IMAGE *wavelet = transform[channel]->wavelet[frame_index];

		// Initialize the row pointers into the horizontal bands
		horizontal_lowlow[channel] = wavelet->band[LL_BAND];
		horizontal_lowhigh[channel] = wavelet->band[LH_BAND];
		horizontal_highlow[channel] = wavelet->band[HL_BAND];
		horizontal_highhigh[channel] = wavelet->band[HH_BAND];

		lowlow_quantization[channel] = wavelet->quantization[LL_BAND];
		lowhigh_quantization[channel] = wavelet->quantization[LH_BAND];
		highlow_quantization[channel] = wavelet->quantization[HL_BAND];
		highhigh_quantization[channel] = wavelet->quantization[HH_BAND];

		// Compute the pitch in units of pixels
		horizontal_pitch[channel] = wavelet->pitch/sizeof(PIXEL);

		// Compute the 8-bit pitch in units of pixels
		//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL);
		//horizontal_pitch8s[channel] = wavelet->pitch8s/sizeof(PIXEL8S);

		// Remember the width of the horizontal wavelet rows for this channel
		horizontal_width[channel] = wavelet->width;

		// Divide the buffer into temporal lowpass and highpass rows
		temporal_lowpass[channel] = (PIXEL *)(buffer + (2 * channel) * temporal_row_size);
		temporal_highpass[channel] = (PIXEL *)(buffer + (2 * channel + 1) * temporal_row_size);
	}

	// Allocate buffer space for the intermediate YUV data
	// (the space after the temporal rows in the scratch buffer)
	yuv_buffer = buffer + temporal_buffer_size;
	yuv_buffer_size = buffer_size - temporal_buffer_size;
#if DEBUG
	assert(yuv_buffer_size >= 2 * yuv_row_size);
#endif

	if (inverted) {
		// Start at the bottom output row and walk upwards with negated pitches
		output += (frame_height - 1) * output_pitch;
		output_pitch = (- output_pitch);
		field_pitch = (- field_pitch);
	}

	// Process one row at a time from each channel
	for (row = 0; row < half_height; row++)
	{
		// Intermediate line buffer placed after the temporal rows
		// NOTE(review): offset (2 * num_channels + 2) rows — presumably leaves
		// room for the YUV buffer above; confirm against the scratch sizing.
		PIXEL *line_buffer = (PIXEL *)(buffer + (2 * num_channels + 2) * temporal_row_size);

		// Invert the horizontal transform applied to the temporal bands in each channel
		for (channel = 0; channel < num_channels; channel++)
		{
			int pitch = horizontal_pitch[channel];
			//int pitch8s = horizontal_pitch8s[channel];

			// Invert the horizontal transform applied to the temporal lowpass row
			InvertHorizontalRow16s8sTo16sBuffered(horizontal_lowlow[channel], lowlow_quantization[channel],
												  (PIXEL8S *)horizontal_lowhigh[channel], lowhigh_quantization[channel],
												  temporal_lowpass[channel], horizontal_width[channel],
												  (PIXEL *)line_buffer);

			// Invert the horizontal transform applied to the temporal highpass row
			InvertHorizontalRow8sBuffered((PIXEL8S *)horizontal_highlow[channel], highlow_quantization[channel],
										  (PIXEL8S *)horizontal_highhigh[channel], highhigh_quantization[channel],
										  temporal_highpass[channel], horizontal_width[channel],
										  (PIXEL *)line_buffer);

			// Advance to the next row in each horizontal band in this channel
			horizontal_lowlow[channel] += pitch;
			horizontal_lowhigh[channel] += pitch;
			horizontal_highlow[channel] += pitch;
			horizontal_highhigh[channel] += pitch;
		}

		// The output width is twice the width of the wavelet bands
		output_width = 2 * horizontal_width[0];

		// Adjust the frame width to fill to the end of each row
		//frame_width = output_pitch / 2;

//#if BUILD_PROSPECT
		if (format == DECODED_FORMAT_V210 || format == DECODED_FORMAT_YU64)
		{
			// Invert the temporal bands from all channels and pack as V210 output
			InvertInterlacedRow16sToV210(temporal_lowpass, temporal_highpass, num_channels,
										 output, output_pitch, output_width, frame_width,
										 yuv_buffer, yuv_buffer_size, format,
										 chroma_offset, precision);
		}
		else
//#endif
		{
			// Invert the temporal bands from all channels and pack as 8-bit output
			InvertInterlacedRow16s(temporal_lowpass, temporal_highpass, num_channels,
								   output, output_pitch, output_width, frame_width,
								   yuv_buffer, yuv_buffer_size, format, frame->colorspace,
								   chroma_offset, precision, row);
		}

		// Advance to the next row in the packed output image
		output += field_pitch;
	}
}

// Convert the decoded image to the requested packed output format and copy it
// into the caller's buffer.  Unsupported formats yield a blank (chroma-zero)
// frame after asserting in debug builds.
void CopyImageToBuffer(IMAGE *image, uint8_t *output_buffer, int32_t output_pitch, int format)
{
	bool inverted = false;
	size_t output_size;

	START(tk_convert);

	// Determine the type of conversion
	switch (format)
	{
	case DECODED_FORMAT_RGB24:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB24_INVERTED:
		ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB24, inverted);
		break;

	case DECODED_FORMAT_RGB32:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB32_INVERTED:
		ConvertImageToRGB(image, output_buffer, output_pitch, COLOR_FORMAT_RGB32, inverted);
		break;

#if 0
	case DECODED_FORMAT_YUYV_INVERTED:
		inverted = true;
		// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
	case DECODED_FORMAT_YUYV:
		ConvertImageToYUV(image, output_buffer, output_pitch, COLOR_FORMAT_YUYV, inverted);
		break;

#if 0
	case DECODED_FORMAT_UYVY_INVERTED:
		inverted = true;
		// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
	case DECODED_FORMAT_UYVY:
		ConvertImageToYUV(image, output_buffer, output_pitch, COLOR_FORMAT_UYVY, inverted);
		break;

	default:
		// Unsupported format (return a blank frame)
		assert(0);
		output_size = image->height * output_pitch;
		memset(output_buffer, COLOR_CHROMA_ZERO, output_size);
		break;
	}

	STOP(tk_convert);
}

// Pack the lowpass bands of the three channels side by side into an 8-bit
// YUY2-style output (used for side-by-side 3D preview).  Adjacent pairs of
// lowpass samples are averaged and descaled to 8 bits.
void SideLowpass16s10bitToYUYV(IMAGE *images[], uint8_t *output_buffer, int output_width,
							   int output_height, int output_pitch, bool inverted)
{
	IMAGE *y_image = images[0];
	IMAGE *u_image = images[1];
	IMAGE *v_image = images[2];

	int width = y_image->width;
	int height = output_height;

	PIXEL *y_row_ptr = y_image->band[0];
	PIXEL *u_row_ptr = u_image->band[0];
	PIXEL *v_row_ptr = v_image->band[0];

	int y_pitch = y_image->pitch/sizeof(PIXEL);
	int u_pitch = u_image->pitch/sizeof(PIXEL);
	int v_pitch = v_image->pitch/sizeof(PIXEL);

	uint8_t *outrow =
			output_buffer;
	uint8_t *outptr;
	int row, column;

	// Definitions for optimization
	//const int column_step = 2 * sizeof(__m64);

	// Column at which post processing must begin
	//int post_column = width - (width % column_step);

	// The output pitch should be a positive number before inversion
	assert(output_pitch > 0);

	// Should the image be inverted?
	if (inverted) {
		outrow += (height - 1) * output_pitch;		// Start at the bottom row
		output_pitch = NEG(output_pitch);			// Negate the pitch to go up
	}

	for (row = 0; row < height; row++)
	{
		outptr = outrow;

		// Fill the rest of the output row
		// NOTE(review): the byte order written here is Y V Y U — presumably
		// this matches the codec's internal chroma channel ordering (the
		// channel labeled "u" holding Cr); confirm against the packed
		// conversion routines before relying on images[1]/images[2] semantics.
		for (column = 0; column < width; column+=4)
		{
			int chroma_column = column>>1;
			// Average adjacent lowpass samples and descale to 8 bits
			*(outptr++) = SATURATE_8U((y_row_ptr[column]+y_row_ptr[column+1])>>5);
			*(outptr++) = SATURATE_8U((v_row_ptr[chroma_column]+v_row_ptr[chroma_column+1])>>5);
			*(outptr++) = SATURATE_8U((y_row_ptr[column+2]+y_row_ptr[column+3])>>5);
			*(outptr++) = SATURATE_8U((u_row_ptr[chroma_column]+u_row_ptr[chroma_column+1])>>5);
		}

		// Advance to the next rows in the input and output images
		y_row_ptr += y_pitch;// 3D Work
		u_row_ptr += u_pitch;
		v_row_ptr += v_pitch;
		outrow += output_pitch;
	}
}

// Convert 16-bit signed lowpass data into packed RGB/YUV and store it in the output buffer.
// Dispatches on the decoded output format, with special handling for the
// encoded format (Bayer / RGB 4:4:4 / RGBA 4:4:4:4) and for 3D channel
// blending modes in the YUYV path.
void CopyLowpass16sToBuffer(DECODER *decoder, IMAGE *images[], int num_channels,
							uint8_t *output_buffer, int32_t output_pitch, FRAME_INFO *info,
							int chroma_offset, int precision, int encode_format, int whitebitdepth)
{
	//IMAGE *image = frame->channel[0];
	bool inverted = false;
	int output_width = info->width;
	int output_height = info->height;

	// Number of bits to shift down from the coded precision to 8-bit output
	int descale = precision - 8;

	// Get the color format from the decoded format
	int color_format = info->format & COLOR_FORMAT_MASK;

	// Must compile this routine with switches set for decoding to 8-bit unsigned pixels
#if !defined(_DECODE_FRAME_8U) || (_DECODE_FRAME_8U == 0)
	assert(0);
	return;
#endif

	START(tk_convert);

#if 0
	// Fill the output buffer with blank values
	EraseOutputBuffer(output_buffer, info->width, info->height, output_pitch, info->format);
#endif

	// Determine the type of conversion
	switch (info->format)
	{
	case DECODED_FORMAT_RGB24:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB24_INVERTED:
		if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGB48ToRGB(images, output_buffer, output_width, output_height,
										output_pitch, COLOR_FORMAT_RGB24, info->colorspace,
										inverted, descale, num_channels);
		}
		else
		{
			ConvertLowpass16sToRGBNoIPPFast(images, output_buffer, output_width, output_height,
											output_pitch, COLOR_FORMAT_RGB24, info->colorspace,
											inverted, descale);
		}
		break;

	case DECODED_FORMAT_RGB32:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB32_INVERTED:
		if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGB48ToRGB(images, output_buffer, output_width, output_height,
										output_pitch, COLOR_FORMAT_RGB32, info->colorspace,
										inverted, descale, num_channels);
		}
		else
		{
			ConvertLowpass16sToRGBNoIPPFast(images, output_buffer, output_width, output_height,
											output_pitch, COLOR_FORMAT_RGB32, info->colorspace,
											inverted, descale);
		}
		break;

	case DECODED_FORMAT_RG48:
		if(encode_format == ENCODED_FORMAT_BAYER)
		{
			ConvertLowpass16sBayerToRGB48(images, output_buffer, output_width, output_height,
										  output_pitch, 2, num_channels);
		}
		else if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			int scale = 1;
			// NOTE(review): this inner test repeats the enclosing else-if
			// condition, so scale is always 2 on this branch.
			if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
				scale = 2;
			ConvertLowpass16sRGB48ToRGB48(images, output_buffer, output_width, output_height,
										  output_pitch, scale, num_channels);
		}
		else
		{
			ConvertLowpass16sYUVtoRGB48(images, (uint8_t *)output_buffer, output_width, output_height,
										output_pitch, info->colorspace, inverted, descale,
										info->format, whitebitdepth);
		}
		break;

	case DECODED_FORMAT_RG64:
		if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGBA64ToRGBA64(images, output_buffer, output_width, output_height,
											output_pitch, descale, num_channels, info->format & 0xffff);
		}
		else
		{
			assert(0);
		}
		break;

	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
		if(encode_format == ENCODED_FORMAT_RGB_444 || encode_format == ENCODED_FORMAT_RGBA_4444)
		{
			ConvertLowpass16sRGBA64ToRGBA64(images, output_buffer, output_width, output_height,
											output_pitch, descale, num_channels, info->format & 0xffff);
		}
		else
		{
			ConvertLowpass16sYUVtoRGB48(images, (uint8_t *)output_buffer, output_width, output_height,
										output_pitch, info->colorspace, inverted, descale,
										info->format, whitebitdepth);
		}
		break;

#if 0
	case DECODED_FORMAT_YUYV_INVERTED:
		inverted = true;
		// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
	case DECODED_FORMAT_YUYV:
	case DECODED_FORMAT_UYVY:
		if (precision == CODEC_PRECISION_10BIT)
		{
			int lineskip = 1;	// 3D Work
			int pitch = output_pitch;

			// Stacked or line-interleaved 3D output writes every other line
			if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV)
			{
				if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
					decoder->channel_blend_type == BLEND_LINE_INTERLEAVED) // 3d Work
				{
					lineskip = 2;
					if(decoder->channel_blend_type == 3)
						pitch *= 2;
				}
			}

			if((decoder->channel_blend_type == BLEND_SIDEBYSIDE_ANAMORPHIC ||
				decoder->channel_blend_type == BLEND_FREEVIEW) &&
				decoder->frame.format == DECODED_FORMAT_YUYV) //side by side
			{
				SideLowpass16s10bitToYUYV(images, output_buffer, output_width, output_height,
										  pitch, inverted);
			}
			else
			{
				//ConvertLowpass16s10bitToYUV(images, output_buffer, output_width, output_height, pitch, COLOR_FORMAT_YUYV, inverted, lineskip);
				ConvertLowpass16s10bitToYUV(images, output_buffer, output_width, output_height,
											pitch, color_format, inverted, lineskip);
			}
		}
		else
		{
			//ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height, output_pitch, COLOR_FORMAT_YUYV, inverted);
			ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height,
								   output_pitch, color_format, inverted);
		}
		break;

#if 0
	case DECODED_FORMAT_UYVY_INVERTED:
		inverted = true;
		// Fall through and convert to YUV (first image row displayed at the bottom)
#endif

#if 0
	case DECODED_FORMAT_UYVY:
		ConvertLowpass16sToYUV(images, output_buffer, output_width, output_height,
							   output_pitch, COLOR_FORMAT_UYVY, inverted);
		break;
#endif

//#if BUILD_PROSPECT
	case DECODED_FORMAT_V210:
		if (precision == CODEC_PRECISION_10BIT)
		{
			ConvertLowpass16s10bitToV210(images, output_buffer, output_width, output_height,
										 output_pitch, COLOR_FORMAT_V210, inverted);
		}
		else
		{
			// Only 10-bit sources can be packed as V210
			//ConvertLowpass16sToV210(images, output_buffer, output_width, output_pitch, COLOR_FORMAT_V210, inverted);
			assert(0);
		}
		break;
//#endif

	case DECODED_FORMAT_YU64:	// DAN04262004
		ConvertLowpass16sToYUV64(images, output_buffer, output_width, output_height,
								 output_pitch, COLOR_FORMAT_YU64, inverted, precision);
		break;

//#if BUILD_PROSPECT
	case DECODED_FORMAT_YR16:
		ConvertLowpass16sToYR16(images, output_buffer, output_width, output_height,
								output_pitch, COLOR_FORMAT_YR16, inverted, precision);
		break;
//#endif

	default:
		// Unsupported format (output a blank frame)
		assert(0);
		break;
	}

	STOP(tk_convert);
}

// Convert a strip of planar YUV 4:2:2 rows to the requested packed output
// format and store the result in the output buffer.
void ConvertYUVStripPlanarToBuffer(uint8_t *planar_output[], int planar_pitch[], ROI roi,
								   uint8_t *output_buffer, int output_pitch,
								   int frame_width, int format, int colorspace)
{
	bool inverted = false;
	int output_width = roi.width;

#if !defined(_DECODE_FRAME_8U) || (_DECODE_FRAME_8U == 0)
#error Must set compile-time switches to decode to 8-bit pixels
#endif

	START(tk_convert);

#if _ENCODE_CHROMA_OFFSET
#error Cannot handle images encoded with a non-zero chroma offset
#endif

	// Determine the type of conversion
	switch(format)
	{
	case DECODED_FORMAT_RGB24:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB24_INVERTED:
		ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer,
							  output_width, output_pitch, COLOR_FORMAT_RGB24,
							  colorspace, inverted);
		break;

	case DECODED_FORMAT_RGB32:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB32_INVERTED:
		ConvertPlanarYUVToRGB(planar_output, planar_pitch, roi, output_buffer,
							  output_width, output_pitch, COLOR_FORMAT_RGB32,
							  colorspace, inverted);
		break;

#if 0
	case DECODED_FORMAT_YUYV_INVERTED:
		inverted = true;
		// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
	case DECODED_FORMAT_YUYV:
		ConvertYUVStripPlanarToPacked(planar_output, planar_pitch, roi, output_buffer,
									  output_pitch, frame_width, format);
		break;

#if 0
	case DECODED_FORMAT_UYVY_INVERTED:
		inverted = true;
		// Fall through and convert to YUV (first image row displayed at the bottom)
#endif
	case DECODED_FORMAT_UYVY:
		ConvertPlanarYUVToUYVY(planar_output, planar_pitch, roi, output_buffer,
							   output_width, output_pitch, COLOR_FORMAT_UYVY,
							   colorspace, inverted);
		break;

	default:
		// Unsupported format (output a blank frame)
		assert(0);
		break;
	}

	STOP(tk_convert);
}

// Convert planar 16-bit rows to the requested output format, applying
// dithering for the 8-bit RGB outputs.  The YUYV/UYVY paths are not yet
// implemented for 16-bit row inputs (they assert before falling through).
void ConvertRow16uToDitheredBuffer(DECODER *decoder, uint8_t *planar_output[], int planar_pitch[], ROI roi,
								   uint8_t *output_buffer, int output_pitch,
								   int frame_width, int format, int colorspace)
{
	bool inverted = false;
	int output_width = roi.width;

	START(tk_convert);

	// Determine the type of conversion
	switch(format)
	{
	case DECODED_FORMAT_RGB24:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB24_INVERTED:
		//ConvertPlanarYUVToRGB
		ConvertRow16uToDitheredRGB(decoder, planar_output, planar_pitch, roi, output_buffer,
								   output_width, output_pitch, COLOR_FORMAT_RGB24,
								   colorspace, inverted);
		break;

	case DECODED_FORMAT_RGB32:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB32_INVERTED:
		ConvertRow16uToDitheredRGB(decoder, planar_output, planar_pitch, roi, output_buffer,
								   output_width, output_pitch, COLOR_FORMAT_RGB32,
								   colorspace, inverted);
		break;

	case COLOR_FORMAT_WP13:
	case COLOR_FORMAT_B64A:
	case COLOR_FORMAT_RG48:
	case COLOR_FORMAT_R210:
	case COLOR_FORMAT_DPX0:
	case COLOR_FORMAT_RG30:
	case COLOR_FORMAT_AR10:
	case COLOR_FORMAT_AB10:
		ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi, output_buffer,
								 output_width, output_pitch, format, colorspace,
								 NULL, NULL);
		break;

	case DECODED_FORMAT_YUYV:
		assert(0);// These routines are not yet updated for ROW16u inputs
		ConvertYUVStripPlanarToPacked(planar_output, planar_pitch, roi, output_buffer,
									  output_pitch, frame_width, format);
		break;

	case DECODED_FORMAT_UYVY:
		assert(0);// These routines are not yet updated for ROW16u inputs
		ConvertPlanarYUVToUYVY(planar_output, planar_pitch, roi, output_buffer,
							   output_width, output_pitch, COLOR_FORMAT_UYVY,
							   colorspace, inverted);
		break;

	default:
		// Unsupported format (output a blank frame)
		assert(0);
		break;
	}

	STOP(tk_convert);
}

// Convert one row of packed YUYV to the specified color format.
// Only 8-bit precision is supported for the packed YUV outputs; deeper
// precisions would require dithering, which is not implemented here.
// NOTE(review): the local "inverted" flag is set but never used in this
// routine — row-level conversion has no notion of vertical orientation.
void ConvertRowYUYV(uint8_t *input, uint8_t *output, int length, int format,
					int colorspace, int precision)
{
	size_t row_size = 2 * length;
	bool inverted = false;

	START(tk_convert);

	// Determine the type of color conversion
	switch (format)
	{
	case DECODED_FORMAT_RGB24:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB24_INVERTED:
		ConvertYUYVRowToRGB(input, output, length, COLOR_FORMAT_RGB24, colorspace, precision);
		break;

	case DECODED_FORMAT_RGB32:
		inverted = true;
		// Fall through and convert to RGB (first image row displayed at the bottom)

	case DECODED_FORMAT_RGB32_INVERTED:
		ConvertYUYVRowToRGB(input, output, length, COLOR_FORMAT_RGB32, colorspace, precision);
		break;

	case DECODED_FORMAT_YUYV:
		if(precision == 8)
			memcpy(output, input, row_size);
		else
		{
			//need to dither to 8-bit
			assert(0);
		}
		break;

	case DECODED_FORMAT_UYVY:
		if(precision == 8)
			ConvertYUYVRowToUYVY(input, output, length, COLOR_FORMAT_UYVY);
		else
		{
			//need to dither to 8-bit
			assert(0);
		}
		break;

//#if BUILD_PROSPECT
	case DECODED_FORMAT_V210:
		assert(0); // should get here with 8bit data.
		//ConvertYUYVRowToV210(input, output, length, COLOR_FORMAT_V210);
		break;

	case DECODED_FORMAT_YU64:
		assert(0); // should get here with 8bit data.
		//ConvertYUYVRowToYU64(input, output, length, COLOR_FORMAT_YU64);
		break;

	case DECODED_FORMAT_BYR3:
	case DECODED_FORMAT_BYR4:
		assert(0); // should get here with 8bit data.
		//ConvertYUYVRowToYU64(input, output, length, COLOR_FORMAT_YU64);
		break;
//#endif

	default:
		// Unsupported format (output a blank frame)
		assert(0);
		memset(output, 0, row_size);
		break;
	}

	STOP(tk_convert);
}

#if _THREADED_DECODER

// Allocate (or reallocate) the wavelet at the given index in the transform
// while holding the entropy worker lock, so concurrent decoder threads do not
// race on the wavelet pointer.  Returns the (possibly reallocated) wavelet.
IMAGE *GetWaveletThreadSafe(DECODER *decoder, TRANSFORM *transform, int index,
							int width, int height, int level, int type)
{
	IMAGE *wavelet = transform->wavelet[index];

	assert(decoder != NULL && transform != NULL);
	if (decoder != NULL && transform != NULL)
	{
#if (1 && DEBUG)
		FILE *logfile = decoder->logfile;
#endif
		// Lock access to the wavelet data
#if _DELAYED_THREAD_START==0
		Lock(&decoder->entropy_worker_new.lock);
#endif
		// Get the wavelet from the transform data structure (thread safe)
		wavelet = transform->wavelet[index];

		// Allocate (or reallocate) the wavelet
#if _ALLOCATOR
		wavelet = ReallocWaveletEx(decoder->allocator, wavelet, width, height, level, type);
#else
		wavelet = ReallocWaveletEx(wavelet, width, height, level, type);
#endif
		// Save this wavelet in the transform data structure
		transform->wavelet[index] = wavelet;

		// Unlock access to the wavelet data
#if _DELAYED_THREAD_START==0
		Unlock(&decoder->entropy_worker_new.lock);
#endif
	}

	return wavelet;
}

// Update the codec state with the information in a tag value pair
CODEC_ERROR UpdateCodecState(DECODER *decoder, BITSTREAM *input, CODEC_STATE *codec,
							 TAGWORD tag, TAGWORD value)
{
	CODEC_ERROR
error = CODEC_ERROR_OKAY;
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	bool optional = false;		// Negative tags mark entries the decoder may skip
	int chunksize = 0;			// Number of 32-bit words to skip for chunk-style tags
	bool result;

	// Is this an optional tag?  (Optional tags are encoded with a negated value.)
	if (tag < 0) {
		tag = NEG(tag);
		optional = true;
	}

#if (0 && DEBUG)
	if (logfile) {
		fprintf(logfile, "UpdateCodecState tag: %d, value: %d, optional: %d\n", tag, value, optional);
	}
#endif

	// Dispatch on the tag to update the decoder state; some tags trigger
	// decoding of subband data directly (see DecodeSampleSubband calls below).
	switch (tag)
	{
	case CODEC_TAG_ZERO:		// Used internally
		//assert(0);			// Should not occur in the bitstream
		error = CODEC_ERROR_INVALID_BITSTREAM;
		break;

	case CODEC_TAG_SAMPLE:		// Type of sample
		//assert(0);
		if (value == SAMPLE_TYPE_CHANNEL)
		{
			result = DecodeSampleChannelHeader(decoder, input);
			if (!result)
				error = CODEC_ERROR_DECODE_SAMPLE_CHANNEL_HEADER;
			else
				error = CODEC_ERROR_OKAY;
		}
		break;

	case CODEC_TAG_INDEX:		// Sample index table
		//assert(0);			// Need to figure out how to return the group index
		{
			uint32_t count = (uint32_t)value;
			if (count <= TRANSFORM_MAX_CHANNELS)
			{
				int i;
				uint32_t* index = (uint32_t*)(&codec->channel_size[0]);
				DecodeGroupIndex(input, index, count);
				// Validate each channel size against the block length so a
				// corrupt index cannot send the decoder past the sample
				for (i = 0; i < (int)count; i++)
				{
					if(index[i] > (uint32_t)input->dwBlockLength)
						error = CODEC_ERROR_SAMPLE_INDEX;
				}
				codec->num_channels = count;
			}
			else
				error = CODEC_ERROR_SAMPLE_INDEX;
		}
		break;

	case CODEC_TAG_SUBBAND:		// Has the decoder encountered a subband?
		if(value>=0 && value < CODEC_MAX_SUBBANDS)
		{
			// This tag is obsolete and not used in modern streams
			int subband = value;

			// Check that the subband number makes sense
			//assert(0 <= subband && subband <= codec->max_subband);
			if (! (0 <= subband && subband <= codec->max_subband))
			{
				error = CODEC_ERROR_DECODING_SUBBAND;
				break;
			}

			// Decompress the subband
			result = DecodeSampleSubband(decoder, input, subband);
			if (!result)
				error = CODEC_ERROR_DECODING_SUBBAND;
			else
				error = CODEC_ERROR_OKAY;
		}
		else
			error = CODEC_ERROR_DECODING_SUBBAND;
		break;

	case CODEC_TAG_BAND_HEADER: //CODEC_TAG_BAND_DIVISOR:	// Band divisor. this is last TAG before subband data so act.
		codec->band.divisor = value;	// This tag value pair encodes the band divisor which is obsolete
		{
			// This tag value pair marks the beginning of the encoded coefficients
			// The subband number has already been decoded
			int subband = codec->band.subband;
			result = DecodeSampleSubband(decoder, input, subband);
			if (!result)
				error = CODEC_ERROR_DECODING_SUBBAND;
			else
				error = CODEC_ERROR_OKAY;
		}
		break;

	case CODEC_TAG_ENTRY:		// Entry in sample index
		//assert(0);			// Need to figure out how to return the group index
		error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_MARKER:		// Bitstream marker
		{
			int marker = value;
			uint8_t *current_position;

			// Save the current bitstream position
			current_position = GetBitstreamPosition(input);
			current_position -= 4; // Step back to before the GetSegment i.e. the TAG

			if (IsLowPassHeaderMarker(marker))
			{
				// Save the bitstream position for the start of the channel
				codec->channel_position = current_position;
			}
			else if (IsLowPassBandMarker(marker))
			{
				// The lowpass band is always subband zero
				int subband = 0;
				result = DecodeSampleSubband(decoder, input, subband);
				if (!result)
					error = CODEC_ERROR_DECODING_SUBBAND;
				else
					error = CODEC_ERROR_OKAY;
			}
		}
		break;

	case CODEC_TAG_VERSION_MAJOR:		// Version
		//assert(0);
		error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_VERSION_MINOR:		// Minor version number
		//assert(0);
		error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_VERSION_REVISION:	// Revision number
		//assert(0);
		error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_VERSION_EDIT:		// Edit number
		//assert(0);
		error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_SEQUENCE_FLAGS:		// Video sequence flags
		//assert(0);
		error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_TRANSFORM_TYPE:		// Type of transform
		//assert(TRANSFORM_TYPE_FIRST <= value && value <= TRANSFORM_TYPE_LAST);
		if (TRANSFORM_TYPE_FIRST <= value && value <= TRANSFORM_TYPE_LAST)
		{
			int i;
			codec->transform_type = value;
			// Recompute the prescale tables for every allocated channel since
			// the prescale shifts depend on the transform type
			for(i=0;i<TRANSFORM_MAX_CHANNELS;i++)
			{
				TRANSFORM *transform = decoder->transform[i];
				if(transform)
				{
					GetTransformPrescale(transform, codec->transform_type, codec->precision);
				}
			}
		}
		else
			error = CODEC_ERROR_TRANSFORM_TYPE;
		break;

	case CODEC_TAG_NUM_FRAMES:		// Number of frames in the group
		//assert(0 <= value && value <= TRANSFORM_NUM_FRAMES);
		if (0 <= value && value <= TRANSFORM_NUM_FRAMES)
			codec->num_frames = value;
		else
			error = CODEC_ERROR_NUM_FRAMES;
		break;

	case CODEC_TAG_NUM_CHANNELS:	// Number of channels in the transform
		//assert(value <= CODEC_MAX_CHANNELS);
		if (value <= CODEC_MAX_CHANNELS)
			codec->num_channels = value;
		else
			error = CODEC_ERROR_NUM_CHANNELS;
		break;

	case CODEC_TAG_NUM_WAVELETS:	// Number of wavelets in the transform
		//assert(0 < value && value <= TRANSFORM_NUM_WAVELETS);
		if (0 < value && value <= TRANSFORM_NUM_WAVELETS)
			codec->num_wavelets = value;
		else
			error = CODEC_ERROR_NUM_WAVELETS;
		break;

	case CODEC_TAG_NUM_SUBBANDS:	// Number of encoded subbands
		//assert(0 < value && value <= TRANSFORM_NUM_SUBBANDS);
		if (0 < value && value <= TRANSFORM_NUM_SUBBANDS)
			codec->num_subbands = value;
		else
			error = CODEC_ERROR_NUM_SUBBANDS;
		break;

	case CODEC_TAG_NUM_SPATIAL:		// Number of spatial levels
		//assert(0 < value && value <= TRANSFORM_NUM_SPATIAL);
		if (0 < value && value <= TRANSFORM_NUM_SPATIAL)
			codec->num_spatial = value;
		else
			error = CODEC_ERROR_NUM_SPATIAL;
		break;

	case CODEC_TAG_FIRST_WAVELET:	// Type of the first wavelet
		//assert(value == TRANSFORM_FIRST_WAVELET);
		if (value == TRANSFORM_FIRST_WAVELET)
			codec->first_wavelet = value;
		else
			error = CODEC_ERROR_FIRST_WAVELET;
		break;

	case CODEC_TAG_CHANNEL_SIZE:	// Number of bytes in each channel
		//assert(0);
		error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_GROUP_TRAILER:	// Group trailer and checksum
		codec->sample_done = true;
		break;

	case CODEC_TAG_FRAME_TYPE:		// Type of frame marks the frame start
		codec->frame.type = value;
		break;

	case CODEC_TAG_FRAME_WIDTH:		// Width of the frame
		if (value > 0 && value <= 32768)
			codec->frame.width = value;
		else
			error = CODEC_ERROR_RESOLUTION;
		break;

	case
	CODEC_TAG_FRAME_HEIGHT:			// Height of the frame
		if (value > 0 && value <= 32768)
		{
			codec->frame.height = value;

			//DAN20080729 -- Initialize the default colorspace based on clip resolution
			if ((decoder->frame.colorspace & COLORSPACE_MASK) == COLOR_SPACE_UNDEFINED)
			{
				int internalheight = value;
				int internalwidth = codec->frame.width;

				// Bayer frames are encoded at half resolution in each dimension
				if (decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
				{
					internalwidth *= 2;
					internalheight *= 2;
				}

				// Larger than SD defaults to 709, otherwise 601
				if (internalheight > 576 || internalwidth > 720)
					decoder->frame.colorspace |= COLOR_SPACE_CG_709;
				else
					decoder->frame.colorspace |= COLOR_SPACE_CG_601;
			}

			//if(decoder->frame.colorspace_filedefault)
			//	decoder->frame.colorspace = decoder->frame.colorspace_filedefault;

			if (decoder->frame.colorspace_override)
				decoder->frame.colorspace = decoder->frame.colorspace_override;
		}
		else
			error = CODEC_ERROR_RESOLUTION;
		break;

	case CODEC_TAG_ENCODED_COLORSPACE:	//DAN20080729
		if(decoder->codec.encoded_format == ENCODED_FORMAT_BAYER)
			value &= ~(COLOR_SPACE_BT_601|COLOR_SPACE_BT_709); // Bayer has no 601 vs 709,
										//there was a bug in 3.9.4 that had bayer flagged as 601.
		if(decoder->frame.colorspace_override)
			decoder->frame.colorspace = decoder->frame.colorspace_override;
		else
		{
			if(decoder->codec.encoded_format == ENCODED_FORMAT_YUV_422)
			{
				decoder->frame.colorspace &= ~(COLOR_SPACE_BT_601 | COLOR_SPACE_BT_709);
				decoder->frame.colorspace |= (value & (COLOR_SPACE_BT_601 | COLOR_SPACE_BT_709));
				//Let the VSRGB status be controllable by the calling application (e.g. Vegas)
			}
			else
			{
				decoder->frame.colorspace &= ~(COLOR_SPACE_VS_RGB);
				decoder->frame.colorspace |= (value & (COLOR_SPACE_VS_RGB));
			}
		}
		decoder->frame.colorspace_filedefault = value;
		break;

	case CODEC_TAG_FRAME_FORMAT:	// Format of the encoded pixels (GRAY, YUV, RGB, RGBA)
		//assert(0);
		error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_INPUT_FORMAT:	// Format of the original pixels
		codec->input_format = value;
		// Set the encoded format if it has not already been set
		// error = UpdateEncodedFormat(codec, (COLOR_FORMAT)value);
		break;

	case CODEC_TAG_ENCODED_FORMAT:	// Internal format of the encoded data
	case CODEC_TAG_OLD_ENCODED_FORMAT:
		if (value >= ENCODED_FORMAT_MINIMUM && value <= ENCODED_FORMAT_MAXIMUM)
		{
			codec->encoded_format = value;
			// Three-channel samples flagged as RGBA are really RGB 4:4:4
			if (codec->encoded_format == ENCODED_FORMAT_RGBA_4444 && codec->num_channels == 3)
				codec->encoded_format = ENCODED_FORMAT_RGB_444;
		}
		else
			error = CODEC_ERROR_BADFORMAT;
		break;

	case CODEC_TAG_FRAME_INDEX:		// Position of frame within the group
		codec->frame.group_index = value;
		break;

	case CODEC_TAG_FRAME_TRAILER:	// Frame trailer and checksum
		codec->sample_done = true;
		break;

	case CODEC_TAG_LOWPASS_SUBBAND:	// Subband number of the lowpass band
		codec->lowpass.subband = value;
		error = SetDefaultEncodedFormat(codec);
		break;

	case CODEC_TAG_NUM_LEVELS:		// Number of wavelet levels
		if(value > 0 && value <= 4)
			codec->lowpass.level = value;
		else
			error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_LOWPASS_WIDTH:	// Width of the lowpass band
		if(value > 0 && value < codec->frame.width/4)
			codec->lowpass.width = value;
		else
			error = CODEC_ERROR_RESOLUTION;
		break;

	case CODEC_TAG_LOWPASS_HEIGHT:	// Height of the lowpass band
		if (value > 0 && value < codec->frame.height/4)
			codec->lowpass.height = value;
		else
			error = CODEC_ERROR_RESOLUTION;
		break;

	case CODEC_TAG_MARGIN_TOP:		// Margins that define the encoded subset
		codec->lowpass.margin.top = value;
		break;

	case CODEC_TAG_MARGIN_BOTTOM:
		codec->lowpass.margin.bottom = value;
		break;

	case
CODEC_TAG_MARGIN_LEFT:
		codec->lowpass.margin.left = value;
		break;

	case CODEC_TAG_MARGIN_RIGHT:
		codec->lowpass.margin.right = value;
		break;

	case CODEC_TAG_PIXEL_OFFSET:	// Quantization parameters
		codec->lowpass.pixel_offset = value;
		break;

	case CODEC_TAG_QUANTIZATION:	// Quantization divisor used during encoding
		if(value > 0)
			codec->lowpass.quantization = value;
		else
			error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_PIXEL_DEPTH:		// Number of bits per pixel
		if(value >=8 && value <= 16)
			codec->lowpass.bits_per_pixel = value;
		else
			error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_LOWPASS_TRAILER:	// Lowpass trailer
		//assert(0);
		error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_WAVELET_TYPE:	// Type of wavelet
		if(value >= 1 && value <= WAVELET_TYPE_HIGHEST)
			codec->highpass.wavelet_type = value;
		else
			error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_WAVELET_NUMBER:	// Number of the wavelet in the transform
		if (value >= 0 && value <= 6)
			codec->highpass.wavelet_number = value;
		else
			error = CODEC_ERROR_NUM_WAVELETS;
		break;

	case CODEC_TAG_WAVELET_LEVEL:	// Level of the wavelet in the transform
		if (value >= 0 && value <= 4)
			codec->highpass.wavelet_level = value;
		else
			error = CODEC_ERROR_NUM_WAVELETS;
		break;

	case CODEC_TAG_NUM_BANDS:		// Number of wavelet bands
		if (value >= 0 && value <= 4)
			codec->highpass.num_bands = value;
		else
			error = CODEC_ERROR_NUM_SUBBANDS;
		break;

	case CODEC_TAG_HIGHPASS_WIDTH:	// Width of each highpass band
		if (value > 0 && value <= codec->frame.width / 2)
			codec->highpass.width = value;
		else
			error = CODEC_ERROR_RESOLUTION;
		break;

	case CODEC_TAG_HIGHPASS_HEIGHT:	// Height of each highpass band
		if (value > 0 && value <= codec->frame.height / 2)
			codec->highpass.height = value;
		else
			error = CODEC_ERROR_RESOLUTION;
		break;

	case CODEC_TAG_LOWPASS_BORDER:	// Dimensions of lowpass border (obsolete)
		codec->highpass.lowpass_border = value;
		break;

	case CODEC_TAG_HIGHPASS_BORDER:	// Dimensions of highpass border (obsolete)
		codec->highpass.highpass_border
		= value;
		break;

	case CODEC_TAG_LOWPASS_SCALE:	// Scale factor for lowpass band
		codec->highpass.lowpass_scale = value;
		break;

	case CODEC_TAG_LOWPASS_DIVISOR:	// Divisor for the lowpass band
		codec->highpass.lowpass_divisor = value;
		break;

	case CODEC_TAG_HIGHPASS_TRAILER:	// Highpass trailer
		//assert(0);
		error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_BAND_NUMBER:		// Identifying number of a wavelet band
		if (value < IMAGE_NUM_BANDS)
			codec->band.number = value;
		else
			error = CODEC_ERROR_BAND_NUMBER;
		break;

	case CODEC_TAG_BAND_WIDTH:		// Band data width
		// Width must evenly divide the frame width by at most 16
		if (value > 0 && (codec->frame.width / value) <= 16 &&
			(codec->frame.width / value) * value == codec->frame.width) // true for a 3 level wavelet (with 4:2:2 sampling)
			codec->band.width = value;
		else
			error = CODEC_ERROR_RESOLUTION;
		break;

	case CODEC_TAG_BAND_HEIGHT:		// Band data height
		// Height must evenly divide the frame height by at most 16
		if (value > 0 && (codec->frame.height / value) <= 16 &&
			(codec->frame.height / value) * value == codec->frame.height) // true for a 3 level wavelet (with 4:2:2 sampling)
			codec->band.height = value;
		else
			error = CODEC_ERROR_RESOLUTION;
		break;

	case CODEC_TAG_BAND_SUBBAND:	// Subband number of this wavelet band
		// 0xff is accepted as a sentinel subband number
		if (value == 0xff || (value >= 0 && value < CODEC_MAX_SUBBANDS))
			codec->band.subband = value;
		else
			error = CODEC_ERROR_BAND_NUMBER;
		break;

	case CODEC_TAG_BAND_ENCODING:	// Encoding method for this band
		if(value >= BAND_ENCODING_ZEROTREE && value <= BAND_ENCODING_LOSSLESS)
			codec->band.encoding = value;
		else
			error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_BAND_QUANTIZATION:	// Quantization applied to band
		if (value >= 1)
			codec->band.quantization = value;
		else
			error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_BAND_SCALE:		// Band scale factor
		codec->band.scale = value;
		break;

	case CODEC_TAG_BAND_TRAILER:	// Band trailer
		//assert(0);
		error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_NUM_ZEROVALUES:	// Number of zero values
		//assert(0);
		error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_NUM_ZEROTREES:	// Number of zerotrees
		//assert(0);
		error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_NUM_POSITIVES:	// Number of positive values
		//assert(0);
		error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_NUM_NEGATIVES:	// Number of negative values
		//assert(0);
		error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_NUM_ZERONODES:	// Number of zerotree nodes
		//assert(0);
		error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_CHANNEL:			// Channel number
		//assert(0);
		error = CODEC_ERROR_BAD_FRAME;
		break;

	case CODEC_TAG_INTERLACED_FLAGS:	// Interlaced structure of the video stream
		//assert(0);
		break;

	case CODEC_TAG_PROTECTION_FLAGS:	// Copy protection bits
		//assert(0);
		break;

	case CODEC_TAG_PICTURE_ASPECT_X:	// Numerator of the picture aspect ratio
		codec->picture_aspect_x = value;
		//assert(0);
		break;

	case CODEC_TAG_PICTURE_ASPECT_Y:	// Denominator of the picture aspect ratio
		codec->picture_aspect_y = value;
		//assert(0);
		break;

	case CODEC_TAG_SAMPLE_FLAGS:	// Flag bits that control sample decoding
		// Progressive versus interlaced decoding is specified by the sample flags
		error = UpdateCodecFlags(codec, value);
		break;

	case CODEC_TAG_FRAME_NUMBER:	// Sequence number of the frame in the bitstream
		codec->frame_number = value;
		break;

	// This TAG is now support as part of the universal decoder.
	// Only Prospect HD builds can decode 10bit.
	case CODEC_TAG_PRECISION:		// Number of bits in the video source
		if (value == CODEC_PRECISION_8BIT || value == CODEC_PRECISION_10BIT || value == CODEC_PRECISION_12BIT)
		{
			codec->precision = value;
			{
				int i;
				// Prescale shifts depend on the source precision, so refresh
				// the prescale tables for every allocated channel
				for (i = 0; i < TRANSFORM_MAX_CHANNELS; i++)
				{
					TRANSFORM* transform = decoder->transform[i];
					if (transform)
					{
						GetTransformPrescale(transform, codec->transform_type, codec->precision);
					}
				}
			}
		}
		else
			error = CODEC_ERROR_INVALID_PRECICION;
		break;

	case CODEC_TAG_PRESCALE_TABLE:
		{
			int i;
			int prescale[TRANSFORM_MAX_WAVELETS] = {0};
			// Unpack the 2-bit prescale shift for each wavelet from the tag value
			for(i=0;i<TRANSFORM_MAX_WAVELETS;i++)
				prescale[i] = value >> (14-i*2) & 0x3;
			for(i=0;i<TRANSFORM_MAX_CHANNELS;i++)
			{
				TRANSFORM *transform = decoder->transform[i];
				if(transform)
				{
					memcpy(transform->prescale, prescale, sizeof(prescale));
				}
			}
		}
		break;

	case CODEC_TAG_VERSION:		// Version number of the encoder used in each GOP.
		codec->version[0] = (value>>12) & 0xf;
		codec->version[1] = (value>>8) & 0xf;
		codec->version[2] = value & 0xff;
		break;

	case CODEC_TAG_QUALITY_L:	//
		codec->encode_quality &= 0xffff0000;
		codec->encode_quality |= value;
		break;

	case CODEC_TAG_QUALITY_H:	//
		codec->encode_quality &= 0xffff;
		codec->encode_quality |= value<<16;
		break;

	case CODEC_TAG_BAND_CODING_FLAGS:
		codec->active_codebook = value & 0xf; // 0-15 valid code books
		if(codec->active_codebook > CODEC_NUM_CODESETS)
			error = CODEC_ERROR_BAD_FRAME;
		codec->difference_coding = (value>>4) & 1;
		break;

	// Peak table processing
	case CODEC_TAG_PEAK_TABLE_OFFSET_L:
		codec->peak_table.offset &= ~0xffff;
		codec->peak_table.offset |= (value & 0xffff);
		codec->peak_table.base = (PIXEL *)(input->lpCurrentWord);
		codec->peak_table.level = 0; // reset for the next subband
		break;

	case CODEC_TAG_PEAK_TABLE_OFFSET_H:
		codec->peak_table.offset &= 0xffff;
		codec->peak_table.offset |= (value & 0xffff)<<16;
		codec->peak_table.level = 0; // reset for the next subband
		break;

	case CODEC_TAG_PEAK_LEVEL:
		codec->peak_table.level = value;
		codec->peak_table.base += codec->peak_table.offset / sizeof(PIXEL);
		break;
case CODEC_TAG_PEAK_TABLE:	//this is the chunk header, so we have peak data
		codec->peak_table.level = 0; // reset for the next subband
		//Just skip as the data was read ahead
		chunksize = value;
		chunksize &= 0xffff;
		input->lpCurrentWord += chunksize*4;
		input->nWordsUsed -= chunksize*4;
		break;

#if (1 && DEBUG)
	case CODEC_TAG_SAMPLE_END:	// Marks the end of the sample (for debugging only)
		//assert(0);
		error = CODEC_ERROR_BAD_FRAME;
		break;
#endif

	default:	// Unknown tag
		// Decode the chunk size encoded in the tag/value pair so that unknown
		// but optional chunks can be skipped (sizes are in 32-bit words)
		if(tag & 0x4000)
		{
			if(tag & 0x2000) // i.e. 0x6xxx = 24bit size.
			{
				chunksize = value;
				chunksize &= 0xffff;
				chunksize += ((tag&0xff)<<16);
			}
			else // 16bit size
			{
				chunksize = value;
				chunksize &= 0xffff;
			}
		}
		else if(tag & 0x2000) //24bit LONGs chunk size
		{
			optional = true; // Fixes a weird seneraio where the size fields in SizeTagPop() has not
							 // updated the size and turned the tag to optional.  TODO : WHY
			chunksize = 0; // not not skip
			// chunksize = value + ((tag & 0xff)<<16); // do not skip an unknown but optional chunk
			// These are only use to size subbands, but the data within should not be skipped
			// unless
			if((tag & 0xff00) == CODEC_TAG_UNCOMPRESS)
			{
				// Remember where the uncompressed payload lives so it can be
				// unpacked later (see UncompressedSampleFrameBayerToBuffer)
				optional = true;
				chunksize = value;
				chunksize &= 0xffff;
				chunksize += ((tag&0xff)<<16);
				decoder->uncompressed_chunk = (uint32_t *)input->lpCurrentWord;
				decoder->uncompressed_size = chunksize*4;
				decoder->sample_uncompressed = 1;
			}
		}

		//assert(optional);
		if(!optional)
		{
			error = CODEC_ERROR_UNKNOWN_REQUIRED_TAG;
		}
		else if(chunksize > 0) // skip this option chunk
		{
			input->lpCurrentWord += chunksize*4;
			input->nWordsUsed -= chunksize*4;
		}
		break;
	}

	return error;
}

// Mark the specified band of the wavelet as fully decoded (valid).  The flag
// update is guarded by the entropy worker lock when worker threads are running.
void UpdateWaveletBandValidFlags(DECODER *decoder, IMAGE *wavelet, int band)
{
	assert(decoder != NULL);
	assert(wavelet != NULL);
	if (decoder != NULL && wavelet != NULL)
	{
#if (1 && DEBUG)
		FILE *logfile = decoder->logfile;
#endif

#if _THREADED_DECODER
		// Lock access to the wavelet data
		if(decoder->entropy_worker_new.pool.thread_count)
			Lock(&decoder->entropy_worker_new.lock);
#endif

#if (0 && DEBUG)
		if
(logfile) {
			fprintf(logfile, "Changing band valid flags: 0x%04X, mask: 0x%04X\n",
				wavelet->band_valid_flags, BAND_VALID_MASK(band));
		}
#endif
		// Update the wavelet band flags (a valid band has necessarily started)
		wavelet->band_valid_flags |= BAND_VALID_MASK(band);
		wavelet->band_started_flags |= BAND_VALID_MASK(band);

#if _THREADED_DECODER
		// Unlock access to the wavelet data
		if(decoder->entropy_worker_new.pool.thread_count)
			Unlock(&decoder->entropy_worker_new.lock);
#endif
	}
}

// Mark the specified band of the wavelet as started (decoding in progress).
void UpdateWaveletBandStartedFlags(DECODER *decoder, IMAGE *wavelet, int band)
{
	assert(decoder != NULL);
	assert(wavelet != NULL);
	if (decoder != NULL && wavelet != NULL)
	{
		// Update the wavelet band flags
#if _DELAYED_THREAD_START==0
		if(decoder->entropy_worker_new.pool.thread_count)
			Lock(&decoder->entropy_worker_new.lock);
#endif
		wavelet->band_started_flags |= BAND_VALID_MASK(band);
#if _DELAYED_THREAD_START==0
		if(decoder->entropy_worker_new.pool.thread_count)
			Unlock(&decoder->entropy_worker_new.lock);
#endif
	}
}

// Return true if all bands of the wavelet that are NOT produced by the threaded
// inverse transform have been decoded, so the queued transform can proceed.
bool DecodedBandsValid(IMAGE *wavelet, int index, int transform_type)
{
	uint32_t threaded_band_mask;	// Bands supplied by earlier queued transforms
	uint32_t wavelet_band_mask;		// Bands already decoded in this wavelet
	uint32_t decoded_band_mask;		// Bands that must come from the bitstream
	bool decoded_bands_valid;

	// Has this wavelet been created?
	if (wavelet == NULL) {
		// Too soon to wait for the wavelet bands to be decoded
		return false;
	}

	// Is this a fieldplus transform?
	if (transform_type == TRANSFORM_TYPE_FIELDPLUS)
	{
		// Is this the temporal wavelet?
		if (index == 2)
		{
			assert(wavelet->wavelet_type == WAVELET_TYPE_TEMPORAL);
			assert(wavelet->num_bands == 2);

			// Earlier transforms in the queue will compute both wavelet bands
			return true;
		}

		// Is this wavelet at the end of a chain of transforms?
		if (index == 3 || index == 5)
		{
			// Must wait for all bands to be decoded
			threaded_band_mask = 0;
		}
		else
		{
			// The lowpass band will be computed by transforms earlier in the queue
			threaded_band_mask = BAND_VALID_MASK(0);
		}
	}
	// Is this a spatial transform?
	else if (transform_type == TRANSFORM_TYPE_SPATIAL)
	{
		// Is this wavelet at the top of the pyramid?
if (index == 2)
		{
			// Must wait for all bands to be decoded
			threaded_band_mask = 0;
		}
#if 0
		// Is this wavelet at the bottom of the pyramid?
		else if (index == 0)
		{
			// Must wait for all bands to be decoded
			threaded_band_mask = 0;
		}
#endif
		else
		{
			// The lowpass band will be computed by transforms earlier in the queue
			threaded_band_mask = BAND_VALID_MASK(0);
		}
	}
	else
	{
		// Unknown type of transform
		assert(0);

		// Assume that the bands are not valid
		return false;
	}

	// Compute the mask for the bands in this wavelet
	decoded_band_mask = ((1 << wavelet->num_bands) - 1);

	// Clear the bit for the band computed by the threaded transform
	decoded_band_mask &= ~threaded_band_mask;

	// Compute the wavelet bands that have been decoded
	wavelet_band_mask = (wavelet->band_valid_flags & decoded_band_mask);

	// Have all of the bands not computed by the transform thread been decoded?
	decoded_bands_valid = (wavelet_band_mask == decoded_band_mask);

	return decoded_bands_valid;
}

// Append an inverse-transform request (channel, wavelet index, precision) to the
// decoder's transform queue for the worker threads to process.
void QueueThreadedTransform(DECODER *decoder, int channel, int index)
{
#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif
	CODEC_STATE *codec = &decoder->codec;
	TRANSFORM *transform = decoder->transform[channel];
	//IMAGE *wavelet = transform->wavelet[index];
	int precision = codec->precision;

	// The transform data structure must exist
	assert(transform != NULL);

	// The transform thread variables should have been created

	{
		int free_entry;

#if _DELAYED_THREAD_START==0
		// Lock access to the transform queue
		Lock(&decoder->entropy_worker_new.lock);
#endif

		// Copy the transform parameters into the next queue entry
		free_entry = decoder->transform_queue.free_entry;
		assert(0 <= free_entry && free_entry < DECODING_QUEUE_LENGTH);
		if (0 <= free_entry && free_entry < DECODING_QUEUE_LENGTH)
		{
			assert(transform != NULL);
			assert(0 <= channel && channel < TRANSFORM_MAX_CHANNELS);
			assert(0 <= index && index < TRANSFORM_MAX_WAVELETS);

			// Note: The wavelet may not exist when the transform is queued
			decoder->transform_queue.queue[free_entry].transform = transform;
			decoder->transform_queue.queue[free_entry].channel = channel;
			decoder->transform_queue.queue[free_entry].index = index;
			decoder->transform_queue.queue[free_entry].precision = precision;
			decoder->transform_queue.queue[free_entry].done = 0;

			// Update the transform request queue
			decoder->transform_queue.free_entry++;
			decoder->transform_queue.num_entries++;

#if (1 && DEBUG)
			if (logfile) {
				fprintf(logfile, "Queued transform, channel: %d, index: %d\n", channel, index);
			}
#endif
		}

#if _DELAYED_THREAD_START==0
		Unlock(&decoder->entropy_worker_new.lock);
#endif
	}
}

#if _THREADED_DECODER

// Block until the entropy worker pool has drained the transform queue, then
// reset the queue indices for the next sample.
void WaitForTransformThread(DECODER *decoder)
{
	if(decoder->entropy_worker_new.pool.thread_count)
	{
#if _DELAYED_THREAD_START
		// Delayed-start builds kick off the workers here
		ThreadPoolSendMessage(&decoder->entropy_worker_new.pool, THREAD_MESSAGE_START);
#endif
		ThreadPoolWaitAllDone(&decoder->entropy_worker_new.pool);

		decoder->transform_queue.started = 0;
		decoder->transform_queue.num_entries = 0;
		decoder->transform_queue.next_entry = 0;
		decoder->transform_queue.free_entry = 0;
	}
}
#endif
#endif

#if _INTERLACED_WORKER_THREADS

// Run the inverse frame transform to YUV output using the interlaced worker
// threads: post the parameters to the shared mailbox, release one semaphore
// count per input row, wake the workers, and wait for them to finish.
void TransformInverseFrameThreadedToYUV(DECODER *decoder, int frame_index, int num_channels,
										uint8_t *output, int pitch, FRAME_INFO *info,
										int chroma_offset, int precision)
{
	int32_t lPreviousCount,i;

	// There are half as many input rows as output rows
	int transform_height = (((info->height+7)/8)*8) / 2;
	int middle_row_count = transform_height;

	// Post a message to the mailbox
	struct interlace_data *mailbox = &decoder->interlaced_worker.interlace_data;
	mailbox->type = THREAD_TRANSFORM_FRAME_YUV;
	mailbox->frame = frame_index;
	mailbox->num_channels = num_channels;
	mailbox->output = output;
	mailbox->pitch = pitch;
	memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
	mailbox->chroma_offset = chroma_offset;
	mailbox->precision = precision;

	// Set the semaphore to the number of rows
	decoder->interlaced_worker.current_row = 0;
ReleaseSemaphore(decoder->interlaced_worker.row_semaphore, middle_row_count, &lPreviousCount);
	assert(lPreviousCount == 0);

	// Wake up both worker threads
	for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
	{
		SetEvent(decoder->interlaced_worker.start_event[i]);
	}

	// Wait for both worker threads to finish
	WaitForMultipleObjects(THREADS_IN_LAST_WAVELET, decoder->interlaced_worker.done_event, true, INFINITE);
}

// Same as TransformInverseFrameThreadedToYUV but the workers produce rows of
// 16-bit pixels (PIXEL16U) instead of packed YUV output.
void TransformInverseFrameThreadedToRow16u(DECODER *decoder, int frame_index, int num_channels,
										   PIXEL16U *output, int pitch, FRAME_INFO *info,
										   int chroma_offset, int precision)
{
	int32_t lPreviousCount,i;

	// There are half as many input rows as output rows
	int transform_height = (((info->height+7)/8)*8) / 2;
	int middle_row_count = transform_height;

	// Post a message to the mailbox
	struct interlace_data *mailbox = &decoder->interlaced_worker.interlace_data;
	mailbox->type = THREAD_TRANSFORM_FRAME_ROW16U;
	mailbox->frame = frame_index;
	mailbox->num_channels = num_channels;
	mailbox->output = (uint8_t *)output;
	mailbox->pitch = pitch;
	memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
	mailbox->chroma_offset = chroma_offset;
	mailbox->precision = precision;

	// Set the semaphore to the number of rows
	decoder->interlaced_worker.current_row = 0;
	ReleaseSemaphore(decoder->interlaced_worker.row_semaphore, middle_row_count, &lPreviousCount);
	assert(lPreviousCount == 0);

	// Wake up both worker threads
	for(i=0; i<THREADS_IN_LAST_WAVELET; i++)
	{
		SetEvent(decoder->interlaced_worker.start_event[i]);
	}

	// Wait for both worker threads to finish
	WaitForMultipleObjects(THREADS_IN_LAST_WAVELET, decoder->interlaced_worker.done_event, true, INFINITE);
}

// Worker thread entry point for interlaced inverse-transform processing.
// Each worker claims a thread index, then loops: wait for the start or stop
// event, perform the transform type posted in the mailbox, and signal done.
DWORD WINAPI InterlacedWorkerThreadProc(LPVOID lpParam)
{
	DECODER *decoder = (DECODER *)lpParam;
	FILE *logfile = decoder->logfile;
	struct interlace_data *data = &decoder->interlaced_worker.interlace_data;
	int thread_index;
	HANDLE hObjects[2];
	DWORD dwReturnValue;

	if(decoder->thread_cntrl.affinity)
	{
		HANDLE hCurrentThread = GetCurrentThread();
SetThreadAffinityMask(hCurrentThread,decoder->thread_cntrl.affinity);
	}

	// Set the handler for system exceptions
#ifdef _WIN32
	SetDefaultExceptionHandler();
#endif

	// Determine the index of this worker thread
	// (the critical section serializes the thread_count increment)
	if(decoder->interlaced_worker.lock_init)
	{
		EnterCriticalSection(&decoder->interlaced_worker.lock);
	}
	thread_index = decoder->interlaced_worker.thread_count++;
	if(decoder->interlaced_worker.lock_init)
		LeaveCriticalSection(&decoder->interlaced_worker.lock);

	// The transform worker variables should have been created
	assert(decoder->interlaced_worker.start_event[thread_index] != NULL);
	assert(decoder->interlaced_worker.row_semaphore != NULL);
	assert(decoder->interlaced_worker.done_event[thread_index] != NULL);
	assert(decoder->interlaced_worker.stop_event != NULL);
	if (!(decoder->interlaced_worker.start_event[thread_index] != NULL &&
		decoder->interlaced_worker.row_semaphore != NULL &&
		decoder->interlaced_worker.done_event[thread_index] != NULL &&
		decoder->interlaced_worker.stop_event != NULL)) {
		return 1;
	}

	hObjects[0] = decoder->interlaced_worker.start_event[thread_index];
	hObjects[1] = decoder->interlaced_worker.stop_event;

	for (;;)
	{
		// Wait for the signal to begin processing a transform
		dwReturnValue = WaitForMultipleObjects(2, hObjects, false, INFINITE);

		// Received a signal to begin inverse transform processing?
		if (dwReturnValue == WAIT_OBJECT_0)
		{
			int type;				// Type of inverse transform to perform
			int frame_index;		// Index of output frame to produce
			int num_channels;		// Number of channels in the transform array
			uint8_t *output;		// Output frame buffer
			int pitch;				// Output frame pitch
			FRAME_INFO info;		// Format of the output frame
			int chroma_offset;		// Offset for the output chroma
			int precision;			// Source pixel bit depth

			// Lock access to the transform data
			if(decoder->interlaced_worker.lock_init)
			{
				EnterCriticalSection(&decoder->interlaced_worker.lock);
			}

			// Get the processing parameters (copied out of the shared mailbox)
			type = data->type;
			frame_index = data->frame;
			num_channels = data->num_channels;
			output = data->output;
			pitch = data->pitch;
			memcpy(&info, &data->info, sizeof(FRAME_INFO));
			chroma_offset = data->chroma_offset;
			precision = data->precision;

			// Unlock access to the transform data
			if(decoder->interlaced_worker.lock_init)
				LeaveCriticalSection(&decoder->interlaced_worker.lock);

			// Select the type of inverse transform to perform
			switch (type)
			{
			case THREAD_TRANSFORM_FRAME_YUV:
				//TODO: more to new _THREADED model
				TransformInverseFrameSectionToYUV(decoder, thread_index, frame_index, num_channels,
												  output, pitch, &info, chroma_offset, precision);
				break;

			case THREAD_TRANSFORM_FRAME_ROW16U:
				//TODO: more to new _THREADED model
				TransformInverseFrameSectionToRow16u(decoder, thread_index, frame_index, num_channels,
													 (PIXEL16U *)output, pitch, &info, chroma_offset, precision);
				break;

			default:
				assert(0);
				break;
			}

			// Signal that this thread is done
			SetEvent(decoder->interlaced_worker.done_event[thread_index]);
		}
		else
		{
			// Should have a condition that causes the thread to terminate
			assert(dwReturnValue == WAIT_OBJECT_0+1 || dwReturnValue == WAIT_ABANDONED);
			break;
		}
	}

	return 0;
}
#endif

// Compute the dimensions of the decoded frame for the requested output
// resolution by scaling the dimensions of the wavelet that supplies the output.
void GetDecodedFrameDimensions(TRANSFORM **transform_array, int num_channels, int frame_index,
							   int resolution, int *decoded_width_out, int *decoded_height_out)
{
	IMAGE *wavelet = NULL;
	int decoded_scale = 0;
	int wavelet_width;
int wavelet_height; int decoded_width; int decoded_height; // Get the decoding scale switch(resolution) { case DECODED_RESOLUTION_FULL_DEBAYER: case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER: #if DEBUG assert(AllTransformBandsValid(transform_array, num_channels, frame_index)); #endif decoded_scale = 2; wavelet = transform_array[0]->wavelet[0]; break; case DECODED_RESOLUTION_FULL: #if DEBUG assert(AllTransformBandsValid(transform_array, num_channels, frame_index)); #endif decoded_scale = 2; wavelet = transform_array[0]->wavelet[0]; break; case DECODED_RESOLUTION_HALF_NODEBAYER: case DECODED_RESOLUTION_HALF: #if DEBUG assert(AllLowpassBandsValid(transform_array, num_channels, frame_index)); #endif decoded_scale = 1; wavelet = transform_array[0]->wavelet[0]; break; case DECODED_RESOLUTION_QUARTER: decoded_scale = 1; wavelet = transform_array[0]->wavelet[3]; break; case DECODED_RESOLUTION_LOWPASS_ONLY: decoded_scale = 1; wavelet = transform_array[0]->wavelet[5]; // Is this an intra frame? if (wavelet == NULL) { wavelet = transform_array[0]->wavelet[2]; } break; default: assert(0); break; } // Compute the decoded frame dimensions assert(wavelet != NULL); wavelet_width = wavelet->width; wavelet_height = wavelet->height; decoded_width = decoded_scale * wavelet_width; decoded_height = decoded_scale * wavelet_height; if (decoded_width_out) { *decoded_width_out = decoded_width; } if (decoded_height_out) { *decoded_height_out = decoded_height; } } // Reconstruct Bayer format to the requested output format CODEC_ERROR UncompressedSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch) { CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif //CODEC_STATE *codec = &decoder->codec; //int num_channels = codec->num_channels; //int precision = codec->precision; int format = info->format; int width = info->width; int height = info->height; //int resolution = info->resolution; // 
Compute the number of bytes between each row of Bayer data //int bayer_pitch = 2 * width * sizeof(PIXEL16U); // Compute the pitch between pairs of rows of bayer data (one pair per image row) //int raw_bayer_pitch = 2 * bayer_pitch; //int chroma_offset = decoder->codec.chroma_offset; error = CODEC_ERROR_UNSUPPORTED_FORMAT; switch (format) { case DECODED_FORMAT_RGB24: case DECODED_FORMAT_RGB32: case DECODED_FORMAT_RG48: //DAN20090120 added not sure why they weren't here. case DECODED_FORMAT_RG64: //DAN20101207 added not sure why they weren't here. case DECODED_FORMAT_WP13: //DAN20090120 "" case DECODED_FORMAT_W13A: //DAN20101207 "" case DECODED_FORMAT_B64A: case DECODED_FORMAT_R210: case DECODED_FORMAT_DPX0: case DECODED_FORMAT_RG30: case DECODED_FORMAT_AR10: case DECODED_FORMAT_AB10: case DECODED_FORMAT_YR16: case DECODED_FORMAT_V210: case DECODED_FORMAT_YU64: case DECODED_FORMAT_YUYV: //? case DECODED_FORMAT_UYVY: //? case DECODED_FORMAT_R408: case DECODED_FORMAT_V408: error = CODEC_ERROR_OKAY; break; case DECODED_FORMAT_BYR2: case DECODED_FORMAT_BYR4: { //bool linearRestore = false; unsigned short *curve = NULL; if(decoder->BYR4LinearRestore && decoder->frame.format == DECODED_FORMAT_BYR4 && decoder->cfhddata.encode_curve_preset == 0) { curve = decoder->BYR4LinearRestore; } ConvertPackedToBYR2(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, output_buffer, output_pitch, curve); } decoder->uncompressed_chunk = 0; decoder->uncompressed_size = 0; return CODEC_ERROR_OKAY; break; case DECODED_FORMAT_BYR3: ConvertPackedToBYR3(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, output_buffer, output_pitch); decoder->uncompressed_chunk = 0; decoder->uncompressed_size = 0; return CODEC_ERROR_OKAY; break; } if(error) return error; //int row; //int column; // Need to allocate a scratch buffer for decoding the Bayer frame? 
if (decoder->RawBayer16 == NULL) { // Four Bayer data samples at each 2x2 quad in the grid int pixel_size = 4 * sizeof(PIXEL16U); int frame_size; const size_t alignment = 16; #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; #endif frame_size = width * height * pixel_size; #if _ALLOCATOR decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment); #else decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment); #endif assert(decoder->RawBayer16 != NULL); if (! (decoder->RawBayer16 != NULL)) { return CODEC_ERROR_MEMORY_ALLOC; } decoder->RawBayerSize = frame_size; if(decoder->RGBFilterBuffer16 == NULL) { int size = frame_size*3; if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format)) size = frame_size*4; #if _ALLOCATOR decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16); #else decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16); #endif assert(decoder->RGBFilterBuffer16 != NULL); if (! 
(decoder->RGBFilterBuffer16 != NULL)) { return CODEC_ERROR_MEMORY_ALLOC; } decoder->RGBFilterBufferSize = frame_size*3; } } // Using the RGBFilterBuffer16 as scratch space ConvertPackedToRawBayer16(width, height, decoder->uncompressed_chunk, decoder->uncompressed_size, decoder->RawBayer16, decoder->RGBFilterBuffer16, info->resolution); decoder->uncompressed_chunk = 0; decoder->uncompressed_size = 0; #if _THREADED //DemosaicRAW { WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; int inverted = false; uint8_t *output = output_buffer; int pitch = output_pitch; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif if (format == DECODED_FORMAT_RGB24) { format = DECODED_FORMAT_RGB24_INVERTED; inverted = true; } else if (format == DECODED_FORMAT_RGB32) { format = DECODED_FORMAT_RGB32_INVERTED; inverted = true; } // Have the output location and pitch been inverted? 
if (inverted && pitch > 0) { int height = info->height; if(info->resolution == DECODED_RESOLUTION_FULL_DEBAYER || info->resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) height *= 2; output += (height - 1) * pitch; // Start at the bottom row pitch = NEG(pitch); // Negate the pitch to go up } // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); } #else error = CODEC_ERROR_UNSUPPORTED_FORMAT; #endif return error; } // Reconstruct uncompressed v210 YUV format to the requested output format CODEC_ERROR UncompressedSampleFrameYUVToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch) { CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif //CODEC_STATE *codec = &decoder->codec; //int num_channels = codec->num_channels; //int precision = codec->precision; int format = info->format; int width = info->width; int height = info->height; int resolution = info->resolution; // Compute the number of bytes between each row of Bayer data //int bayer_pitch = 2 * width * sizeof(PIXEL16U); // Compute the pitch between pairs of rows of bayer data (one pair per image row) //int raw_bayer_pitch = 2 * bayer_pitch; //int chroma_offset = decoder->codec.chroma_offset; error = CODEC_ERROR_UNSUPPORTED_FORMAT; if(format == DECODED_FORMAT_V210 && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false) { int smallest_Stride = output_pitch; int unc_Stride = decoder->uncompressed_size / height; if(unc_Stride < smallest_Stride) 
smallest_Stride = unc_Stride; if(unc_Stride == output_pitch) memcpy(output_buffer, decoder->uncompressed_chunk, decoder->uncompressed_size); else { int y; uint8_t *src = (uint8_t *)decoder->uncompressed_chunk; uint8_t *dst = (uint8_t *)output_buffer; for(y=0; y<height; y++) { memcpy(dst, src, smallest_Stride); src += unc_Stride; dst += output_pitch; } } decoder->uncompressed_chunk = 0; decoder->uncompressed_size = 0; return CODEC_ERROR_OKAY; } if((format == DECODED_FORMAT_YUYV || format == DECODED_FORMAT_UYVY) && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false) { int smallest_Stride = output_pitch; int unc_Stride = decoder->uncompressed_size / height; if(unc_Stride < smallest_Stride) smallest_Stride = unc_Stride; { int y; uint8_t *src = (uint8_t *)decoder->uncompressed_chunk; uint8_t *dst = (uint8_t *)output_buffer; for(y=0; y<height; y++) { uint32_t *input_ptr = (uint32_t *)src; int pos = 0; int column=0,length = width; length -= length % 6; //DAN03252004 -- fix a memory overflow. 
for (column=0; column < length; column += 6) { uint32_t yuv; int y; int u; int v; // Read the first word yuv = *(input_ptr++); u = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK; y = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK; v = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK; // Expand the pixels to sixteen bits u <<= 6; y <<= 6; v <<= 6; dst[pos++] = SATURATE_16U(y)>>8; dst[pos++] = SATURATE_16U(u)>>8; // Read the second word yuv = *(input_ptr++); y = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK; y <<= 6; dst[pos++] = SATURATE_16U(y)>>8; dst[pos++] = SATURATE_16U(v)>>8; u = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK; y = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK; u <<= 6; y <<= 6; dst[pos++] = SATURATE_16U(y)>>8; dst[pos++] = SATURATE_16U(u)>>8; // Read the third word yuv = *(input_ptr++); v = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK; y = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK; v <<= 6; y <<= 6; dst[pos++] = SATURATE_16U(y)>>8; dst[pos++] = SATURATE_16U(v)>>8; u = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK; u <<= 6; // Read the fourth word yuv = *(input_ptr++); y = (yuv >> V210_VALUE1_SHIFT) & V210_VALUE_MASK; y <<= 6; dst[pos++] = SATURATE_16U(y)>>8; dst[pos++] = SATURATE_16U(u)>>8; v = (yuv >> V210_VALUE2_SHIFT) & V210_VALUE_MASK; y = (yuv >> V210_VALUE3_SHIFT) & V210_VALUE_MASK; v <<= 6; y <<= 6; dst[pos++] = SATURATE_16U(y)>>8; dst[pos++] = SATURATE_16U(v)>>8; } if(format == DECODED_FORMAT_UYVY) { for (column=0; column < pos; column += 2) { int t = dst[column]; dst[column] = dst[column+1]; dst[column+1] = t; } } src += unc_Stride; dst += output_pitch; } } decoder->uncompressed_chunk = 0; decoder->uncompressed_size = 0; return CODEC_ERROR_OKAY; } { // Expand YUV at the target resolution, and use the ActiveMetadata engine. // Need to allocate a scratch buffer for decoding the frame? 
if (decoder->RawBayer16 == NULL || decoder->RawBayerSize < width * 64) //RawBayer used as a scratch buffer { //int pixel_size = 2 * sizeof(PIXEL16U); const size_t alignment = 16; #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; #endif int orig_width = width; if(resolution == DECODED_RESOLUTION_HALF) orig_width *= 2; if(resolution == DECODED_RESOLUTION_QUARTER) orig_width *= 4; if(decoder->RawBayer16) { #if _ALLOCATOR FreeAligned(allocator, decoder->RawBayer16); decoder->RawBayer16 = NULL; decoder->RawBayerSize = 0; #else MEMORY_ALIGNED_FREE(decoder->RawBayer16); decoder->RawBayer16 = NULL; decoder->RawBayerSize = 0; #endif } #if _ALLOCATOR decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, orig_width * 64, alignment); #else decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(orig_width * 64, alignment); #endif assert(decoder->RawBayer16 != NULL); if (! (decoder->RawBayer16 != NULL)) { return CODEC_ERROR_MEMORY_ALLOC; } decoder->RawBayerSize = orig_width * 64; } } // unpack source original YUV into YU64? 
if(decoder->RawBayer16) { //uint8_t *src = (uint8_t *)decoder->uncompressed_chunk; //uint8_t *dst = (uint8_t *)output_buffer; #if _THREADED { WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output_buffer; mailbox->pitch = output_pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT_UNCOMPRESSED; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); } #else { int orig_width = width; int orig_height = height; int row,lines = 1; int start,end; if(resolution == DECODED_RESOLUTION_HALF) { orig_width *= 2; orig_height *= 2; lines = 2; } if(resolution == DECODED_RESOLUTION_QUARTER) { orig_width *= 4; orig_height *= 4; lines = 4; } start = 0; end = height; if(format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24) { start = height-1; end = -1; } for (row = start; row != end; end > start ? 
row++ : row--) { int whitebitdepth = 16; int flags = 0; uint8_t *planar_output[3]; int planar_pitch[3]; ROI roi; PIXEL16U *y_row_ptr; PIXEL16U *u_row_ptr; PIXEL16U *v_row_ptr; PIXEL16U *scanline = (PIXEL16U *)decoder->RawBayer16; PIXEL16U *scanline2 = scanline + orig_width * 8; unsigned short *sptr; int i,unc_Stride = decoder->uncompressed_size / orig_height; y_row_ptr = (PIXEL16U *)scanline; u_row_ptr = y_row_ptr + orig_width; v_row_ptr = u_row_ptr + orig_width/2; for(i=0; i<lines; i++) { src = (uint8_t *)decoder->uncompressed_chunk; src += row * unc_Stride; // Repack the row of 10-bit pixels into 16-bit pixels ConvertV210RowToYUV16((uint8_t *)src, y_row_ptr, u_row_ptr, v_row_ptr, orig_width, scanline2); // Advance to the next rows in the input and output images y_row_ptr += orig_width*2; u_row_ptr = y_row_ptr + orig_width; v_row_ptr = u_row_ptr + orig_width/2; } y_row_ptr = (PIXEL16U *)scanline; u_row_ptr = y_row_ptr + width; v_row_ptr = u_row_ptr + width/2; if(lines == 2) { for(i=0; i<width*2;i++) y_row_ptr[i] = (y_row_ptr[i*2] + y_row_ptr[i*2+1] + y_row_ptr[orig_width*2+i*2] + y_row_ptr[orig_width*2+i*2+1]) >> 2; } else if(lines == 4) { for(i=0; i<width*2;i++) y_row_ptr[i] = (y_row_ptr[i*4] + y_row_ptr[i*4+2] + y_row_ptr[orig_width*2*2+i*4] + y_row_ptr[orig_width*2*2+i*4+2]) >> 2; } roi.width = width; roi.height = 1; planar_output[0] = (uint8_t *)y_row_ptr; planar_output[1] = (uint8_t *)v_row_ptr; planar_output[2] = (uint8_t *)u_row_ptr; planar_pitch[0] = 0; planar_pitch[1] = 0; planar_pitch[2] = 0; if(decoder->apply_color_active_metadata) { ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi, (unsigned char *)scanline2, width, output_pitch, COLOR_FORMAT_RGB_8PIXEL_PLANAR, decoder->frame.colorspace, &whitebitdepth, &flags); sptr = scanline2; sptr = ApplyActiveMetaData(decoder, width, 1, row, scanline2, scanline, info->format, &whitebitdepth, &flags); } else { ConvertYUVRow16uToBGRA64(planar_output, planar_pitch, roi, (unsigned char *)scanline2, width, 
output_pitch, COLOR_FORMAT_WP13, decoder->frame.colorspace, &whitebitdepth, &flags); sptr = scanline2; } ConvertLinesToOutput(decoder, width, 1, row, sptr, dst, output_pitch, format, whitebitdepth, flags); dst += output_pitch; } } #endif } error = CODEC_ERROR_OKAY; return error; } // Reconstruct uncompressed DPX0 RGB format to the requested output format CODEC_ERROR UncompressedSampleFrameRGBToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch) { CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif //CODEC_STATE *codec = &decoder->codec; //int num_channels = codec->num_channels; //int precision = codec->precision; int format = info->format; //int output_format = info->output_format; // used by image_dev_only decodes int width = info->width; int height = info->height; int resolution = info->resolution; //int chroma_offset = decoder->codec.chroma_offset; error = CODEC_ERROR_UNSUPPORTED_FORMAT; if( (format == DECODED_FORMAT_DPX0 || format == DECODED_FORMAT_AR10 || format == DECODED_FORMAT_AB10 || format == DECODED_FORMAT_RG30 || format == DECODED_FORMAT_R210) && resolution == DECODED_RESOLUTION_FULL && decoder->use_active_metadata_decoder == false) { int smallest_Stride = output_pitch; int unc_Stride = decoder->uncompressed_size / height; if(unc_Stride < smallest_Stride) smallest_Stride = unc_Stride; if(format != DECODED_FORMAT_DPX0) { int unc_Stride = decoder->uncompressed_size / height; ConvertDPX0ToRGB10((uint8_t *)decoder->uncompressed_chunk, unc_Stride, width, height, format); } if(unc_Stride == output_pitch) memcpy(output_buffer, decoder->uncompressed_chunk, decoder->uncompressed_size); else { int y; uint8_t *src = (uint8_t *)decoder->uncompressed_chunk; uint8_t *dst = (uint8_t *)output_buffer; for(y=0; y<height; y++) { memcpy(dst, src, smallest_Stride); src += unc_Stride; dst += output_pitch; } } decoder->uncompressed_chunk = 0; decoder->uncompressed_size = 0; return 
CODEC_ERROR_OKAY; } { // Expand YUV at the target resolution, and use the ActiveMetadata engine. // Need to allocate a scratch buffer for decoding the frame? if (decoder->RawBayer16 == NULL || decoder->RawBayerSize < width * 64) //RawBayer used as a scratch buffer { //int pixel_size = 2 * sizeof(PIXEL16U); const size_t alignment = 16; #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; #endif int orig_width = width; if(resolution == DECODED_RESOLUTION_HALF) orig_width *= 2; if(resolution == DECODED_RESOLUTION_QUARTER) orig_width *= 4; if(decoder->RawBayer16) { #if _ALLOCATOR FreeAligned(allocator, decoder->RawBayer16); decoder->RawBayer16 = NULL; decoder->RawBayerSize = 0; #else MEMORY_ALIGNED_FREE(decoder->RawBayer16); decoder->RawBayer16 = NULL; decoder->RawBayerSize = 0; #endif } #if _ALLOCATOR decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, orig_width * 64, alignment); #else decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(orig_width * 64, alignment); #endif assert(decoder->RawBayer16 != NULL); if (! (decoder->RawBayer16 != NULL)) { return CODEC_ERROR_MEMORY_ALLOC; } decoder->RawBayerSize = orig_width * 64; } } // unpack source original YUV into YU64? 
if(decoder->RawBayer16) { //uint8_t *src = (uint8_t *)decoder->uncompressed_chunk; //uint8_t *dst = (uint8_t *)output_buffer; #if _THREADED { WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output_buffer; mailbox->pitch = output_pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT_UNCOMPRESSED; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); } #else { int orig_width = width; int orig_height = height; int row,lines = 1; int start,end; if(resolution == DECODED_RESOLUTION_HALF) { orig_width *= 2; orig_height *= 2; lines = 2; } if(resolution == DECODED_RESOLUTION_QUARTER) { orig_width *= 4; orig_height *= 4; lines = 4; } start = 0; end = height; if(format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24) // Can this work, all the code below expects 10-bit { start = height-1; end = -1; } for (row = start; row != end; end > start ? 
row++ : row--) { int whitebitdepth = 16; int flags = 0; uint8_t *planar_output[3]; int planar_pitch[3]; ROI roi; PIXEL16U *y_row_ptr; PIXEL16U *u_row_ptr; PIXEL16U *v_row_ptr; PIXEL16U *scanline = (PIXEL16U *)decoder->RawBayer16; PIXEL16U *scanline2 = scanline + orig_width * 8; unsigned short *sptr; int i,unc_Stride = decoder->uncompressed_size / orig_height; whitebitdepth = 13; if(decoder->apply_color_active_metadata) flags = ACTIVEMETADATA_SRC_8PIXEL_PLANAR; else flags = 0; roi.width = width; roi.height = 1; if(lines == 1) { uint16_t *sptr; uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk; PIXEL16U *ptr = (PIXEL16U *)scanline; lptr += row * (unc_Stride>>2); sptr = (uint16_t *)lptr; for(i=0; i<width;i+=8) { int val,r,g,b; if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR) { if(decoder->image_dev_only) // HACK, currently assuming RG48 input data. { for(j=0; j<8; j++) { ptr[j] = sptr[0] >> 3; ptr[j+8] = sptr[1] >> 3; ptr[j+16] = sptr[2] >> 3; sptr += 3; } } else { for(j=0; j<8; j++) { val = SwapInt32(*lptr++); val >>= 2; b = (val & 0x3ff) << 3; val >>= 10; g = (val & 0x3ff) << 3; val >>= 10; r = (val & 0x3ff) << 3; ptr[j] = r; ptr[j+8] = g; ptr[j+16] = b; } } } else { if(decoder->image_dev_only) // HACK, currently assuming RG48 input data. 
{
	for(j=0; j<8*3; j+=3)
	{
		ptr[j] = sptr[0] >> 3;
		ptr[j+1] = sptr[1] >> 3;
		ptr[j+2] = sptr[2] >> 3;
		sptr += 3;
	}
}
else
{
	// Unpack big-endian 10-bit DPX words into interleaved 13-bit RGB
	for(j=0; j<8*3; j+=3)
	{
		val = SwapInt32(*lptr++);
		val >>= 2;
		b = (val & 0x3ff) << 3;
		val >>= 10;
		g = (val & 0x3ff) << 3;
		val >>= 10;
		r = (val & 0x3ff) << 3;

		ptr[j] = r;
		ptr[j+1] = g;
		ptr[j+2] = b;
	}
}
}
ptr += 24;
}
}
else if(lines == 2)
{
	// BUGFIX: was (uint32_t)decoder->uncompressed_chunk — casting the pointer
	// to a 32-bit integer truncates it on 64-bit builds; the lines == 1 case
	// above uses the correct (uint32_t *) pointer cast.
	uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk;
	PIXEL16U *ptr = (PIXEL16U *)scanline;
	lptr += row * (unc_Stride>>2) * lines;
	for(i=0; i<width;i+=8)
	{
		int val,r,g,b,r2,g2,b2,r3,g3,b3,r4,g4,b4;
		// Average a 2x2 block of 10-bit pixels for half-resolution output
		for(j=0; j<8; j++)
		{
			val = SwapInt32(lptr[0]);
			val >>= 2; b = (val & 0x3ff) << 3;
			val >>= 10; g = (val & 0x3ff) << 3;
			val >>= 10; r = (val & 0x3ff) << 3;
			val = SwapInt32(lptr[1]);
			val >>= 2; b += (val & 0x3ff) << 3;
			val >>= 10; g += (val & 0x3ff) << 3;
			val >>= 10; r += (val & 0x3ff) << 3;
			val = SwapInt32(lptr[unc_Stride>>2]);
			val >>= 2; b += (val & 0x3ff) << 3;
			val >>= 10; g += (val & 0x3ff) << 3;
			val >>= 10; r += (val & 0x3ff) << 3;
			val = SwapInt32(lptr[(unc_Stride>>2)+1]);
			val >>= 2; b += (val & 0x3ff) << 3;
			val >>= 10; g += (val & 0x3ff) << 3;
			val >>= 10; r += (val & 0x3ff) << 3;

			if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR)
			{
				ptr[j] = r>>2;
				ptr[j+8] = g>>2;
				ptr[j+16] = b>>2;
			}
			else
			{
				ptr[j*3] = r>>2;
				ptr[j*3+1] = g>>2;
				ptr[j*3+2] = b>>2;
			}
			lptr += lines;
		}
		ptr += 24;
	}
}
else if(lines == 4)
{
	// BUGFIX: pointer cast (was an integer (uint32_t) cast — see note above
	// in the lines == 2 case).
	uint32_t j,*lptr = (uint32_t *)decoder->uncompressed_chunk;
	PIXEL16U *ptr = (PIXEL16U *)scanline;
	lptr += row * (unc_Stride>>2) * lines;
	for(i=0; i<width;i+=8)
	{
		int val,r,g,b,r2,g2,b2,r3,g3,b3,r4,g4,b4;
		// Average four samples spaced two columns / two rows apart for
		// quarter-resolution output
		for(j=0; j<8; j++)
		{
			val = SwapInt32(lptr[0]);
			val >>= 2; b = (val & 0x3ff) << 3;
			val >>= 10; g = (val & 0x3ff) << 3;
			val >>= 10; r = (val & 0x3ff) << 3;
			val = SwapInt32(lptr[2]);
			val >>= 2; b += (val & 0x3ff) << 3;
			val >>= 10; g += (val & 0x3ff) << 3;
			val >>= 10; r += (val & 0x3ff) << 3;
			val = SwapInt32(lptr[unc_Stride>>1]);
			val >>= 2; b += (val & 0x3ff) << 3;
			val >>= 10; g += (val & 0x3ff) << 3;
			val >>= 10; r += (val &
0x3ff) << 3; val = SwapInt32(lptr[(unc_Stride>>1)+2]); val >>= 2; b += (val & 0x3ff) << 3; val >>= 10; g += (val & 0x3ff) << 3; val >>= 10; r += (val & 0x3ff) << 3; if(flags == ACTIVEMETADATA_SRC_8PIXEL_PLANAR) { ptr[j] = r>>2; ptr[j+8] = g>>2; ptr[j+16] = b>>2; } else { ptr[j*3] = r>>2; ptr[j*3+1] = g>>2; ptr[j*3+2] = b>>2; } lptr += lines; } ptr += 24; } } sptr = scanline; if(decoder->apply_color_active_metadata) sptr = ApplyActiveMetaData(decoder, width, 1, row, scanline, scanline2, info->format, &whitebitdepth, &flags); ConvertLinesToOutput(decoder, width, 1, row, sptr, dst, output_pitch, format, whitebitdepth, flags); dst += output_pitch; } } #endif } error = CODEC_ERROR_OKAY; return error; } // Reconstruct Bayer format to the requested output format CODEC_ERROR ReconstructSampleFrameBayerToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output, int pitch) { CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif //CODEC_STATE *codec = &decoder->codec; //int num_channels = codec->num_channels; //int progressive = codec->progressive; //int precision = codec->precision; //TRANSFORM **transform_array = decoder->transform; int resolution = info->resolution; //int format = info->format; // Switch to the subroutine for the requested resolution switch (resolution) { case DECODED_RESOLUTION_FULL_DEBAYER: case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER: //error = CODEC_ERROR_UNSUPPORTED_FORMAT; return ReconstructSampleFrameDeBayerFullToBuffer(decoder, info, frame, output, pitch); break; case DECODED_RESOLUTION_FULL: //return ReconstructSampleFrameBayerFullToBuffer(decoder, info, frame, output, pitch); error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; //case DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER: case DECODED_RESOLUTION_HALF_NODEBAYER: case DECODED_RESOLUTION_HALF: //return ReconstructSampleFrameBayerHalfToBuffer(decoder, info, frame, output, pitch); error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; case 
DECODED_RESOLUTION_QUARTER: //return ReconstructSampleFrameBayerQuarterToBuffer(decoder, frame, output, pitch); error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; case DECODED_RESOLUTION_LOWPASS_ONLY: error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; default: // The decoded resolution is not supported by this routine assert(0); error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; } return error; } // Reconstruct Bayer encoded data to full resolution CODEC_ERROR ReconstructSampleFrameBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch) { CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif CODEC_STATE *codec = &decoder->codec; int num_channels = codec->num_channels; //int progressive = codec->progressive; //int precision = codec->precision; //TRANSFORM **transform_array = decoder->transform; //int decoded_width = 0; //int decoded_height = 0; //int resolution = info->resolution; int format = info->format; //int width = info->width; //int height = info->height; // Compute the number of bytes between each row of Bayer data //int bayer_pitch = 2 * width * sizeof(PIXEL16U); // Compute the pitch between pairs of rows of bayer data (one pair per image row) //int raw_bayer_pitch = 2 * bayer_pitch; //int chroma_offset = decoder->codec.chroma_offset; //int row; //int column; // Need to allocate a scratch buffer for decoding the Bayer frame? 
if (decoder->RawBayer16 == NULL) { TRANSFORM **transform_array = decoder->transform; int decoded_width = 0; int decoded_height = 0; int resolution = info->resolution; //int format = info->format; // Four Bayer data samples at each 2x2 quad in the grid int pixel_size = 4 * sizeof(PIXEL16U); int frame_size; const size_t alignment = 16; #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; #endif // Compute the decoded width and height for the specified resolution GetDecodedFrameDimensions(transform_array, num_channels, frame, resolution, &decoded_width, &decoded_height); assert(decoded_width > 0 && decoded_height > 0); if (! (decoded_width > 0 && decoded_height > 0)) { return CODEC_ERROR_UNSUPPORTED_FORMAT; } frame_size = decoded_width * decoded_height * pixel_size; #if _ALLOCATOR decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment); #else decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment); #endif assert(decoder->RawBayer16 != NULL); if (! (decoder->RawBayer16 != NULL)) { return CODEC_ERROR_MEMORY_ALLOC; } decoder->RawBayerSize = frame_size; //#ifdef SHARPENING if(decoder->RGBFilterBuffer16 == NULL) { int size = frame_size*3; if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format)) size = frame_size*4; #if _ALLOCATOR decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16); #else decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16); #endif assert(decoder->RGBFilterBuffer16 != NULL); if (! 
	(decoder->RGBFilterBuffer16 != NULL)) {
		return CODEC_ERROR_MEMORY_ALLOC;
	}
	decoder->RGBFilterBufferSize = frame_size*3;
	}
	//#endif
	}

	//TODO: Need to add more output formats to this routine
	switch (format)
	{
	case DECODED_FORMAT_RGB32:
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		// Decode the last transform to rows of Bayer data (one row per channel)
		// TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
		//	decoder->RawBayer16, raw_bayer_pitch, info,
		//	&decoder->scratch, chroma_offset, precision);
		// ConvertPackedBayerToRGB32(decoder->RawBayer16, info, bayer_pitch,
		//	output_buffer, output_pitch,
		//	width, height);
		break;

	case DECODED_FORMAT_RGB24:
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		// Decode the last transform to rows of Bayer data (one row per channel)
		//TransformInverseSpatialToRow16u(transform_array, frame, num_channels,
		//	decoder->RawBayer16, raw_bayer_pitch, info,
		//	&decoder->scratch, chroma_offset, precision);
		//ConvertPackedBayerToRGB24(decoder->RawBayer16, info, bayer_pitch,
		//	output_buffer, output_pitch,
		//	width, height);
		break;

	default:
		error = CODEC_ERROR_UNSUPPORTED_FORMAT;
		break;
	}

	return error;
}

// Reconstruct Bayer encoded data and demosaic to full resolution
//
// Decodes the last transform level of every channel into a packed
// row-per-channel Bayer scratch buffer (decoder->RawBayer16), then demosaics
// that buffer into the caller's output frame via the worker thread pool
// (only when built with _THREADED; otherwise returns
// CODEC_ERROR_UNSUPPORTED_FORMAT).
//
// Parameters:
//   decoder       - decoder state; owns the scratch buffers and thread pool
//   info          - requested output dimensions, format, and resolution
//   frame         - index of the frame within the group of frames
//   output_buffer - caller-supplied destination pixel buffer
//   output_pitch  - bytes between output rows (sign selects row direction)
//
// Returns CODEC_ERROR_OKAY on success, CODEC_ERROR_UNSUPPORTED_FORMAT for
// output formats not listed below, and CODEC_ERROR_MEMORY_ALLOC when a
// scratch buffer cannot be allocated.
CODEC_ERROR ReconstructSampleFrameDeBayerFullToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch)
{
	CODEC_ERROR error = CODEC_ERROR_OKAY;

#if (1 && DEBUG)
	FILE *logfile = decoder->logfile;
#endif

	CODEC_STATE *codec = &decoder->codec;
	int num_channels = codec->num_channels;
	//int progressive = codec->progressive;
	int precision = codec->precision;
	//TRANSFORM **transform_array = decoder->transform;
	//int decoded_width = 0;
	//int decoded_height = 0;
	//int resolution = info->resolution;
	int format = info->format;
	int width = info->width;
	//int height = info->height;

	// Compute the number of bytes between each row of Bayer data
	int bayer_pitch = 2 * width * sizeof(PIXEL16U);

	// Compute the pitch between pairs of rows of bayer data (one pair per image row)
	//int raw_bayer_pitch = 2 * bayer_pitch;

	int chroma_offset = decoder->codec.chroma_offset;

	// Assume the output format is unsupported unless it appears in the list below
	error = CODEC_ERROR_UNSUPPORTED_FORMAT;
	switch (format)
	{
	case DECODED_FORMAT_RGB24:
	case DECODED_FORMAT_RGB32:
	case DECODED_FORMAT_RG48:	//DAN20090120 added not sure why they weren't here.
	case DECODED_FORMAT_WP13:	//DAN20090120 ""
	case DECODED_FORMAT_B64A:
	case DECODED_FORMAT_R210:
	case DECODED_FORMAT_DPX0:
	case DECODED_FORMAT_RG30:
	case DECODED_FORMAT_AR10:
	case DECODED_FORMAT_AB10:
	case DECODED_FORMAT_YR16:
	case DECODED_FORMAT_V210:
	case DECODED_FORMAT_YU64:
		error = CODEC_ERROR_OKAY;
		break;
	}

	if(error) return error;

	//int row;
	//int column;

	// Need to allocate a scratch buffer for decoding the Bayer frame?
	if (decoder->RawBayer16 == NULL)
	{
		TRANSFORM **transform_array = decoder->transform;
		int decoded_width = 0;
		int decoded_height = 0;
		int resolution = info->resolution;
		//int format = info->format;

		// Four Bayer data samples at each 2x2 quad in the grid
		int pixel_size = 4 * sizeof(PIXEL16U);
		int frame_size;
		const size_t alignment = 16;

#if _ALLOCATOR
		ALLOCATOR *allocator = decoder->allocator;
#endif

		// Compute the decoded width and height for the specified resolution
		GetDecodedFrameDimensions(transform_array, num_channels, frame, resolution,
			&decoded_width, &decoded_height);
		assert(decoded_width > 0 && decoded_height > 0);
		if (! (decoded_width > 0 && decoded_height > 0)) {
			return CODEC_ERROR_UNSUPPORTED_FORMAT;
		}

		frame_size = decoded_width * decoded_height * pixel_size;

#if _ALLOCATOR
		decoder->RawBayer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)frame_size, alignment);
#else
		decoder->RawBayer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, alignment);
#endif
		assert(decoder->RawBayer16 != NULL);
		if (! (decoder->RawBayer16 != NULL)) {
			return CODEC_ERROR_MEMORY_ALLOC;
		}
		decoder->RawBayerSize = frame_size;

		//#ifdef SHARPENING
		// Also allocate the RGB filter buffer used by the output/filter stage
		if(decoder->RGBFilterBuffer16 == NULL)
		{
			// Three planes (RGB), or four when the encoded sample carries alpha
			int size = frame_size*3;
			if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
				size = frame_size*4;
#if _ALLOCATOR
			decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, (size_t)size, 16);
#else
			decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(size, 16);
#endif
			assert(decoder->RGBFilterBuffer16 != NULL);
			if (! (decoder->RGBFilterBuffer16 != NULL)) {
				return CODEC_ERROR_MEMORY_ALLOC;
			}
			// NOTE(review): the recorded size is frame_size*3 even when the
			// allocation above was frame_size*4 (alpha case) -- presumably any
			// later capacity check treats this as a minimum; confirm against
			// the code that reallocates RGBFilterBuffer16.
			decoder->RGBFilterBufferSize = frame_size*3;
		}
		//#endif
	}

#if _THREADED
	// Decode the last transform into rows of Bayer data (one row per channel)
	TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels,
		(uint8_t *)decoder->RawBayer16, bayer_pitch*sizeof(PIXEL), info,
		chroma_offset, precision);

	//DemosaicRAW
	{
		WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;
		int inverted = false;
		uint8_t *output = output_buffer;
		int pitch = output_pitch;

#if _DELAY_THREAD_START
		// Lazily create the worker thread pool on first use
		if(decoder->worker_thread.pool.thread_count == 0)
		{
			CreateLock(&decoder->worker_thread.lock);
			// Initialize the pool of transform worker threads
			ThreadPoolCreate(&decoder->worker_thread.pool,
				decoder->thread_cntrl.capabilities >> 16/*cpus*/,
				WorkerThreadProc,
				decoder);
		}
#endif

		// RGB24/RGB32 outputs are stored bottom-up; switch to the inverted
		// variants and remember to flip the output pointer and pitch below.
		if (format == DECODED_FORMAT_RGB24) {
			format = DECODED_FORMAT_RGB24_INVERTED;
			inverted = true;
		}
		else if (format == DECODED_FORMAT_RGB32) {
			format = DECODED_FORMAT_RGB32_INVERTED;
			inverted = true;
		}

		// Have the output location and pitch been inverted?
if (inverted && pitch > 0) { int height = info->height; if(info->resolution == DECODED_RESOLUTION_FULL_DEBAYER || info->resolution == DECODED_RESOLUTION_HALF_HORIZONTAL_DEBAYER) height *= 2; output += (height - 1) * pitch; // Start at the bottom row pitch = NEG(pitch); // Negate the pitch to go up } // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); } #else error = CODEC_ERROR_UNSUPPORTED_FORMAT; #endif return error; } // Reconstruct Bayer encoded data to half resolution CODEC_ERROR ReconstructSampleFrameBayerHalfToBuffer(DECODER *decoder, FRAME_INFO *info, int frame, uint8_t *output_buffer, int output_pitch) { CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif //CODEC_STATE *codec = &decoder->codec; //int num_channels = codec->num_channels; //int progressive = codec->progressive; //int precision = codec->precision; TRANSFORM **transform_array = decoder->transform; int frame_width = info->width; int frame_height = info->height; //int resolution = info->resolution; int format = info->format; //IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS]; PIXEL16U *g1_plane; PIXEL16U *rg_plane; PIXEL16U *bg_plane; PIXEL16U *g2_plane; int g1_pitch; int rg_pitch; int bg_pitch; int g2_pitch; #if 0 int channel; for (channel = 0; channel < num_channels; channel++) { lowpass_images[channel] = transform_array[channel]->wavelet[frame]; #if (0 && DEBUG) if (logfile) { char label[_MAX_PATH]; char *format = decoded_format_string[info->format]; sprintf(label, "Output, channel: %d, format: %s", channel, 
format); DumpImageStatistics(label, lowpass_images[channel], logfile); } #endif } #endif // Get the lowpass bands in the wavelet coresponding to the output frame g1_plane = (PIXEL16U *)transform_array[0]->wavelet[frame]->band[0]; rg_plane = (PIXEL16U *)transform_array[1]->wavelet[frame]->band[0]; bg_plane = (PIXEL16U *)transform_array[2]->wavelet[frame]->band[0]; if(transform_array[3]->wavelet[frame]) //half res don't decode g1-g2 //HACK { g2_plane = (PIXEL16U *)transform_array[3]->wavelet[frame]->band[0]; g2_pitch = transform_array[3]->wavelet[frame]->pitch; } else { g2_plane = NULL; g2_pitch = 0; } // Get the pitch of each plane g1_pitch = transform_array[0]->wavelet[frame]->pitch; rg_pitch = transform_array[1]->wavelet[frame]->pitch; bg_pitch = transform_array[2]->wavelet[frame]->pitch; switch (format) { case DECODED_FORMAT_RGB32: ConvertPlanarBayerToRGB32(g1_plane, g1_pitch, rg_plane, rg_pitch, bg_plane, bg_pitch, g2_plane, g2_pitch, output_buffer, output_pitch, frame_width, frame_height); break; default: error = CODEC_ERROR_UNSUPPORTED_FORMAT; break; } return error; } // Reconstruct Bayer encoded data to quarter resolution CODEC_ERROR ReconstructSampleFrameBayerQuarterToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch) { CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif //FRAME_INFO *info = &decoder->frame; //CODEC_STATE *codec = &decoder->codec; //int num_channels = codec->num_channels; //int progressive = codec->progressive; //int precision = codec->precision; //TRANSFORM **transform_array = decoder->transform; //int decoded_width = 0; //int decoded_height = 0; //int resolution = info->resolution; //int format = info->format; //TODO: Need to finish this routine assert(0); return error; } // Reconstruct the original YUV 4:2:2 encoded format to the requested output format CODEC_ERROR ReconstructSampleFrameYUV422ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch) { CODEC_ERROR error = 
CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif FRAME_INFO *info = &decoder->frame; CODEC_STATE *codec = &decoder->codec; int num_channels = codec->num_channels; int progressive = codec->progressive; int precision = codec->precision; TRANSFORM **transform_array = decoder->transform; //int decoded_width = 0; //int decoded_height = 0; int resolution = info->resolution; int format = info->format; //int color_space = decoder->frame.colorspace; //TODO: Eliminate use of the chroma offset int chroma_offset = decoder->codec.chroma_offset; #if _THREADED // Type of threaded inverse transform //int type; #endif #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; #endif if (decoder == NULL) { return CODEC_ERROR_INVALID_ARGUMENT; } //TODO: Split this routine into subroutines for progressive versus interlaced video //TODO: Split progressive and interlaced routines into subroutines for each resolution if(resolution == DECODED_RESOLUTION_HALF) { bool inverted = false; FRAME_INFO info2; memcpy(&info2, info, sizeof(FRAME_INFO)); format = info2.format; if (format == DECODED_FORMAT_RGB24) { format = DECODED_FORMAT_RGB24_INVERTED; info2.format = format; inverted = true; } else if (format == DECODED_FORMAT_RGB32) { format = DECODED_FORMAT_RGB32_INVERTED; info2.format = format; inverted = true; } #if 1 // Have the output location and pitch been inverted? 
if (inverted && pitch > 0) { int height = info->height; output += (height - 1) * pitch; // Start at the bottom row pitch = NEG(pitch); // Negate the pitch to go up } #endif if(decoder->use_active_metadata_decoder) { #if _THREADED WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; mailbox->framenum = frame; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; decoder->RGBFilterBufferPhase = 1; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); decoder->RGBFilterBufferPhase = 0; return CODEC_ERROR_OKAY; #endif } else { int precision = codec->precision; TRANSFORM **transform_array = decoder->transform; int channel; IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS]; CODEC_STATE *codec = &decoder->codec; int num_channels = codec->num_channels; for (channel = 0; channel < num_channels; channel++) { lowpass_images[channel] = transform_array[channel]->wavelet[frame]; } CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, &info2, chroma_offset, precision, decoder->codec.encoded_format, decoder->frame.white_point); } return CODEC_ERROR_OKAY; } // Was the video source interlaced or progressive? 
if (progressive) { // The video source was progressive (the first transform was a spatial transform) if (resolution == DECODED_RESOLUTION_FULL || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) { FRAME_INFO info2; int format; bool inverted = false; int precision = codec->precision; memcpy(&info2, info, sizeof(FRAME_INFO)); format = info2.format; if (format == DECODED_FORMAT_RGB24) { format = DECODED_FORMAT_RGB24_INVERTED; info2.format = format; inverted = true; } else if (format == DECODED_FORMAT_RGB32) { format = DECODED_FORMAT_RGB32_INVERTED; info2.format = format; inverted = true; } #if 1 // Have the output location and pitch been inverted? if (inverted && pitch > 0) { int height = info->height; output += (height - 1) * pitch; // Start at the bottom row pitch = NEG(pitch); // Negate the pitch to go up } #endif /*if(decoder->use_active_metadata_decoder) { switch (format & 0x7ffffff) { case DECODED_FORMAT_RGB24: // Output buffer is too small to decode into for case DECODED_FORMAT_YUYV: // computing the active metadata. 
case DECODED_FORMAT_UYVY: return CODEC_ERROR_OKAY; break; } }*/ switch (format & 0x7ffffff) { case DECODED_FORMAT_RGB24: // Output buffer is too small to decode into for if(decoder->use_active_metadata_decoder) { #if _THREADED TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sThruActiveMetadata); return CODEC_ERROR_OKAY; #endif } else { #if _THREADED TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sYUVtoRGB); return CODEC_ERROR_OKAY; #endif } break; case DECODED_FORMAT_YUYV: case DECODED_FORMAT_UYVY: if(decoder->use_active_metadata_decoder) { #if _THREADED TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sThruActiveMetadata); return CODEC_ERROR_OKAY; #endif } else { #if _THREADED TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sToYUV); return CODEC_ERROR_OKAY; #endif } break; //Handle sizes that are smaller than the interim decode buffer //DAN20081222 case DECODED_FORMAT_CbYCrY_10bit_2_8: decoder->upper_plane = output; decoder->lower_plane = output + decoder->frame.width * decoder->frame.height / 2; // Use the address and pitch of the lower plane output = decoder->lower_plane; pitch = decoder->frame.width * 2; // Fall through and compute the inverse spatial transform case DECODED_FORMAT_CbYCrY_16bit_2_14: case DECODED_FORMAT_CbYCrY_16bit_10_6: case DECODED_FORMAT_CbYCrY_8bit: case DECODED_FORMAT_CbYCrY_16bit: if(decoder->use_active_metadata_decoder) { TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sThruActiveMetadata); return CODEC_ERROR_OKAY; } else { 
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sToOutput); return CODEC_ERROR_OKAY; } break; case DECODED_FORMAT_V210: if(decoder->use_active_metadata_decoder) { TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sThruActiveMetadata); return CODEC_ERROR_OKAY; } else { TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalYUVStrip16sToYUVOutput); return CODEC_ERROR_OKAY; } break; case DECODED_FORMAT_RGB32: case DECODED_FORMAT_RGB32_INVERTED: // As long as the outpitch is greater or equal to 4:2:2 16-bit YR16 this works. case DECODED_FORMAT_RG48: case DECODED_FORMAT_RG64: case DECODED_FORMAT_R210: case DECODED_FORMAT_DPX0: case DECODED_FORMAT_RG30: case DECODED_FORMAT_AR10: case DECODED_FORMAT_AB10: case DECODED_FORMAT_B64A: case DECODED_FORMAT_R408: case DECODED_FORMAT_V408: case DECODED_FORMAT_YU64: case DECODED_FORMAT_YR16: case DECODED_FORMAT_WP13: case DECODED_FORMAT_W13A: if((format & 0x7FFFFFFF) == DECODED_FORMAT_RGB32 && decoder->use_active_metadata_decoder == false) { #if _THREADED TransformInverseSpatialThreadedYUV422ToBuffer(decoder, frame, num_channels, output, pitch, &info2, chroma_offset, precision); #elif 0 TransformInverseSpatialToBuffer(decoder, transform_array, frame, num_channels, output, pitch, &info2, &decoder->scratch, chroma_offset, precision); #else TransformInverseSpatialYUV422ToOutput(decoder, transform_array, frame, num_channels, output, pitch, &info2, &decoder->scratch, chroma_offset, precision, InvertHorizontalStripYUV16sToPackedRGB32); #endif return CODEC_ERROR_OKAY; } #if _THREADED if(decoder->use_active_metadata_decoder) { TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, 
InvertHorizontalStrip16sThruActiveMetadata); return CODEC_ERROR_OKAY; } else { TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels, output, pitch, &info2, chroma_offset, precision); ConvertRow16uToOutput(decoder, frame, num_channels, output, pitch, &info2, chroma_offset, precision); return CODEC_ERROR_OKAY; } #endif break; default: if(decoder->use_active_metadata_decoder) { #if _THREADED TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sThruActiveMetadata); return CODEC_ERROR_OKAY; #endif } // else Return the error code for unsupported output format break; } } } else { // The video source was interlaced (the first transform was a frame transform) if (resolution == DECODED_RESOLUTION_FULL || resolution == DECODED_RESOLUTION_HALF_HORIZONTAL) { bool inverted = false; if (format == DECODED_FORMAT_RGB32 || format == DECODED_FORMAT_RGB24) { // info->format = DECODED_FORMAT_RGB32_INVERTED; //DAN20080702 vertically flips QT decodes if active. inverted = true; } #if 1 // Have the output location and pitch been inverted? 
if (inverted && pitch > 0) { int height = info->height; output += (height - 1) * pitch; // Start at the bottom row pitch = NEG(pitch); // Negate the pitch to go up } #endif switch (format & 0x7ffffff) { case DECODED_FORMAT_NV12: case DECODED_FORMAT_RGB24: // Output buffer is too small to decode into for case DECODED_FORMAT_YUYV: case DECODED_FORMAT_UYVY: case DECODED_FORMAT_V210: // only supported with use_active_metadata_decoder if(decoder->use_active_metadata_decoder) { int frame_size = info->width * info->height * 4; if(decoder->RGBFilterBuffer16==NULL || decoder->RGBFilterBufferSize < frame_size) { #if _ALLOCATOR if(decoder->RGBFilterBuffer16) { FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16); decoder->RGBFilterBuffer16 = NULL; } decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, frame_size, 16); #else if(decoder->RGBFilterBuffer16) { MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16); decoder->RGBFilterBuffer16 = NULL; } decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16); #endif assert(decoder->RGBFilterBuffer16 != NULL); if (! 
(decoder->RGBFilterBuffer16 != NULL)) { return CODEC_ERROR_MEMORY_ALLOC; } decoder->RGBFilterBufferSize = frame_size; } //TransformInverseSpatialUniversalThreadedToRow16u( // decoder, frame, num_channels, // (uint8_t *)decoder->RGBFilterBuffer16, info->width * 3 * 2, // info, chroma_offset, precision); #if _INTERLACED_WORKER_THREADS StartInterlaceWorkerThreads(decoder); //TODO: support new threading // Send the upper and lower rows of the transforms to the worker threads TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels, (PIXEL16U *)decoder->RGBFilterBuffer16, info->width * 4, info, chroma_offset, precision); #else // Transform the wavelets for each channel to the output image (not threaded) TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels, (PIXEL16U *)decoder->RGBFilterBuffer16, info->width * 4, info, &decoder->scratch, chroma_offset, precision); #endif #if _THREADED { WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; decoder->RGBFilterBufferPhase = 2; // yuv // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); decoder->RGBFilterBufferPhase = 0; } #endif return CODEC_ERROR_OKAY; } } switch (format) { // As long as the outpitch is greater or equal to 4:2:2 16-bit YR16 
this works. case DECODED_FORMAT_WP13: //DAN20110203 - missing case DECODED_FORMAT_W13A: //DAN20110203 - missing case DECODED_FORMAT_RG48: case DECODED_FORMAT_RG64: case DECODED_FORMAT_R210: case DECODED_FORMAT_DPX0: case DECODED_FORMAT_RG30: case DECODED_FORMAT_AR10: case DECODED_FORMAT_AB10: case DECODED_FORMAT_B64A: case DECODED_FORMAT_RGB32: //32-bit format can fit the interim YR16 decode into case DECODED_FORMAT_R408: //the output buffer case DECODED_FORMAT_V408: case DECODED_FORMAT_YU64: case DECODED_FORMAT_YR16: #if _INTERLACED_WORKER_THREADS StartInterlaceWorkerThreads(decoder); //TODO: support new threading // Send the upper and lower rows of the transforms to the worker threads TransformInverseFrameThreadedToRow16u(decoder, frame, num_channels, (PIXEL16U *)output, pitch, info, chroma_offset, precision); ConvertRow16uToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision); #else // Transform the wavelets for each channel to the output image (not threaded) TransformInverseFrameToRow16u(decoder, transform_array, frame, num_channels, (PIXEL16U *)output, pitch, info, &decoder->scratch, chroma_offset, precision); ConvertRow16uToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision); //Old code converts 4:2:2 directly to RGBA (single threaded.) 
//TransformInverseFrameToBuffer(transform_array, frame, num_channels, output, pitch, // info, &decoder->scratch, chroma_offset, precision); #endif return CODEC_ERROR_OKAY; default: // else Return the error code for unsupported output format break; } } } // The output format is not supported by this routine error = CODEC_ERROR_UNSUPPORTED_FORMAT; return error; } // Routines for converting the new encoded formats to the requested output format CODEC_ERROR ReconstructSampleFrameRGB444ToBuffer(DECODER *decoder, int frame, uint8_t *output, int pitch) { CODEC_ERROR error = CODEC_ERROR_OKAY; #if (1 && DEBUG) FILE *logfile = decoder->logfile; #endif FRAME_INFO *info = &decoder->frame; CODEC_STATE *codec = &decoder->codec; int num_channels = codec->num_channels; //int progressive = codec->progressive; TRANSFORM **transform_array = decoder->transform; //IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS]; //IMAGE *wavelet; //int wavelet_width; //int wavelet_height; int decoded_width = 0; int decoded_height = 0; int resolution = info->resolution; //int chroma_offset = decoder->codec.chroma_offset; //int decoded_scale; #if _ALLOCATOR ALLOCATOR *allocator = decoder->allocator; #endif //TODO: Eliminate use of the chroma offset if (decoder == NULL) { return CODEC_ERROR_INVALID_ARGUMENT; } // This routine should only be called for progressive frames assert(codec->progressive); // The decoder can decode a video sample without returning a frame if (output == NULL || pitch == 0) { return CODEC_ERROR_OKAY; } // Does this frame have to be reconstructed? 
if ((decoder->flags & DECODER_FLAGS_RENDER) == 0) { return CODEC_ERROR_OKAY; } // Check that the requested frame is within the limits of the group of frames assert(0 <= frame && frame < decoder->gop_length); // Check that the frame resolution is valid assert(IsValidFrameResolution(resolution)); if (!IsValidFrameResolution(resolution)) { return CODEC_ERROR_RESOLUTION; } // Compute the decoded width and height ComputeOutputDimensions(decoder, frame, &decoded_width, &decoded_height); assert(decoded_width > 0 && decoded_height > 0); if (info->format == DECODED_FORMAT_RGB24 || info->format == DECODED_FORMAT_RGB32) { output += (info->height-1)*pitch; pitch = -pitch; } #if (0 && DEBUG) if (logfile) { IMAGE *wavelet = transform[0]->wavelet[frame]; int band = 0; fprintf(logfile, "Luminance wavelet, frame: %d, band: %d\n", frame, band); DumpArray16s("Lowpass Band", wavelet->band[band], wavelet->width, wavelet->height, wavelet->pitch, logfile); } #endif // Check that the requested frame is large enough to hold the decoded frame #if (0 && DEBUG) //if (! 
(info->width >= decoded_width)) { if (logfile) { //fprintf(logfile, "Requested frame not large enough to hold decoded frame: %d < %d\n", info->width, decoded_width); fprintf(logfile, "Output frame width: %d, decoded frame width: %d\n", info->width, decoded_width); } } #endif assert(info->width >= decoded_width); if (!(info->width >= decoded_width)) { return CODEC_ERROR_FRAMESIZE; } // assert((info->height+7)/8 >= (decoded_height+7)/8); // if (!(info->height+7)/8 >= (decoded_height+7)/8) { // return CODEC_ERROR_FRAMESIZE; // } START(tk_convert); if (resolution == DECODED_RESOLUTION_LOWPASS_ONLY) { //int precision = codec->precision; int scale = 13; int channel; IMAGE *lowpass_images[TRANSFORM_MAX_CHANNELS]; int chroma_offset = decoder->codec.chroma_offset; //DAN20081203 -- fix for 444 decodes in AE32-bit float decoder->frame.white_point = 16; //decoder->frame.signed_pixels = 0; for (channel = 0; channel < num_channels; channel++) { lowpass_images[channel] = transform_array[channel]->wavelet[5]; if(lowpass_images[channel] == NULL) // therefore IntreFrame compressed. 
{ scale = 12; lowpass_images[channel] = transform_array[channel]->wavelet[2]; } } CopyLowpass16sToBuffer(decoder, lowpass_images, num_channels, output, pitch, info, chroma_offset, scale, decoder->codec.encoded_format, decoder->frame.white_point); } else // Quarter resolution if (resolution == DECODED_RESOLUTION_QUARTER) { // Output quarter resolution for the two frame GOP int precision = codec->precision; // Reconstruct the frame to quarter resolution ReconstructQuarterFrame(decoder, num_channels, frame, output, pitch, info, &decoder->scratch, precision); // Quarter resolution one frame GOP is handled in DecodeSampleIntraFrame } else // Half resolution if (resolution == DECODED_RESOLUTION_HALF) { IMAGE *wavelet_array[TRANSFORM_MAX_CHANNELS]; int precision = codec->precision; int chroma_offset = 0; int channel; if(decoder->use_active_metadata_decoder) { #if _THREADED { WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; mailbox->framenum = frame; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; decoder->RGBFilterBufferPhase = 1; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); decoder->RGBFilterBufferPhase = 0; } #endif } else { //DAN20081203 -- fix for 444 decodes in AE32-bit float decoder->frame.white_point = 16; //decoder->frame.signed_pixels = 0; // Get the 
first level wavelet in each channel for (channel = 0; channel < num_channels; channel++) { wavelet_array[channel] = transform_array[channel]->wavelet[frame]; } // Pack the pixels from the lowpass band in each channel into the output buffer CopyLowpassRGB444ToBuffer(decoder, wavelet_array, num_channels, output, pitch, info, chroma_offset, precision); } } // Full resolution or half horizontal else { int chroma_offset = 0; int precision = codec->precision; // Reconstruct the output frame from a full resolution decode //assert(resolution == DECODED_RESOLUTION_FULL); if(decoder->use_active_metadata_decoder) { int frame_size, channels = 3; if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format)) channels = 4; frame_size = info->width * info->height * channels * 2; if(decoder->RGBFilterBuffer16==NULL || decoder->RGBFilterBufferSize < frame_size) { #if _ALLOCATOR if(decoder->RGBFilterBuffer16) { FreeAligned(decoder->allocator, decoder->RGBFilterBuffer16); decoder->RGBFilterBuffer16 = NULL; } decoder->RGBFilterBuffer16 = (PIXEL16U *)AllocAligned(allocator, frame_size, 16); #else if(decoder->RGBFilterBuffer16) { MEMORY_ALIGNED_FREE(decoder->RGBFilterBuffer16); decoder->RGBFilterBuffer16 = NULL; } decoder->RGBFilterBuffer16 = (PIXEL16U *)MEMORY_ALIGNED_ALLOC(frame_size, 16); #endif assert(decoder->RGBFilterBuffer16 != NULL); if (! 
(decoder->RGBFilterBuffer16 != NULL)) { return CODEC_ERROR_MEMORY_ALLOC; } decoder->RGBFilterBufferSize = frame_size; } #if _THREADED TransformInverseSpatialUniversalThreadedToRow16u(decoder, frame, num_channels, (uint8_t *)decoder->RGBFilterBuffer16, info->width * channels * 2, info, chroma_offset, precision); #else // Decode that last transform to rows of Bayer data (one row per channel) TransformInverseSpatialToRow16u(transform_array, frame, num_channels, (uint8_t *)decoder->RGBFilterBuffer16, info->width * channels * 2, info, &decoder->scratch, chroma_offset, precision); #endif #if _THREADED { WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data; #if _DELAY_THREAD_START if(decoder->worker_thread.pool.thread_count == 0) { CreateLock(&decoder->worker_thread.lock); // Initialize the pool of transform worker threads ThreadPoolCreate(&decoder->worker_thread.pool, decoder->thread_cntrl.capabilities >> 16/*cpus*/, WorkerThreadProc, decoder); } #endif // Post a message to the mailbox mailbox->output = output; mailbox->pitch = pitch; memcpy(&mailbox->info, info, sizeof(FRAME_INFO)); mailbox->jobType = JOB_TYPE_OUTPUT; decoder->RGBFilterBufferPhase = 1; // Set the work count to the number of rows to process ThreadPoolSetWorkCount(&decoder->worker_thread.pool, info->height); // Start the transform worker threads ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START); // Wait for all of the worker threads to finish ThreadPoolWaitAllDone(&decoder->worker_thread.pool); decoder->RGBFilterBufferPhase = 0; } #endif } else { //DAN20081203 -- fix for 444 decodes in AE32-bit float decoder->frame.white_point = 16; //decoder->frame.signed_pixels = 0; switch (info->format) { case DECODED_FORMAT_B64A: #if _THREADED TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sRGB2B64A); #else TransformInverseRGB444ToB64A(transform_array, frame, num_channels, output, pitch, 
info, &decoder->scratch, chroma_offset, precision); #endif break; case DECODED_FORMAT_YU64: //TODO : Threading TransformInverseRGB444ToYU64(transform_array, frame, num_channels, output, pitch, info, &decoder->scratch, chroma_offset, precision); break; case DECODED_FORMAT_RGB24: case DECODED_FORMAT_RGB24_INVERTED: case DECODED_FORMAT_RGB32: case DECODED_FORMAT_RGB32_INVERTED://TODO, needs to be threaded. WIP TransformInverseRGB444ToRGB32(transform_array, frame, num_channels, output, pitch, info, &decoder->scratch, chroma_offset, precision); break; case DECODED_FORMAT_RG48: case DECODED_FORMAT_RG64: //TODO, needs to be threaded. WIP TransformInverseRGB444ToRGB48(transform_array, frame, num_channels, output, pitch, info, &decoder->scratch, chroma_offset, precision); break; case DECODED_FORMAT_R210: case DECODED_FORMAT_DPX0: case DECODED_FORMAT_RG30: case DECODED_FORMAT_AR10: case DECODED_FORMAT_AB10: #if _THREADED TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sRGB2RG30); #else TransformInverseRGB444ToRGB48(transform_array, frame, num_channels, output, pitch, info, &decoder->scratch, chroma_offset, precision); #endif break; case DECODED_FORMAT_YUYV: case DECODED_FORMAT_UYVY: #if _THREADED TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sRGB2YUV); #else TransformInverseSpatialYUV422ToOutput(decoder, transform_array, frame, num_channels, output, pitch, info, &decoder->scratch, chroma_offset, precision, InvertHorizontalStripRGB16sToPackedYUV8u); #endif break; case DECODED_FORMAT_R408: case DECODED_FORMAT_V408: #if _THREADED TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch, info, chroma_offset, precision, InvertHorizontalStrip16sRGBA2YUVA); #else assert(0); #endif break; case DECODED_FORMAT_YR16: #if _THREADED 
// NOTE(review): this region begins inside a switch over the decoded output
// format; the enclosing function's header lies above this chunk. The visible
// tail is reproduced unchanged below.
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch,
                                                 info, chroma_offset, precision,
                                                 InvertHorizontalStrip16sRGB2YR16);
#else
assert(0);// missing non-threaded version
#endif
break;

case DECODED_FORMAT_V210:
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch,
                                                 info, chroma_offset, precision,
                                                 InvertHorizontalStrip16sRGB2v210);
#else
assert(0);// missing non-threaded version
#endif
break;

case DECODED_FORMAT_CbYCrY_8bit:		// DECODED_FORMAT_CT_UCHAR
#if _THREADED
TransformInverseSpatialUniversalThreadedToOutput(decoder, frame, num_channels, output, pitch,
                                                 info, chroma_offset, precision,
                                                 InvertHorizontalStrip16sRGB2YUV);
#else
assert(0);// missing non-threaded version
#endif
break;

//TODO: Add code to handle other Avid pixel formats
case DECODED_FORMAT_CbYCrY_16bit:		// DECODED_FORMAT_CT_SHORT
case DECODED_FORMAT_CbYCrY_10bit_2_8:	// DECODED_FORMAT_CT_10Bit_2_8
case DECODED_FORMAT_CbYCrY_16bit_2_14:	// DECODED_FORMAT_CT_SHORT_2_14
case DECODED_FORMAT_CbYCrY_16bit_10_6:	// DECODED_FORMAT_CT_USHORT_10_6
// Unsupported Avid formats: fail loudly in debug builds
assert(0);
break;

default:
#if (1 && DEBUG)
if (logfile) {
    fprintf(logfile, "Invalid decoded format: %d\n", info->format);
}
#endif
assert(0);
error = CODEC_ERROR_INVALID_FORMAT;
break;
}
}
}

STOP(tk_convert);

return error;
}

// Convert 16-bit signed lowpass data into the requested output format.
// The lowpass band of each channel in image_array (G, R, B, optionally A)
// is packed/converted into output_buffer according to info->format.
void CopyLowpassRGB444ToBuffer(DECODER *decoder, IMAGE *image_array[], int num_channels,
                               uint8_t *output_buffer, int32_t output_pitch,
                               FRAME_INFO *info, int chroma_offset, int precision)
{
    bool inverted = false;
    int output_width = info->width;
    int output_height = info->height;
    int format = info->format;

    // Left shift to scale the pixels to 16 bits minus the shift already in the lowpass values
    const int shift = 16 - precision - PRESCALE_LUMA;

    START(tk_convert);

#if 0
    // Fill the output buffer with blank values
    EraseOutputBuffer(output_buffer, info->width, info->height, output_pitch, info->format);
#endif

    // Determine the type of conversion
    switch (info->format)
    {
    case DECODED_FORMAT_RGB24:
    case DECODED_FORMAT_RGB32:
        inverted = true;
        // fall through: inverted variants use the same converter with inverted = false

    case DECODED_FORMAT_RGB24_INVERTED:
    case DECODED_FORMAT_RGB32_INVERTED:
    case DECODED_FORMAT_B64A:
    case DECODED_FORMAT_R210:
    case DECODED_FORMAT_DPX0:
    case DECODED_FORMAT_RG30:
    case DECODED_FORMAT_AR10:
    case DECODED_FORMAT_AB10:
    case DECODED_FORMAT_RG48:
    case DECODED_FORMAT_RG64: //WIP
        ConvertLowpassRGB444ToRGB(image_array, output_buffer, output_width, output_height,
                                  output_pitch, format, inverted, shift, num_channels);
        break;

    case DECODED_FORMAT_YUYV:
    case DECODED_FORMAT_UYVY:
        {
            // Channel order in image_array is G, R, B
            IMAGE *g_image = image_array[0];
            IMAGE *r_image = image_array[1];
            IMAGE *b_image = image_array[2];

            // NOTE(review): these compare info->format against COLOR_FORMAT_*
            // while the case labels use DECODED_FORMAT_* — confirm the enum
            // values coincide, otherwise neither branch may run.
            if (info->format == COLOR_FORMAT_YUYV)
            {
                ConvertRGB2YUV(r_image->band[0], g_image->band[0], b_image->band[0],
                               r_image->pitch, g_image->pitch, b_image->pitch,
                               output_buffer, output_pitch,
                               output_width, output_height,
                               14, info->colorspace, info->format);
            }
            else if (info->format == COLOR_FORMAT_UYVY)
            {
                ConvertRGB2UYVY(r_image->band[0], g_image->band[0], b_image->band[0],
                                r_image->pitch, g_image->pitch, b_image->pitch,
                                output_buffer, output_pitch,
                                output_width, output_height,
                                14, info->colorspace, info->format);
            }
        }
        break;

    default:
        {
            // Generic path: copy each row into a planar scanline buffer in
            // scratch memory and let the line converters handle the format.
            int y;
            IMAGE *g_image = image_array[0];
            IMAGE *r_image = image_array[1];
            IMAGE *b_image = image_array[2];
            IMAGE *a_image = image_array[3];
            unsigned short *scanline = (unsigned short *)decoder->scratch.free_ptr;
            //unsigned short *scanline2 = scanline + output_width*3;
            uint8_t *newline = (uint8_t *)output_buffer;
            unsigned short *Rptr,*Gptr,*Bptr,*Aptr = NULL;

            Rptr = (unsigned short *)r_image->band[0];
            Gptr = (unsigned short *)g_image->band[0];
            Bptr = (unsigned short *)b_image->band[0];

            if(decoder->codec.encoded_format == ENCODED_FORMAT_RGBA_4444 && ALPHAOUTPUT(decoder->frame.format))
            {
                // Four-channel (RGBA) variant
                Aptr = (unsigned short *)a_image->band[0];

                for(y=0; y<output_height; y++)
                {
                    int flags = (ACTIVEMETADATA_PLANAR);
                    int whitebitdepth = 14;

                    // Gather one planar scanline: R, G, B, A back to back
                    // (widths are in pixels; *2 converts to bytes of 16-bit data)
                    memcpy(scanline, Rptr, info->width*2);
                    memcpy(scanline+info->width, Gptr, info->width*2);
                    memcpy(scanline+info->width*2, Bptr, info->width*2);
                    memcpy(scanline+info->width*3, Aptr, info->width*2);
                    Rptr += r_image->pitch/2;
                    Gptr += g_image->pitch/2;
                    Bptr += b_image->pitch/2;
                    Aptr += a_image->pitch/2;

                    Convert4444LinesToOutput(decoder, info->width, 1, y, scanline,
                                             newline, output_pitch, info->format,
                                             whitebitdepth, flags);

                    newline += output_pitch;
                }
            }
            else
            {
                // Three-channel (RGB) variant
                for(y=0; y<output_height; y++)
                {
                    int flags = (ACTIVEMETADATA_PLANAR);
                    int whitebitdepth = 14;

                    memcpy(scanline, Rptr, info->width*2);
                    memcpy(scanline+info->width, Gptr, info->width*2);
                    memcpy(scanline+info->width*2, Bptr, info->width*2);
                    Rptr += r_image->pitch/2;
                    Gptr += g_image->pitch/2;
                    Bptr += b_image->pitch/2;

                    ConvertLinesToOutput(decoder, info->width, 1, y, scanline,
                                         newline, output_pitch, info->format,
                                         whitebitdepth, flags);

                    newline += output_pitch;
                }
            }
        }
        //assert(0);
        break;
    }

    STOP(tk_convert);
}

#if _THREADED

// Threaded inverse transform using the new threads API.
// Runs the inverse spatial transform for YUV 4:2:2 sources by posting one
// job per middle row to the decoder's worker-thread pool and blocking until
// all workers are done.
void TransformInverseSpatialThreadedYUV422ToBuffer(DECODER *decoder, int frame_index,
                                                   int num_channels, uint8_t *output,
                                                   int pitch, FRAME_INFO *info,
                                                   int chroma_offset, int precision)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    //TODO: Add support for more output formats
    int format = DECODED_FORMAT_RGB32;

    // The upper and lower spatial transforms only share the middle rows
    int transform_height = (((info->height + 7) / 8) * 8) / 2;
    int middle_row_count = transform_height;

    // Data structure for passing information to the worker threads
    WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

    // Inverse horizontal filter that outputs the desired format
    HorizontalInverseFilterOutputProc horizontal_filter_proc;

#if _DELAY_THREAD_START
    // Lazily create the worker-thread pool on first use
    if(decoder->worker_thread.pool.thread_count == 0)
    {
        CreateLock(&decoder->worker_thread.lock);

        // Initialize the pool of transform worker threads
        ThreadPoolCreate(&decoder->worker_thread.pool,
                         decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                         WorkerThreadProc,
                         decoder);
    }
#endif

    // Choose the correct inverse horizontal filter for the output format
    switch (format)
    {
    case DECODED_FORMAT_RGB32:
        horizontal_filter_proc = InvertHorizontalStripYUV16sToPackedRGB32;
        break;

    default:
        assert(0);
        return;
    }

    // Post a message to the mailbox
    mailbox->horizontal_filter_proc = horizontal_filter_proc;
    mailbox->frame = frame_index;
    mailbox->num_channels = num_channels;
    mailbox->output = output;
    mailbox->pitch = pitch;
    memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
    mailbox->chroma_offset = chroma_offset;
    mailbox->precision = precision;
    mailbox->jobType = JOB_TYPE_WAVELET;

    // Set the work count to the number of rows to process
    ThreadPoolSetWorkCount(&decoder->worker_thread.pool, middle_row_count);

    // Start the transform worker threads
    ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

    // Wait for all of the worker threads to finish
    ThreadPoolWaitAllDone(&decoder->worker_thread.pool);

#if (1 && DEBUG)
    if (logfile) {
        fprintf(logfile, "All worker threads signalled done\n");
    }
#endif
}

// Threaded inverse transform using the new threads API
// Convert RGB RGBA or BAYER (4 channel) data to a 16-bit planar format
void TransformInverseSpatialUniversalThreadedToRow16u(DECODER *decoder, int frame_index,
                                                      int num_channels, uint8_t *output,
                                                      int pitch, FRAME_INFO *info,
                                                      int chroma_offset, int precision)
{
#if (1 && DEBUG)
    // NOTE(review): logfile is declared but not used in this routine
    FILE *logfile = decoder->logfile;
#endif
    // The upper and lower spatial transforms only share the middle rows
    int transform_height = (((info->height + 7) / 8) * 8) / 2;
    int middle_row_count = transform_height;

    // Data structure for passing information to the worker threads
    WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

    // Inverse horizontal filter that outputs the desired format
    HorizontalInverseFilterOutputProc horizontal_filter_proc;
    horizontal_filter_proc = InvertHorizontalStrip16sToRow16uPlanar;

#if _DELAY_THREAD_START
    // Lazily create the worker-thread pool on first use
    if(decoder->worker_thread.pool.thread_count == 0)
    {
        CreateLock(&decoder->worker_thread.lock);

        // Initialize the pool of transform worker threads
        ThreadPoolCreate(&decoder->worker_thread.pool,
                         decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                         WorkerThreadProc,
                         decoder);
    }
#endif

    // Post a message to the mailbox
    mailbox->horizontal_filter_proc = horizontal_filter_proc;
    mailbox->frame = frame_index;
    mailbox->num_channels = num_channels;
    mailbox->output = output;
    mailbox->pitch = pitch;
    memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
    mailbox->chroma_offset = chroma_offset;
    mailbox->precision = precision;
    mailbox->jobType = JOB_TYPE_WAVELET;

    // Set the work count to the number of rows to process
    ThreadPoolSetWorkCount(&decoder->worker_thread.pool, middle_row_count);

    // Start the transform worker threads
    ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

    // Wait for all of the worker threads to finish
    ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}

// Threaded inverse transform using the new threads API
// Convert RGB RGBA or BAYER (4 channel) data to a 16-bit planar format.
// Unlike the Row16u variant, the caller supplies the inverse horizontal
// filter that produces the desired output format.
void TransformInverseSpatialUniversalThreadedToOutput(
        DECODER *decoder,
        int frame_index,
        int num_channels,
        uint8_t *output,
        int pitch,
        FRAME_INFO *info,
        int chroma_offset,
        int precision,
        HorizontalInverseFilterOutputProc horizontal_filter_proc)
{
#if (1 && DEBUG)
    // NOTE(review): logfile is declared but not used in this routine
    FILE *logfile = decoder->logfile;
#endif
    // The upper and lower spatial transforms only share the middle rows
    int transform_height = (((info->height + 7) / 8) * 8) / 2;
    int middle_row_count = transform_height;

    // Data structure for passing information to the worker threads
    WORKER_THREAD_DATA *mailbox = &decoder->worker_thread.data;

    // Inverse horizontal filter that outputs the desired format
#if _DELAY_THREAD_START
    // Lazily create the worker-thread pool on first use
    if(decoder->worker_thread.pool.thread_count == 0)
    {
        CreateLock(&decoder->worker_thread.lock);

        // Initialize the pool of transform worker threads
        ThreadPoolCreate(&decoder->worker_thread.pool,
                         decoder->thread_cntrl.capabilities >> 16/*cpus*/,
                         WorkerThreadProc,
                         decoder);
    }
#endif

    // Post a message to the mailbox
    mailbox->horizontal_filter_proc = horizontal_filter_proc;
    mailbox->frame = frame_index;
    mailbox->num_channels = num_channels;
    mailbox->output = output;
    mailbox->pitch = pitch;
    memcpy(&mailbox->info, info, sizeof(FRAME_INFO));
    mailbox->chroma_offset = chroma_offset;
    mailbox->precision = precision;
    mailbox->jobType = JOB_TYPE_WAVELET;

    // Set the work count to the number of rows to process
    ThreadPoolSetWorkCount(&decoder->worker_thread.pool, middle_row_count);

    // Start the transform worker threads
    ThreadPoolSendMessage(&decoder->worker_thread.pool, THREAD_MESSAGE_START);

    // Wait for all of the worker threads to finish
    ThreadPoolWaitAllDone(&decoder->worker_thread.pool);
}

// Routines for the worker threads that use the new threads API.
// Each worker thread runs this routine: it carves a private slice out of the
// decoder scratch space, processes the top/bottom border rows when it is the
// designated border thread, then pulls middle-row indices from the pool's
// work queue until no work remains.
void TransformInverseSpatialSectionToOutput(DECODER *decoder, int thread_index,
                                            int frame_index, int num_channels,
                                            uint8_t *output_buffer, int output_pitch,
                                            FRAME_INFO *info, int chroma_offset, int precision,
                                            HorizontalInverseFilterOutputProc horizontal_filter_proc)
{
#if (1 && DEBUG)
    FILE *logfile = decoder->logfile;
#endif
    TRANSFORM **transform = decoder->transform;
    const SCRATCH *scratch = &decoder->scratch;

    // Per-channel wavelet band pointers and pitches
    PIXEL *lowlow_band[CODEC_MAX_CHANNELS];
    PIXEL *lowhigh_band[CODEC_MAX_CHANNELS];
    PIXEL *highlow_band[CODEC_MAX_CHANNELS];
    PIXEL *highhigh_band[CODEC_MAX_CHANNELS];

    int lowlow_pitch[CODEC_MAX_CHANNELS];
    int lowhigh_pitch[CODEC_MAX_CHANNELS];
    int highlow_pitch[CODEC_MAX_CHANNELS];
    int highhigh_pitch[CODEC_MAX_CHANNELS];

    int channel_width[CODEC_MAX_CHANNELS];

    uint8_t *output_row_ptr;
    uint8_t *plane_array[TRANSFORM_MAX_CHANNELS];
    int plane_pitch[TRANSFORM_MAX_CHANNELS];

    int output_width = info->width;
    int output_height = info->height;
    int half_height = output_height/2;

    int luma_band_width;
    ROI strip;
    char *bufptr;
    int last_row;
    int last_display_row;
    int last_line;
    int channel;
    int row;
    int odd_display_lines = 0;
    THREAD_ERROR error;

    // Push the scratch space state to allocate a new section
    char *buffer = scratch->free_ptr;
    size_t buffer_size = scratch->free_size;

    //TODO: Replace uses of buffer variables with calls to the scratch space API

    // This version is for 16-bit pixels
    assert(sizeof(PIXEL) == 2);

    // Must have a valid inverse horizontal filter
    assert(horizontal_filter_proc != NULL);

    // Check for enough space in the local array allocations
    // assert(num_channels <= CODEC_NUM_CHANNELS);
    // assert(num_channels <= TRANSFORM_MAX_CHANNELS);
    if(num_channels < 3 || num_channels > TRANSFORM_MAX_CHANNELS)
    {
        decoder->error = CODEC_ERROR_BAD_FRAME;
        return;
    }

    // Divide the buffer space between the four threads
    buffer_size /= decoder->worker_thread.pool.thread_count; // used to assume max of 4
    buffer += buffer_size * thread_index;

    // Round the buffer pointer up to the next cache line
    buffer_size -= (_CACHE_LINE_SIZE - ((uintptr_t)buffer & _CACHE_LINE_MASK));
    bufptr = (char *)ALIGN(buffer, _CACHE_LINE_SIZE);

    // Allocate buffer space for the output rows from each channel
    for (channel = 0; channel < num_channels; channel++)
    {
        // Get the row width for this channel
        IMAGE *wavelet = transform[channel]->wavelet[frame_index];
        int width = wavelet->width;
        int height = wavelet->height;
        //int pitch = wavelet->pitch;
        size_t channel_buffer_size;

        // Compute the width and pitch for the output rows stored in this buffer
        int buffer_width = 2 * width;
        int buffer_height = 2;
        int buffer_pitch = ALIGN16(buffer_width);

        // Compute the total allocation for this channel
        channel_buffer_size = buffer_height * buffer_pitch;

        // Check that there is enough space available
        assert(channel_buffer_size <= buffer_size);

        // Allocate the buffer for this channel
        plane_array[channel] = (uint8_t *)bufptr;

        // Remember the pitch for rows in this channel
        plane_pitch[channel] = buffer_pitch;

        // Advance the buffer pointer past the allocated space for this channel
        bufptr += channel_buffer_size;

        // Reduce the amount of space remaining in the buffer
        buffer_size -= channel_buffer_size;

        // The dimensions of the output image are the same as the luma channel
        if (channel == 0)
        {
            strip.width = buffer_width;
            strip.height = buffer_height;
            last_row = height;

            //DAN20050606 Added to fix issue with non-div by 8 heights.
            last_display_row = (info->height+1)/2;

            // DAN20090215 -- fix for odd display lines.
            odd_display_lines = info->height & 1;

            // Remember the width of the wavelet bands for luma
            luma_band_width = width;
        }

        // Save the bands per channel for routines that process all channels at once
        lowlow_band[channel] = wavelet->band[0];
        lowhigh_band[channel] = wavelet->band[1];
        highlow_band[channel] = wavelet->band[2];
        highhigh_band[channel] = wavelet->band[3];

        lowlow_pitch[channel] = wavelet->pitch;
        lowhigh_pitch[channel] = wavelet->pitch;
        highlow_pitch[channel] = wavelet->pitch;
        highhigh_pitch[channel] = wavelet->pitch;

        // Remember the width of the wavelet for this channel
        channel_width[channel] = width;
    }

    // Use the remaining buffer space for intermediate results
    buffer_size -= (_CACHE_LINE_SIZE - ((uintptr_t)bufptr & _CACHE_LINE_MASK));
    buffer = (char *)ALIGN(bufptr, _CACHE_LINE_SIZE);

    if (last_row == last_display_row) {
        last_line = half_height - 1;
    }
    else {
        last_line = half_height;
    }

    if(odd_display_lines)
        last_line++;

    if (thread_index == TRANSFORM_WORKER_TOP_THREAD)
    {
        // Process the first row
        row = 0;
        output_row_ptr = output_buffer;

#if (0 && DEBUG)
        if (logfile) {
            fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
        }
#endif
        // Process the first row using special border filters for the top row
        InvertSpatialTopRow16sToOutput(decoder, thread_index,
                                       lowlow_band, lowlow_pitch,
                                       lowhigh_band, lowhigh_pitch,
                                       highlow_band, highlow_pitch,
                                       highhigh_band, highhigh_pitch,
                                       output_row_ptr, output_pitch,
                                       output_width, info->format, info->colorspace,
                                       row, channel_width,
                                       (PIXEL *)buffer, buffer_size,
                                       precision,
                                       horizontal_filter_proc);
    }

    if (thread_index == TRANSFORM_WORKER_BOTTOM_THREAD ||
        decoder->worker_thread.pool.thread_count == 1)
    {
        if(last_row == last_display_row) //DAN20071218 -- Added as old 1080 RAW files would crash
        {
            int pitch = output_pitch;

            // Process the last row
            row = last_row - 1;

            if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV) // 3d work
                if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC)
                    pitch >>= 1;

            // Begin filling the last output row with results
            output_row_ptr = output_buffer + row * 2 * pitch;

#if (0 && DEBUG)
            if (logfile) {
                fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
            }
#endif
            // Process the last row using special border filters for the bottom row
            if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV)
                if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC ||
                   decoder->channel_blend_type == BLEND_LINE_INTERLEAVED) // 3d Work TODO Fix
                    output_row_ptr -= output_pitch;

            InvertSpatialBottomRow16sToOutput(decoder, thread_index,
                                              lowlow_band, lowlow_pitch,
                                              lowhigh_band, lowhigh_pitch,
                                              highlow_band, highlow_pitch,
                                              highhigh_band, highhigh_pitch,
                                              output_row_ptr, output_pitch,
                                              output_width, info->format, info->colorspace,
                                              row, channel_width,
                                              (PIXEL *)buffer, buffer_size,
                                              precision, odd_display_lines,
                                              horizontal_filter_proc);
        }
    }

    // Loop until all of the middle rows have been processed
    for (;;)
    {
        int work_index;
        int row;

        // Wait for one row from each channel to process
        error = PoolThreadWaitForWork(&decoder->worker_thread.pool, &work_index, thread_index);

        // Is there another row to process?
        if (error == THREAD_ERROR_OKAY)
        {
            int pitch = output_pitch;

            // Compute the next row to process from the work index
            row = work_index + 1;

            if(decoder->channel_decodes > 1 && decoder->frame.format == DECODED_FORMAT_YUYV) // 3d work
                if(decoder->channel_blend_type == BLEND_STACKED_ANAMORPHIC) // stacked
                    pitch >>= 1;

            // Compute the output row corresponding to this row index
            output_row_ptr = output_buffer + row * 2 * pitch;
        }
        else
        {
            // No more work to do
            return;
        }

        // Is the row inside the top and bottom border?
        if (0 < row && row < last_line)
        {
            int outputlines = 2;

#if (0 && DEBUG)
            if (logfile) {
                fprintf(logfile, "Thread: %d, processing row: %d\n", thread_index, row);
            }
#endif
            // With an odd display height the final middle row emits one line
            if(odd_display_lines && row==last_line-1)
            {
                outputlines = 1;
            }

            // Process the middle row using the normal wavelet filters
            InvertSpatialMiddleRow16sToOutput(decoder, thread_index,
                                              lowlow_band, lowlow_pitch,
                                              lowhigh_band, lowhigh_pitch,
                                              highlow_band, highlow_pitch,
                                              highhigh_band, highhigh_pitch,
                                              output_row_ptr, output_pitch,
                                              output_width, info->format, info->colorspace,
                                              row, channel_width,
                                              (PIXEL *)buffer, buffer_size,
                                              precision,
                                              horizontal_filter_proc, outputlines);
        }
    }
}

#endif //_THREADED

// Scan the tag/value tuples of a sample bitstream for the given tag.
// Returns true and stores the tuple's value in *retvalue when found;
// returns false when the trailer is reached, data runs out, or an
// unexpected tag is encountered.
bool GetTuplet(unsigned char *data, int datasize, unsigned short findtag, unsigned short *retvalue)
{
    bool ret = false;
    BITSTREAM myinput, *pinput;
    TAGVALUE segment;
    TAGWORD tag,value;
    int error = 0;
    //char t[100];

    InitBitstream(&myinput);
    myinput.lpCurrentWord = data;
    myinput.nWordsUsed = datasize;
    pinput = &myinput;

    do
    {
        bool optional = false;  // (currently informational only)
        int chunksize = 0;

        // Read the next tag value pair from the bitstream
        segment = GetSegment(pinput);
        tag = segment.tuple.tag;
        value = segment.tuple.value;

        // Is this an optional tag?
        if (tag < 0) {
            tag = NEG(tag);
            optional = true;
        }

        // Decode the chunk payload size (in 32-bit words) from the tag class
        if(tag & 0x2000)
        {
            // Large chunk: low byte of the tag extends the 16-bit size
            chunksize = value;
            chunksize &= 0xffff;
            chunksize += ((tag&0xff)<<16);
        }
        else if(tag & 0x4000)
        {
            chunksize = value;
            chunksize &= 0xffff;
        }
        else if(tag == CODEC_TAG_INDEX)
        {
            chunksize = value;
            chunksize &= 0xffff;
        }
        else
        {
            chunksize = 0;
        }

        if((int)(tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || tag & 0x6000)
        {
            int skip = 1;
            error = 0;

            if(tag == (int)findtag)
            {
                *retvalue = value;
                ret = true;
                break;
            }

            if((tag & 0xff00) == 0x2200) //sample size
            {
                chunksize = 0; // don't test against pinput->nWordsUsed, as we might be only reader enough for metadata only.
                skip = 0;
            }
            if((tag & 0xff00) == 0x2300) //uncompressed sample size
            {
                skip = 1;
            }
            if((tag & 0xff00) == 0x2100) //level
                skip = 0;

            if(chunksize)
            {
                // NOTE(review): chunksize < 0 is tested after chunksize*4 —
                // a huge chunksize could overflow the multiplication first;
                // confirm upstream sizes keep this in range.
                if(chunksize*4 > pinput->nWordsUsed || chunksize < 0)
                {
                    break;
                }
                if(skip)
                {
                    //unsigned int *iptr = (unsigned int *)pinput->lpCurrentWord;
                    pinput->lpCurrentWord += chunksize*4;
                    pinput->nWordsUsed -= chunksize*4;
                }
            }
        }
        else
        {
            error = 1;
        }
    } while(tag != CODEC_TAG_GROUP_TRAILER && tag != CODEC_TAG_FRAME_TRAILER &&
            pinput->nWordsUsed>0 && !error);

    return ret;
}

/*!
	Copied from metadata.cpp in the cedoc common directory

	Like GetTuplet() above, but returns the address of the payload that
	follows the matching tag (and its value via *retvalue), or NULL when
	the tag is not found or the input is empty.
*/
uint8_t *GetTupletAddr(uint8_t *data, int datasize, uint16_t findtag, int16_t *retvalue)
{
    unsigned char *ret = NULL;
    BITSTREAM myinput, *pinput;
    TAGVALUE segment;
    TAGWORD tag,value;
    int error = 0;

    if (data == NULL || datasize == 0) {
        return NULL;
    }

    //InitBitstream(&myinput);
    memset(&myinput, 0, sizeof(BITSTREAM));
    myinput.lpCurrentWord = data;
    myinput.nWordsUsed = datasize;
    myinput.nBitsFree = BITSTREAM_LONG_SIZE;
    pinput = &myinput;

    do
    {
        //BOOL optional = FALSE;
        bool optional = false;  // (currently informational only)
        int chunksize = 0;

        // Read the next tag value pair from the bitstream
        segment = GetSegment(pinput);
        tag = segment.tuple.tag;
        value = segment.tuple.value;

        // Is this an optional tag?
        if (tag < 0) {
            tag = NEG(tag);
            //optional = TRUE;
            optional = true;
        }

        // Decode the chunk payload size (in 32-bit words) from the tag class
        if(tag & 0x2000)
        {
            chunksize = value;
            chunksize &= 0xffff;
            chunksize += ((tag&0xff)<<16);
        }
        else if(tag & 0x4000)
        {
            chunksize = value;
            chunksize &= 0xffff;
        }
        else if(tag == CODEC_TAG_INDEX)
        {
            chunksize = value;
            chunksize &= 0xffff;
        }
        else
        {
            chunksize = 0;
        }

        if((int)(tag) <= ((int)CODEC_TAG_LAST_NON_SIZED) || tag & 0x6000)
        {
            int skip = 1;
            error = 0;

            if(tag == (int)findtag)
            {
                *retvalue = value;
                ret = pinput->lpCurrentWord;
                break;
            }

            if((tag & 0xff00) == 0x2200) //sample size
            {
                chunksize = 0; // don't test against pinput->nWordsUsed, as we might be only reader enough for metadata only.
                skip = 0;
            }
            if((tag & 0xff00) == 0x2300) //uncompressed sample size
            {
                skip = 1;
            }
            if((tag & 0xff00) == 0x2100) //level
                skip = 0;

            if(chunksize)
            {
                // NOTE(review): same ordering concern as in GetTuplet() —
                // the negative check follows the multiplication.
                if(chunksize*4 > pinput->nWordsUsed || chunksize < 0)
                {
                    break;
                }
                if(skip)
                {
                    //unsigned int *iptr = (unsigned int *)pinput->lpCurrentWord;
                    pinput->lpCurrentWord += chunksize*4;
                    pinput->nWordsUsed -= chunksize*4;
                }
            }
        }
        else
        {
            error = 1;
        }
    } while(tag != CODEC_TAG_GROUP_TRAILER && tag != CODEC_TAG_FRAME_TRAILER &&
            pinput->nWordsUsed>0 && !error);

    return ret;
}
// MergePreparator.h
//
// Created by kilian on 10/03/17.
//

#ifndef STERMPARSER_MERGEPREPARATOR_H
#define STERMPARSER_MERGEPREPARATOR_H

#include <memory>
#include "GrammarInfo.h"
#include "LatentAnnotation.h"
#include "TrainingCommon.h"
#include <numeric>
#include <omp.h>

namespace Trainer {
    // Maps a flat list of merge-Δs to the merge threshold to use.
    // NOTE(review): std::function needs <functional>; presumably pulled in
    // transitively via one of the project headers — confirm.
    typedef std::function<double(const ::std::vector<double>&)> ThresholdFunction;

    /**
     * Abstract base for strategies that decide which latent-annotation
     * splits of each nonterminal should be merged back together.
     */
    class MergePreparator {
    protected:
        std::shared_ptr<const GrammarInfo2> grammarInfo;
        const bool debug;  // when true, merge-Δs are dumped to std::cerr

        /**
         * Builds MergeInfo according to merge-Δs and threshold.
         * If (merge-Δ > 1 or the split of a start symbol is concerned)
         * then the split is always merged.
         *
         * All Δs and the threshold are in log space.
         */
        MergeInfo build_merge_info(
                const std::vector<std::vector<double>> &&merge_factors
                , const double merge_threshold
                , const std::vector<std::vector<double>> &&merge_delta
                , const std::vector<size_t> &nontSplits
        ) {
            std::vector<std::vector<std::vector<size_t>>> mergeSelection;
            std::vector<size_t> nontSplitsAfterMerge;
            unsigned nont = 0;
            unsigned merges = 0;
            unsigned splits = 0;

            if (debug) std::cerr << "merge deltas: ";

            for (const auto &delta : merge_delta) {
                if (debug) std::cerr << " { ";
                mergeSelection.push_back(std::vector<std::vector<size_t>>());
                const size_t halfSplits = nontSplits[nont] / 2;
                for (size_t split = 0; split < halfSplits; ++split) {
                    if (debug) std::cerr << delta[split] << " ";
                    // merge if Δ >= merge_threshold * 0.999, i.e. log(Δ) >= log(θ) + log(0.999) (logarithmic)
                    if (delta[split] >= merge_threshold + std::log(0.999)
                        // always merge if Δ >= 1, i.e. log(Δ) >= 0 + log(0.999)
                        || delta[split] >= std::log(0.999)
                        // always merge initial symbol
                        || grammarInfo->start == nont) {
                        // merged: split i and its sibling i + halfSplits become one group
                        mergeSelection.back().emplace_back();
                        mergeSelection.back().back().push_back(split);
                        mergeSelection.back().back().push_back(split + halfSplits);
                        ++merges;
                    } else {
                        // kept apart: each split stays in its own singleton group
                        mergeSelection.back().emplace_back(1, split);
                        mergeSelection.back().emplace_back(1, split + halfSplits);
                        ++splits;
                    }
                }
                if (debug) std::cerr << " } ";
                ++nont;
                nontSplitsAfterMerge.push_back(mergeSelection.back().size());
            }
            if (debug) std::cerr << std::endl;

            std::cerr << "Merging " << merges << " of " << merges + splits << " splits. Merge threshold is "
                      << merge_threshold << std::endl;

            return MergeInfo(std::move(mergeSelection), std::move(nontSplitsAfterMerge), std::move(merge_factors));
        }

    public:
        MergePreparator(std::shared_ptr<const GrammarInfo2> grammarInfo, bool debug = false)
                : grammarInfo(grammarInfo), debug(debug) {}

        // Computes the MergeInfo (merge groups, post-merge split counts and
        // merge factors) for the given latent annotation.
        virtual MergeInfo merge_prepare(const LatentAnnotation &latentAnnotation) = 0;

        // Optional hook; the default strategy ignores the threshold function.
        virtual void setMergeThresholdFunction(ThresholdFunction /*thresholdFunction*/) {};
    };

    /**
     * Merges none of the splits, except for start symbol whose splits are always merged.
     */
    class MergeNothingMergePreparator : public MergePreparator {
    public:
        MergeNothingMergePreparator(std::shared_ptr<const GrammarInfo2> grammarInfo, bool debug = false)
                : MergePreparator(grammarInfo, debug) {};

        MergeInfo merge_prepare(const LatentAnnotation &latentAnnotation) {
            std::vector<std::vector<double>> mergeFactors;
            std::vector<std::vector<double>> mergeDelta;

            // Δ = log(0.4) < θ = log(0.5) for every split, so nothing is
            // merged by build_merge_info (except the start symbol rule).
            for (auto splits : latentAnnotation.nonterminalSplits) {
                mergeFactors.emplace_back(splits, 0.5);
                mergeDelta.emplace_back(splits / 2, std::log(0.4));
            }

            double merge_threshold = std::log(0.5);

            return build_merge_info(
                    std::move(mergeFactors)
                    , merge_threshold
                    , std::move(mergeDelta)
                    , latentAnnotation.nonterminalSplits
            );
        }
    };

    /**
     * Shared machinery for merge strategies that estimate merge-Δs from
     * inside/outside weights over the training traces. Subclasses only
     * choose the threshold (see computeMergeThreshold()).
     */
    template<typename Nonterminal, typename TraceID>
    class DefaultMergePreparator : public MergePreparator {
    protected:
        using TraceIterator = ConstManagerIterator<Trace<Nonterminal, TraceID>>;
        const TraceManagerPtr<Nonterminal, EdgeLabelT> traceManager;
        std::shared_ptr<StorageManager> storageManager;
        const unsigned threads;  // OpenMP thread count for the trace loop

        // Cached per-trace inside/outside weight maps, reused across calls
        std::vector<MAPTYPE<Element<Node<Nonterminal>>, WeightVector>> tracesInsideWeights;
        std::vector<MAPTYPE<Element<Node<Nonterminal>>, WeightVector>> tracesOutsideWeights;

    public:
        DefaultMergePreparator(
                TraceManagerPtr<Nonterminal, EdgeLabelT> traceManager
                , std::shared_ptr<StorageManager> storageManager
                , std::shared_ptr<const GrammarInfo2> grammarInfo
                , unsigned threads = 1
                , bool debug = false
        )
                : MergePreparator(grammarInfo, debug), traceManager(traceManager),
                  storageManager(storageManager), threads(threads) {}

        virtual MergeInfo merge_prepare(const LatentAnnotation &latentAnnotation) {
            // setup temporary data structures
            if (tracesInsideWeights.size() < traceManager->size())
                tracesInsideWeights.resize(traceManager->size());
            if (tracesOutsideWeights.size() < traceManager->size())
                tracesOutsideWeights.resize(traceManager->size());

            std::vector<WeightVector> nonterminalFrequencies{estimateNontFreqLA(latentAnnotation)};
            std::vector<std::vector<double>> mergeFactors{computeMergeFactors(nonterminalFrequencies)};

            // start each Δ at log(1.0) == 0; computeMergeDeltas accumulates into it
            std::vector<std::vector<double>> mergeDelta;
            for (auto split : latentAnnotation.nonterminalSplits) {
                mergeDelta.emplace_back(split / 2, std::log(1.0));
            }

            computeMergeDeltas(
                    mergeFactors
                    , latentAnnotation.nonterminalSplits
                    , mergeDelta
            );

            const double merge_threshold = computeMergeThreshold(mergeDelta);

            // clean up
            storageManager->free_weight_maps(tracesInsideWeights);
            storageManager->free_weight_maps(tracesOutsideWeights);
            for (WeightVector &weightVector : nonterminalFrequencies) {
                storageManager->free_weight_vector(weightVector);
            }
            nonterminalFrequencies.clear();

            return build_merge_info(
                    std::move(mergeFactors)
                    , merge_threshold
                    , std::move(mergeDelta)
                    , latentAnnotation.nonterminalSplits
            );
        }

    protected:
        /**
         * What this function computes corresponds to the mergeWeights of the Berkeley parser.
         * @param latentAnnotation
         * @return per-nonterminal vectors of normalized in·out frequencies
         */
        inline std::vector<WeightVector> estimateNontFreqLA(const LatentAnnotation &latentAnnotation) {
            // Reduction helper: deep-copyable accumulator of per-nonterminal
            // frequency vectors, required by the OpenMP declare reduction.
            struct NontFreq {
                std::shared_ptr<StorageManager> storageManager;
                std::vector<WeightVector> nonterminalFrequencies;

                NontFreq(
                        std::shared_ptr<StorageManager> storageManager
                        , std::vector<WeightVector> &&nonterminalFrequencies
                ) : storageManager(storageManager), nonterminalFrequencies(nonterminalFrequencies) {};

                // Deep copy: each private OpenMP copy gets its own weight vectors
                NontFreq(const NontFreq &other) : storageManager(other.storageManager) {
                    for (const WeightVector &vector : other.nonterminalFrequencies) {
                        nonterminalFrequencies.push_back(
                                storageManager->create_weight_vector<WeightVector>(vector.size()));
                        nonterminalFrequencies.back() = vector;
                    }
                }

                // Elementwise sum, used as the OpenMP reduction combiner
                NontFreq &operator+=(const NontFreq &other) {
                    std::transform(
                            other.nonterminalFrequencies.cbegin()
                            , other.nonterminalFrequencies.cend()
                            , nonterminalFrequencies.begin()
                            , nonterminalFrequencies.begin()
                            , [](const WeightVector &x, const WeightVector &y) { return x + y; }
                    );
                    return *this;
                }
            };

            NontFreq nonterminalFrequencies(storageManager, initialize_nonterminal_frequencies(latentAnnotation));

            // computing in(A_x) * out(A_x) for every A ∈ N and x ∈ X_A
#ifdef _OPENMP
            omp_set_num_threads(threads);
#endif
#pragma omp declare reduction(+ : NontFreq : omp_out += omp_in) initializer (omp_priv = omp_orig)
#pragma omp parallel for schedule(dynamic, 10) reduction(+:nonterminalFrequencies)
            for (TraceIterator traceIterator = traceManager->cbegin();
                 traceIterator < traceManager->cend(); ++traceIterator) {
                // (Re)build the per-node weight maps if the cached ones do not
                // match this trace's hypergraph
                if (tracesInsideWeights[traceIterator - traceManager->cbegin()].size() !=
                    traceIterator->get_hypergraph()->size() or
                    tracesOutsideWeights[traceIterator - traceManager->cbegin()].size() !=
                    traceIterator->get_hypergraph()->size()) {
                    tracesInsideWeights[traceIterator - traceManager->cbegin()].clear();
                    tracesOutsideWeights[traceIterator - traceManager->cbegin()].clear();
                    for (const auto &node : *(traceIterator->get_hypergraph())) {
                        tracesInsideWeights[traceIterator - traceManager->cbegin()].emplace(
                                node
                                , storageManager->create_weight_vector<WeightVector>(
                                        latentAnnotation.nonterminalSplits[node->get_label_id()]));
                        tracesOutsideWeights[traceIterator - traceManager->cbegin()].emplace(
                                node
                                , storageManager->create_weight_vector<WeightVector>(
                                        latentAnnotation.nonterminalSplits[node->get_label_id()]));
                    }
                }
                traceIterator->io_weights_la(
                        latentAnnotation
                        , tracesInsideWeights[traceIterator - traceManager->cbegin()]
                        , tracesOutsideWeights[traceIterator - traceManager->cbegin()]
                        , true
                );

                const auto &insideWeights = tracesInsideWeights[traceIterator - traceManager->cbegin()];
                const auto &outsideWeights = tracesOutsideWeights[traceIterator - traceManager->cbegin()];

                for (const Element<Node<Nonterminal>> &node : *(traceIterator->get_hypergraph())) {
                    const auto &insideWeight = insideWeights.at(node);
                    const auto &outsideWeight = outsideWeights.at(node);

                    // normalize in·out over the node's splits
                    const auto vals = insideWeight * outsideWeight;
                    Eigen::Tensor<double, 0> denominator = vals.sum();
                    Eigen::Tensor<double, 1> fraction
                            = vals.unaryExpr([denominator](double x) { return x / denominator(0); });

                    // skip degenerate nodes (zero or non-finite totals)
                    Eigen::Tensor<bool, 0> nan = fraction.isnan().any();
                    Eigen::Tensor<bool, 0> inf = fraction.isinf().any();
                    if (not nan(0) and not inf(0)) {
                        auto &target = nonterminalFrequencies.nonterminalFrequencies[node->get_label_id()];
                        target += fraction * traceIterator->get_frequency();
                    }
                }
            }

            return nonterminalFrequencies.nonterminalFrequencies;
        }

        // Allocates one zeroed frequency vector per nonterminal, sized by
        // that nonterminal's current number of splits.
        inline std::vector<WeightVector>
        initialize_nonterminal_frequencies(const LatentAnnotation &latentAnnotation) {
            std::vector<WeightVector> nonterminalFrequencies;
            for (size_t nont = 0; nont < latentAnnotation.nonterminalSplits.size(); ++nont) {
                WeightVector mw
                        = storageManager->create_weight_vector<WeightVector>(
                                latentAnnotation.nonterminalSplits[nont]);
                mw.setZero();
                nonterminalFrequencies.push_back(mw);
            }
            return nonterminalFrequencies;
        }

        /**
         * @param nontFreqLA (== mergeWeight in Berkeley parser)
         * @return the p from the Berkeley parser: relative weight of each
         *         split vs. its sibling (defaults to 0.5/0.5 when the
         *         combined weight is zero or NaN)
         */
        inline std::vector<std::vector<double>> computeMergeFactors(const std::vector<WeightVector> &nontFreqLA) {
            std::cerr << "Computing merge factors." << std::endl;
            std::vector<std::vector<double>> p;
            for (auto las_weights : nontFreqLA) {
                p.emplace_back(std::vector<double>(las_weights.dimension(0)));
                const long int half_splits{las_weights.dimension(0) / 2};
                for (unsigned i = 0; i < half_splits; ++i) {
                    double combined_weight = las_weights(i) + las_weights(i + half_splits);
                    if ((not std::isnan(combined_weight)) and combined_weight > 0) {
                        p.back()[i] = las_weights(i) / combined_weight;
                        p.back()[i + half_splits] = las_weights(i + half_splits) / combined_weight;
                    } else {
                        p.back()[i] = 0.5;
                        p.back()[i + half_splits] = 0.5;
                    }
                }
            }
            return p;
        }

        /**
         * Compute merge-Δ for each split. This is an approximation of likelihood after merge
         * divided by likelihood before merge.
         * Splits with high merge-Δ should be merged, splits with low merge-Δ should be kept.
         *
         * Accumulates log(Q) per trace node into mergeDelta (log space).
         */
        inline void computeMergeDeltas(
                const std::vector<std::vector<double>> &p
                , const std::vector<size_t> &nontDimensions
                , std::vector<std::vector<double>> &mergeDelta
        ) const {
            // prefix and postfix sums are used for efficient computation of
            // s(i) = sum_{j ∈ {0, …, i-1, i+1, …, n-1}} a_j
            // for each i ∈ {0, …, n-1}
            std::vector<double> prefixSums;
            std::vector<double> postfixSums;

            for (TraceIterator trace_id = traceManager->cbegin();
                 trace_id < traceManager->cend(); ++trace_id) {
                const MAPTYPE<Element<Node<Nonterminal>>, WeightVector> &insideWeights
                        = tracesInsideWeights[trace_id - traceManager->cbegin()];
                const MAPTYPE<Element<Node<Nonterminal>>, WeightVector> &outsideWeights
                        = tracesOutsideWeights[trace_id - traceManager->cbegin()];

                for (const Element<Node<Nonterminal>> &node : *(trace_id->get_hypergraph())) {
                    const size_t nont = node->get_label_id();
                    const size_t nontDim = nontDimensions[nont];
                    const size_t halfDim = nontDim / 2;

                    const auto &insideWeight = insideWeights.at(node);
                    const auto &outsideWeight = outsideWeights.at(node);

                    prefixSums.resize(halfDim, 0.0);
                    postfixSums.resize(halfDim, 0.0);
                    double denominator = 0;

                    // contribution of the last index (not covered by the
                    // prefix loop below)
                    {
                        const size_t idx = halfDim - 1;
                        const double in1 = insideWeight(idx);
                        const double in2 = insideWeight(idx + halfDim);
                        const double out1 = outsideWeight(idx);
                        const double out2 = outsideWeight(idx + halfDim);
                        denominator += in1 * out1 + in2 * out2;
                    }

                    for (size_t idx = 0; idx < halfDim - 1; ++idx) {
                        const double in1 = insideWeight(idx);
                        const double in2 = insideWeight(idx + halfDim);
                        const double out1 = outsideWeight(idx);
                        const double out2 = outsideWeight(idx + halfDim);
                        prefixSums[idx + 1] = prefixSums[idx] + in1 * out1 + in2 * out2;
                        denominator += in1 * out1 + in2 * out2;
                    }

                    for (size_t idx = halfDim - 1; idx > 0; --idx) {
                        const double in1 = insideWeight(idx);
                        const double in2 = insideWeight(idx + halfDim);
                        const double out1 = outsideWeight(idx);
                        const double out2 = outsideWeight(idx + halfDim);
                        postfixSums[idx - 1] = postfixSums[idx] + in1 * out1 + in2 * out2;
                    }

                    // inside weight of some nodes can be zero in certain LA-dimensions
                    // since LA-rule weights may converge to zero
                    // we ignore those dimensions in Δ computation
                    if (denominator == 0)
                        continue;

                    for (unsigned idx = 0; idx < halfDim; ++idx) {
                        const double in1 = insideWeight(idx);
                        const double in2 = insideWeight(idx + halfDim);
                        const double out1 = outsideWeight(idx);
                        const double out2 = outsideWeight(idx + halfDim);

                        const double p1 = p[nont][idx];
                        const double p2 = p[nont][idx + halfDim];

                        // likelihood with splits idx and idx+halfDim merged,
                        // all other splits unchanged
                        const double inMerged = (p1 * in1) + (p2 * in2);
                        const double outMerged = out1 + out2;

                        const double Q = (prefixSums[idx] + postfixSums[idx] + inMerged * outMerged) / denominator;
                        if (std::isnan(Q)) {
                            std::cerr << "bad fraction " << Q << " where" << std::endl;
                            std::cerr << "prefix  " << prefixSums[idx] << std::endl;
                            std::cerr << "postfix " << postfixSums[idx] << std::endl;
                            std::cerr << "merged  " << inMerged * outMerged << std::endl;
                            std::cerr << "denom   " << denominator << std::endl;
                            assert(!std::isnan(Q));
                        }

                        double &delta = mergeDelta[nont][idx];
                        delta += std::log(Q);
                    }

                    prefixSums.clear();
                    postfixSums.clear();
                }
            }
        }

        // Strategy hook: derive the merge threshold from the computed Δs.
        virtual double computeMergeThreshold(const std::vector<std::vector<double>> &mergeDelta) = 0;
    };

    /**
     * Merge all splits, where merge-Δ is above given threshold.
*/ template<typename Nonterminal, typename TraceID> class ThresholdMergePreparator : public DefaultMergePreparator<Nonterminal, TraceID> { const double merge_threshold; public: ThresholdMergePreparator( TraceManagerPtr <Nonterminal, TraceID> traceManager , std::shared_ptr<StorageManager> storageManager , std::shared_ptr<const GrammarInfo2> grammarInfo , double merge_threshold , unsigned threads = 1 , bool debug = false ) : DefaultMergePreparator<Nonterminal, TraceID>( traceManager , storageManager , grammarInfo , threads , debug ), merge_threshold(merge_threshold) {} protected: double computeMergeThreshold(const std::vector<std::vector<double>> &merge_delta) { std::cerr << "Selecting merges "; std::cerr << "above threshold " << merge_threshold; std::cerr << std::endl; return merge_threshold; } }; /** * Merges the first mergePercent % of splits ordered by merge-Δ in descending order. */ template<typename Nonterminal, typename TraceID> class PercentMergePreparator : public DefaultMergePreparator<Nonterminal, TraceID> { const double mergePercent; public: PercentMergePreparator( TraceManagerPtr <Nonterminal, TraceID> traceManager , std::shared_ptr<StorageManager> storageManager , std::shared_ptr<const GrammarInfo2> grammarInfo , double mergePercent , unsigned threads = 1 , bool debug = false ) : DefaultMergePreparator<Nonterminal, TraceID>(traceManager, storageManager, grammarInfo, threads, debug), mergePercent(mergePercent) {} protected: double computeMergeThreshold(const std::vector<std::vector<double>> &mergeDelta) { std::cerr << "Selecting merges "; std::cerr << "best " << mergePercent << " % "; std::cerr << std::endl; std::vector<double> orderedMergeDeltas; // order merges according to likelihood_loss for (const auto &delta : mergeDelta) { orderedMergeDeltas.insert( std::end(orderedMergeDeltas) , std::begin(delta) , std::end(delta)); } std::sort(std::begin(orderedMergeDeltas), std::end(orderedMergeDeltas), std::greater<double>()); std::cerr << "ordered merge Δs: 
"; for (auto weight : orderedMergeDeltas) std::cerr << weight << " "; std::cerr << std::endl; // todo: option to skip over merge_weights >= 1 size_t index = (size_t) (mergePercent / 100.0 * orderedMergeDeltas.size()); if (index > orderedMergeDeltas.size()) index = orderedMergeDeltas.size() - 1; std::cerr << "index for ordered merges " << index << " / " << orderedMergeDeltas.size() << std::endl; return orderedMergeDeltas[index]; } }; /** * Merges nonterminals according to the principle stated in www.aclweb.org/anthology/E14-1015 * * Merge-Δs are computed for each pair {i,j} of latent annotations of some nonterminal. * Then a fully connected, undirected graph G with latent annotations as nodes is constructed. * Each edge {i,j} is weighted by w=Δ({i,j}) and edges with w <= threshold are removed. * The (strongly) connected components of G are the new latent annotations. * Merge weights are chosen proportional to the expected frequency of the annotations. * * @tparam Nonterminal * @tparam TraceID */ template <typename Nonterminal, typename TraceID> class SCCMerger : public DefaultMergePreparator<Nonterminal, TraceID> { std::vector<size_t> relevantNonterminals; ThresholdFunction thresholdFunction; public: SCCMerger( TraceManagerPtr <Nonterminal, TraceID> traceManager , std::shared_ptr<StorageManager> storageManager , std::shared_ptr<const GrammarInfo2> grammarInfo , std::vector<size_t> relevantNonterminals , ThresholdFunction thresholdFunction , unsigned threads = 1 , bool debug = false ) : DefaultMergePreparator<Nonterminal, TraceID> ( traceManager , storageManager , grammarInfo , threads , debug ), relevantNonterminals(relevantNonterminals), thresholdFunction(thresholdFunction) {}; MergeInfo merge_prepare(const LatentAnnotation &latentAnnotation) { // setup temporary data structures if (this->tracesInsideWeights.size() < this->traceManager->size()) this->tracesInsideWeights.resize(this->traceManager->size()); if (this->tracesOutsideWeights.size() < 
this->traceManager->size()) this->tracesOutsideWeights.resize(this->traceManager->size()); std::vector<WeightVector> nonterminalFrequencies{this->estimateNontFreqLA(latentAnnotation)}; // computing Δ per nont and pair of LAs j and i (where j > i) std::vector<std::vector<std::vector<double>>> merge_delta; computePairwiseMergeDeltas(nonterminalFrequencies, latentAnnotation.nonterminalSplits, merge_delta); auto stats = mergeWeightStatistics(merge_delta); const double merge_threshold = thresholdFunction(stats); std::cerr << "SCC merging with threshold: " << merge_threshold << std::endl; // ingredients for the MergeInfo std::vector<std::vector<std::vector<size_t>>> mergeSources; std::vector<size_t> nontSplitsAfterMerge; std::vector<std::vector<double>> mergeFactors; for (size_t nont = 0; nont < latentAnnotation.nonterminalSplits.size(); ++nont) { // check if nont ∈ relevantNonterminals bool relevant = false; for (size_t nont2 : relevantNonterminals) { if (nont2 == nont) relevant = true; if (nont2 >= nont) break; } if (relevant) { // lazily build graph by pairwise connecting all LAs of nont (implicit) // we only add an edge to the representation, if it is not removed in the next step // the graph is represented by two maps encoding maximal SCCs, // satisfying // 1. j ∈ edges[i] if i < j and (i,j) are connected in graph // 2. 
inSCC[i] = i or i ∈ edges[inSCC[i]] MAPTYPE<size_t, std::vector<size_t >> edges; MAPTYPE<size_t, size_t> inSCC; // determine weight Δ for each edge (i,j) in graph and remove edge if Δ <= threshold // i.e., we add i and j to the same SCC if Δ > threshold for (size_t i = 0; i < latentAnnotation.nonterminalSplits[nont]; ++i) { for (size_t j = i + 1; j < latentAnnotation.nonterminalSplits[nont]; ++j) { if (merge_delta[nont][j][i] > merge_threshold) { if (not(inSCC.count(i) or inSCC.count(j))) { edges[i].push_back(j); inSCC[i] = i; inSCC[j] = i; } else if (not inSCC.count(j)) { inSCC[j] = inSCC[i]; edges[inSCC[i]].push_back(j); } else if (not inSCC.count(i)) { inSCC[i] = inSCC[j]; edges[inSCC[j]].push_back(i); } else { if (inSCC[i] == inSCC[j]) { // nothing needs to be done! } else if (inSCC[i] < inSCC[j]) { const size_t old_scc_j = inSCC[j]; for (size_t k : edges[old_scc_j]) { edges[inSCC[i]].push_back(k); inSCC[k] = inSCC[i]; } edges[inSCC[i]].push_back(old_scc_j); inSCC[old_scc_j] = inSCC[i]; edges.erase(old_scc_j); } else { const size_t old_scc_i = inSCC[i]; for (size_t k : edges[old_scc_i]) { edges[inSCC[j]].push_back(k); inSCC[k] = inSCC[j]; } edges[inSCC[j]].push_back(old_scc_i); inSCC[old_scc_i] = inSCC[j]; edges.erase(old_scc_i); } } } } } // new LAs = maximal SCCs and // set mergeFactor proportional to nontFreq std::vector<std::vector<size_t>> mergeLists; std::vector<double> laMergeFactors(latentAnnotation.nonterminalSplits[nont]); size_t merged_splits = 0; for (auto key_value_pair : edges) { if (inSCC[key_value_pair.first] != key_value_pair.first) continue; mergeLists.push_back(key_value_pair.second); mergeLists.back().push_back(key_value_pair.first); std::sort(mergeLists.back().begin(), mergeLists.back().end()); merged_splits += mergeLists.back().size(); double normalizer = 0.0; for (auto la : mergeLists.back()) normalizer += nonterminalFrequencies[nont](la); if (normalizer > 0 and not std::isnan(normalizer) and not std::isnan(normalizer)) for (auto la : 
mergeLists.back()) { /* for debugging if (nont == 179) std::cerr << nont << " la: " << la << " freq: " << nonterminalFrequencies[nont](la) << " n: " << normalizer << std::endl; */ laMergeFactors[la] = nonterminalFrequencies[nont](la) / normalizer; } else for (auto la : mergeLists.back()) { laMergeFactors[la] = 1 / mergeLists.back().size(); } } // add all singletons for (size_t la = 0; la < latentAnnotation.nonterminalSplits[nont]; ++la) { if (not inSCC.count(la)) { mergeLists.emplace_back(1, la); laMergeFactors[la] = 1.0; ++merged_splits; } } /*// for debugging for (size_t i = 0; i < mergeLists.size(); ++i) { std::cerr << nont << ": " << i << " [ "; for (auto elem : mergeLists[i]) std::cerr << elem << ", "; std::cerr << "]" << std::endl; } */ if (merged_splits != latentAnnotation.nonterminalSplits[nont]) { for (size_t la = 0; la < latentAnnotation.nonterminalSplits[nont]; ++la) { if (inSCC.count(la)) std::cerr << nont << "-" << la << " is in SCC " << inSCC[la] << std::endl; else std::cerr << nont << "-" << la << " is not in any SCC" << std::endl; if (edges.count(la)) { std::cerr << nont << "-" << la << " has edges to "; for (auto e : edges[la]) std::cerr << e << " "; std::cerr << std::endl; } else std::cerr << nont << "-" << la << " has no edges" << std::endl; } abort(); } nontSplitsAfterMerge.push_back(mergeLists.size()); mergeSources.push_back(mergeLists); mergeFactors.push_back(laMergeFactors); // if nont not in relevant items } else { size_t n = latentAnnotation.nonterminalSplits.at(nont); nontSplitsAfterMerge.push_back(n); mergeFactors.emplace_back(n, 1.0); std::vector<std::vector<size_t>> mergeLists; for (size_t la = 0; la < n; ++la) { mergeLists.emplace_back(1, la); } mergeSources.push_back(mergeLists); /*// for debugging for (size_t i = 0; i < mergeLists.size(); ++i) { std::cerr << nont << ": " << i << " [ "; for (auto elem : mergeLists[i]) std::cerr << elem << ", "; std::cerr << "]" << std::endl; } */ } } // clean up 
this->storageManager->free_weight_maps(this->tracesInsideWeights); this->storageManager->free_weight_maps(this->tracesOutsideWeights); for (WeightVector &weightVector : nonterminalFrequencies) { this->storageManager->free_weight_vector(weightVector); } nonterminalFrequencies.clear(); return MergeInfo(mergeSources, nontSplitsAfterMerge, mergeFactors); } void setMergeThresholdFunction(ThresholdFunction thresholdFunction) { this->thresholdFunction = thresholdFunction; } private: /** * Compute merge-Δ for each pair of latent annotation. This is an approximation of likelihood after merge * divided by likelihood before merge. * Splits with high merge-Δ should be merged, splits with low merge-Δ should be kept. */ inline void computePairwiseMergeDeltas( const std::vector<WeightVector> & expectedFrequencies , const std::vector<size_t> &nontDimensions , std::vector<std::vector<std::vector<double>>> &mergeDelta ) const { mergeDelta.clear(); for (size_t nont = 0; nont < nontDimensions.size(); ++nont){ mergeDelta.emplace_back(0); for (size_t j = 0; j < nontDimensions[nont]; ++ j) { mergeDelta.back().emplace_back(j, 0.0); } } for (typename DefaultMergePreparator<Nonterminal, TraceID>::TraceIterator trace_id = this->traceManager->cbegin() ; trace_id < this->traceManager->cend() ; ++trace_id) { const MAPTYPE<Element<Node<Nonterminal>>, WeightVector> &insideWeights = this->tracesInsideWeights[trace_id - this->traceManager->cbegin()]; const MAPTYPE<Element<Node<Nonterminal>>, WeightVector> &outsideWeights = this->tracesOutsideWeights[trace_id - this->traceManager->cbegin()]; for (const Element<Node<Nonterminal>> &node : *(trace_id->get_hypergraph())) { const size_t nont = node->get_label_id(); const size_t nontDim = nontDimensions[nont]; const auto &insideWeight = insideWeights.at(node); const auto &outsideWeight = outsideWeights.at(node); double denominator = 0.0; for (size_t i = 0; i < nontDim; ++i) { const double in = insideWeight(i); const double out = outsideWeight(i); 
denominator += in * out; } if ( denominator <= 0 or std::isinf(denominator) or std::isnan(denominator)) continue; double prefix_sum = 0; for (size_t i = 0; i < nontDim; ++i) { const double in1 = insideWeight(i); const double out1 = outsideWeight(i); double infix_sum = 0; for (size_t j = i + 1; j < nontDim; ++j) { const double in2 = insideWeight(j); const double out2 = outsideWeight(j); const double f_norm = expectedFrequencies[nont](i) + expectedFrequencies[nont](j); const double p1 = expectedFrequencies[nont](i) / f_norm; const double p2 = expectedFrequencies[nont](j) / f_norm; const double inMerged = (p1 * in1) + (p2 * in2); const double outMerged = out1 + out2; double postfix_sum = 0; for (size_t k = j + 1; k < nontDim; ++k) { postfix_sum += insideWeight(k) * outsideWeight(k); } const double others = prefix_sum + infix_sum + postfix_sum; const double Q = (others + inMerged * outMerged) / denominator; if (std::isnan(Q)) { std::cerr << "bad fraction " << Q << " where" << std::endl; std::cerr << "merged " << inMerged * outMerged << std::endl; std::cerr << "denom " << denominator << std::endl; assert(!std::isnan(Q)); } double &delta = mergeDelta[nont][j][i]; delta += std::log(Q); infix_sum += in2 * out2; } prefix_sum += in1 * out1; } } } // for (auto nont = 0; nont < nontDimensions.size(); ++nont) { // for (size_t j = 0; j < nontDimensions[nont]; ++j) // for (size_t i = 0; i < j; ++i) // std::cerr << "(" << nont << ": " << j << " vs. 
" << i << ": " << mergeDelta[nont][j][i] << ") "; // } // std::cerr << std::endl; } // not used in this class double computeMergeThreshold(const std::vector<std::vector<double>> &) { return 0.0; }; // compute merge Δ statistics std::vector<double> mergeWeightStatistics(const std::vector<std::vector<std::vector<double>>>& mergeDeltas) { double min {std::numeric_limits<double>::max()}; double max {std::numeric_limits<double>::min()}; double sum {0.0}; size_t count {0}; for (auto nont_vec : mergeDeltas) { for (auto la_1 : nont_vec){ for (auto la_1_2_delta : la_1) { if (la_1_2_delta > max) max = la_1_2_delta; if (la_1_2_delta < min) min = la_1_2_delta; sum += la_1_2_delta; count++; } } } const double mean {sum / count}; double above_mean_sum {0.0}; size_t above_mean_count {0}; double below_mean_sum {0.0}; size_t below_mean_count {0}; for (auto nont_vec : mergeDeltas) { for (auto la_1 : nont_vec){ for (auto la_1_2_delta : la_1) { if (la_1_2_delta > mean) { above_mean_sum += la_1_2_delta; above_mean_count++; } else if (la_1_2_delta < mean) { below_mean_sum += la_1_2_delta; below_mean_count++; } } } } const double third_quartile {above_mean_count > 0 ? above_mean_sum / above_mean_count : mean}; const double first_quartile {below_mean_count > 0 ? below_mean_sum / below_mean_count : mean}; std::cerr << "SCC merge Δ statistics {"; std::cerr << "min: " << min << " first quartile: " << first_quartile << " mean: " << mean << " third quartile: " << third_quartile << " max: " << max << " }" << std::endl; return {min, first_quartile, mean, third_quartile, max}; } }; } #endif //STERMPARSER_MERGEPREPARATOR_H
// ==== concatenated file: GB_unop__identity_fc32_fp32.c ====
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fc32_fp32)
// op(A') function:  GB (_unop_tran__identity_fc32_fp32)

// C type:   GxB_FC32_t
// A type:   float
// cast:     GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop:  cij = aij

// identity operator applied to float input, typecast to single complex output
// (real part = aij, imaginary part = 0)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    float aij = Ax [pA] ;  \
    /* Cx [pC] = op (cast (aij)) */                 \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;  \
    Cx [pC] = z ;          \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fc32_fp32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fc32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared; GB_CAST_OP above supplies the op
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
// ==== concatenated file: updatePCG.c ====
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ extern "C" void BPUpdatePCG(const dlong & N, const dlong & Nblocks, const dfloat * __restrict__ cpu_invDegree, const dfloat * __restrict__ cpu_p, const dfloat * __restrict__ cpu_Ap, const dfloat & alpha, dfloat * __restrict__ cpu_x, dfloat * __restrict__ cpu_r, dfloat * __restrict__ redr){ dfloat rdotr = 0; const dlong Nelements = N/p_Np; #pragma omp parallel for reduction(+: rdotr) for(dlong e=0;e<Nelements;++e){ for(int i=0;i<p_Np;++i){ const dlong n = e*p_Np+i; cpu_x[n] += alpha*cpu_p[n]; const dfloat rn = cpu_r[n] - alpha*cpu_Ap[n]; rdotr += rn*rn*cpu_invDegree[n]; cpu_r[n] = rn; } } redr[0] = rdotr; } extern "C" void BPMultipleUpdatePCG( const dlong & N, const dlong & offset, const dlong & Nblocks, const dfloat * __restrict__ cpu_invDegree, const dfloat * __restrict__ cpu_p, const dfloat * __restrict__ cpu_Ap, const dfloat alpha, dfloat * __restrict__ cpu_x, dfloat * __restrict__ cpu_r, dfloat * __restrict__ redr){ dfloat rdotr = 0; const dlong Nelements = N/p_Np; for(int fld=0; fld<p_Nfields; fld++){ #pragma omp parallel for reduction(+: rdotr) for(dlong e=0;e<Nelements;++e){ for(int i=0;i<p_Np;++i){ const dlong n = e*p_Np+i + fld*offset; cpu_x[n] += alpha*cpu_p[n]; const dfloat rn = cpu_r[n] - alpha*cpu_Ap[n]; rdotr += rn*rn*cpu_invDegree[e*p_Np+i]; cpu_r[n] = rn; } } } redr[0] = rdotr; }
// ==== concatenated file: tensor_cpu-inl.h ====
/*!
 *  Copyright (c) 2014 by Contributors
 * \file tensor_cpu-inl.h
 * \brief implementation of CPU host code
 * \author Bing Xu, Tianqi Chen
 */
#ifndef MSHADOW_TENSOR_CPU_INL_H_
#define MSHADOW_TENSOR_CPU_INL_H_
#include <cstring>
#include <functional>
#include <utility>
#include <vector>
#include "./base.h"
#include "./tensor.h"
#include "./packet-inl.h"
#include "./dot_engine-inl.h"

namespace mshadow {
// CPU backend: engine init/shutdown and device selection are no-ops
template<>
inline void InitTensorEngine<cpu>(int dev_id) {
}
template<>
inline void ShutdownTensorEngine<cpu>(void) {
}
template<>
inline void SetDevice<cpu>(int devid) {
}
// CPU streams carry no state; the BLAS/DNN handle flags are ignored here
template<>
inline Stream<cpu> *NewStream<cpu>(bool create_blas_handle,
                                   bool create_dnn_handle,
                                   int dev_id) {
  return new Stream<cpu>();
}
template<>
inline void DeleteStream<cpu>(Stream<cpu> *stream) {
  delete stream;
}
// prints a shape as a python-style tuple, e.g. (2,3) or (5,)
template<int ndim>
inline std::ostream &operator<<(std::ostream &os, const Shape<ndim> &shape) { // NOLINT(*)
  os << '(';
  for (int i = 0; i < ndim; ++i) {
    if (i != 0) os << ',';
    os << shape[i];
  }
  // python style tuple
  if (ndim == 1) os << ',';
  os << ')';
  return os;
}

// host (pinned when compiled for CUDA) memory allocation helpers
template<typename xpu>
inline void *AllocHost_(size_t size);
template<typename xpu>
inline void FreeHost_(void * dptr);

#ifdef __CUDACC__
template<>
inline void *AllocHost_<gpu>(size_t size) {
  void *dptr;
  // pinned, portable host memory so async GPU copies are possible
  MSHADOW_CUDA_CALL(cudaMallocHost(&dptr, size, cudaHostAllocPortable));
  return dptr;
}
template<>
inline void FreeHost_<gpu>(void *dptr) {
  MSHADOW_CUDA_CALL(cudaFreeHost(dptr));
}
#endif

template<>
inline void *AllocHost_<cpu>(size_t size) {
  size_t pitch;
  return packet::AlignedMallocPitch(&pitch, size, 1);
}
template<>
inline void FreeHost_<cpu>(void *dptr) {
  packet::AlignedFree(dptr);
}

// allocates contiguous (unpadded) host memory for a tensor
template<typename xpu, int dim, typename DType>
inline void AllocHost(Tensor<cpu, dim, DType> *obj) {
  obj->stride_ = obj->size(dim - 1);
  CHECK_EQ(obj->CheckContiguous(), true) << "AllocHost";
  void *dptr = AllocHost_<xpu>(obj->MSize() * sizeof(DType));
  obj->dptr_ = reinterpret_cast<DType*>(dptr);
}
template<typename xpu, int dim, typename DType>
inline void FreeHost(Tensor<cpu, dim, DType> *obj) {
  if (obj->dptr_ == NULL) {
    LOG(FATAL) << "FreeHost:: double free";
  }
  FreeHost_<xpu>(obj->dptr_);
  obj->dptr_ = NULL;
}

// allocates tensor storage; if pad, rows are pitched for aligned SSE access
template<int dim, typename DType>
inline void AllocSpace(Tensor<cpu, dim, DType> *obj, bool pad) {
  size_t pitch;
  void *dptr;
  if (pad) {
    dptr = packet::AlignedMallocPitch
        (&pitch, obj->size(dim - 1) * sizeof(DType), obj->shape_.FlatTo2D()[0]);
    obj->stride_ = static_cast<index_t>(pitch / sizeof(DType));
  } else {
    obj->stride_ = obj->size(dim - 1);
    dptr = packet::AlignedMallocPitch
        (&pitch, obj->shape_.Size() * sizeof(DType), 1);
  }
  obj->dptr_ = reinterpret_cast<DType*>(dptr);
}
// allocates a tensor and fills it with initv
template<typename Device, typename DType, int dim>
inline Tensor<Device, dim, DType> NewTensor(const Shape<dim> &shape,
                                            DType initv,
                                            bool pad,
                                            Stream<Device> *stream_) {
  Tensor<Device, dim, DType> obj(shape);
  obj.stream_ = stream_;
  AllocSpace(&obj, pad);
  MapExp<sv::saveto>(&obj, expr::ScalarExp<DType>(initv));
  return obj;
}
template<int dim, typename DType>
inline void FreeSpace(Tensor<cpu, dim, DType> *obj) {
  packet::AlignedFree(obj->dptr_);
  obj->dptr_ = NULL;
}
// copies src into dst; falls back to row-wise memcpy when strides differ
template<int dim, typename DType>
inline void Copy(Tensor<cpu, dim, DType> _dst,
                 const Tensor<cpu, dim, DType> &_src,
                 Stream<cpu> *stream) {
  CHECK_EQ(_dst.shape_, _src.shape_)
      << "Copy:shape mismatch:" << _dst.shape_ << " vs " << _src.shape_;
  if (_dst.CheckContiguous() && _src.CheckContiguous()) {
    memcpy(_dst.dptr_, _src.dptr_, sizeof(DType) * _dst.shape_.Size());
  } else {
    Tensor<cpu, 2, DType> dst = _dst.FlatTo2D();
    Tensor<cpu, 2, DType> src = _src.FlatTo2D();
    for (index_t y = 0; y < dst.size(0); ++y) {
      memcpy(dst[y].dptr_, src[y].dptr_, sizeof(DType) * dst.size(1));
    }
  }
}

// evaluates an expression plan element-wise into dst (2-D flattened view)
template<typename Saver, typename R, int dim,
         typename DType, typename E>
inline void MapPlan(TRValue<R, cpu, dim, DType> *dst,
                    const expr::Plan<E, DType> &plan) {
  Shape<2> shape = expr::ShapeCheck<dim, R>::Check(dst->self()).FlatTo2D();
  expr::Plan<R, DType> dplan =
expr::MakePlan(dst->self());
#if (MSHADOW_USE_CUDA == 0)
  #pragma omp parallel for
#endif
  // temp remove openmp, as default setting throttles CPU
  for (openmp_index_t y = 0; y < shape[0]; ++y) {
    for (index_t x = 0; x < shape[1]; ++x) {
      // trust your compiler! -_- they will optimize it
      Saver::template Save<DType>(dplan.REval(y, x), plan.Eval(y, x));
    }
  }
}
// code to handle SSE optimization
// generic fallback: evaluate via scalar plan
template<bool pass_check, typename Saver,
         typename R, int dim,
         typename DType, typename E, int etype>
struct MapExpCPUEngine {
  inline static void Map(TRValue<R, cpu, dim, DType> *dst,
                         const expr::Exp<E, DType, etype> &exp) {
    MapPlan<Saver>(dst, MakePlan(exp.self()));
  }
};
// specialization when packet (SIMD) evaluation is possible; falls back to
// the scalar plan when either side is not suitably aligned
template<typename SV, int dim, typename DType, typename E, int etype>
struct MapExpCPUEngine<true, SV, Tensor<cpu, dim, DType>,
                       dim, DType, E, etype> {
  inline static void Map(Tensor<cpu, dim, DType> *dst,
                         const expr::Exp<E, DType, etype> &exp) {
    if (expr::PacketAlignCheck<dim, E, MSHADOW_DEFAULT_PACKET>::Check(exp.self()) &&
        expr::PacketAlignCheck<dim, Tensor<cpu, dim, DType>, MSHADOW_DEFAULT_PACKET>::Check(*dst)) {
      expr::MapPacketPlan<SV>(dst->self(),
                              expr::MakePacketPlan<MSHADOW_DEFAULT_PACKET>(exp.self()));
    } else {
      MapPlan<SV>(dst, MakePlan(exp.self()));
    }
  }
};

// dst = exp, element-wise, with static type/shape checking
template<typename Saver, typename R, int dim,
         typename DType, typename E, int etype>
inline void MapExp(TRValue<R, cpu, dim, DType> *dst,
                   const expr::Exp<E, DType, etype> &exp) {
  expr::TypeCheckPass<expr::TypeCheck<cpu, dim, DType, E>::kMapPass>
      ::Error_All_Tensor_in_Exp_Must_Have_Same_Type();
  Shape<dim> eshape = expr::ShapeCheck<dim, E>::Check(exp.self());
  Shape<dim> dshape = expr::ShapeCheck<dim, R>::Check(dst->self());
  // eshape[0] == 0 marks a scalar expression, which matches any target shape
  CHECK(eshape[0] == 0 || eshape == dshape)
      << "Assignment: Shape of Tensors are not consistent with target, "
      << "eshape: " << eshape << " dshape:" << dshape;
  MapExpCPUEngine<expr::PacketCheck<E, MSHADOW_DEFAULT_PACKET>::kPass,
                  Saver, R, dim, DType, E, etype>
  ::Map(dst->ptrself(), exp);
}

// reduce exp over all but the lowest (last) dimension into a 1-D dst
template<typename Saver, typename Reducer,
         typename R, typename DType, typename E, int etype>
inline void MapReduceKeepLowest(TRValue<R, cpu, 1, DType> *dst,
                                const expr::Exp<E, DType, etype> &exp,
                                DType scale) {
  expr::TypeCheckPass<expr::TypeCheck<cpu, 1, DType, E>::kRedPass>
      ::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
  Shape<2> eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
      ::Check(exp.self()).FlatTo2D();
  Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
  CHECK_EQ(eshape[1], dshape[0]) << "MapReduceKeepLowest::reduction dimension do not match";
  CHECK_NE(eshape[0], 0U) << "can not reduce over empty tensor";
  // execution
  expr::Plan<R, DType> dplan = MakePlan(dst->self());
  expr::Plan<E, DType> splan = MakePlan(exp.self());
#if (MSHADOW_USE_CUDA == 0)
  #pragma omp parallel for
#endif
  for (openmp_index_t x = 0; x < eshape[1]; ++x) {
    // seed the reduction with row 0, then fold in the remaining rows
    DType res = splan.Eval(0, x);
    for (index_t y = 1; y < eshape[0]; ++y) {
      Reducer::Reduce(res, splan.Eval(y, x));
    }
    Saver::template Save<DType>(dplan.REval(0, x), res * scale);
  }
}

// reduce exp over all dimensions except dimkeep into a 1-D dst
template<typename Saver, typename Reducer, int dimkeep,
         typename R, typename DType, typename E, int etype>
inline void MapReduceKeepHighDim(TRValue<R, cpu, 1, DType> *dst,
                                 const expr::Exp<E, DType, etype> &exp,
                                 DType scale) {
  expr::TypeCheckPass<expr::TypeCheck<cpu, dimkeep, DType, E>::kRedPass>
      ::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
  typedef Shape<expr::ExpInfo<E>::kDim> EShape;
  EShape eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
      ::Check(exp.self());
  Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
  CHECK_EQ(eshape[dimkeep], dshape[0]) << "MapReduceKeepHighDim::reduction dimension do not match";
  // use equvalent form: view input as (before, kept, middle, last)
  Shape<4> pshape = Shape4(eshape.ProdShape(0, dimkeep),
                           eshape[dimkeep],
                           eshape.ProdShape(dimkeep + 1, EShape::kSubdim),
                           eshape[EShape::kSubdim]);
  // execution
  expr::Plan<R, DType> dplan = MakePlan(dst->self());
  expr::Plan<E, DType> splan = MakePlan(exp.self());
#if (MSHADOW_USE_CUDA == 0)
  #pragma omp parallel for
#endif
  for (openmp_index_t c = 0; c <
pshape[1]; ++c) {
    DType res; Reducer::SetInitValue(res);
    for (index_t n = 0; n < pshape[0]; ++n) {
      DType tres; Reducer::SetInitValue(tres);
      for (index_t y = 0; y < pshape[2]; ++y) {
        for (index_t x = 0; x < pshape[3]; ++x) {
          Reducer::Reduce(tres,
                          splan.Eval((n * pshape[1] + c) * pshape[2] + y, x));
        }
      }
      Reducer::Reduce(res, tres);
    }
    Saver::template Save<DType>(dplan.REval(0, c), DType(res * scale));
  }
}

// numerically stable softmax of a 1-D tensor (subtracts the max first)
template<typename DType>
inline void Softmax(Tensor<cpu, 1, DType> dst,
                    const Tensor<cpu, 1, DType> &energy) {
  DType mmax = energy[0];
  for (index_t x = 1; x < dst.size(0); ++x) {
    if (mmax < energy[x]) mmax = energy[x];
  }
  DType sum = DType(0.0f);
  for (index_t x = 0; x < dst.size(0); ++x) {
    dst[x] = std::exp(energy[x] - mmax);
    sum += dst[x];
  }
  for (index_t x = 0; x < dst.size(0); ++x) {
    dst[x] /= sum;
  }
}

// gradient of softmax + cross-entropy w.r.t. the logits:
// dst = src, except dst[y][label[y]] -= 1
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
                        const Tensor<cpu, 2, DType> &src,
                        const Tensor<cpu, 1, DType> &label) {
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    const index_t k = static_cast<int>(label[y]);
    for (index_t x = 0; x < dst.size(1); ++x) {
      if (x == k) {
        dst[y][k] = src[y][k] - 1.0f;
      } else {
        dst[y][x] = src[y][x];
      }
    }
  }
}

// as above, but rows whose label equals ignore_label get zero gradient
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
                        const Tensor<cpu, 2, DType> &src,
                        const Tensor<cpu, 1, DType> &label,
                        const DType &ignore_label) {
#pragma omp parallel for
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    const int k = static_cast<int>(label[y]);
    for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
      if (static_cast<int>(ignore_label) == k) {
        dst[y][x] = 0.0f;
      } else {
        if (x == k) {
          dst[y][k] = src[y][k] - 1.0f;
        } else {
          dst[y][x] = src[y][x];
        }
      }
    }
  }
}

// 3-D variant: per-position labels, softmax axis is dimension 1
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
                        const Tensor<cpu, 3, DType> &src,
                        const Tensor<cpu, 2, DType> &label) {
#pragma omp parallel for
  for (openmp_index_t n = 0; n < dst.size(2); ++n) {
    for (index_t y = 0; y < dst.size(0); ++y) {
      const int k =
static_cast<int>(label[y][n]); for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { if (x == k) { dst[y][k][n] = src[y][k][n] - 1.0f; } else { dst[y][x][n] = src[y][x][n]; } } } } } template<typename DType> inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst, const Tensor<cpu, 3, DType> &src, const Tensor<cpu, 2, DType> &label, const DType &ignore_label) { #pragma omp parallel for for (openmp_index_t n = 0; n < dst.size(2); ++n) { for (index_t y = 0; y < dst.size(0); ++y) { const int k = static_cast<int>(label[y][n]); if (k == static_cast<int>(ignore_label)) { for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { dst[y][x][n] = DType(0.0f); } } else { for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { if (x == k) { dst[y][k][n] = src[y][k][n] - 1.0f; } else { dst[y][x][n] = src[y][x][n]; } } } } } } template<typename DType> inline void Softmax(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 2, DType> &energy) { CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch"; #pragma omp parallel for for (openmp_index_t y = 0; y < dst.size(0); ++y) { Softmax(dst[y], energy[y]); } } template<typename DType> inline void Softmax(Tensor<cpu, 3, DType> dst, const Tensor<cpu, 3, DType> &energy) { CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch"; #pragma omp parallel for for (openmp_index_t y = 0; y < dst.size(0); ++y) { for (index_t n = 0; n < dst.size(2); ++n) { DType mmax = energy[y][0][n]; for (index_t x = 1; x < dst.size(1); ++x) { if (mmax < energy[y][x][n]) mmax = energy[y][x][n]; } DType sum = DType(0.0f); for (index_t x = 0; x < dst.size(1); ++x) { dst[y][x][n] = std::exp(energy[y][x][n] - mmax); sum += dst[y][x][n]; } for (index_t x = 0; x < dst.size(1); ++x) { dst[y][x][n] /= sum; } } } } template<typename IndexType, typename DType> inline void AddTakeGrad(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 1, IndexType>& index, const Tensor<cpu, 2, DType> &src) { const int K = dst.shape_[0]; for (index_t y = 0; y < index.size(0); ++y) { int 
    j = index[y];
    // Clip out-of-range indices to the valid row range.
    if (j <= 0) j = 0;
    else if (j >= K) j = K - 1;
    dst[j] += src[y];
  }
}

// Scatter-add gradient variant for large batches: indices arrive pre-sorted
// (`sorted`), with `index` mapping each sorted position back to its source
// row, so writes to the same dst row are contiguous.
template<typename IndexType, typename DType>
inline void AddTakeGradLargeBatch(Tensor<cpu, 2, DType> dst,
                                  const Tensor<cpu, 1, IndexType>& sorted,
                                  const Tensor<cpu, 1, IndexType>& index,
                                  const Tensor<cpu, 2, DType> &src) {
  for (index_t y = 0; y < sorted.size(0); ++y) {
    dst[sorted[y]] += src[index[y]];
  }
}

// Copy each row of src into dst at the position named by index:
// dst[index[y]] = src[y].  No bounds clipping is performed here.
template<typename IndexType, typename DType>
inline void IndexFill(Tensor<cpu, 2, DType> dst,
                      const Tensor<cpu, 1, IndexType>& index,
                      const Tensor<cpu, 2, DType> &src) {
  for (index_t y = 0; y < index.size(0); ++y) {
    for (index_t j = 0; j < src.size(1); j++) {
      dst[index[y]][j] = src[y][j];
    }
  }
}

// In-place stable sort of two parallel 1-D tensors by key.  Sorting is done
// indirectly through an index permutation so keys and values move together.
template<typename KDType, typename VDType>
inline void SortByKey(Tensor<cpu, 1, KDType> keys, Tensor<cpu, 1, VDType> values,
                      bool is_ascend) {
  CHECK_EQ(keys.CheckContiguous(), true);
  CHECK_EQ(values.CheckContiguous(), true);
  CHECK_EQ(keys.size(0), values.size(0))
    << "The sizes of key/value are not equal! keys_size: " << keys.size(0)
    << "values_size: " << values.size(0);
  std::vector<size_t> idx(keys.size(0));
  std::vector<KDType> keys_vec(keys.size(0));
  std::vector<VDType> values_vec(values.size(0));
  // NOTE(review): loop index is a signed int compared against an unsigned
  // size — overflows for > INT_MAX elements; the final loop below uses
  // index_t.  Consider unifying — TODO confirm index_t width.
  for (int i = 0; i < keys.size(0); i++) {
    idx[i] = i;
    keys_vec[i] = keys[i];
    values_vec[i] = values[i];
  }
  if (is_ascend) {
    std::stable_sort(idx.begin(), idx.end(),
                     [&keys_vec](size_t i1, size_t i2)
                     {return keys_vec[i1] < keys_vec[i2]; });
  } else {
    std::stable_sort(idx.begin(), idx.end(),
                     [&keys_vec](size_t i1, size_t i2)
                     {return keys_vec[i1] > keys_vec[i2]; });
  }
  // Apply the permutation back onto both tensors.
  for (index_t i = 0; i < values.size(0); i++) {
    keys[i] = keys_vec[idx[i]];
    values[i] = values_vec[idx[i]];
  }
}

// Sort values within segments: two stable sorts leave `segments` ascending
// and, within each equal segment, `values` ascending.
template<typename Device, typename VDType, typename SDType>
inline void VectorizedSort(Tensor<Device, 1, VDType> values,
                           Tensor<Device, 1, SDType> segments) {
  // We can sort each segments using two stable sorts
  SortByKey(values, segments, true);
  SortByKey(segments, values, true);
}
// blas related
template<typename Device, typename
DType> inline void VectorDot(Tensor<Device, 1, DType> dst, const Tensor<Device, 1, DType> &lhs, const Tensor<Device, 1, DType> &rhs) { CHECK_EQ(lhs.size(0), rhs.size(0)) << "VectorDot: Shape mismatch"; CHECK_EQ(dst.size(0), 1U) << "VectorDot: expect dst to be scalar"; expr::BLASEngine<Device, DType>::SetStream(lhs.stream_); mshadow::expr::BLASEngine<Device, DType>::dot( lhs.stream_, lhs.size(0), lhs.dptr_, 1, rhs.dptr_, 1, dst.dptr_); } template<bool transpose_left, bool transpose_right, typename Device, typename DType> inline void BatchGEMM(Tensor<Device, 3, DType> dst, const Tensor<Device, 3, DType> &lhs, const Tensor<Device, 3, DType> &rhs, DType alpha, DType beta, Tensor<Device, 1, DType*> workspace) { index_t batch_size = dst.shape_[0]; expr::BLASEngine<Device, DType>::SetStream(dst.stream_); Shape<3> sleft = transpose_left ? Shape3(lhs.shape_[0], lhs.shape_[2], lhs.shape_[1]) : lhs.shape_; Shape<3> sright = transpose_right ? Shape3(rhs.shape_[0], rhs.shape_[2], rhs.shape_[1]) : rhs.shape_; CHECK_EQ(dst.CheckContiguous(), true); CHECK_EQ(lhs.CheckContiguous(), true); CHECK_EQ(rhs.CheckContiguous(), true); CHECK(sleft[0] == batch_size && sright[0] == batch_size) << "BatchGEMM: batchsize must be equal." << "dst: " << dst.shape_ << "\n" << "lhs: " << sleft << "\n" << "rhs: " << sright << "\n"; CHECK(dst.size(1) == sleft[1] && dst.size(2) == sright[2] && sleft[2] == sright[1]) << "BatchGEMM: matrix shape mismatch" << "dst: " << dst.shape_ << "\n" << "lhs: " << sleft << "\n" << "rhs: " << sright << "\n"; CHECK(workspace.size(0) >= 3 * batch_size) << "Workspace Size must be bigger than " << 3 * batch_size; CHECK_EQ(workspace.CheckContiguous(), true); // use column major argument to compatible with most BLAS expr::BLASEngine<Device, DType>::batched_gemm (dst.stream_, transpose_right, transpose_left, transpose_right ? rhs.size(1) : rhs.size(2), transpose_left ? lhs.size(2) : lhs.size(1), transpose_right ? 
rhs.size(2) : rhs.size(1), alpha, rhs.dptr_, rhs.stride_, lhs.dptr_, lhs.stride_, beta, dst.dptr_, dst.stride_, batch_size, workspace.dptr_); } } // namespace mshadow #endif // MSHADOW_TENSOR_CPU_INL_H_
/* ===== effect.c — begin second concatenated file (ImageMagick MagickCore) ===== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE FFFFF FFFFF EEEEE CCCC TTTTT % % E F F E C T % % EEE FFF FFF EEE C T % % E F F E C T % % EEEEE F F EEEEE CCCC T % % % % % % MagickCore Image Effects Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright @ 1999 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/threshold.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveBlurImage() adaptively blurs the image by blurring 
less % intensely near image edges and more intensely far from edges. We blur the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and AdaptiveBlurImage() selects a suitable radius for you. % % The format of the AdaptiveBlurImage method is: % % Image *AdaptiveBlurImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveBlurImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define AdaptiveBlurImageTag "Convolve/Image" #define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma) CacheView *blur_view, *edge_view, *image_view; double normalize, **kernel; Image *blur_image, *edge_image, *gaussian_image; MagickBooleanType status; MagickOffsetType progress; size_t width; ssize_t w, y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (fabs(sigma) < MagickEpsilon) return(blur_image); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } /* Edge detect the image brightness channel, level, blur, and level again. 
*/ edge_image=EdgeImage(image,radius,exception); if (edge_image == (Image *) NULL) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } (void) AutoLevelImage(edge_image,exception); gaussian_image=BlurImage(edge_image,radius,sigma,exception); if (gaussian_image != (Image *) NULL) { edge_image=DestroyImage(edge_image); edge_image=gaussian_image; } (void) AutoLevelImage(edge_image,exception); /* Create a set of kernels from maximum (radius,sigma) to minimum. */ width=GetOptimalKernelWidth2D(radius,sigma); kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width, sizeof(*kernel))); if (kernel == (double **) NULL) { edge_image=DestroyImage(edge_image); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memset(kernel,0,(size_t) width*sizeof(*kernel)); for (w=0; w < (ssize_t) width; w+=2) { ssize_t j, k, u, v; kernel[w]=(double *) MagickAssumeAligned(AcquireAlignedMemory( (size_t) (width-w),(width-w)*sizeof(**kernel))); if (kernel[w] == (double *) NULL) break; normalize=0.0; j=(ssize_t) (width-w-1)/2; k=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel[w][k]=(double) (exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel[w][k]; k++; } } kernel[w][(k-1)/2]+=(double) (1.0-normalize); if (sigma < MagickEpsilon) kernel[w][(k-1)/2]=1.0; } if (w < (ssize_t) width) { for (w-=2; w >= 0; w-=2) kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]); kernel=(double **) RelinquishAlignedMemory(kernel); edge_image=DestroyImage(edge_image); blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Adaptively blur image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); edge_view=AcquireVirtualCacheView(edge_image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,blur_image->rows,1) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { const Quantum *magick_restrict r; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) blur_image->columns; x++) { const Quantum *magick_restrict p; ssize_t i; ssize_t center, j; j=CastDoubleToLong(ceil((double) width*(1.0-QuantumScale* GetPixelIntensity(edge_image,r))-0.5)); if (j < 0) j=0; else if (j > (ssize_t) width) j=(ssize_t) width; if ((j & 0x01) != 0) j--; p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y- (ssize_t) ((width-j)/2L),width-j,width-j,exception); if (p == (const Quantum *) NULL) break; center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+ GetPixelChannels(image)*((width-j)/2); for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait blur_traits, traits; const double *magick_restrict k; const Quantum *magick_restrict pixels; ssize_t u; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[center+i],q); continue; } k=kernel[j]; pixels=p; pixel=0.0; gamma=0.0; if ((blur_traits & 
BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { pixel+=(*k)*pixels[i]; gamma+=(*k); k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); continue; } /* Alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q); } q+=GetPixelChannels(blur_image); r+=GetPixelChannels(edge_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,AdaptiveBlurImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); edge_view=DestroyCacheView(edge_view); image_view=DestroyCacheView(image_view); edge_image=DestroyImage(edge_image); for (w=0; w < (ssize_t) width; w+=2) kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]); kernel=(double **) RelinquishAlignedMemory(kernel); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e S h a r p e n I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveSharpenImage() adaptively sharpens the image by sharpening more % intensely near image edges and less intensely far from edges. 
We sharpen the % image with a Gaussian operator of the given radius and standard deviation % (sigma). For reasonable results, radius should be larger than sigma. Use a % radius of 0 and AdaptiveSharpenImage() selects a suitable radius for you. % % The format of the AdaptiveSharpenImage method is: % % Image *AdaptiveSharpenImage(const Image *image,const double radius, % const double sigma,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the Gaussian, in pixels, not counting the center % pixel. % % o sigma: the standard deviation of the Laplacian, in pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveSharpenImage(const Image *image,const double radius, const double sigma,ExceptionInfo *exception) { #define AdaptiveSharpenImageTag "Convolve/Image" #define MagickSigma (fabs(sigma) < MagickEpsilon ? MagickEpsilon : sigma) CacheView *sharp_view, *edge_view, *image_view; double normalize, **kernel; Image *sharp_image, *edge_image, *gaussian_image; MagickBooleanType status; MagickOffsetType progress; size_t width; ssize_t w, y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); sharp_image=CloneImage(image,0,0,MagickTrue,exception); if (sharp_image == (Image *) NULL) return((Image *) NULL); if (fabs(sigma) < MagickEpsilon) return(sharp_image); if (SetImageStorageClass(sharp_image,DirectClass,exception) == MagickFalse) { sharp_image=DestroyImage(sharp_image); return((Image *) NULL); } /* Edge detect the image brightness channel, level, sharp, and level again. 
*/ edge_image=EdgeImage(image,radius,exception); if (edge_image == (Image *) NULL) { sharp_image=DestroyImage(sharp_image); return((Image *) NULL); } (void) AutoLevelImage(edge_image,exception); gaussian_image=BlurImage(edge_image,radius,sigma,exception); if (gaussian_image != (Image *) NULL) { edge_image=DestroyImage(edge_image); edge_image=gaussian_image; } (void) AutoLevelImage(edge_image,exception); /* Create a set of kernels from maximum (radius,sigma) to minimum. */ width=GetOptimalKernelWidth2D(radius,sigma); kernel=(double **) MagickAssumeAligned(AcquireAlignedMemory((size_t) width,sizeof(*kernel))); if (kernel == (double **) NULL) { edge_image=DestroyImage(edge_image); sharp_image=DestroyImage(sharp_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memset(kernel,0,(size_t) width*sizeof(*kernel)); for (w=0; w < (ssize_t) width; w+=2) { ssize_t j, k, u, v; kernel[w]=(double *) MagickAssumeAligned(AcquireAlignedMemory((size_t) (width-w),(width-w)*sizeof(**kernel))); if (kernel[w] == (double *) NULL) break; normalize=0.0; j=(ssize_t) (width-w-1)/2; k=0; for (v=(-j); v <= j; v++) { for (u=(-j); u <= j; u++) { kernel[w][k]=(double) (-exp(-((double) u*u+v*v)/(2.0*MagickSigma* MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma)); normalize+=kernel[w][k]; k++; } } kernel[w][(k-1)/2]=(double) ((-2.0)*normalize); if (sigma < MagickEpsilon) kernel[w][(k-1)/2]=1.0; } if (w < (ssize_t) width) { for (w-=2; w >= 0; w-=2) kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]); kernel=(double **) RelinquishAlignedMemory(kernel); edge_image=DestroyImage(edge_image); sharp_image=DestroyImage(sharp_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Adaptively sharpen image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); edge_view=AcquireVirtualCacheView(edge_image,exception); sharp_view=AcquireAuthenticCacheView(sharp_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,sharp_image,sharp_image->rows,1) #endif for (y=0; y < (ssize_t) sharp_image->rows; y++) { const Quantum *magick_restrict r; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; r=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(sharp_view,0,y,sharp_image->columns,1, exception); if ((r == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) sharp_image->columns; x++) { const Quantum *magick_restrict p; ssize_t i; ssize_t center, j; j=CastDoubleToLong(ceil((double) width*(1.0-QuantumScale* GetPixelIntensity(edge_image,r))-0.5)); if (j < 0) j=0; else if (j > (ssize_t) width) j=(ssize_t) width; if ((j & 0x01) != 0) j--; p=GetCacheViewVirtualPixels(image_view,x-((ssize_t) (width-j)/2L),y- (ssize_t) ((width-j)/2L),width-j,width-j,exception); if (p == (const Quantum *) NULL) break; center=(ssize_t) GetPixelChannels(image)*(width-j)*((width-j)/2L)+ GetPixelChannels(image)*((width-j)/2); for (i=0; i < (ssize_t) GetPixelChannels(sharp_image); i++) { double alpha, gamma, pixel; PixelChannel channel; PixelTrait sharp_traits, traits; const double *magick_restrict k; const Quantum *magick_restrict pixels; ssize_t u; ssize_t v; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); sharp_traits=GetPixelChannelTraits(sharp_image,channel); if ((traits == UndefinedPixelTrait) || (sharp_traits == UndefinedPixelTrait)) continue; if ((sharp_traits & CopyPixelTrait) != 0) { SetPixelChannel(sharp_image,channel,p[center+i],q); continue; } k=kernel[j]; pixels=p; pixel=0.0; gamma=0.0; if 
((sharp_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { pixel+=(*k)*pixels[i]; gamma+=(*k); k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q); continue; } /* Alpha blending. */ for (v=0; v < (ssize_t) (width-j); v++) { for (u=0; u < (ssize_t) (width-j); u++) { alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels)); pixel+=(*k)*alpha*pixels[i]; gamma+=(*k)*alpha; k++; pixels+=GetPixelChannels(image); } } gamma=PerceptibleReciprocal(gamma); SetPixelChannel(sharp_image,channel,ClampToQuantum(gamma*pixel),q); } q+=GetPixelChannels(sharp_image); r+=GetPixelChannels(edge_image); } if (SyncCacheViewAuthenticPixels(sharp_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,AdaptiveSharpenImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sharp_image->type=image->type; sharp_view=DestroyCacheView(sharp_view); edge_view=DestroyCacheView(edge_view); image_view=DestroyCacheView(image_view); edge_image=DestroyImage(edge_image); for (w=0; w < (ssize_t) width; w+=2) kernel[w]=(double *) RelinquishAlignedMemory(kernel[w]); kernel=(double **) RelinquishAlignedMemory(kernel); if (status == MagickFalse) sharp_image=DestroyImage(sharp_image); return(sharp_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % BlurImage() blurs an image. We convolve the image with a Gaussian operator % of the given radius and standard deviation (sigma). For reasonable results, % the radius should be larger than sigma. 
Use a radius of 0 and BlurImage()
%  selects a suitable radius for you.
%
%  The format of the BlurImage method is:
%
%      Image *BlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    geometry[MagickPathExtent];

  KernelInfo
    *kernel_info;

  Image
    *blur_image;

  /* Hard assertions on the caller's contract, matching file convention. */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when it is available and succeeds. */
  blur_image=AccelerateBlurImage(image,radius,sigma,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  /*
    Separable Gaussian blur: a 1-D "blur" kernel applied horizontally, then
    the same kernel rotated 90 degrees for the vertical pass.
  */
  (void) FormatLocaleString(geometry,MagickPathExtent,
    "blur:%.20gx%.20g;blur:%.20gx%.20g+90",radius,sigma,radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B i l a t e r a l B l u r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BilateralBlurImage() is a non-linear, edge-preserving, and noise-reducing
%  smoothing filter for images.
It replaces the intensity of each pixel with
%  a weighted average of intensity values from nearby pixels.  This weight is
%  based on a Gaussian distribution.  The weights depend not only on Euclidean
%  distance of pixels, but also on the radiometric differences (e.g., range
%  differences, such as color intensity, depth distance, etc.).  This preserves
%  sharp edges.
%
%  The format of the BilateralBlurImage method is:
%
%      Image *BilateralBlurImage(const Image *image,const size_t width,
%        const size_t height,const double intensity_sigma,
%        const double spatial_sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the width of the neighborhood in pixels.
%
%    o height: the height of the neighborhood in pixels.
%
%    o intensity_sigma: sigma in the intensity space.  A larger value means
%      that farther colors within the pixel neighborhood (see spatial_sigma)
%      will be mixed together, resulting in larger areas of semi-equal color.
%
%    o spatial_sigma: sigma in the coordinate space.  A larger value means that
%      farther pixels influence each other as long as their colors are close
%      enough (see intensity_sigma).  When the neighborhood diameter is greater
%      than zero, it specifies the neighborhood size regardless of
%      spatial_sigma.  Otherwise, the neighborhood diameter is proportional to
%      spatial_sigma.
%
%    o exception: return any errors or warnings in this structure.
% */ static inline double BlurDistance(const ssize_t x,const ssize_t y, const ssize_t u,const ssize_t v) { return(sqrt(((double) x-u)*((double) x-u)+((double) y-v)*((double) y-v))); } static inline double BlurGaussian(const double x,const double sigma) { return(exp(-((double) x*x)*PerceptibleReciprocal(2.0*sigma*sigma))* PerceptibleReciprocal(Magick2PI*sigma*sigma)); } static double **DestroyBilateralThreadSet(const ssize_t number_threads, double **weights) { ssize_t i; assert(weights != (double **) NULL); for (i=0; i <= (ssize_t) number_threads; i++) if (weights[i] != (double *) NULL) weights[i]=(double *) RelinquishMagickMemory(weights[i]); weights=(double **) RelinquishMagickMemory(weights); return(weights); } static double **AcquireBilateralThreadSet(const size_t number_threads, const size_t width,const size_t height) { double **weights; ssize_t i; weights=(double **) AcquireQuantumMemory(number_threads+1,sizeof(*weights)); if (weights == (double **) NULL) return((double **) NULL); (void) memset(weights,0,number_threads*sizeof(*weights)); for (i=0; i <= (ssize_t) number_threads; i++) { weights[i]=(double *) AcquireQuantumMemory(width,height*sizeof(**weights)); if (weights[i] == (double *) NULL) return(DestroyBilateralThreadSet(number_threads,weights)); } return(weights); } MagickExport Image *BilateralBlurImage(const Image *image,const size_t width, const size_t height,const double intensity_sigma,const double spatial_sigma, ExceptionInfo *exception) { #define MaxIntensity (255) #define BilateralBlurImageTag "Blur/Image" CacheView *blur_view, *image_view; double intensity_gaussian[2*(MaxIntensity+1)], *spatial_gaussian, **weights; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; OffsetInfo mid; ssize_t number_threads, w, y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != 
(ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); blur_image=CloneImage(image,0,0,MagickTrue,exception); if (blur_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse) { blur_image=DestroyImage(blur_image); return((Image *) NULL); } number_threads=(size_t) GetMagickResourceLimit(ThreadResource); weights=AcquireBilateralThreadSet(number_threads,width,height); if (weights == (double **) NULL) { blur_image=DestroyImage(blur_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (w=(-MaxIntensity); w < MaxIntensity; w++) intensity_gaussian[w+MaxIntensity]=BlurGaussian((double) w,intensity_sigma); spatial_gaussian=weights[number_threads]; { ssize_t n, v; n=0; mid.x=(ssize_t) (width/2L); mid.y=(ssize_t) (height/2L); for (v=0; v < (ssize_t) height; v++) { ssize_t u; for (u=0; u < (ssize_t) width; u++) spatial_gaussian[n++]=BlurGaussian(BlurDistance(0,0,u-mid.x,v-mid.y), spatial_sigma); } } /* Bilateral blur image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); blur_view=AcquireAuthenticCacheView(blur_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,blur_image,blur_image->rows,1) #endif for (y=0; y < (ssize_t) blur_image->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) blur_image->columns; x++) { double gamma, pixel; const Quantum *magick_restrict p, *magick_restrict r; ssize_t i, u; ssize_t n, v; /* Tonal weighting preserves edges while smoothing in the flat regions. 
*/ p=GetCacheViewVirtualPixels(image_view,x-mid.x,y-mid.y,width,height, exception); if (p == (const Quantum *) NULL) break; p+=(ssize_t) GetPixelChannels(image)*width*mid.y+GetPixelChannels(image)* mid.x; n=0; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { double intensity; r=p+(ssize_t) GetPixelChannels(image)*(ssize_t) width*(mid.y-v)+ GetPixelChannels(image)*(mid.x-u); intensity=ScaleQuantumToChar(GetPixelIntensity(image,r))- (double) ScaleQuantumToChar(GetPixelIntensity(image,p)); if ((intensity >= -MaxIntensity) && (intensity <= MaxIntensity)) weights[id][n]=intensity_gaussian[(ssize_t) intensity+MaxIntensity]* spatial_gaussian[n]; else weights[id][n]=BlurGaussian(intensity,intensity_sigma)* BlurGaussian(BlurDistance(x,y,x+u-mid.x,y+v-mid.y),spatial_sigma); n++; } } for (i=0; i < (ssize_t) GetPixelChannels(blur_image); i++) { PixelChannel channel; PixelTrait blur_traits, traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); blur_traits=GetPixelChannelTraits(blur_image,channel); if ((traits == UndefinedPixelTrait) || (blur_traits == UndefinedPixelTrait)) continue; if ((blur_traits & CopyPixelTrait) != 0) { SetPixelChannel(blur_image,channel,p[i],q); continue; } pixel=0.0; gamma=0.0; n=0; if ((blur_traits & BlendPixelTrait) == 0) { /* No alpha blending. */ for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { r=p+(ssize_t) GetPixelChannels(image)*width*(mid.y-v)+ GetPixelChannels(image)*(mid.x-u); pixel+=weights[id][n]*r[i]; gamma+=weights[id][n]; n++; } } SetPixelChannel(blur_image,channel,ClampToQuantum( PerceptibleReciprocal(gamma)*pixel),q); continue; } /* Alpha blending. 
*/ for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { double alpha, beta; r=p+(ssize_t) GetPixelChannels(image)*width*(mid.y-v)+ GetPixelChannels(image)*(mid.x-u); alpha=(double) (QuantumScale*GetPixelAlpha(image,p)); beta=(double) (QuantumScale*GetPixelAlpha(image,r)); pixel+=weights[id][n]*r[i]; gamma+=weights[id][n]*alpha*beta; n++; } } SetPixelChannel(blur_image,channel,ClampToQuantum( PerceptibleReciprocal(gamma)*pixel),q); } q+=GetPixelChannels(blur_image); } if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,BilateralBlurImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } blur_image->type=image->type; blur_view=DestroyCacheView(blur_view); image_view=DestroyCacheView(image_view); weights=DestroyBilateralThreadSet(number_threads,weights); if (status == MagickFalse) blur_image=DestroyImage(blur_image); return(blur_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n v o l v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvolveImage() applies a custom convolution kernel to the image. % % The format of the ConvolveImage method is: % % Image *ConvolveImage(const Image *image,const KernelInfo *kernel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o kernel: the filtering kernel. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ConvolveImage(const Image *image,
  const KernelInfo *kernel_info,ExceptionInfo *exception)
{
  Image
    *convolve_image;

#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Prefer the OpenCL-accelerated path when it is available and succeeds.
  */
  convolve_image=AccelerateConvolveImage(image,kernel_info,exception);
  if (convolve_image != (Image *) NULL)
    return(convolve_image);
#endif
  /*
    Otherwise delegate to the generic morphology engine, which implements
    convolution as a single-iteration ConvolveMorphology operation.
  */
  return(MorphologyImage(image,ConvolveMorphology,1,kernel_info,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s p e c k l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DespeckleImage() reduces the speckle noise in an image while preserving the
%  edges of the original image.  A speckle removing filter uses a complementary
%  hulling technique (raising pixels that are darker than their surrounding
%  neighbors, then complementarily lowering pixels that are brighter than their
%  surrounding neighbors) to reduce the speckle index of that image (reference
%  Crimmins speckle removal).
%
%  The format of the DespeckleImage method is:
%
%      Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Hull() performs one sweep of the Crimmins complementary hulling operator
  along the (x_offset,y_offset) direction.  f and g are (columns+2) x (rows+2)
  scratch planes with a one-pixel border; polarity > 0 raises dark pixels
  toward their neighbors, polarity <= 0 lowers bright pixels.  The first pass
  writes g from f; the second pass writes f back from g using both the forward
  (r) and backward (s) neighbor along the sweep direction.
*/
static void Hull(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const int polarity,Quantum *magick_restrict f,Quantum *magick_restrict g)
{
  Quantum
    *p,
    *q,
    *r,
    *s;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(f != (Quantum *) NULL);
  assert(g != (Quantum *) NULL);
  /* skip the top border row; r points at the offset neighbor of p */
  p=f+(columns+2);
  q=g+(columns+2);
  r=p+(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickRealType
      v;

    ssize_t
      i,
      x;

    /* index of the first interior pixel of row y (accounts for side border) */
    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] >= (v+ScaleCharToQuantum(2)))
          v+=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) p[i];
        if ((MagickRealType) r[i] <= (v-ScaleCharToQuantum(2)))
          v-=ScaleCharToQuantum(1);
        q[i]=(Quantum) v;
        i++;
      }
  }
  /* second pass: compare against both neighbors (s behind, r ahead) */
  p=f+(columns+2);
  q=g+(columns+2);
  r=q+(y_offset*((ssize_t) columns+2)+x_offset);
  s=q-(y_offset*((ssize_t) columns+2)+x_offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    ssize_t
      i,
      x;

    MagickRealType
      v;

    i=(2*y+1)+y*columns;
    if (polarity > 0)
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] >= (v+ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] > v))
          v+=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
    else
      for (x=0; x < (ssize_t) columns; x++)
      {
        v=(MagickRealType) q[i];
        if (((MagickRealType) s[i] <= (v-ScaleCharToQuantum(2))) &&
            ((MagickRealType) r[i] < v))
          v-=ScaleCharToQuantum(1);
        p[i]=(Quantum) v;
        i++;
      }
  }
}

MagickExport Image *DespeckleImage(const Image *image,ExceptionInfo *exception)
{
#define DespeckleImageTag  "Despeckle/Image"

  CacheView
    *despeckle_view,
    *image_view;

  Image
    *despeckle_image;

  MagickBooleanType
    status;

  MemoryInfo
    *buffer_info,
    *pixel_info;

  Quantum
    *magick_restrict buffer,
    *magick_restrict pixels;

  ssize_t
    i;

  size_t
    length;

  /* the four hulling directions: S, E, SE, SW */
  static const ssize_t
    X[4] = {0, 1, 1,-1},
    Y[4] = {1, 0, 1, 1};

  /*
    Allocate despeckled image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  despeckle_image=AccelerateDespeckleImage(image,exception);
  if (despeckle_image != (Image *) NULL)
    return(despeckle_image);
#endif
  despeckle_image=CloneImage(image,0,0,MagickTrue,exception);
  if (despeckle_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(despeckle_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      despeckle_image=DestroyImage(despeckle_image);
      return((Image *) NULL);
    }
  /*
    Allocate image buffer: two scratch planes with a one-pixel border all
    around, processed one channel at a time.
  */
  length=(size_t) ((image->columns+2)*(image->rows+2));
  pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
  buffer_info=AcquireVirtualMemory(length,sizeof(*buffer));
  if ((pixel_info == (MemoryInfo *) NULL) ||
      (buffer_info == (MemoryInfo *) NULL))
    {
      if (buffer_info != (MemoryInfo *) NULL)
        buffer_info=RelinquishVirtualMemory(buffer_info);
      if (pixel_info != (MemoryInfo *) NULL)
        pixel_info=RelinquishVirtualMemory(pixel_info);
      despeckle_image=DestroyImage(despeckle_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  pixels=(Quantum *) GetVirtualMemoryBlob(pixel_info);
  buffer=(Quantum *) GetVirtualMemoryBlob(buffer_info);
  /*
    Reduce speckle in the image.
  */
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  despeckle_view=AcquireAuthenticCacheView(despeckle_image,exception);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel
      channel;

    PixelTrait
      despeckle_traits,
      traits;

    ssize_t
      k,
      x;

    ssize_t
      j,
      y;

    if (status == MagickFalse)
      continue;
    channel=GetPixelChannelChannel(image,i);
    traits=GetPixelChannelTraits(image,channel);
    despeckle_traits=GetPixelChannelTraits(despeckle_image,channel);
    if ((traits == UndefinedPixelTrait) ||
        (despeckle_traits == UndefinedPixelTrait))
      continue;
    if ((despeckle_traits & CopyPixelTrait) != 0)
      continue;
    /* copy this channel into the bordered scratch plane */
    (void) memset(pixels,0,length*sizeof(*pixels));
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const Quantum
        *magick_restrict p;

      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        pixels[j++]=p[i];
        p+=GetPixelChannels(image);
      }
      j++;
    }
    (void) memset(buffer,0,length*sizeof(*buffer));
    /* complementary hulling: raise then lower, forward then backward */
    for (k=0; k < 4; k++)
    {
      Hull(image,X[k],Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,1,pixels,buffer);
      Hull(image,-X[k],-Y[k],image->columns,image->rows,-1,pixels,buffer);
      Hull(image,X[k],Y[k],image->columns,image->rows,-1,pixels,buffer);
    }
    /* write the filtered channel back to the despeckled image */
    j=(ssize_t) image->columns+2;
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      Quantum
        *magick_restrict q;

      q=GetCacheViewAuthenticPixels(despeckle_view,0,y,
        despeckle_image->columns,1,exception);
      if (q == (Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      j++;
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelChannel(despeckle_image,channel,pixels[j++],q);
        q+=GetPixelChannels(despeckle_image);
      }
      sync=SyncCacheViewAuthenticPixels(despeckle_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
      j++;
    }
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress is per-channel here, not per-row */
        proceed=SetImageProgress(image,DespeckleImageTag,(MagickOffsetType) i,
          GetPixelChannels(image));
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  despeckle_view=DestroyCacheView(despeckle_view);
  image_view=DestroyCacheView(image_view);
  buffer_info=RelinquishVirtualMemory(buffer_info);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  despeckle_image->type=image->type;
  if (status == MagickFalse)
    despeckle_image=DestroyImage(despeckle_image);
  return(despeckle_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E d g e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EdgeImage() finds edges in an image.  Radius defines the radius of the
%  convolution filter.  Use a radius of 0 and EdgeImage() selects a suitable
%  radius for you.
%
%  The format of the EdgeImage method is:
%
%      Image *EdgeImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EdgeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  Image
    *edge_image;

  KernelInfo
    *kernel_info;

  size_t
    center,
    width;

  ssize_t
    j;

  /*
    Build a Laplacian-style kernel: every tap is -1.0 except the center tap,
    which is chosen so the kernel sums to zero (width*height-1).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,0.5);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (kernel_info->width-1)/2;
  kernel_info->y=(ssize_t) (kernel_info->height-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  for (j=0; j < (ssize_t) (kernel_info->width*kernel_info->height); j++)
    kernel_info->values[j]=(-1.0);
  /* width is odd, so integer division lands exactly on the middle tap */
  center=(kernel_info->width*kernel_info->height)/2;
  kernel_info->values[center]=(double) kernel_info->width*
    kernel_info->height-1.0;
  edge_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(edge_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E m b o s s I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EmbossImage() returns a grayscale image with a three-dimensional effect.
%  We convolve the image with a Gaussian operator of the given radius and
%  standard deviation (sigma).
  For reasonable results, radius should be
% larger than sigma.  Use a radius of 0 and Emboss() selects a suitable
% radius for you.
%
% The format of the EmbossImage method is:
%
%     Image *EmbossImage(const Image *image,const double radius,
%       const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%   o image: the image.
%
%   o radius: the radius of the pixel neighborhood.
%
%   o sigma: the standard deviation of the Gaussian, in pixels.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EmbossImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *emboss_image;

  KernelInfo
    *kernel_info;

  ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    k,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  /* square kernel: width == height, so width*width elements */
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->width*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Fill a signed Gaussian: negative above/left of center, positive below/
    right.  k starts at the center column and decrements each row, so only
    the anti-diagonal taps (u == k) survive; everything else is zeroed.
  */
  j=(ssize_t) (kernel_info->width-1)/2;
  k=j;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (((u < 0) || (v < 0) ?
        -8.0 : 8.0)*exp(-((double) u*u+v*v)/(2.0*MagickSigma*MagickSigma))/
        (2.0*MagickPI*MagickSigma*MagickSigma));
      if (u != k)
        kernel_info->values[i]=0.0;
      i++;
    }
    k--;
  }
  /* normalize so the taps sum to 1 (PerceptibleReciprocal guards sum==0) */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  emboss_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  if (emboss_image != (Image *) NULL)
    (void) EqualizeImage(emboss_image,exception);
  return(emboss_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G a u s s i a n B l u r I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GaussianBlurImage() blurs an image.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, the radius should be larger than sigma.  Use a
%  radius of 0 and GaussianBlurImage() selects a suitable radius for you.
%
%  The format of the GaussianBlurImage method is:
%
%      Image *GaussianBlurImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *GaussianBlurImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  char
    geometry[MagickPathExtent];

  Image
    *blur_image;

  KernelInfo
    *kernel_info;

  /*
    Convenience wrapper: acquire a named Gaussian kernel and hand off to
    ConvolveImage().
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  blur_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     K u w a h a r a I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  KuwaharaImage() is an edge preserving noise reduction filter.
%
%  The format of the KuwaharaImage method is:
%
%      Image *KuwaharaImage(const Image *image,const double radius,
%        const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the square window radius.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetMeanLuma() returns the Rec709 luma of a pixel given as an array of
  per-channel doubles (as accumulated by the Kuwahara quadrant means).
*/
static inline MagickRealType GetMeanLuma(const Image *magick_restrict image,
  const double *magick_restrict pixel)
{
  return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
    0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
    0.072186f*pixel[image->channel_map[BluePixelChannel].offset]);  /* Rec709 */
}

MagickExport Image *KuwaharaImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define KuwaharaImageTag  "Kuwahara/Image"

  CacheView
    *image_view,
    *kuwahara_view;

  Image
    *gaussian_image,
    *kuwahara_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    width;

  ssize_t
    y;

  /*
    Initialize Kuwahara image attributes.  The filter runs on a Gaussian
    pre-blurred copy of the image; for each pixel the quadrant (of side
    `width`) with the smallest luma variance supplies the output value.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=(size_t) radius+1;
  gaussian_image=BlurImage(image,radius,sigma,exception);
  if (gaussian_image == (Image *) NULL)
    return((Image *) NULL);
  kuwahara_image=CloneImage(image,0,0,MagickTrue,exception);
  if (kuwahara_image == (Image *) NULL)
    {
      gaussian_image=DestroyImage(gaussian_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(kuwahara_image,DirectClass,exception) == MagickFalse)
    {
      gaussian_image=DestroyImage(gaussian_image);
      kuwahara_image=DestroyImage(kuwahara_image);
      return((Image *) NULL);
    }
  /*
    Edge preserving noise reduction filter.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(gaussian_image,exception);
  kuwahara_view=AcquireAuthenticCacheView(kuwahara_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,kuwahara_image,gaussian_image->rows,1)
#endif
  for (y=0; y < (ssize_t) gaussian_image->rows; y++)
  {
    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(kuwahara_view,0,y,kuwahara_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) gaussian_image->columns; x++)
    {
      const Quantum
        *magick_restrict p;

      double
        min_variance;

      RectangleInfo
        quadrant,
        target;

      size_t
        i;

      min_variance=MagickMaximumValue;
      SetGeometry(gaussian_image,&target);
      quadrant.width=width;
      quadrant.height=width;
      /* evaluate the four quadrants around (x,y) */
      for (i=0; i < 4; i++)
      {
        const Quantum
          *magick_restrict k;

        double
          mean[MaxPixelChannels],
          variance;

        ssize_t
          n;

        ssize_t
          j;

        quadrant.x=x;
        quadrant.y=y;
        switch (i)
        {
          case 0:
          {
            /* upper-left quadrant */
            quadrant.x=x-(ssize_t) (width-1);
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 1:
          {
            /* upper-right quadrant */
            quadrant.y=y-(ssize_t) (width-1);
            break;
          }
          case 2:
          {
            /* lower-left quadrant */
            quadrant.x=x-(ssize_t) (width-1);
            break;
          }
          case 3:
          default:
            /* lower-right quadrant: no offset */
            break;
        }
        p=GetCacheViewVirtualPixels(image_view,quadrant.x,quadrant.y,
          quadrant.width,quadrant.height,exception);
        if (p == (const Quantum *) NULL)
          break;
        /* per-channel mean of the quadrant */
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]=0.0;
        k=p;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
            mean[j]+=(double) k[j];
          k+=GetPixelChannels(gaussian_image);
        }
        for (j=0; j < (ssize_t) GetPixelChannels(gaussian_image); j++)
          mean[j]/=(double) (width*width);
        /* luma variance of the quadrant about its mean */
        k=p;
        variance=0.0;
        for (n=0; n < (ssize_t) (width*width); n++)
        {
          double
            luma;

          luma=GetPixelLuma(gaussian_image,k);
          variance+=(luma-GetMeanLuma(gaussian_image,mean))*
            (luma-GetMeanLuma(gaussian_image,mean));
          k+=GetPixelChannels(gaussian_image);
        }
        if (variance < min_variance)
          {
            min_variance=variance;
            target=quadrant;
          }
      }
      if (i < 4)
        {
          /* a quadrant read failed above */
          status=MagickFalse;
          break;
        }
      /* sample the center of the least-variance quadrant */
      status=InterpolatePixelChannels(gaussian_image,image_view,kuwahara_image,
        UndefinedInterpolatePixel,(double) target.x+target.width/2.0,(double)
        target.y+target.height/2.0,q,exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(kuwahara_image);
    }
    if (SyncCacheViewAuthenticPixels(kuwahara_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,KuwaharaImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  kuwahara_view=DestroyCacheView(kuwahara_view);
  image_view=DestroyCacheView(image_view);
  gaussian_image=DestroyImage(gaussian_image);
  if (status == MagickFalse)
    kuwahara_image=DestroyImage(kuwahara_image);
  return(kuwahara_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L o c a l C o n t r a s t I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LocalContrastImage() attempts to increase the appearance of large-scale
%  light-dark transitions.  Local contrast enhancement works similarly to
%  sharpening with an unsharp mask, however the mask is instead created using
%  an image with a greater blur distance.
%
%  The format of the LocalContrastImage method is:
%
%      Image *LocalContrastImage(const Image *image, const double radius,
%        const double strength,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian blur, in percentage with 100%
%      resulting in a blur radius of 20% of largest dimension.
%
%    o strength: the strength of the blur mask in percentage.
%
%   o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LocalContrastImage(const Image *image,const double radius,
  const double strength,ExceptionInfo *exception)
{
#define LocalContrastImageTag  "LocalContrast/Image"

  CacheView
    *image_view,
    *contrast_view;

  float
    *interImage,
    *scanline,
    totalWeight;

  Image
    *contrast_image;

  MagickBooleanType
    status;

  MemoryInfo
    *scanline_info,
    *interImage_info;

  ssize_t
    scanLineSize,
    width;

  /*
    Initialize contrast image attributes.  A separable triangular blur of
    half-width `width` (derived from the radius percentage) is computed in
    two passes; the blurred luma serves as the unsharp-style mask.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  contrast_image=AccelerateLocalContrastImage(image,radius,strength,exception);
  if (contrast_image != (Image *) NULL)
    return(contrast_image);
#endif
  contrast_image=CloneImage(image,0,0,MagickTrue,exception);
  if (contrast_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(contrast_image,DirectClass,exception) == MagickFalse)
    {
      contrast_image=DestroyImage(contrast_image);
      return((Image *) NULL);
    }
  image_view=AcquireVirtualCacheView(image,exception);
  contrast_view=AcquireAuthenticCacheView(contrast_image,exception);
  /* blur half-width: 0.2% of the largest dimension per unit of |radius| */
  scanLineSize=(ssize_t) MagickMax(image->columns,image->rows);
  width=(ssize_t) scanLineSize*0.002f*fabs(radius);
  scanLineSize+=(2*width);
  scanline_info=AcquireVirtualMemory((size_t) GetOpenMPMaximumThreads()*
    scanLineSize,sizeof(*scanline));
  if (scanline_info == (MemoryInfo *) NULL)
    {
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  scanline=(float *) GetVirtualMemoryBlob(scanline_info);
  /*
    Create intermediate buffer (vertically blurred luma, padded by `width`
    columns on each side).
  */
  interImage_info=AcquireVirtualMemory(image->rows*(image->columns+(2*width)),
    sizeof(*interImage));
  if (interImage_info == (MemoryInfo *) NULL)
    {
      scanline_info=RelinquishVirtualMemory(scanline_info);
      contrast_view=DestroyCacheView(contrast_view);
      image_view=DestroyCacheView(image_view);
      contrast_image=DestroyImage(contrast_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  interImage=(float *) GetVirtualMemoryBlob(interImage_info);
  totalWeight=(float) ((width+1)*(width+1));
  /*
    Vertical pass.
  */
  status=MagickTrue;
  {
    ssize_t
      x;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->columns,1)
#endif
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *out,
        *pix,
        *pixels;

      ssize_t
        y;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      /* each thread owns one scanLineSize slice of the scratch buffer */
      pixels=scanline;
      pixels+=id*scanLineSize;
      pix=pixels;
      p=GetCacheViewVirtualPixels(image_view,x,-width,1,image->rows+(2*width),
        exception);
      if (p == (const Quantum *) NULL)
        {
          status=MagickFalse;
          continue;
        }
      for (y=0; y < (ssize_t) image->rows+(2*width); y++)
      {
        *pix++=(float)GetPixelLuma(image,p);
        p+=image->number_channels;
      }
      out=interImage+x+width;
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        float
          sum,
          weight;

        /* triangular weighting: ramps up to the center, then back down */
        weight=1.0f;
        sum=0;
        pix=pixels+y;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* write to output */
        *out=sum/totalWeight;
        /* mirror into padding */
        if (x <= width && x != 0)
          *(out-(x*2))=*out;
        if ((x > (ssize_t) image->columns-width-2) &&
            (x != (ssize_t) image->columns-1))
          *(out+((image->columns-x-1)*2))=*out;
        out+=image->columns+(width*2);
      }
    }
  }
  /*
    Horizontal pass.
  */
  {
    ssize_t
      y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp parallel for schedule(static) \
      magick_number_threads(image,image,image->rows,1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      const int
        id = GetOpenMPThreadId();

      const Quantum
        *magick_restrict p;

      float
        *pix,
        *pixels;

      Quantum
        *magick_restrict q;

      ssize_t
        x;

      ssize_t
        i;

      if (status == MagickFalse)
        continue;
      pixels=scanline;
      pixels+=id*scanLineSize;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=GetCacheViewAuthenticPixels(contrast_view,0,y,image->columns,1,
        exception);
      if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      memcpy(pixels,interImage+(y*(image->columns+(2*width))),(image->columns+
        (2*width))*sizeof(float));
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        float
          mult,
          srcVal,
          sum,
          weight;

        PixelTrait
          traits;

        weight=1.0f;
        sum=0;
        pix=pixels+x;
        for (i=0; i < width; i++)
        {
          sum+=weight*(*pix++);
          weight+=1.0f;
        }
        for (i=width+1; i < (2*width); i++)
        {
          sum+=weight*(*pix++);
          weight-=1.0f;
        }
        /* Apply and write: scale RGB by (src + (src-mask)*strength%)/src */
        srcVal=(float) GetPixelLuma(image,p);
        mult=(srcVal-(sum/totalWeight))*(strength/100.0f);
        mult=(srcVal+mult)/srcVal;
        traits=GetPixelChannelTraits(image,RedPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelRed(contrast_image,ClampToQuantum((MagickRealType)
            GetPixelRed(image,p)*mult),q);
        traits=GetPixelChannelTraits(image,GreenPixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelGreen(contrast_image,ClampToQuantum((MagickRealType)
            GetPixelGreen(image,p)*mult),q);
        traits=GetPixelChannelTraits(image,BluePixelChannel);
        if ((traits & UpdatePixelTrait) != 0)
          SetPixelBlue(contrast_image,ClampToQuantum((MagickRealType)
            GetPixelBlue(image,p)*mult),q);
        p+=image->number_channels;
        q+=contrast_image->number_channels;
      }
      if (SyncCacheViewAuthenticPixels(contrast_view,exception) == MagickFalse)
        status=MagickFalse;
    }
  }
  scanline_info=RelinquishVirtualMemory(scanline_info);
  interImage_info=RelinquishVirtualMemory(interImage_info);
  contrast_view=DestroyCacheView(contrast_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    contrast_image=DestroyImage(contrast_image);
  return(contrast_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o t i o n B l u r I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MotionBlurImage() simulates motion blur.  We convolve the image with a
%  Gaussian operator of the given radius and standard deviation (sigma).
%  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and MotionBlurImage() selects a suitable radius for you.
%  Angle gives the angle of the blurring motion.
%
%  Andrew Protano contributed this effect.
%
%  The format of the MotionBlurImage method is:
%
%      Image *MotionBlurImage(const Image *image,const double radius,
%        const double sigma,const double angle,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting
%      the center pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o angle: Apply the effect along this angle.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  GetMotionBlurKernel() returns a normalized 1-D half-Gaussian of `width`
  taps, or NULL on allocation failure.  Caller frees with
  RelinquishAlignedMemory().
*/
static MagickRealType *GetMotionBlurKernel(const size_t width,
  const double sigma)
{
  MagickRealType
    *kernel,
    normalize;

  ssize_t
    i;

  /*
    Generate a 1-D convolution kernel.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    return(kernel);
  normalize=0.0;
  for (i=0; i < (ssize_t) width; i++)
  {
    kernel[i]=(MagickRealType) (exp((-((double) i*i)/(double) (2.0*MagickSigma*
      MagickSigma)))/(MagickSQ2PI*MagickSigma));
    normalize+=kernel[i];
  }
  for (i=0; i < (ssize_t) width; i++)
    kernel[i]/=normalize;
  return(kernel);
}

MagickExport Image *MotionBlurImage(const Image *image,const double radius,
  const double sigma,const double angle,ExceptionInfo *exception)
{
#define BlurImageTag  "Blur/Image"

  CacheView
    *blur_view,
    *image_view,
    *motion_view;

  Image
    *blur_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  OffsetInfo
    *offset;

  PointInfo
    point;

  size_t
    width;

  ssize_t
    w,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=GetMotionBlurKernel(width,sigma);
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  offset=(OffsetInfo *) AcquireQuantumMemory(width,sizeof(*offset));
  if (offset == (OffsetInfo *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /* precompute the per-tap pixel offsets along the motion direction */
  point.x=(double) width*sin(DegreesToRadians(angle));
  point.y=(double) width*cos(DegreesToRadians(angle));
  for (w=0; w < (ssize_t) width; w++)
  {
    offset[w].x=CastDoubleToLong(ceil((double) (w*point.y)/
      hypot(point.x,point.y)-0.5));
    offset[w].y=CastDoubleToLong(ceil((double) (w*point.x)/
      hypot(point.x,point.y)-0.5));
  }
  /*
    Motion blur image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  blur_image=AccelerateMotionBlurImage(image,kernel,width,offset,exception);
  if (blur_image != (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return(blur_image);
    }
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      offset=(OffsetInfo *) RelinquishMagickMemory(offset);
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  motion_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        const Quantum
          *magick_restrict r;

        MagickRealType
          *magick_restrict k;

        ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending: plain weighted sum along the motion path.
            */
            for (j=0; j < (ssize_t) width; j++)
            {
              r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+
                offset[j].y,1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=(*k)*r[i];
              k++;
            }
            SetPixelChannel(blur_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending: weight each tap by its alpha; gamma renormalizes.
        */
        alpha=0.0;
        gamma=0.0;
        for (j=0; j < (ssize_t) width; j++)
        {
          r=GetCacheViewVirtualPixels(motion_view,x+offset[j].x,y+offset[j].y,1,
            1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) (QuantumScale*GetPixelAlpha(image,r));
          pixel+=(*k)*alpha*r[i];
          gamma+=(*k)*alpha;
          k++;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  motion_view=DestroyCacheView(motion_view);
  image_view=DestroyCacheView(image_view);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  offset=(OffsetInfo *) RelinquishMagickMemory(offset);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     P r e v i e w I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PreviewImage() tiles 9 thumbnails of the specified image with an image
%  processing operation
applied with varying parameters. This may be helpful % pin-pointing an appropriate parameter for a particular image processing % operation. % % The format of the PreviewImages method is: % % Image *PreviewImages(const Image *image,const PreviewType preview, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o preview: the image processing operation. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *PreviewImage(const Image *image,const PreviewType preview, ExceptionInfo *exception) { #define NumberTiles 9 #define PreviewImageTag "Preview/Image" #define DefaultPreviewGeometry "204x204+10+10" char factor[MagickPathExtent], label[MagickPathExtent]; double degrees, gamma, percentage, radius, sigma, threshold; Image *images, *montage_image, *preview_image, *thumbnail; ImageInfo *preview_info; MagickBooleanType proceed; MontageInfo *montage_info; QuantizeInfo quantize_info; RectangleInfo geometry; ssize_t i, x; size_t colors; ssize_t y; /* Open output image file. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); colors=2; degrees=0.0; gamma=(-0.2f); preview_info=AcquireImageInfo(); SetGeometry(image,&geometry); (void) ParseMetaGeometry(DefaultPreviewGeometry,&geometry.x,&geometry.y, &geometry.width,&geometry.height); images=NewImageList(); percentage=12.5; GetQuantizeInfo(&quantize_info); radius=0.0; sigma=1.0; threshold=0.0; x=0; y=0; for (i=0; i < NumberTiles; i++) { thumbnail=ThumbnailImage(image,geometry.width,geometry.height,exception); if (thumbnail == (Image *) NULL) break; (void) SetImageProgressMonitor(thumbnail,(MagickProgressMonitor) NULL, (void *) NULL); (void) SetImageProperty(thumbnail,"label",DefaultTileLabel,exception); if (i == (NumberTiles/2)) { (void) QueryColorCompliance("#dfdfdf",AllCompliance, &thumbnail->matte_color,exception); AppendImageToList(&images,thumbnail); continue; } switch (preview) { case RotatePreview: { degrees+=45.0; preview_image=RotateImage(thumbnail,degrees,exception); (void) FormatLocaleString(label,MagickPathExtent,"rotate %g",degrees); break; } case ShearPreview: { degrees+=5.0; preview_image=ShearImage(thumbnail,degrees,degrees,exception); (void) FormatLocaleString(label,MagickPathExtent,"shear %gx%g",degrees, 2.0*degrees); break; } case RollPreview: { x=(ssize_t) ((i+1)*thumbnail->columns)/NumberTiles; y=(ssize_t) ((i+1)*thumbnail->rows)/NumberTiles; preview_image=RollImage(thumbnail,x,y,exception); (void) FormatLocaleString(label,MagickPathExtent,"roll %+.20gx%+.20g", (double) x,(double) y); break; } case HuePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"100,100,%g",2.0* percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } 
case SaturationPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"100,%g",2.0* percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case BrightnessPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(factor,MagickPathExtent,"%g",2.0*percentage); (void) ModulateImage(preview_image,factor,exception); (void) FormatLocaleString(label,MagickPathExtent,"modulate %s",factor); break; } case GammaPreview: default: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; gamma+=0.4f; (void) GammaImage(preview_image,gamma,exception); (void) FormatLocaleString(label,MagickPathExtent,"gamma %g",gamma); break; } case SpiffPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image != (Image *) NULL) for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickTrue,exception); (void) FormatLocaleString(label,MagickPathExtent,"contrast (%.20g)", (double) i+1); break; } case DullPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; for (x=0; x < i; x++) (void) ContrastImage(preview_image,MagickFalse,exception); (void) FormatLocaleString(label,MagickPathExtent,"+contrast (%.20g)", (double) i+1); break; } case GrayscalePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; quantize_info.colorspace=GRAYColorspace; (void) QuantizeImage(&quantize_info,preview_image,exception); (void) FormatLocaleString(label,MagickPathExtent, "-colorspace gray -colors %.20g",(double) colors); break; } case QuantizePreview: { 
preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; colors<<=1; quantize_info.number_colors=colors; (void) QuantizeImage(&quantize_info,preview_image,exception); (void) FormatLocaleString(label,MagickPathExtent,"colors %.20g", (double) colors); break; } case DespecklePreview: { for (x=0; x < (i-1); x++) { preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; thumbnail=DestroyImage(thumbnail); thumbnail=preview_image; } preview_image=DespeckleImage(thumbnail,exception); if (preview_image == (Image *) NULL) break; (void) FormatLocaleString(label,MagickPathExtent,"despeckle (%.20g)", (double) i+1); break; } case ReduceNoisePreview: { preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) radius,(size_t) radius,exception); (void) FormatLocaleString(label,MagickPathExtent,"noise %g",radius); break; } case AddNoisePreview: { switch ((int) i) { case 0: { (void) CopyMagickString(factor,"uniform",MagickPathExtent); break; } case 1: { (void) CopyMagickString(factor,"gaussian",MagickPathExtent); break; } case 2: { (void) CopyMagickString(factor,"multiplicative",MagickPathExtent); break; } case 3: { (void) CopyMagickString(factor,"impulse",MagickPathExtent); break; } case 5: { (void) CopyMagickString(factor,"laplacian",MagickPathExtent); break; } case 6: { (void) CopyMagickString(factor,"Poisson",MagickPathExtent); break; } default: { (void) CopyMagickString(thumbnail->magick,"NULL",MagickPathExtent); break; } } preview_image=StatisticImage(thumbnail,NonpeakStatistic,(size_t) i, (size_t) i,exception); (void) FormatLocaleString(label,MagickPathExtent,"+noise %s",factor); break; } case SharpenPreview: { preview_image=SharpenImage(thumbnail,radius,sigma,exception); (void) FormatLocaleString(label,MagickPathExtent,"sharpen %gx%g", radius,sigma); break; } case BlurPreview: { preview_image=BlurImage(thumbnail,radius,sigma,exception); (void) 
FormatLocaleString(label,MagickPathExtent,"blur %gx%g",radius, sigma); break; } case ThresholdPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) BilevelImage(thumbnail,(double) (percentage*((double) QuantumRange+1.0))/100.0,exception); (void) FormatLocaleString(label,MagickPathExtent,"threshold %g", (double) (percentage*((double) QuantumRange+1.0))/100.0); break; } case EdgeDetectPreview: { preview_image=EdgeImage(thumbnail,radius,exception); (void) FormatLocaleString(label,MagickPathExtent,"edge %g",radius); break; } case SpreadPreview: { preview_image=SpreadImage(thumbnail,image->interpolate,radius, exception); (void) FormatLocaleString(label,MagickPathExtent,"spread %g", radius+0.5); break; } case SolarizePreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; (void) SolarizeImage(preview_image,(double) QuantumRange*percentage/ 100.0,exception); (void) FormatLocaleString(label,MagickPathExtent,"solarize %g", (QuantumRange*percentage)/100.0); break; } case ShadePreview: { degrees+=10.0; preview_image=ShadeImage(thumbnail,MagickTrue,degrees,degrees, exception); (void) FormatLocaleString(label,MagickPathExtent,"shade %gx%g",degrees, degrees); break; } case RaisePreview: { RectangleInfo raise; preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; raise.width=(size_t) (2*i+2); raise.height=(size_t) (2*i+2); raise.x=(i-1)/2; raise.y=(i-1)/2; (void) RaiseImage(preview_image,&raise,MagickTrue,exception); (void) FormatLocaleString(label,MagickPathExtent, "raise %.20gx%.20g%+.20g%+.20g",(double) raise.width,(double) raise.height,(double) raise.x,(double) raise.y); break; } case SegmentPreview: { preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; threshold+=0.4f; (void) SegmentImage(preview_image,sRGBColorspace,MagickFalse,threshold, 
threshold,exception); (void) FormatLocaleString(label,MagickPathExtent,"segment %gx%g", threshold,threshold); break; } case SwirlPreview: { preview_image=SwirlImage(thumbnail,degrees,image->interpolate, exception); (void) FormatLocaleString(label,MagickPathExtent,"swirl %g",degrees); degrees+=45.0; break; } case ImplodePreview: { degrees+=0.1f; preview_image=ImplodeImage(thumbnail,degrees,image->interpolate, exception); (void) FormatLocaleString(label,MagickPathExtent,"implode %g",degrees); break; } case WavePreview: { degrees+=5.0f; preview_image=WaveImage(thumbnail,0.5*degrees,2.0*degrees, image->interpolate,exception); (void) FormatLocaleString(label,MagickPathExtent,"wave %gx%g",0.5* degrees,2.0*degrees); break; } case OilPaintPreview: { preview_image=OilPaintImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g", radius,sigma); break; } case CharcoalDrawingPreview: { preview_image=CharcoalImage(thumbnail,(double) radius,(double) sigma, exception); (void) FormatLocaleString(label,MagickPathExtent,"charcoal %gx%g", radius,sigma); break; } case JPEGPreview: { char filename[MagickPathExtent]; int file; MagickBooleanType status; preview_image=CloneImage(thumbnail,0,0,MagickTrue,exception); if (preview_image == (Image *) NULL) break; preview_info->quality=(size_t) percentage; (void) FormatLocaleString(factor,MagickPathExtent,"%.20g",(double) preview_info->quality); file=AcquireUniqueFileResource(filename); if (file != -1) file=close(file)-1; (void) FormatLocaleString(preview_image->filename,MagickPathExtent, "jpeg:%s",filename); status=WriteImage(preview_info,preview_image,exception); if (status != MagickFalse) { Image *quality_image; (void) CopyMagickString(preview_info->filename, preview_image->filename,MagickPathExtent); quality_image=ReadImage(preview_info,exception); if (quality_image != (Image *) NULL) { preview_image=DestroyImage(preview_image); preview_image=quality_image; } } (void) 
RelinquishUniqueFileResource(preview_image->filename); if ((GetBlobSize(preview_image)/1024) >= 1024) (void) FormatLocaleString(label,MagickPathExtent,"quality %s\n%gmb ", factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/ 1024.0/1024.0); else if (GetBlobSize(preview_image) >= 1024) (void) FormatLocaleString(label,MagickPathExtent, "quality %s\n%gkb ",factor,(double) ((MagickOffsetType) GetBlobSize(preview_image))/1024.0); else (void) FormatLocaleString(label,MagickPathExtent, "quality %s\n%.20gb ",factor,(double) ((MagickOffsetType) GetBlobSize(thumbnail))); break; } } thumbnail=DestroyImage(thumbnail); percentage+=12.5; radius+=0.5; sigma+=0.25; if (preview_image == (Image *) NULL) break; preview_image->alpha_trait=UndefinedPixelTrait; (void) DeleteImageProperty(preview_image,"label"); (void) SetImageProperty(preview_image,"label",label,exception); AppendImageToList(&images,preview_image); proceed=SetImageProgress(image,PreviewImageTag,(MagickOffsetType) i, NumberTiles); if (proceed == MagickFalse) break; } if (images == (Image *) NULL) { preview_info=DestroyImageInfo(preview_info); return((Image *) NULL); } /* Create the montage. */ montage_info=CloneMontageInfo(preview_info,(MontageInfo *) NULL); (void) CopyMagickString(montage_info->filename,image->filename, MagickPathExtent); montage_info->shadow=MagickTrue; (void) CloneString(&montage_info->tile,"3x3"); (void) CloneString(&montage_info->geometry,DefaultPreviewGeometry); (void) CloneString(&montage_info->frame,DefaultTileFrame); montage_image=MontageImages(images,montage_info,exception); montage_info=DestroyMontageInfo(montage_info); images=DestroyImageList(images); if (montage_image == (Image *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); if (montage_image->montage != (char *) NULL) { /* Free image directory. 
*/ montage_image->montage=(char *) RelinquishMagickMemory( montage_image->montage); if (image->directory != (char *) NULL) montage_image->directory=(char *) RelinquishMagickMemory( montage_image->directory); } preview_info=DestroyImageInfo(preview_info); return(montage_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R o t a t i o n a l B l u r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RotationalBlurImage() applies a radial blur to the image. % % Andrew Protano contributed this effect. % % The format of the RotationalBlurImage method is: % % Image *RotationalBlurImage(const Image *image,const double angle, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o angle: the angle of the radial blur. % % o blur: the blur. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *RotationalBlurImage(const Image *image,const double angle, ExceptionInfo *exception) { CacheView *blur_view, *image_view, *radial_view; double blur_radius, *cos_theta, offset, *sin_theta, theta; Image *blur_image; MagickBooleanType status; MagickOffsetType progress; PointInfo blur_center; size_t n; ssize_t w, y; /* Allocate blur image. 
*/
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the OpenCL-accelerated path first; fall through on failure. */
  blur_image=AccelerateRotationalBlurImage(image,angle,exception);
  if (blur_image != (Image *) NULL)
    return(blur_image);
#endif
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      return((Image *) NULL);
    }
  /*
    The blur rotates each pixel about the image center; precompute a table of
    n sample angles spanning the requested arc, centered on zero.
  */
  blur_center.x=(double) (image->columns-1)/2.0;
  blur_center.y=(double) (image->rows-1)/2.0;
  blur_radius=hypot(blur_center.x,blur_center.y);
  n=(size_t) fabs(4.0*DegreesToRadians(angle)*sqrt((double) blur_radius)+2UL);
  theta=DegreesToRadians(angle)/(double) (n-1);
  cos_theta=(double *) AcquireQuantumMemory((size_t) n,sizeof(*cos_theta));
  sin_theta=(double *) AcquireQuantumMemory((size_t) n,sizeof(*sin_theta));
  if ((cos_theta == (double *) NULL) || (sin_theta == (double *) NULL))
    {
      if (cos_theta != (double *) NULL)
        cos_theta=(double *) RelinquishMagickMemory(cos_theta);
      if (sin_theta != (double *) NULL)
        sin_theta=(double *) RelinquishMagickMemory(sin_theta);
      blur_image=DestroyImage(blur_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  offset=theta*(double) (n-1)/2.0;
  for (w=0; w < (ssize_t) n; w++)
  {
    cos_theta[w]=cos((double) (theta*w-offset));
    sin_theta[w]=sin((double) (theta*w-offset));
  }
  /*
    Radial blur image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  radial_view=AcquireVirtualCacheView(image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        radius;

      PointInfo
        center;

      ssize_t
        i;

      size_t
        step;

      /*
        Pixels closer to the center sweep a shorter arc, so sample the angle
        table with a proportionally larger step.
      */
      center.x=(double) x-blur_center.x;
      center.y=(double) y-blur_center.y;
      radius=hypot((double) center.x,center.y);
      if (radius == 0)
        step=1;
      else
        {
          step=(size_t) (blur_radius/radius);
          if (step == 0)
            step=1;
          else
            if (step >= n)
              step=n-1;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        const Quantum
          *magick_restrict r;

        ssize_t
          j;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[i],q);
            continue;
          }
        gamma=0.0;
        pixel=0.0;
        /* Unweighted average when there is no alpha to blend by. */
        if ((GetPixelChannelTraits(image,AlphaPixelChannel) ==
             UndefinedPixelTrait) || (channel == AlphaPixelChannel))
          {
            for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
            {
              r=GetCacheViewVirtualPixels(radial_view, (ssize_t)
                (blur_center.x+center.x*cos_theta[j]-center.y*sin_theta[j]+
                0.5),(ssize_t) (blur_center.y+center.x*sin_theta[j]+center.y*
                cos_theta[j]+0.5),1,1,exception);
              if (r == (const Quantum *) NULL)
                {
                  status=MagickFalse;
                  continue;
                }
              pixel+=r[i];
              gamma++;
            }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /* Alpha-weighted average for color channels of images with alpha. */
        for (j=0; j < (ssize_t) n; j+=(ssize_t) step)
        {
          double
            alpha;

          r=GetCacheViewVirtualPixels(radial_view, (ssize_t) (blur_center.x+
            center.x*cos_theta[j]-center.y*sin_theta[j]+0.5),(ssize_t)
            (blur_center.y+center.x*sin_theta[j]+center.y*cos_theta[j]+0.5),
            1,1,exception);
          if (r == (const Quantum *) NULL)
            {
              status=MagickFalse;
              continue;
            }
          alpha=(double) QuantumScale*GetPixelAlpha(image,r);
          pixel+=alpha*r[i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(blur_image);
    }
    if (SyncCacheViewAuthenticPixels(blur_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,BlurImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_view=DestroyCacheView(blur_view);
  radial_view=DestroyCacheView(radial_view);
  image_view=DestroyCacheView(image_view);
  cos_theta=(double *) RelinquishMagickMemory(cos_theta);
  sin_theta=(double *) RelinquishMagickMemory(sin_theta);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S e l e c t i v e B l u r I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SelectiveBlurImage() selectively blur pixels within a contrast threshold.
%  It is similar to the unsharpen mask that sharpens everything with contrast
%  above a certain threshold.
%
%  The format of the SelectiveBlurImage method is:
%
%      Image *SelectiveBlurImage(const Image *image,const double radius,
%        const double sigma,const double threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o threshold: only pixels within this contrast threshold are included
%      in the blur operation.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SelectiveBlurImage(const Image *image,const double radius,
  const double sigma,const double threshold,ExceptionInfo *exception)
{
#define SelectiveBlurImageTag  "SelectiveBlur/Image"

  CacheView
    *blur_view,
    *image_view,
    *luminance_view;

  Image
    *blur_image,
    *luminance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickRealType
    *kernel;

  size_t
    width;

  ssize_t
    center,
    y;

  /*
    Initialize blur image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth1D(radius,sigma);
  kernel=(MagickRealType *) MagickAssumeAligned(AcquireAlignedMemory((size_t)
    width,width*sizeof(*kernel)));
  if (kernel == (MagickRealType *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  {
    ssize_t
      i,
      j,
      v;

    /* Fill a width x width 2-D Gaussian kernel, row-major. */
    j=(ssize_t) (width-1)/2;
    i=0;
    for (v=(-j); v <= j; v++)
    {
      ssize_t
        u;

      for (u=(-j); u <= j; u++)
        kernel[i++]=(MagickRealType) (exp(-((double) u*u+v*v)/(2.0*MagickSigma*
          MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
    }
  }
  if (image->debug != MagickFalse)
    {
      char
        format[MagickPathExtent],
        *message;

      const MagickRealType
        *k;

      ssize_t
        u,
        v;

      (void) LogMagickEvent(TransformEvent,GetMagickModule(),
        "  SelectiveBlurImage with %.20gx%.20g kernel:",(double) width,(double)
        width);
      message=AcquireString("");
      k=kernel;
      for (v=0; v < (ssize_t) width; v++)
      {
        *message='\0';
        (void) FormatLocaleString(format,MagickPathExtent,"%.20g: ",(double) v);
        (void) ConcatenateString(&message,format);
        for (u=0; u < (ssize_t) width; u++)
        {
          (void) FormatLocaleString(format,MagickPathExtent,"%+f ",(double)
            *k++);
          (void) ConcatenateString(&message,format);
        }
        (void) LogMagickEvent(TransformEvent,GetMagickModule(),"%s",message);
      }
      message=DestroyString(message);
    }
  blur_image=CloneImage(image,0,0,MagickTrue,exception);
  if (blur_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(blur_image,DirectClass,exception) == MagickFalse)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /* A grayscale copy supplies the intensity used for the contrast test. */
  luminance_image=CloneImage(image,0,0,MagickTrue,exception);
  if (luminance_image == (Image *) NULL)
    {
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  status=TransformImageColorspace(luminance_image,GRAYColorspace,exception);
  if (status == MagickFalse)
    {
      luminance_image=DestroyImage(luminance_image);
      blur_image=DestroyImage(blur_image);
      kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
      return((Image *) NULL);
    }
  /*
    Threshold blur image.
  */
  status=MagickTrue;
  progress=0;
  /* Offset (in Quantum) of the center pixel within the padded p/l windows. */
  center=(ssize_t) (GetPixelChannels(image)*(image->columns+width)*
    ((width-1)/2L)+GetPixelChannels(image)*((width-1)/2L));
  image_view=AcquireVirtualCacheView(image,exception);
  luminance_view=AcquireVirtualCacheView(luminance_image,exception);
  blur_view=AcquireAuthenticCacheView(blur_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,blur_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      contrast;

    MagickBooleanType
      sync;

    const Quantum
      *magick_restrict l,
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) (width-1)/2L),y-(ssize_t)
      ((width-1)/2L),image->columns+width,width,exception);
    l=GetCacheViewVirtualPixels(luminance_view,-((ssize_t) (width-1)/2L),y-
      (ssize_t) ((width-1)/2L),luminance_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(blur_view,0,y,blur_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (l == (const Quantum *) NULL) ||
        (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        intensity;

      ssize_t
        i;

      intensity=GetPixelIntensity(image,p+center);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          blur_traits,
          traits;

        const MagickRealType
          *magick_restrict k;

        const Quantum
          *magick_restrict luminance_pixels,
          *magick_restrict pixels;

        ssize_t
          u;

        ssize_t
          v;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        blur_traits=GetPixelChannelTraits(blur_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (blur_traits == UndefinedPixelTrait))
          continue;
        if ((blur_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        k=kernel;
        pixel=0.0;
        pixels=p;
        luminance_pixels=l;
        gamma=0.0;
        if ((blur_traits & BlendPixelTrait) == 0)
          {
            /*
              Non-blended path: only neighbors whose luminance contrast is
              below the threshold contribute.
            */
            for (v=0; v < (ssize_t) width; v++)
            {
              for (u=0; u < (ssize_t) width; u++)
              {
                contrast=GetPixelIntensity(luminance_image,luminance_pixels)-
                  intensity;
                if (fabs(contrast) < threshold)
                  {
                    pixel+=(*k)*pixels[i];
                    gamma+=(*k);
                  }
                k++;
                pixels+=GetPixelChannels(image);
                luminance_pixels+=GetPixelChannels(luminance_image);
              }
              pixels+=GetPixelChannels(image)*image->columns;
              luminance_pixels+=GetPixelChannels(luminance_image)*
                luminance_image->columns;
            }
            if (fabs((double) gamma) < MagickEpsilon)
              {
                /* No neighbor qualified: keep the source pixel unchanged. */
                SetPixelChannel(blur_image,channel,p[center+i],q);
                continue;
              }
            gamma=PerceptibleReciprocal(gamma);
            SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
            continue;
          }
        /* Blended path: weight qualifying neighbors by their alpha too. */
        for (v=0; v < (ssize_t) width; v++)
        {
          for (u=0; u < (ssize_t) width; u++)
          {
            contrast=GetPixelIntensity(image,pixels)-intensity;
            if (fabs(contrast) < threshold)
              {
                alpha=(double) (QuantumScale*GetPixelAlpha(image,pixels));
                pixel+=(*k)*alpha*pixels[i];
                gamma+=(*k)*alpha;
              }
            k++;
            pixels+=GetPixelChannels(image);
            luminance_pixels+=GetPixelChannels(luminance_image);
          }
          pixels+=GetPixelChannels(image)*image->columns;
          luminance_pixels+=GetPixelChannels(luminance_image)*
            luminance_image->columns;
        }
        if (fabs((double) gamma) < MagickEpsilon)
          {
            SetPixelChannel(blur_image,channel,p[center+i],q);
            continue;
          }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(blur_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      p+=GetPixelChannels(image);
      l+=GetPixelChannels(luminance_image);
      q+=GetPixelChannels(blur_image);
    }
    sync=SyncCacheViewAuthenticPixels(blur_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SelectiveBlurImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  blur_image->type=image->type;
  blur_view=DestroyCacheView(blur_view);
  luminance_view=DestroyCacheView(luminance_view);
  image_view=DestroyCacheView(image_view);
  luminance_image=DestroyImage(luminance_image);
  kernel=(MagickRealType *) RelinquishAlignedMemory(kernel);
  if (status == MagickFalse)
    blur_image=DestroyImage(blur_image);
  return(blur_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a d e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShadeImage() shines a distant light on an image to create a
%  three-dimensional effect. You control the positioning of the light with
%  azimuth and elevation; azimuth is measured in degrees off the x axis
%  and elevation is measured in pixels above the Z axis.
%
%  The format of the ShadeImage method is:
%
%      Image *ShadeImage(const Image *image,const MagickBooleanType gray,
%        const double azimuth,const double elevation,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o gray: A value other than zero shades the intensity of each pixel.
%
%    o azimuth, elevation:  Define the light source direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShadeImage(const Image *image,const MagickBooleanType gray,
  const double azimuth,const double elevation,ExceptionInfo *exception)
{
#define GetShadeIntensity(image,pixel) \
  ClampPixel(GetPixelIntensity((image),(pixel)))
#define ShadeImageTag  "Shade/Image"

  CacheView
    *image_view,
    *shade_view;

  Image
    *linear_image,
    *shade_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PrimaryInfo
    light;

  ssize_t
    y;

  /*
    Initialize shaded image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  shade_image=CloneImage(image,0,0,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (shade_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (shade_image != (Image *) NULL)
        shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(shade_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      shade_image=DestroyImage(shade_image);
      return((Image *) NULL);
    }
  /*
    Compute the light vector.
  */
  light.x=(double) QuantumRange*cos(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.y=(double) QuantumRange*sin(DegreesToRadians(azimuth))*
    cos(DegreesToRadians(elevation));
  light.z=(double) QuantumRange*sin(DegreesToRadians(elevation));
  /*
    Shade image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(linear_image,exception);
  shade_view=AcquireAuthenticCacheView(shade_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(linear_image,shade_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    double
      distance,
      normal_distance,
      shade;

    PrimaryInfo
      normal;

    const Quantum
      *magick_restrict center,
      *magick_restrict p,
      *magick_restrict post,
      *magick_restrict pre;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Fetch a 3-row window (rows y-1..y+1, padded by one column each side). */
    p=GetCacheViewVirtualPixels(image_view,-1,y-1,linear_image->columns+2,3,
      exception);
    q=QueueCacheViewAuthenticPixels(shade_view,0,y,shade_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Shade this row of pixels.
    */
    normal.z=2.0*(double) QuantumRange;  /* constant Z of surface normal */
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      ssize_t
        i;

      /*
        Determine the surface normal and compute shading: the x/y gradients
        come from intensity differences across the 3x3 neighborhood.
      */
      pre=p+GetPixelChannels(linear_image);
      center=pre+(linear_image->columns+2)*GetPixelChannels(linear_image);
      post=center+(linear_image->columns+2)*GetPixelChannels(linear_image);
      normal.x=(double) (
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,center-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,center+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image)));
      normal.y=(double) (
        GetShadeIntensity(linear_image,post-GetPixelChannels(linear_image))+
        GetShadeIntensity(linear_image,post)+
        GetShadeIntensity(linear_image,post+GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre-GetPixelChannels(linear_image))-
        GetShadeIntensity(linear_image,pre)-
        GetShadeIntensity(linear_image,pre+GetPixelChannels(linear_image)));
      if ((fabs(normal.x) <= MagickEpsilon) &&
          (fabs(normal.y) <= MagickEpsilon))
        shade=light.z;  /* flat surface faces the light directly */
      else
        {
          shade=0.0;
          distance=normal.x*light.x+normal.y*light.y+normal.z*light.z;
          if (distance > MagickEpsilon)
            {
              normal_distance=normal.x*normal.x+normal.y*normal.y+
                normal.z*normal.z;
              if (normal_distance > (MagickEpsilon*MagickEpsilon))
                shade=distance/sqrt((double) normal_distance);
            }
        }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          shade_traits,
          traits;

        channel=GetPixelChannelChannel(linear_image,i);
        traits=GetPixelChannelTraits(linear_image,channel);
        shade_traits=GetPixelChannelTraits(shade_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (shade_traits == UndefinedPixelTrait))
          continue;
        if ((shade_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if ((traits & UpdatePixelTrait) == 0)
          {
            SetPixelChannel(shade_image,channel,center[i],q);
            continue;
          }
        if (gray != MagickFalse)
          {
            /* Gray shading: the shade value replaces the channel. */
            SetPixelChannel(shade_image,channel,ClampToQuantum(shade),q);
            continue;
          }
        /* Otherwise modulate the source channel by the shade factor. */
        SetPixelChannel(shade_image,channel,ClampToQuantum(QuantumScale*shade*
          center[i]),q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(shade_image);
    }
    if (SyncCacheViewAuthenticPixels(shade_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ShadeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  shade_view=DestroyCacheView(shade_view);
  image_view=DestroyCacheView(image_view);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    shade_image=DestroyImage(shade_image);
  return(shade_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S h a r p e n I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SharpenImage() sharpens the image.  We convolve the image with a Gaussian
%  operator of the given radius and standard deviation (sigma).  For
%  reasonable results, radius should be larger than sigma.  Use a radius of 0
%  and SharpenImage() selects a suitable radius for you.
%
%  Using a separable kernel would be faster, but the negative weights cancel
%  out on the corners of the kernel producing often undesirable ringing in the
%  filtered result; this can be avoided by using a 2D gaussian shaped image
%  sharpening kernel instead.
%
%  The format of the SharpenImage method is:
%
%    Image *SharpenImage(const Image *image,const double radius,
%      const double sigma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Laplacian, in pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SharpenImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
  double
    gamma,
    normalize;

  Image
    *sharp_image;

  KernelInfo
    *kernel_info;

  ssize_t
    i;

  size_t
    width;

  ssize_t
    j,
    u,
    v;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Build a 2D sharpening kernel: negative Gaussian lobes everywhere, with
    the center weight set below so the whole kernel acts as a
    Laplacian-of-Gaussian style operator.  The kernel is square with odd
    width so (width-1)/2 is the exact center offset.
  */
  width=GetOptimalKernelWidth2D(radius,sigma);
  kernel_info=AcquireKernelInfo((const char *) NULL,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) memset(kernel_info,0,sizeof(*kernel_info));
  kernel_info->width=width;
  kernel_info->height=width;
  kernel_info->x=(ssize_t) (width-1)/2;
  kernel_info->y=(ssize_t) (width-1)/2;
  kernel_info->signature=MagickCoreSignature;
  kernel_info->values=(MagickRealType *) MagickAssumeAligned(
    AcquireAlignedMemory(kernel_info->width,kernel_info->height*
    sizeof(*kernel_info->values)));
  if (kernel_info->values == (MagickRealType *) NULL)
    {
      kernel_info=DestroyKernelInfo(kernel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  normalize=0.0;
  j=(ssize_t) (kernel_info->width-1)/2;
  i=0;
  for (v=(-j); v <= j; v++)
  {
    for (u=(-j); u <= j; u++)
    {
      kernel_info->values[i]=(MagickRealType) (-exp(-((double) u*u+v*v)/(2.0*
        MagickSigma*MagickSigma))/(2.0*MagickPI*MagickSigma*MagickSigma));
      normalize+=kernel_info->values[i];
      i++;
    }
  }
  /*
    After the loops i == width*height, so i/2 indexes the center element
    (odd width guarantees this).  Overwrite it so the kernel sums to
    -normalize, giving a net positive center surrounded by negative lobes.
  */
  kernel_info->values[i/2]=(double) ((-2.0)*normalize);
  /*
    Re-normalize so the kernel weights sum to one (PerceptibleReciprocal
    guards against a degenerate zero sum).
  */
  normalize=0.0;
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    normalize+=kernel_info->values[i];
  gamma=PerceptibleReciprocal(normalize);
  for (i=0; i < (ssize_t) (kernel_info->width*kernel_info->height); i++)
    kernel_info->values[i]*=gamma;
  sharp_image=ConvolveImage(image,kernel_info,exception);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(sharp_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S p r e a d   I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpreadImage() is a special effects method that randomly displaces each
%  pixel in a square area defined by the radius parameter.
%
%  The format of the SpreadImage method is:
%
%      Image *SpreadImage(const Image *image,
%        const PixelInterpolateMethod method,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: interpolation method.
%
%    o radius: choose a random pixel in a neighborhood of this extent.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpreadImage(const Image *image,
  const PixelInterpolateMethod method,const double radius,
  ExceptionInfo *exception)
{
#define SpreadImageTag  "Spread/Image"

  CacheView
    *image_view,
    *spread_view;

  Image
    *spread_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  size_t
    width;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Initialize spread image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  spread_image=CloneImage(image,0,0,MagickTrue,exception);
  if (spread_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(spread_image,DirectClass,exception) == MagickFalse)
    {
      spread_image=DestroyImage(spread_image);
      return((Image *) NULL);
    }
  /*
    Spread image.
  */
  status=MagickTrue;
  progress=0;
  width=GetOptimalKernelWidth1D(radius,0.5);
  /* one RandomInfo per OpenMP thread so the PRNG is race-free */
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireVirtualCacheView(image,exception);
  spread_view=AcquireAuthenticCacheView(spread_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* only parallelize when the secret key is unset, to keep reproducibility */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,spread_image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(spread_view,0,y,spread_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PointInfo
        point;

      /*
        Displace each pixel by a uniform random offset in a width x width
        neighborhood centered on (x,y), sampling with the caller's
        interpolation method.
      */
      point.x=GetPseudoRandomValue(random_info[id]);
      point.y=GetPseudoRandomValue(random_info[id]);
      /* NOTE(review): the shared 'status' is written here by all threads
         without synchronization; only the MagickFalse transition matters,
         so the race is tolerated by design — confirm against upstream. */
      status=InterpolatePixelChannels(image,image_view,spread_image,method,
        (double) x+width*(point.x-0.5),(double) y+width*(point.y-0.5),q,
        exception);
      if (status == MagickFalse)
        break;
      q+=GetPixelChannels(spread_image);
    }
    if (SyncCacheViewAuthenticPixels(spread_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /* NOTE(review): the atomic covers only the increment; the read of
           'progress' passed to SetImageProgress() is unsynchronized and may
           be slightly stale — monitoring only, not correctness. */
        proceed=SetImageProgress(image,SpreadImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  spread_view=DestroyCacheView(spread_view);
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  if (status == MagickFalse)
    spread_image=DestroyImage(spread_image);
  return(spread_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n s h a r p M a s k I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnsharpMaskImage() sharpens
one or more image channels.  We convolve the
%  image with a Gaussian operator of the given radius and standard deviation
%  (sigma).  For reasonable results, radius should be larger than sigma.  Use a
%  radius of 0 and UnsharpMaskImage() selects a suitable radius for you.
%
%  The format of the UnsharpMaskImage method is:
%
%      Image *UnsharpMaskImage(const Image *image,const double radius,
%        const double sigma,const double gain,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the Gaussian, in pixels, not counting the center
%      pixel.
%
%    o sigma: the standard deviation of the Gaussian, in pixels.
%
%    o gain: the percentage of the difference between the original and the
%      blur image that is added back into the original.
%
%    o threshold: the threshold in pixels needed to apply the difference gain.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *UnsharpMaskImage(const Image *image,const double radius,
  const double sigma,const double gain,const double threshold,
  ExceptionInfo *exception)
{
#define SharpenImageTag  "Sharpen/Image"

  CacheView
    *image_view,
    *unsharp_view;

  Image
    *unsharp_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  double
    quantum_threshold;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  /*
    This kernel appears to be broken.
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  unsharp_image=AccelerateUnsharpMaskImage(image,radius,sigma,gain,threshold,
    exception);
  if (unsharp_image != (Image *) NULL)
    return(unsharp_image);
#endif
  */
  /* start from the blurred copy; the mask is original minus blur */
  unsharp_image=BlurImage(image,radius,sigma,exception);
  if (unsharp_image == (Image *) NULL)
    return((Image *) NULL);
  /* threshold is given in [0,1]; scale it to the quantum range */
  quantum_threshold=(double) QuantumRange*threshold;
  /*
    Unsharp-mask image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  unsharp_view=AcquireAuthenticCacheView(unsharp_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,unsharp_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(unsharp_view,0,y,unsharp_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          pixel;

        PixelChannel
          channel;

        PixelTrait
          traits,
          unsharp_traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        unsharp_traits=GetPixelChannelTraits(unsharp_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (unsharp_traits == UndefinedPixelTrait))
          continue;
        if ((unsharp_traits & CopyPixelTrait) != 0)
          {
            /* channel is copy-only: take the source pixel unchanged */
            SetPixelChannel(unsharp_image,channel,p[i],q);
            continue;
          }
        /* pixel = original - blurred; amplify only above the threshold */
        pixel=p[i]-(double) GetPixelChannel(unsharp_image,channel,q);
        if (fabs(2.0*pixel) < quantum_threshold)
          pixel=(double) p[i];
        else
          pixel=(double) p[i]+gain*pixel;
        SetPixelChannel(unsharp_image,channel,ClampToQuantum(pixel),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(unsharp_image);
    }
    if (SyncCacheViewAuthenticPixels(unsharp_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /* NOTE(review): 'progress' is read non-atomically here; acceptable
           for a monotone progress monitor — confirm against upstream. */
        proceed=SetImageProgress(image,SharpenImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  unsharp_image->type=image->type;
  unsharp_view=DestroyCacheView(unsharp_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    unsharp_image=DestroyImage(unsharp_image);
  return(unsharp_image);
}
par_csr_matop_device.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_utilities.h"
#include "_hypre_parcsr_mv.h"
#include "_hypre_utilities.hpp"

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)

/* Start fetching the external (off-processor) rows of A that this rank
 * references.  Row lengths are exchanged synchronously; column indices and
 * (when want_data != 0) values are sent/received asynchronously on device
 * memory.  The matching hypre_ParcsrGetExternalRowsDeviceWait() completes
 * the exchange and returns the rows as a device CSR matrix.
 * NOTE(review): the 'indices' parameter is not referenced in this body;
 * only indices_len is checked — presumably kept for interface symmetry. */
HYPRE_Int
hypre_ParcsrGetExternalRowsDeviceInit( hypre_ParCSRMatrix   *A,
                                       HYPRE_Int             indices_len,
                                       HYPRE_BigInt         *indices,
                                       hypre_ParCSRCommPkg  *comm_pkg,
                                       HYPRE_Int             want_data,
                                       void                **request_ptr)
{
   HYPRE_Int        i, j;
   HYPRE_Int        num_sends, num_rows_send, num_nnz_send, num_recvs, num_rows_recv, num_nnz_recv;
   HYPRE_Int       *d_send_i, *send_i, *d_send_map, *d_recv_i, *recv_i;
   HYPRE_BigInt    *d_send_j, *d_recv_j;
   HYPRE_Int       *send_jstarts, *recv_jstarts;
   HYPRE_Complex   *d_send_a = NULL, *d_recv_a = NULL;
   hypre_ParCSRCommPkg    *comm_pkg_j;
   hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
   /* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */
   /* diag part of A */
   hypre_CSRMatrix *A_diag   = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex   *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   /* HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); */
   /* off-diag part of A */
   hypre_CSRMatrix *A_offd   = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex   *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);
   /* HYPRE_Int *row_starts = hypre_ParCSRMatrixRowStarts(A); */
   /* HYPRE_Int  first_row  = hypre_ParCSRMatrixFirstRowIndex(A); */
   HYPRE_Int        first_col        = hypre_ParCSRMatrixFirstColDiag(A);
   HYPRE_BigInt    *col_map_offd_A   = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_Int        num_cols_A_offd  = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt    *d_col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A);
   MPI_Comm         comm = hypre_ParCSRMatrixComm(A);

   HYPRE_Int        num_procs;
   HYPRE_Int        my_id;
   void           **vrequest;

   hypre_CSRMatrix *A_ext;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* number of sends (#procs) */
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   /* number of rows to send */
   num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
   /* number of recvs (#procs) */
   num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
   /* number of rows to recv */
   num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);

   /* must be true if indices contains proper offd indices */
   hypre_assert(indices_len == num_rows_recv);

   /* send_i/recv_i:
    * the arrays to send and recv: we first send and recv the row lengths */
   d_send_i   = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_DEVICE);
   d_send_map = hypre_TAlloc(HYPRE_Int, num_rows_send,     HYPRE_MEMORY_DEVICE);
   send_i     = hypre_TAlloc(HYPRE_Int, num_rows_send,     HYPRE_MEMORY_HOST);
   recv_i     = hypre_TAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST);
   d_recv_i   = hypre_TAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_DEVICE);

   /* fill the send array with row lengths */
   hypre_TMemcpy(d_send_map, hypre_ParCSRCommPkgSendMapElmts(comm_pkg), HYPRE_Int,
                 num_rows_send, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
   /* zero only the first entry: d_send_i[0] is the scan base, lengths go in [1..] */
   hypre_Memset(d_send_i, 0, sizeof(HYPRE_Int), HYPRE_MEMORY_DEVICE);
   hypreDevice_GetRowNnz(num_rows_send, d_send_map, A_diag_i, A_offd_i, d_send_i + 1);

   /* send array send_i out: deviceTohost first and MPI (async)
    * note the shift in recv_i by one */
   hypre_TMemcpy(send_i, d_send_i + 1, HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST,
                 HYPRE_MEMORY_DEVICE);
   comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i + 1);

   /* turn row lengths into row pointers on the device */
   hypreDevice_IntegerInclusiveScan(num_rows_send + 1, d_send_i);

   /* total number of nnz to send */
   hypre_TMemcpy(&num_nnz_send, d_send_i + num_rows_send, HYPRE_Int, 1, HYPRE_MEMORY_HOST,
                 HYPRE_MEMORY_DEVICE);

   /* prepare data to send out. overlap with the above commmunication */
   d_send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_DEVICE);
   if (want_data)
   {
      d_send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_DEVICE);
   }

   /* lazily mirror the host off-diag column map to the device and cache it on A */
   if (d_col_map_offd_A == NULL)
   {
      d_col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(d_col_map_offd_A, col_map_offd_A, HYPRE_BigInt, num_cols_A_offd,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixDeviceColMapOffd(A) = d_col_map_offd_A;
   }

   /* job == 2, d_send_i is input that contains row ptrs (length num_rows_send) */
   hypreDevice_CopyParCSRRows(num_rows_send, d_send_map, 2, num_procs > 1,
                              first_col, d_col_map_offd_A,
                              A_diag_i, A_diag_j, A_diag_a,
                              A_offd_i, A_offd_j, A_offd_a,
                              d_send_i, d_send_j, d_send_a);

   /* pointers to each proc in send_j */
   send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
   send_jstarts[0] = 0;
   for (i = 1; i <= num_sends; i++)
   {
      send_jstarts[i] = send_jstarts[i - 1];
      for ( j = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i - 1);
            j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
            j++ )
      {
         send_jstarts[i] += send_i[j];
      }
   }
   hypre_assert(send_jstarts[num_sends] == num_nnz_send);

   /* finish the above communication: send_i/recv_i
    * (recv_i must be complete before the prefix sum below) */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /* adjust recv_i to ptrs */
   recv_i[0] = 0;
   for (i = 1; i <= num_rows_recv; i++)
   {
      recv_i[i] += recv_i[i - 1];
   }
   num_nnz_recv = recv_i[num_rows_recv];

   /* allocate device memory for j and a */
   d_recv_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_DEVICE);
   if (want_data)
   {
      d_recv_a = hypre_TAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_DEVICE);
   }

   recv_jstarts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   recv_jstarts[0] = 0;
   for (i = 1; i <= num_recvs; i++)
   {
      j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
      recv_jstarts[i] = recv_i[j];
   }

   /* ready to send and recv: create a communication package for data */
   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   hypre_ParCSRCommPkgComm         (comm_pkg_j) = comm;
   hypre_ParCSRCommPkgNumSends     (comm_pkg_j) = num_sends;
   hypre_ParCSRCommPkgSendProcs    (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg);
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts;
   hypre_ParCSRCommPkgNumRecvs     (comm_pkg_j) = num_recvs;
   hypre_ParCSRCommPkgRecvProcs    (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts;

   /* init communication */
   /* ja */
   comm_handle_j = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg_j,
                                                   HYPRE_MEMORY_DEVICE, d_send_j,
                                                   HYPRE_MEMORY_DEVICE, d_recv_j);
   if (want_data)
   {
      /* a */
      comm_handle_a = hypre_ParCSRCommHandleCreate_v2(1, comm_pkg_j,
                                                      HYPRE_MEMORY_DEVICE, d_send_a,
                                                      HYPRE_MEMORY_DEVICE, d_recv_a);
   }
   else
   {
      comm_handle_a = NULL;
   }

   hypre_TMemcpy(d_recv_i, recv_i, HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_DEVICE,
                 HYPRE_MEMORY_HOST);

   /* create A_ext: on device; d_recv_a stays NULL when !want_data */
   A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv);
   hypre_CSRMatrixI   (A_ext) = d_recv_i;
   hypre_CSRMatrixBigJ(A_ext) = d_recv_j;
   hypre_CSRMatrixData(A_ext) = d_recv_a;
   hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_DEVICE;

   /* output: the Wait call unpacks these three slots */
   vrequest = hypre_TAlloc(void *, 3, HYPRE_MEMORY_HOST);
   vrequest[0] = (void *) comm_handle_j;
   vrequest[1] = (void *) comm_handle_a;
   vrequest[2] = (void *) A_ext;

   *request_ptr = (void *) vrequest;

   /* free (comm_pkg_j's proc arrays are borrowed from comm_pkg, not freed) */
   hypre_TFree(send_i,     HYPRE_MEMORY_HOST);
   hypre_TFree(recv_i,     HYPRE_MEMORY_HOST);
   hypre_TFree(d_send_i,   HYPRE_MEMORY_DEVICE);
   hypre_TFree(d_send_map, HYPRE_MEMORY_DEVICE);
   hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Complete the exchange begun by hypre_ParcsrGetExternalRowsDeviceInit():
 * wait on the j/a communication, free the send-side device buffers and the
 * request record, and return the assembled external-rows CSR matrix. */
hypre_CSRMatrix*
hypre_ParcsrGetExternalRowsDeviceWait(void *vrequest)
{
   void **request = (void **) vrequest;

   hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
   hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
   hypre_CSRMatrix        *A_ext         = (hypre_CSRMatrix *)        request[2];

   /* grab the send buffers before the handles (their owners) are destroyed */
   HYPRE_BigInt  *send_j = comm_handle_j ? (HYPRE_BigInt *)  hypre_ParCSRCommHandleSendData(comm_handle_j) : NULL;
   HYPRE_Complex *send_a = comm_handle_a ? (HYPRE_Complex *) hypre_ParCSRCommHandleSendData(comm_handle_a) : NULL;

   hypre_ParCSRCommHandleDestroy(comm_handle_j);
   hypre_ParCSRCommHandleDestroy(comm_handle_a);

   hypre_TFree(send_j, HYPRE_MEMORY_DEVICE);
   hypre_TFree(send_a, HYPRE_MEMORY_DEVICE);

   hypre_TFree(request, HYPRE_MEMORY_HOST);

   return A_ext;
}

/* Merge the diag and offd parts of A into a single device CSR matrix with
 * global (BigInt) column indices: B = [Adiag | Aoffd] mapped to global
 * columns.  B has A's local rows and A's global number of columns. */
hypre_CSRMatrix*
hypre_MergeDiagAndOffdDevice(hypre_ParCSRMatrix *A)
{
   MPI_Comm         comm     = hypre_ParCSRMatrixComm(A);
   hypre_CSRMatrix *A_diag   = hypre_ParCSRMatrixDiag(A);
   HYPRE_Complex   *A_diag_a = hypre_CSRMatrixData(A_diag);
   HYPRE_Int       *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd   = hypre_ParCSRMatrixOffd(A);
   HYPRE_Complex   *A_offd_a = hypre_CSRMatrixData(A_offd);
   HYPRE_Int       *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j = hypre_CSRMatrixJ(A_offd);

   HYPRE_Int        local_num_rows   = hypre_CSRMatrixNumRows(A_diag);
   /* NOTE(review): "glbal" is a typo for "global"; left as-is (doc pass) */
   HYPRE_BigInt     glbal_num_cols   = hypre_ParCSRMatrixGlobalNumCols(A);
   HYPRE_BigInt     first_col        = hypre_ParCSRMatrixFirstColDiag(A);
   HYPRE_Int        num_cols_A_offd  = hypre_CSRMatrixNumCols(A_offd);
   HYPRE_BigInt    *col_map_offd_A   = hypre_ParCSRMatrixColMapOffd(A);
   HYPRE_BigInt    *d_col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A);

   hypre_CSRMatrix *B;
   HYPRE_Int        B_nrows = local_num_rows;
   HYPRE_BigInt     B_ncols = glbal_num_cols;
   HYPRE_Int       *B_i = hypre_TAlloc(HYPRE_Int, B_nrows + 1, HYPRE_MEMORY_DEVICE);
   HYPRE_BigInt    *B_j;
   HYPRE_Complex   *B_a;
   HYPRE_Int        B_nnz;

   HYPRE_Int        num_procs;

   hypre_MPI_Comm_size(comm, &num_procs);

   /* B_i[0] = 0; per-row nnz (diag+offd) go into B_i[1..], then scanned to ptrs */
   hypre_Memset(B_i, 0, sizeof(HYPRE_Int), HYPRE_MEMORY_DEVICE);

   hypreDevice_GetRowNnz(B_nrows, NULL, A_diag_i, A_offd_i, B_i + 1);

   hypreDevice_IntegerInclusiveScan(B_nrows + 1, B_i);

   /* total number of nnz */
   hypre_TMemcpy(&B_nnz, B_i + B_nrows, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);

   B_j = hypre_TAlloc(HYPRE_BigInt,  B_nnz, HYPRE_MEMORY_DEVICE);
   B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE);

   /* lazily mirror the host off-diag column map to the device */
   if (d_col_map_offd_A == NULL)
   {
      d_col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE);
      hypre_TMemcpy(d_col_map_offd_A, col_map_offd_A, HYPRE_BigInt, num_cols_A_offd,
                    HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
      hypre_ParCSRMatrixDeviceColMapOffd(A) = d_col_map_offd_A;
   }

   hypreDevice_CopyParCSRRows(B_nrows, NULL, 2, num_procs > 1, first_col, d_col_map_offd_A,
                              A_diag_i, A_diag_j, A_diag_a, A_offd_i, A_offd_j, A_offd_a,
                              B_i, B_j, B_a);

   /* output */
   B = hypre_CSRMatrixCreate(B_nrows, B_ncols, B_nnz);
   hypre_CSRMatrixI   (B) = B_i;
   hypre_CSRMatrixBigJ(B) = B_j;
   hypre_CSRMatrixData(B) = B_a;
   hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE;

   hypre_SyncCudaComputeStream(hypre_handle());

   return B;
}

/* Start the "reverse" exchange: each rank sends the external rows it holds
 * in B_ext back to their owners (as in a transpose matvec, mode 12/21),
 * receiving its own rows' contributions as B_int.  Complete with
 * hypre_ExchangeExternalRowsDeviceWait(). */
HYPRE_Int
hypre_ExchangeExternalRowsDeviceInit( hypre_CSRMatrix      *B_ext,
                                      hypre_ParCSRCommPkg  *comm_pkg_A,
                                      HYPRE_Int             want_data,
                                      void                **request_ptr)
{
   MPI_Comm   comm            = hypre_ParCSRCommPkgComm(comm_pkg_A);
   HYPRE_Int  num_recvs       = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A);
   HYPRE_Int *recv_procs      = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A);
   HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A);
   HYPRE_Int  num_sends       = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
   HYPRE_Int *send_procs      = hypre_ParCSRCommPkgSendProcs(comm_pkg_A);
   HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);

   HYPRE_Int  num_elmts_send  = send_map_starts[num_sends];
   HYPRE_Int  num_elmts_recv  = recv_vec_starts[num_recvs];

   HYPRE_Int     *B_ext_i_d      = hypre_CSRMatrixI(B_ext);
   HYPRE_BigInt  *B_ext_j_d      = hypre_CSRMatrixBigJ(B_ext);
   HYPRE_Complex *B_ext_a_d      = hypre_CSRMatrixData(B_ext);
   HYPRE_Int      B_ext_ncols    = hypre_CSRMatrixNumCols(B_ext);
   HYPRE_Int      B_ext_nrows    = hypre_CSRMatrixNumRows(B_ext);
   HYPRE_Int      B_ext_nnz      = hypre_CSRMatrixNumNonzeros(B_ext);
   HYPRE_Int     *B_ext_rownnz_d = hypre_TAlloc(HYPRE_Int, B_ext_nrows + 1, HYPRE_MEMORY_DEVICE);
   HYPRE_Int     *B_ext_rownnz_h = hypre_TAlloc(HYPRE_Int, B_ext_nrows,     HYPRE_MEMORY_HOST);
   HYPRE_Int     *B_ext_i_h      = hypre_TAlloc(HYPRE_Int, B_ext_nrows + 1, HYPRE_MEMORY_HOST);

   hypre_assert(num_elmts_recv == B_ext_nrows);

   /* output matrix */
   hypre_CSRMatrix *B_int_d;
   HYPRE_Int        B_int_nrows = num_elmts_send;
   HYPRE_Int        B_int_ncols = B_ext_ncols;
   HYPRE_Int       *B_int_i_h   = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST);
   HYPRE_Int       *B_int_i_d   = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_DEVICE);
   HYPRE_BigInt    *B_int_j_d   = NULL;
   HYPRE_Complex   *B_int_a_d   = NULL;
   HYPRE_Int        B_int_nnz;

   hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
   hypre_ParCSRCommPkg    *comm_pkg_j;

   HYPRE_Int *jdata_recv_vec_starts;
   HYPRE_Int *jdata_send_map_starts;

   HYPRE_Int i;
   HYPRE_Int num_procs, my_id;
   void    **vrequest;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);

   /*--------------------------------------------------------------------------
    * B_ext_rownnz contains the number of elements of row j
    * (to be determined through send_map_elmnts on the receiving end)
    *--------------------------------------------------------------------------*/
   HYPRE_THRUST_CALL(adjacent_difference, B_ext_i_d, B_ext_i_d + B_ext_nrows + 1, B_ext_rownnz_d);
   hypre_TMemcpy(B_ext_rownnz_h, B_ext_rownnz_d + 1, HYPRE_Int, B_ext_nrows,
                 HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);

   /*--------------------------------------------------------------------------
    * initialize communication: send/recv the row nnz
    * (note the use of comm_pkg_A, mode 12, as in transpose matvec
    *--------------------------------------------------------------------------*/
   comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz_h, B_int_i_h + 1);

   jdata_recv_vec_starts    = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
   jdata_recv_vec_starts[0] = 0;

   /* build B_ext row pointers on the host, overlapping with the comm above */
   B_ext_i_h[0] = 0;
   hypre_TMemcpy(B_ext_i_h + 1, B_ext_rownnz_h, HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST,
                 HYPRE_MEMORY_HOST);
   for (i = 1; i <= B_ext_nrows; i++)
   {
      B_ext_i_h[i] += B_ext_i_h[i - 1];
   }

   hypre_assert(B_ext_i_h[B_ext_nrows] == B_ext_nnz);

   for (i = 1; i <= num_recvs; i++)
   {
      jdata_recv_vec_starts[i] = B_ext_i_h[recv_vec_starts[i]];
   }

   comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
   /* the data exchange flows in the reverse direction of comm_pkg_A:
    * A's recv procs become senders, A's send procs become receivers */
   hypre_ParCSRCommPkgComm(comm_pkg_j)      = comm;
   hypre_ParCSRCommPkgNumSends(comm_pkg_j)  = num_recvs;
   hypre_ParCSRCommPkgNumRecvs(comm_pkg_j)  = num_sends;
   hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs;
   hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs;

   /* the row-nnz exchange must be complete before B_int_i_h is prefix-summed */
   hypre_ParCSRCommHandleDestroy(comm_handle);

   /*--------------------------------------------------------------------------
    * compute B_int: row nnz to row ptrs
    *--------------------------------------------------------------------------*/
   B_int_i_h[0] = 0;
   for (i = 1; i <= B_int_nrows; i++)
   {
      B_int_i_h[i] += B_int_i_h[i - 1];
   }

   B_int_nnz = B_int_i_h[B_int_nrows];

   B_int_j_d = hypre_TAlloc(HYPRE_BigInt, B_int_nnz, HYPRE_MEMORY_DEVICE);
   if (want_data)
   {
      B_int_a_d = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_DEVICE);
   }

   for (i = 0; i <= num_sends; i++)
   {
      jdata_send_map_starts[i] = B_int_i_h[send_map_starts[i]];
   }

   /* note the order of send/recv is reversed */
   hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts;
   hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts;

   /* send/recv CSR rows */
   if (want_data)
   {
      comm_handle_a = hypre_ParCSRCommHandleCreate_v2( 1, comm_pkg_j,
                                                       HYPRE_MEMORY_DEVICE, B_ext_a_d,
                                                       HYPRE_MEMORY_DEVICE, B_int_a_d );
   }
   else
   {
      comm_handle_a = NULL;
   }
   comm_handle_j = hypre_ParCSRCommHandleCreate_v2(21, comm_pkg_j,
                                                   HYPRE_MEMORY_DEVICE, B_ext_j_d,
                                                   HYPRE_MEMORY_DEVICE, B_int_j_d );

   hypre_TMemcpy(B_int_i_d, B_int_i_h, HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_DEVICE,
                 HYPRE_MEMORY_HOST);

   /* create CSR: on device */
   B_int_d = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz);
   hypre_CSRMatrixI(B_int_d)    = B_int_i_d;
   hypre_CSRMatrixBigJ(B_int_d) = B_int_j_d;
   hypre_CSRMatrixData(B_int_d) = B_int_a_d;
   hypre_CSRMatrixMemoryLocation(B_int_d) = HYPRE_MEMORY_DEVICE;

   /* output: unpacked by hypre_ExchangeExternalRowsDeviceWait() */
   vrequest = hypre_TAlloc(void *, 3, HYPRE_MEMORY_HOST);
   vrequest[0] = (void *) comm_handle_j;
   vrequest[1] = (void *) comm_handle_a;
   vrequest[2] = (void *) B_int_d;

   *request_ptr = (void *) vrequest;

   /* free (proc arrays in comm_pkg_j are borrowed from comm_pkg_A) */
   hypre_TFree(B_ext_rownnz_d, HYPRE_MEMORY_DEVICE);
   hypre_TFree(B_ext_rownnz_h, HYPRE_MEMORY_HOST);
   hypre_TFree(B_ext_i_h,      HYPRE_MEMORY_HOST);
   hypre_TFree(B_int_i_h,      HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
   hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/* Complete the exchange begun by hypre_ExchangeExternalRowsDeviceInit()
 * and return the received device CSR matrix B_int. */
hypre_CSRMatrix*
hypre_ExchangeExternalRowsDeviceWait(void *vrequest)
{
   void **request = (void **) vrequest;

   hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
   hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
   hypre_CSRMatrix        *B_int_d       = (hypre_CSRMatrix *)        request[2];

   /* communication done */
   hypre_ParCSRCommHandleDestroy(comm_handle_j);
   hypre_ParCSRCommHandleDestroy(comm_handle_a);

   hypre_TFree(request, HYPRE_MEMORY_HOST);

   return B_int_d;
}

/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

/* Start extracting B_ext: the rows of B that correspond to A's off-diagonal
 * columns.  Thin wrapper over hypre_ParcsrGetExternalRowsDeviceInit() that
 * creates A's comm package on demand. */
HYPRE_Int
hypre_ParCSRMatrixExtractBExtDeviceInit( hypre_ParCSRMatrix  *B,
                                         hypre_ParCSRMatrix  *A,
                                         HYPRE_Int            want_data,
                                         void               **request_ptr)
{
   hypre_assert( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B)) ==
                 hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(B)) );

   /*
   hypre_assert( hypre_GetActualMemLocation(
            hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B))) == HYPRE_MEMORY_DEVICE );
   */

   if (!hypre_ParCSRMatrixCommPkg(A))
   {
      hypre_MatvecCommPkgCreate(A);
   }

   hypre_ParcsrGetExternalRowsDeviceInit(B,
                                         hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)),
                                         hypre_ParCSRMatrixColMapOffd(A),
                                         hypre_ParCSRMatrixCommPkg(A),
                                         want_data,
                                         request_ptr);
   return hypre_error_flag;
}

/* Complete the B_ext extraction started by the Init call above. */
hypre_CSRMatrix*
hypre_ParCSRMatrixExtractBExtDeviceWait(void *request)
{
   return hypre_ParcsrGetExternalRowsDeviceWait(request);
}

/* Synchronous convenience wrapper: Init immediately followed by Wait. */
hypre_CSRMatrix*
hypre_ParCSRMatrixExtractBExtDevice( hypre_ParCSRMatrix *B,
                                     hypre_ParCSRMatrix *A,
                                     HYPRE_Int want_data )
{
   void *request;

   hypre_ParCSRMatrixExtractBExtDeviceInit(B, A, want_data, &request);
   return hypre_ParCSRMatrixExtractBExtDeviceWait(request);
}

/* return B = [Adiag, Aoffd] */
#if 1

/* One warp per row: copy the row's diag entries, then its offd entries
 * (with column indices shifted past the diag columns, optionally remapped
 * through cols_offd_map), into the output arrays at offset d_ib[row]. */
__global__ void
hypreCUDAKernel_ConcatDiagAndOffd(HYPRE_Int  nrows,    HYPRE_Int  diag_ncol,
                                  HYPRE_Int *d_diag_i, HYPRE_Int *d_diag_j, HYPRE_Complex *d_diag_a,
                                  HYPRE_Int *d_offd_i, HYPRE_Int *d_offd_j, HYPRE_Complex *d_offd_a,
                                  HYPRE_Int *cols_offd_map,
                                  HYPRE_Int *d_ib, HYPRE_Int *d_jb, HYPRE_Complex *d_ab)
{
   const HYPRE_Int row = hypre_cuda_get_grid_warp_id<1, 1>();

   if (row >= nrows)
   {
      return;
   }

   /* lane id inside the warp */
   const HYPRE_Int lane_id = hypre_cuda_get_lane_id<1>();
   HYPRE_Int i, j, k, p, istart, iend, bstart;

   /* diag part */
   if (lane_id < 2)
   {
      /* lanes 0/1 load the row's begin/end pointers */
      j = read_only_load(d_diag_i + row + lane_id);
   }
   if (lane_id == 0)
   {
      k = read_only_load(d_ib + row);
   }
   /* broadcast the loaded values to the whole warp */
   istart = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend   = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);
   bstart = __shfl_sync(HYPRE_WARP_FULL_MASK, k, 0);

   p = bstart - istart;
   for (i = istart + lane_id; i < iend; i += HYPRE_WARP_SIZE)
   {
      d_jb[p + i] = read_only_load(d_diag_j + i);
      d_ab[p + i] = read_only_load(d_diag_a + i);
   }

   /* offd part */
   if (lane_id < 2)
   {
      j = read_only_load(d_offd_i + row + lane_id);
   }
   /* output position continues right after the diag entries just written */
   bstart += iend - istart;
   istart  = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 0);
   iend    = __shfl_sync(HYPRE_WARP_FULL_MASK, j, 1);

   p = bstart - istart;
   for (i = istart + lane_id; i < iend; i += HYPRE_WARP_SIZE)
   {
      const HYPRE_Int t = read_only_load(d_offd_j + i);
      /* offd columns are placed after the diag_ncol local columns */
      d_jb[p + i] = (cols_offd_map ? read_only_load(&cols_offd_map[t]) : t) + diag_ncol;
      d_ab[p + i] = read_only_load(d_offd_a + i);
   }
}

/* Concatenate A's diag and offd blocks into one device CSR matrix B with
 * local column indexing: offd columns are appended after the diag columns
 * (no global column map applied — see the NULL cols_offd_map below). */
hypre_CSRMatrix*
hypre_ConcatDiagAndOffdDevice(hypre_ParCSRMatrix *A)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);

   hypre_CSRMatrix *B = hypre_CSRMatrixCreate( hypre_CSRMatrixNumRows(A_diag),
                                               hypre_CSRMatrixNumCols(A_diag) + hypre_CSRMatrixNumCols(A_offd),
                                               hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd) );

   hypre_CSRMatrixInitialize_v2(B, 0, HYPRE_MEMORY_DEVICE);

   /* per-row nnz, then exclusive scan turns them into row pointers */
   hypreDevice_GetRowNnz(hypre_CSRMatrixNumRows(B), NULL, hypre_CSRMatrixI(A_diag), hypre_CSRMatrixI(A_offd),
                         hypre_CSRMatrixI(B));

   HYPRE_THRUST_CALL( exclusive_scan,
                      hypre_CSRMatrixI(B),
                      hypre_CSRMatrixI(B) + hypre_CSRMatrixNumRows(B) + 1,
                      hypre_CSRMatrixI(B) );

   const dim3 bDim = hypre_GetDefaultCUDABlockDimension();
   const dim3 gDim = hypre_GetDefaultCUDAGridDimension(hypre_CSRMatrixNumRows(A_diag), "warp", bDim);

   HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim,
                      hypre_CSRMatrixNumRows(A_diag),
                      hypre_CSRMatrixNumCols(A_diag),
                      hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag),
                      hypre_CSRMatrixI(A_offd), hypre_CSRMatrixJ(A_offd), hypre_CSRMatrixData(A_offd),
                      NULL,
                      hypre_CSRMatrixI(B), hypre_CSRMatrixJ(B), hypre_CSRMatrixData(B) );

   return B;
}
#else
/* Thrust-only variant of hypre_ConcatDiagAndOffdDevice (disabled by the
 * #if 1 above): expand both blocks to COO, shift offd column indices past
 * the diag columns, stable-sort by row, and compress back to CSR. */
hypre_CSRMatrix*
hypre_ConcatDiagAndOffdDevice(hypre_ParCSRMatrix *A)
{
   hypre_CSRMatrix *A_diag     = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int       *A_diag_i   = hypre_CSRMatrixI(A_diag);
   HYPRE_Int       *A_diag_j   = hypre_CSRMatrixJ(A_diag);
   HYPRE_Complex   *A_diag_a   = hypre_CSRMatrixData(A_diag);
   HYPRE_Int        A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag);
   hypre_CSRMatrix *A_offd     = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int       *A_offd_i   = hypre_CSRMatrixI(A_offd);
   HYPRE_Int       *A_offd_j   = hypre_CSRMatrixJ(A_offd);
   HYPRE_Complex   *A_offd_a   = hypre_CSRMatrixData(A_offd);
   HYPRE_Int        A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd);

   hypre_CSRMatrix *B;
   HYPRE_Int        B_nrows = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int        B_ncols = hypre_CSRMatrixNumCols(A_diag) + hypre_CSRMatrixNumCols(A_offd);
   HYPRE_Int        B_nnz   = A_diag_nnz + A_offd_nnz;
   HYPRE_Int       *B_ii = hypre_TAlloc(HYPRE_Int,     B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Int       *B_j  = hypre_TAlloc(HYPRE_Int,     B_nnz, HYPRE_MEMORY_DEVICE);
   HYPRE_Complex   *B_a  = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE);

   // Adiag
   HYPRE_Int *A_diag_ii = hypreDevice_CsrRowPtrsToIndices(B_nrows, A_diag_nnz, A_diag_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)),
                      A_diag_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_j, B_a)) );
   hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE);

   // Aoffd
   HYPRE_Int *A_offd_ii = hypreDevice_CsrRowPtrsToIndices(B_nrows, A_offd_nnz, A_offd_i);
   HYPRE_THRUST_CALL( copy_n,
                      thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, A_offd_a)),
                      A_offd_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_a)) + A_diag_nnz );
   hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE);

   /* shift offd columns past the diag columns while copying them into B_j */
   HYPRE_THRUST_CALL( transform,
                      A_offd_j,
                      A_offd_j + A_offd_nnz,
                      thrust::make_constant_iterator(hypre_CSRMatrixNumCols(A_diag)),
                      B_j + A_diag_nnz,
                      thrust::plus<HYPRE_Int>() );

   // B
   /* stable sort keeps diag entries before offd entries within each row */
   HYPRE_THRUST_CALL( stable_sort_by_key,
                      B_ii,
                      B_ii + B_nnz,
                      thrust::make_zip_iterator(thrust::make_tuple(B_j, B_a)) );

   HYPRE_Int *B_i = hypreDevice_CsrRowIndicesToPtrs(B_nrows, B_nnz, B_ii);
   hypre_TFree(B_ii, HYPRE_MEMORY_DEVICE);

   B = hypre_CSRMatrixCreate(B_nrows, B_ncols, B_nnz);
   hypre_CSRMatrixI(B)    = B_i;
   hypre_CSRMatrixJ(B)    = B_j;
   hypre_CSRMatrixData(B) = B_a;
   hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE;

   return B;
}
#endif

/* return B = [Adiag, Aoffd; E] */
#if 1
HYPRE_Int hypre_ConcatDiagOffdAndExtDevice(hypre_ParCSRMatrix *A,
hypre_CSRMatrix *E, hypre_CSRMatrix **B_ptr, HYPRE_Int *num_cols_offd_ptr, HYPRE_BigInt **cols_map_offd_ptr) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *E_diag, *E_offd, *B; HYPRE_Int *cols_offd_map, num_cols_offd; HYPRE_BigInt *cols_map_offd; hypre_CSRMatrixSplitDevice(E, hypre_ParCSRMatrixFirstColDiag(A), hypre_ParCSRMatrixLastColDiag(A), hypre_CSRMatrixNumCols(A_offd), hypre_ParCSRMatrixDeviceColMapOffd(A), &cols_offd_map, &num_cols_offd, &cols_map_offd, &E_diag, &E_offd); B = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumRows(A) + hypre_CSRMatrixNumRows(E), hypre_ParCSRMatrixNumCols(A) + num_cols_offd, hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros(A_offd) + hypre_CSRMatrixNumNonzeros(E)); hypre_CSRMatrixInitialize_v2(B, 0, HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(hypre_ParCSRMatrixNumRows(A), NULL, hypre_CSRMatrixI(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixI(B)); HYPRE_THRUST_CALL( exclusive_scan, hypre_CSRMatrixI(B), hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, hypre_CSRMatrixI(B) ); dim3 bDim = hypre_GetDefaultCUDABlockDimension(); dim3 gDim = hypre_GetDefaultCUDAGridDimension(hypre_ParCSRMatrixNumRows(A), "warp", bDim); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim, hypre_CSRMatrixNumRows(A_diag), hypre_CSRMatrixNumCols(A_diag), hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixJ(A_offd), hypre_CSRMatrixData(A_offd), cols_offd_map, hypre_CSRMatrixI(B), hypre_CSRMatrixJ(B), hypre_CSRMatrixData(B) ); hypre_TFree(cols_offd_map, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, hypre_CSRMatrixI(E) + 1, HYPRE_Int, hypre_CSRMatrixNumRows(E), HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( transform, hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) 
+ hypre_CSRMatrixNumRows(E) + 1, thrust::make_constant_iterator(hypre_CSRMatrixNumNonzeros(A_diag) + hypre_CSRMatrixNumNonzeros( A_offd)), hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A) + 1, thrust::plus<HYPRE_Int>() ); gDim = hypre_GetDefaultCUDAGridDimension(hypre_CSRMatrixNumRows(E), "warp", bDim); hypre_assert(hypre_CSRMatrixNumCols(E_diag) == hypre_CSRMatrixNumCols(A_diag)); HYPRE_CUDA_LAUNCH( hypreCUDAKernel_ConcatDiagAndOffd, gDim, bDim, hypre_CSRMatrixNumRows(E_diag), hypre_CSRMatrixNumCols(E_diag), hypre_CSRMatrixI(E_diag), hypre_CSRMatrixJ(E_diag), hypre_CSRMatrixData(E_diag), hypre_CSRMatrixI(E_offd), hypre_CSRMatrixJ(E_offd), hypre_CSRMatrixData(E_offd), NULL, hypre_CSRMatrixI(B) + hypre_ParCSRMatrixNumRows(A), hypre_CSRMatrixJ(B), hypre_CSRMatrixData(B) ); hypre_CSRMatrixDestroy(E_diag); hypre_CSRMatrixDestroy(E_offd); *B_ptr = B; *num_cols_offd_ptr = num_cols_offd; *cols_map_offd_ptr = cols_map_offd; return hypre_error_flag; } #else HYPRE_Int hypre_ConcatDiagOffdAndExtDevice(hypre_ParCSRMatrix *A, hypre_CSRMatrix *E, hypre_CSRMatrix **B_ptr, HYPRE_Int *num_cols_offd_ptr, HYPRE_BigInt **cols_map_offd_ptr) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); HYPRE_Int A_nrows = hypre_CSRMatrixNumRows(A_diag); HYPRE_Int A_ncols = hypre_CSRMatrixNumCols(A_diag); HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag); HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag); HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag); HYPRE_Int A_diag_nnz = hypre_CSRMatrixNumNonzeros(A_diag); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd); HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd); HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd); HYPRE_Int A_offd_nnz = hypre_CSRMatrixNumNonzeros(A_offd); HYPRE_BigInt first_col_A = hypre_ParCSRMatrixFirstColDiag(A); HYPRE_BigInt last_col_A = hypre_ParCSRMatrixLastColDiag(A); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *col_map_offd_A = 
hypre_ParCSRMatrixDeviceColMapOffd(A); HYPRE_Int *E_i = hypre_CSRMatrixI(E); HYPRE_BigInt *E_bigj = hypre_CSRMatrixBigJ(E); HYPRE_Complex *E_a = hypre_CSRMatrixData(E); HYPRE_Int E_nrows = hypre_CSRMatrixNumRows(E); HYPRE_Int E_nnz = hypre_CSRMatrixNumNonzeros(E); HYPRE_Int E_diag_nnz, E_offd_nnz; hypre_CSRMatrix *B; HYPRE_Int B_nnz = A_diag_nnz + A_offd_nnz + E_nnz; HYPRE_Int *B_ii = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE); HYPRE_Int *B_j = hypre_TAlloc(HYPRE_Int, B_nnz, HYPRE_MEMORY_DEVICE); HYPRE_Complex *B_a = hypre_TAlloc(HYPRE_Complex, B_nnz, HYPRE_MEMORY_DEVICE); // E hypre_CSRMatrixSplitDevice_core(0, E_nrows, E_nnz, NULL, E_bigj, NULL, NULL, first_col_A, last_col_A, num_cols_offd_A, NULL, NULL, NULL, NULL, &E_diag_nnz, NULL, NULL, NULL, NULL, &E_offd_nnz, NULL, NULL, NULL, NULL); HYPRE_Int *cols_offd_map, num_cols_offd; HYPRE_BigInt *cols_map_offd; HYPRE_Int *E_ii = hypreDevice_CsrRowPtrsToIndices(E_nrows, E_nnz, E_i); hypre_CSRMatrixSplitDevice_core(1, E_nrows, E_nnz, E_ii, E_bigj, E_a, NULL, first_col_A, last_col_A, num_cols_offd_A, col_map_offd_A, &cols_offd_map, &num_cols_offd, &cols_map_offd, &E_diag_nnz, B_ii + A_diag_nnz + A_offd_nnz, B_j + A_diag_nnz + A_offd_nnz, B_a + A_diag_nnz + A_offd_nnz, NULL, &E_offd_nnz, B_ii + A_diag_nnz + A_offd_nnz + E_diag_nnz, B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz, B_a + A_diag_nnz + A_offd_nnz + E_diag_nnz, NULL); hypre_TFree(E_ii, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( transform, B_ii + A_diag_nnz + A_offd_nnz, B_ii + B_nnz, thrust::make_constant_iterator(A_nrows), B_ii + A_diag_nnz + A_offd_nnz, thrust::plus<HYPRE_Int>() ); // Adiag HYPRE_Int *A_diag_ii = hypreDevice_CsrRowPtrsToIndices(A_nrows, A_diag_nnz, A_diag_i); HYPRE_THRUST_CALL( copy_n, thrust::make_zip_iterator(thrust::make_tuple(A_diag_ii, A_diag_j, A_diag_a)), A_diag_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_j, B_a)) ); hypre_TFree(A_diag_ii, HYPRE_MEMORY_DEVICE); // Aoffd HYPRE_Int *A_offd_ii = 
hypreDevice_CsrRowPtrsToIndices(A_nrows, A_offd_nnz, A_offd_i); HYPRE_THRUST_CALL( copy_n, thrust::make_zip_iterator(thrust::make_tuple(A_offd_ii, A_offd_a)), A_offd_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_ii, B_a)) + A_diag_nnz ); hypre_TFree(A_offd_ii, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( gather, A_offd_j, A_offd_j + A_offd_nnz, cols_offd_map, B_j + A_diag_nnz); hypre_TFree(cols_offd_map, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( transform, B_j + A_diag_nnz, B_j + A_diag_nnz + A_offd_nnz, thrust::make_constant_iterator(A_ncols), B_j + A_diag_nnz, thrust::plus<HYPRE_Int>() ); HYPRE_THRUST_CALL( transform, B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz, B_j + B_nnz, thrust::make_constant_iterator(A_ncols), B_j + A_diag_nnz + A_offd_nnz + E_diag_nnz, thrust::plus<HYPRE_Int>() ); // B HYPRE_THRUST_CALL( stable_sort_by_key, B_ii, B_ii + B_nnz, thrust::make_zip_iterator(thrust::make_tuple(B_j, B_a)) ); HYPRE_Int *B_i = hypreDevice_CsrRowIndicesToPtrs(A_nrows + E_nrows, B_nnz, B_ii); hypre_TFree(B_ii, HYPRE_MEMORY_DEVICE); B = hypre_CSRMatrixCreate(A_nrows + E_nrows, A_ncols + num_cols_offd, B_nnz); hypre_CSRMatrixI(B) = B_i; hypre_CSRMatrixJ(B) = B_j; hypre_CSRMatrixData(B) = B_a; hypre_CSRMatrixMemoryLocation(B) = HYPRE_MEMORY_DEVICE; *B_ptr = B; *num_cols_offd_ptr = num_cols_offd; *cols_map_offd_ptr = cols_map_offd; return hypre_error_flag; } #endif HYPRE_Int hypre_ParCSRMatrixGetRowDevice( hypre_ParCSRMatrix *mat, HYPRE_BigInt row, HYPRE_Int *size, HYPRE_BigInt **col_ind, HYPRE_Complex **values ) { HYPRE_Int nrows, local_row; HYPRE_BigInt row_start, row_end; hypre_CSRMatrix *Aa; hypre_CSRMatrix *Ba; if (!mat) { hypre_error_in_arg(1); return hypre_error_flag; } Aa = (hypre_CSRMatrix *) hypre_ParCSRMatrixDiag(mat); Ba = (hypre_CSRMatrix *) hypre_ParCSRMatrixOffd(mat); if (hypre_ParCSRMatrixGetrowactive(mat)) { return (-1); } hypre_ParCSRMatrixGetrowactive(mat) = 1; row_start = hypre_ParCSRMatrixFirstRowIndex(mat); row_end = 
hypre_ParCSRMatrixLastRowIndex(mat) + 1; nrows = row_end - row_start; if (row < row_start || row >= row_end) { return (-1); } local_row = row - row_start; /* if buffer is not allocated and some information is requested, allocate buffer with the max row_nnz */ if ( !hypre_ParCSRMatrixRowvalues(mat) && (col_ind || values) ) { HYPRE_Int max_row_nnz; HYPRE_Int *row_nnz = hypre_TAlloc(HYPRE_Int, nrows, HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(nrows, NULL, hypre_CSRMatrixI(Aa), hypre_CSRMatrixI(Ba), row_nnz); hypre_TMemcpy(size, row_nnz + local_row, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); max_row_nnz = HYPRE_THRUST_CALL(reduce, row_nnz, row_nnz + nrows, 0, thrust::maximum<HYPRE_Int>()); /* HYPRE_Int *max_row_nnz_d = HYPRE_THRUST_CALL(max_element, row_nnz, row_nnz + nrows); hypre_TMemcpy( &max_row_nnz, max_row_nnz_d, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE ); */ hypre_TFree(row_nnz, HYPRE_MEMORY_DEVICE); hypre_ParCSRMatrixRowvalues(mat) = (HYPRE_Complex *) hypre_TAlloc(HYPRE_Complex, max_row_nnz, hypre_ParCSRMatrixMemoryLocation(mat)); hypre_ParCSRMatrixRowindices(mat) = (HYPRE_BigInt *) hypre_TAlloc(HYPRE_BigInt, max_row_nnz, hypre_ParCSRMatrixMemoryLocation(mat)); } else { HYPRE_Int *size_d = hypre_TAlloc(HYPRE_Int, 1, HYPRE_MEMORY_DEVICE); hypreDevice_GetRowNnz(1, NULL, hypre_CSRMatrixI(Aa) + local_row, hypre_CSRMatrixI(Ba) + local_row, size_d); hypre_TMemcpy(size, size_d, HYPRE_Int, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); hypre_TFree(size_d, HYPRE_MEMORY_DEVICE); } if (col_ind || values) { if (hypre_ParCSRMatrixDeviceColMapOffd(mat) == NULL) { hypre_ParCSRMatrixDeviceColMapOffd(mat) = hypre_TAlloc(HYPRE_BigInt, hypre_CSRMatrixNumCols(Ba), HYPRE_MEMORY_DEVICE); hypre_TMemcpy( hypre_ParCSRMatrixDeviceColMapOffd(mat), hypre_ParCSRMatrixColMapOffd(mat), HYPRE_BigInt, hypre_CSRMatrixNumCols(Ba), HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST ); } hypreDevice_CopyParCSRRows( 1, NULL, -1, Ba != NULL, hypre_ParCSRMatrixFirstColDiag(mat), 
hypre_ParCSRMatrixDeviceColMapOffd(mat), hypre_CSRMatrixI(Aa) + local_row, hypre_CSRMatrixJ(Aa), hypre_CSRMatrixData(Aa), hypre_CSRMatrixI(Ba) + local_row, hypre_CSRMatrixJ(Ba), hypre_CSRMatrixData(Ba), NULL, hypre_ParCSRMatrixRowindices(mat), hypre_ParCSRMatrixRowvalues(mat) ); } if (col_ind) { *col_ind = hypre_ParCSRMatrixRowindices(mat); } if (values) { *values = hypre_ParCSRMatrixRowvalues(mat); } hypre_SyncCudaComputeStream(hypre_handle()); return hypre_error_flag; } /* Get element-wise tolerances based on row norms for ParCSRMatrix * NOTE: Keep the diagonal, i.e. elmt_tol = 0.0 for diagonals * Output vectors have size nnz: * elmt_tols_diag[j] = tol * (norm of row i) for j in [ A_diag_i[i] , A_diag_i[i+1] ) * elmt_tols_offd[j] = tol * (norm of row i) for j in [ A_offd_i[i] , A_offd_i[i+1] ) * type == -1, infinity norm, * 1, 1-norm * 2, 2-norm */ template<HYPRE_Int type> __global__ void hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols( HYPRE_Int nrows, HYPRE_Real tol, HYPRE_Int *A_diag_i, HYPRE_Int *A_diag_j, HYPRE_Complex *A_diag_a, HYPRE_Int *A_offd_i, HYPRE_Complex *A_offd_a, HYPRE_Real *elmt_tols_diag, HYPRE_Real *elmt_tols_offd) { HYPRE_Int row_i = hypre_cuda_get_grid_warp_id<1, 1>(); if (row_i >= nrows) { return; } HYPRE_Int lane = hypre_cuda_get_lane_id<1>(); HYPRE_Int p_diag, p_offd, q_diag, q_offd; /* sum row norm over diag part */ if (lane < 2) { p_diag = read_only_load(A_diag_i + row_i + lane); } q_diag = __shfl_sync(HYPRE_WARP_FULL_MASK, p_diag, 1); p_diag = __shfl_sync(HYPRE_WARP_FULL_MASK, p_diag, 0); HYPRE_Real row_norm_i = 0.0; for (HYPRE_Int j = p_diag + lane; j < q_diag; j += HYPRE_WARP_SIZE) { HYPRE_Complex val = A_diag_a[j]; if (type == -1) { row_norm_i = hypre_max(row_norm_i, hypre_cabs(val)); } else if (type == 1) { row_norm_i += hypre_cabs(val); } else if (type == 2) { row_norm_i += val * val; } } /* sum row norm over offd part */ if (lane < 2) { p_offd = read_only_load(A_offd_i + row_i + lane); } q_offd = 
__shfl_sync(HYPRE_WARP_FULL_MASK, p_offd, 1); p_offd = __shfl_sync(HYPRE_WARP_FULL_MASK, p_offd, 0); for (HYPRE_Int j = p_offd + lane; j < q_offd; j += HYPRE_WARP_SIZE) { HYPRE_Complex val = A_offd_a[j]; if (type == -1) { row_norm_i = hypre_max(row_norm_i, hypre_cabs(val)); } else if (type == 1) { row_norm_i += hypre_cabs(val); } else if (type == 2) { row_norm_i += val * val; } } /* allreduce to get the row norm on all threads */ if (type == -1) { row_norm_i = warp_allreduce_max(row_norm_i); } else { row_norm_i = warp_allreduce_sum(row_norm_i); } if (type == 2) { row_norm_i = sqrt(row_norm_i); } /* set elmt_tols_diag */ for (HYPRE_Int j = p_diag + lane; j < q_diag; j += HYPRE_WARP_SIZE) { HYPRE_Int col = A_diag_j[j]; /* elmt_tol = 0.0 ensures diagonal will be kept */ if (col == row_i) { elmt_tols_diag[j] = 0.0; } else { elmt_tols_diag[j] = tol * row_norm_i; } } /* set elmt_tols_offd */ for (HYPRE_Int j = p_offd + lane; j < q_offd; j += HYPRE_WARP_SIZE) { elmt_tols_offd[j] = tol * row_norm_i; } } /* drop the entries that are not on the diagonal and smaller than: * type 0: tol * type 1: tol*(1-norm of row) * type 2: tol*(2-norm of row) * type -1: tol*(infinity norm of row) */ HYPRE_Int hypre_ParCSRMatrixDropSmallEntriesDevice( hypre_ParCSRMatrix *A, HYPRE_Complex tol, HYPRE_Int type) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd); HYPRE_BigInt *h_col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A); HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixDeviceColMapOffd(A); HYPRE_Real *elmt_tols_diag = NULL; HYPRE_Real *elmt_tols_offd = NULL; if (col_map_offd_A == NULL) { col_map_offd_A = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(col_map_offd_A, h_col_map_offd_A, HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDeviceColMapOffd(A) = col_map_offd_A; } /* get elmement-wise tolerances 
if needed */ if (type != 0) { elmt_tols_diag = hypre_TAlloc(HYPRE_Real, hypre_CSRMatrixNumNonzeros(A_diag), HYPRE_MEMORY_DEVICE); elmt_tols_offd = hypre_TAlloc(HYPRE_Real, hypre_CSRMatrixNumNonzeros(A_offd), HYPRE_MEMORY_DEVICE); } dim3 bDim = hypre_GetDefaultCUDABlockDimension(); dim3 gDim = hypre_GetDefaultCUDAGridDimension(hypre_CSRMatrixNumRows(A_diag), "warp", bDim); if (type == -1) { HYPRE_CUDA_LAUNCH( hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols < -1 >, gDim, bDim, hypre_CSRMatrixNumRows(A_diag), tol, hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixData(A_offd), elmt_tols_diag, elmt_tols_offd); } if (type == 1) { HYPRE_CUDA_LAUNCH( hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols<1>, gDim, bDim, hypre_CSRMatrixNumRows(A_diag), tol, hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixData(A_offd), elmt_tols_diag, elmt_tols_offd); } if (type == 2) { HYPRE_CUDA_LAUNCH( hypre_ParCSRMatrixDropSmallEntriesDevice_getElmtTols<2>, gDim, bDim, hypre_CSRMatrixNumRows(A_diag), tol, hypre_CSRMatrixI(A_diag), hypre_CSRMatrixJ(A_diag), hypre_CSRMatrixData(A_diag), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixData(A_offd), elmt_tols_diag, elmt_tols_offd); } /* drop entries from diag and offd CSR matrices */ hypre_CSRMatrixDropSmallEntriesDevice(A_diag, tol, elmt_tols_diag); hypre_CSRMatrixDropSmallEntriesDevice(A_offd, tol, elmt_tols_offd); hypre_ParCSRMatrixSetNumNonzeros(A); hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A); /* squeeze out zero columns of A_offd */ HYPRE_Int *tmp_j, *tmp_end, num_cols_A_offd_new; tmp_j = hypre_TAlloc(HYPRE_Int, hypre_CSRMatrixNumNonzeros(A_offd), HYPRE_MEMORY_DEVICE); hypre_TMemcpy(tmp_j, hypre_CSRMatrixJ(A_offd), HYPRE_Int, hypre_CSRMatrixNumNonzeros(A_offd), HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( sort, tmp_j, tmp_j + 
hypre_CSRMatrixNumNonzeros(A_offd) ); tmp_end = HYPRE_THRUST_CALL( unique, tmp_j, tmp_j + hypre_CSRMatrixNumNonzeros(A_offd) ); num_cols_A_offd_new = tmp_end - tmp_j; hypre_assert(num_cols_A_offd_new <= num_cols_A_offd); if (num_cols_A_offd_new < num_cols_A_offd) { hypre_CSRMatrixNumCols(A_offd) = num_cols_A_offd_new; HYPRE_Int *offd_mark = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_DEVICE); HYPRE_BigInt *col_map_offd_A_new = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd_new, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( scatter, thrust::counting_iterator<HYPRE_Int>(0), thrust::counting_iterator<HYPRE_Int>(num_cols_A_offd_new), tmp_j, offd_mark ); HYPRE_THRUST_CALL( gather, hypre_CSRMatrixJ(A_offd), hypre_CSRMatrixJ(A_offd) + hypre_CSRMatrixNumNonzeros(A_offd), offd_mark, hypre_CSRMatrixJ(A_offd) ); HYPRE_THRUST_CALL( gather, tmp_j, tmp_j + num_cols_A_offd_new, col_map_offd_A, col_map_offd_A_new ); hypre_TFree(offd_mark, HYPRE_MEMORY_DEVICE); hypre_TFree(col_map_offd_A, HYPRE_MEMORY_DEVICE); hypre_TFree(h_col_map_offd_A, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDeviceColMapOffd(A) = col_map_offd_A_new; hypre_ParCSRMatrixColMapOffd(A) = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd_new, HYPRE_MEMORY_HOST); hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(A), col_map_offd_A_new, HYPRE_BigInt, num_cols_A_offd_new, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); } if (type != 0) { hypre_TFree(elmt_tols_diag, HYPRE_MEMORY_DEVICE); hypre_TFree(elmt_tols_offd, HYPRE_MEMORY_DEVICE); } hypre_TFree(tmp_j, HYPRE_MEMORY_DEVICE); return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParCSRMatrixTransposeDevice *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRMatrixTransposeDevice( hypre_ParCSRMatrix *A, hypre_ParCSRMatrix **AT_ptr, HYPRE_Int data ) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *A_diagT; 
hypre_CSRMatrix *AT_offd; HYPRE_Int num_procs; HYPRE_Int num_cols_offd_AT = 0; HYPRE_BigInt *col_map_offd_AT = NULL; hypre_ParCSRMatrix *AT; hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs); if (num_procs > 1) { void *request; hypre_CSRMatrix *A_offdT, *Aext; HYPRE_Int *Aext_ii, *Aext_j, Aext_nnz; HYPRE_Complex *Aext_data; HYPRE_BigInt *tmp_bigj; hypre_CSRMatrixTranspose(A_offd, &A_offdT, data); hypre_CSRMatrixBigJ(A_offdT) = hypre_TAlloc(HYPRE_BigInt, hypre_CSRMatrixNumNonzeros(A_offdT), HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( transform, hypre_CSRMatrixJ(A_offdT), hypre_CSRMatrixJ(A_offdT) + hypre_CSRMatrixNumNonzeros(A_offdT), thrust::make_constant_iterator(hypre_ParCSRMatrixFirstRowIndex(A)), hypre_CSRMatrixBigJ(A_offdT), thrust::plus<HYPRE_BigInt>() ); if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); } hypre_ExchangeExternalRowsDeviceInit(A_offdT, hypre_ParCSRMatrixCommPkg(A), data, &request); hypre_CSRMatrixTranspose(A_diag, &A_diagT, data); Aext = hypre_ExchangeExternalRowsDeviceWait(request); hypre_CSRMatrixDestroy(A_offdT); // Aext contains offd of AT Aext_nnz = hypre_CSRMatrixNumNonzeros(Aext); Aext_ii = hypreDevice_CsrRowPtrsToIndices(hypre_CSRMatrixNumRows(Aext), Aext_nnz, hypre_CSRMatrixI(Aext)); hypre_ParCSRCommPkgCopySendMapElmtsToDevice(hypre_ParCSRMatrixCommPkg(A)); HYPRE_THRUST_CALL( gather, Aext_ii, Aext_ii + Aext_nnz, hypre_ParCSRCommPkgDeviceSendMapElmts(hypre_ParCSRMatrixCommPkg(A)), Aext_ii ); tmp_bigj = hypre_TAlloc(HYPRE_BigInt, Aext_nnz, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(tmp_bigj, hypre_CSRMatrixBigJ(Aext), HYPRE_BigInt, Aext_nnz, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( sort, tmp_bigj, tmp_bigj + Aext_nnz ); HYPRE_BigInt *new_end = HYPRE_THRUST_CALL( unique, tmp_bigj, tmp_bigj + Aext_nnz ); num_cols_offd_AT = new_end - tmp_bigj; col_map_offd_AT = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(col_map_offd_AT, tmp_bigj, HYPRE_BigInt, num_cols_offd_AT, 
HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); hypre_TFree(tmp_bigj, HYPRE_MEMORY_DEVICE); Aext_j = hypre_TAlloc(HYPRE_Int, Aext_nnz, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( lower_bound, col_map_offd_AT, col_map_offd_AT + num_cols_offd_AT, hypre_CSRMatrixBigJ(Aext), hypre_CSRMatrixBigJ(Aext) + Aext_nnz, Aext_j ); Aext_data = hypre_CSRMatrixData(Aext); hypre_CSRMatrixData(Aext) = NULL; hypre_CSRMatrixDestroy(Aext); if (data) { hypreDevice_StableSortByTupleKey(Aext_nnz, Aext_ii, Aext_j, Aext_data, 0); } else { HYPRE_THRUST_CALL( stable_sort, thrust::make_zip_iterator(thrust::make_tuple(Aext_ii, Aext_j)), thrust::make_zip_iterator(thrust::make_tuple(Aext_ii, Aext_j)) + Aext_nnz ); } AT_offd = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumCols(A), num_cols_offd_AT, Aext_nnz); hypre_CSRMatrixJ(AT_offd) = Aext_j; hypre_CSRMatrixData(AT_offd) = Aext_data; hypre_CSRMatrixInitialize_v2(AT_offd, 0, HYPRE_MEMORY_DEVICE); hypreDevice_CsrRowIndicesToPtrs_v2(hypre_CSRMatrixNumRows(AT_offd), Aext_nnz, Aext_ii, hypre_CSRMatrixI(AT_offd)); hypre_TFree(Aext_ii, HYPRE_MEMORY_DEVICE); } else { hypre_CSRMatrixTransposeDevice(A_diag, &A_diagT, data); AT_offd = hypre_CSRMatrixCreate(hypre_ParCSRMatrixNumCols(A), 0, 0); hypre_CSRMatrixInitialize_v2(AT_offd, 0, HYPRE_MEMORY_DEVICE); } AT = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixColStarts(A), hypre_ParCSRMatrixRowStarts(A), num_cols_offd_AT, hypre_CSRMatrixNumNonzeros(A_diagT), hypre_CSRMatrixNumNonzeros(AT_offd)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(AT)); hypre_ParCSRMatrixDiag(AT) = A_diagT; hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(AT)); hypre_ParCSRMatrixOffd(AT) = AT_offd; if (num_cols_offd_AT) { hypre_ParCSRMatrixDeviceColMapOffd(AT) = col_map_offd_AT; hypre_ParCSRMatrixColMapOffd(AT) = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_HOST); hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(AT), col_map_offd_AT, 
HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); } *AT_ptr = AT; return hypre_error_flag; } HYPRE_Int hypre_ParCSRMatrixAddDevice( HYPRE_Complex alpha, hypre_ParCSRMatrix *A, HYPRE_Complex beta, hypre_ParCSRMatrix *B, hypre_ParCSRMatrix **C_ptr ) { hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A); hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A); hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B); hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B); HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd); HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd); HYPRE_Int num_cols_offd_C = 0; HYPRE_BigInt *d_col_map_offd_C = NULL; HYPRE_Int num_procs; hypre_MPI_Comm_size(hypre_ParCSRMatrixComm(A), &num_procs); hypre_CSRMatrix *C_diag = hypre_CSRMatrixAddDevice(alpha, A_diag, beta, B_diag); hypre_CSRMatrix *C_offd; //if (num_cols_offd_A || num_cols_offd_B) if (num_procs > 1) { hypre_ParCSRMatrixCopyColMapOffdToDevice(A); hypre_ParCSRMatrixCopyColMapOffdToDevice(B); HYPRE_BigInt *tmp = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_A + num_cols_offd_B, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(tmp, hypre_ParCSRMatrixDeviceColMapOffd(A), HYPRE_BigInt, num_cols_offd_A, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(tmp + num_cols_offd_A, hypre_ParCSRMatrixDeviceColMapOffd(B), HYPRE_BigInt, num_cols_offd_B, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); HYPRE_THRUST_CALL( sort, tmp, tmp + num_cols_offd_A + num_cols_offd_B ); HYPRE_BigInt *new_end = HYPRE_THRUST_CALL( unique, tmp, tmp + num_cols_offd_A + num_cols_offd_B ); num_cols_offd_C = new_end - tmp; d_col_map_offd_C = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_DEVICE); hypre_TMemcpy(d_col_map_offd_C, tmp, HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_DEVICE); /* reuse memory of tmp */ HYPRE_Int *offd_A2C = (HYPRE_Int *) tmp; HYPRE_Int *offd_B2C = offd_A2C + num_cols_offd_A; HYPRE_THRUST_CALL( lower_bound, d_col_map_offd_C, d_col_map_offd_C + 
num_cols_offd_C, hypre_ParCSRMatrixDeviceColMapOffd(A), hypre_ParCSRMatrixDeviceColMapOffd(A) + num_cols_offd_A, offd_A2C ); HYPRE_THRUST_CALL( lower_bound, d_col_map_offd_C, d_col_map_offd_C + num_cols_offd_C, hypre_ParCSRMatrixDeviceColMapOffd(B), hypre_ParCSRMatrixDeviceColMapOffd(B) + num_cols_offd_B, offd_B2C ); HYPRE_Int *C_offd_i, *C_offd_j, nnzC_offd; HYPRE_Complex *C_offd_a; hypreDevice_CSRSpAdd( hypre_CSRMatrixNumRows(A_offd), hypre_CSRMatrixNumRows(B_offd), num_cols_offd_C, hypre_CSRMatrixNumNonzeros(A_offd), hypre_CSRMatrixNumNonzeros(B_offd), hypre_CSRMatrixI(A_offd), hypre_CSRMatrixJ(A_offd), alpha, hypre_CSRMatrixData(A_offd), offd_A2C, hypre_CSRMatrixI(B_offd), hypre_CSRMatrixJ(B_offd), beta, hypre_CSRMatrixData(B_offd), offd_B2C, NULL, &nnzC_offd, &C_offd_i, &C_offd_j, &C_offd_a ); hypre_TFree(tmp, HYPRE_MEMORY_DEVICE); C_offd = hypre_CSRMatrixCreate(hypre_CSRMatrixNumRows(A_offd), num_cols_offd_C, nnzC_offd); hypre_CSRMatrixI(C_offd) = C_offd_i; hypre_CSRMatrixJ(C_offd) = C_offd_j; hypre_CSRMatrixData(C_offd) = C_offd_a; hypre_CSRMatrixMemoryLocation(C_offd) = HYPRE_MEMORY_DEVICE; } else { C_offd = hypre_CSRMatrixCreate(hypre_CSRMatrixNumRows(A_offd), 0, 0); hypre_CSRMatrixInitialize_v2(C_offd, 0, HYPRE_MEMORY_DEVICE); } /* Create ParCSRMatrix C */ hypre_ParCSRMatrix *C = hypre_ParCSRMatrixCreate(hypre_ParCSRMatrixComm(A), hypre_ParCSRMatrixGlobalNumRows(A), hypre_ParCSRMatrixGlobalNumCols(A), hypre_ParCSRMatrixRowStarts(A), hypre_ParCSRMatrixColStarts(A), num_cols_offd_C, hypre_CSRMatrixNumNonzeros(C_diag), hypre_CSRMatrixNumNonzeros(C_offd)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C)); hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C)); hypre_ParCSRMatrixDiag(C) = C_diag; hypre_ParCSRMatrixOffd(C) = C_offd; if (num_cols_offd_C) { hypre_ParCSRMatrixDeviceColMapOffd(C) = d_col_map_offd_C; hypre_ParCSRMatrixColMapOffd(C) = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST); hypre_TMemcpy(hypre_ParCSRMatrixColMapOffd(C), 
d_col_map_offd_C, HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE); } hypre_ParCSRMatrixSetNumNonzeros(C); hypre_ParCSRMatrixDNumNonzeros(C) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(C); /* create CommPkg of C */ hypre_MatvecCommPkgCreate(C); *C_ptr = C; return hypre_error_flag; } #endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) /*-------------------------------------------------------------------------- * HYPRE_ParCSRDiagScale *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParCSRDiagScale( HYPRE_ParCSRMatrix HA, HYPRE_ParVector Hy, HYPRE_ParVector Hx ) { hypre_ParCSRMatrix *A = (hypre_ParCSRMatrix *) HA; hypre_ParVector *y = (hypre_ParVector *) Hy; hypre_ParVector *x = (hypre_ParVector *) Hx; HYPRE_Real *x_data = hypre_VectorData(hypre_ParVectorLocalVector(x)); HYPRE_Real *y_data = hypre_VectorData(hypre_ParVectorLocalVector(y)); HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A)); HYPRE_Int *A_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A)); HYPRE_Int local_size = hypre_VectorSize(hypre_ParVectorLocalVector(x)); HYPRE_Int ierr = 0; #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) hypreDevice_DiagScaleVector(local_size, A_i, A_data, y_data, 0.0, x_data); //hypre_SyncCudaComputeStream(hypre_handle()); #else /* #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */ HYPRE_Int i; #if defined(HYPRE_USING_DEVICE_OPENMP) #pragma omp target teams distribute parallel for private(i) is_device_ptr(x_data,y_data,A_data,A_i) #elif defined(HYPRE_USING_OPENMP) #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < local_size; i++) { x_data[i] = y_data[i] / A_data[A_i[i]]; } #endif /* #if defined(HYPRE_USING_CUDA) */ return ierr; }
/* ===== begin concatenated file: update_ops_pauli_multi.c ===== */
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include "constant.h" #include "update_ops.h" #include "utility.h" #ifdef _OPENMP #include <omp.h> #endif /** * perform multi_qubit_Pauli_gate with XZ mask. * * This function assumes bit_flip_mask is not 0, i.e., at least one bit is flipped. If no bit is flipped, use multi_qubit_Pauli_gate_Z_mask. * This function update the quantum state with Pauli operation. * bit_flip_mask, phase_flip_mask, global_phase_90rot_count, and pivot_qubit_index must be computed before calling this function. * See get_masks_from_*_list for the above four arguemnts. */ void multi_qubit_Pauli_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, CTYPE* state, ITYPE dim); void multi_qubit_Pauli_rotation_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, double angle, CTYPE* state, ITYPE dim); void multi_qubit_Pauli_gate_Z_mask(ITYPE phase_flip_mask, CTYPE* state, ITYPE dim); void multi_qubit_Pauli_rotation_gate_Z_mask(ITYPE phase_flip_mask, double angle, CTYPE* state, ITYPE dim); void multi_qubit_Pauli_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, CTYPE* state, ITYPE dim) { // pivot mask const ITYPE pivot_mask = 1ULL << pivot_qubit_index; // loop varaibles const ITYPE loop_dim = dim / 2; ITYPE state_index; #ifdef _OPENMP #pragma omp parallel for #endif for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = insert_zero_to_basis_index(state_index, pivot_mask, pivot_qubit_index); // gather index ITYPE basis_1 = basis_0 ^ bit_flip_mask; // determine sign UINT sign_0 = count_population(basis_0 & phase_flip_mask) % 2; UINT sign_1 = count_population(basis_1 & phase_flip_mask) % 2; // fetch values CTYPE cval_0 = state[basis_0]; CTYPE cval_1 = state[basis_1]; // set values state[basis_0] = cval_1 
* PHASE_M90ROT[(global_phase_90rot_count + sign_0 * 2) % 4]; state[basis_1] = cval_0 * PHASE_M90ROT[(global_phase_90rot_count + sign_1 * 2) % 4]; } } void multi_qubit_Pauli_rotation_gate_XZ_mask(ITYPE bit_flip_mask, ITYPE phase_flip_mask, UINT global_phase_90rot_count, UINT pivot_qubit_index, double angle, CTYPE* state, ITYPE dim) { // pivot mask const ITYPE pivot_mask = 1ULL << pivot_qubit_index; // loop varaibles const ITYPE loop_dim = dim / 2; ITYPE state_index; // coefs const double cosval = cos(angle / 2); const double sinval = sin(angle / 2); #ifdef _OPENMP #pragma omp parallel for #endif for (state_index = 0; state_index < loop_dim; ++state_index) { // create base index ITYPE basis_0 = insert_zero_to_basis_index(state_index, pivot_mask, pivot_qubit_index); // gather index ITYPE basis_1 = basis_0 ^ bit_flip_mask; // determine parity int bit_parity_0 = count_population(basis_0 & phase_flip_mask) % 2; int bit_parity_1 = count_population(basis_1 & phase_flip_mask) % 2; // fetch values CTYPE cval_0 = state[basis_0]; CTYPE cval_1 = state[basis_1]; // set values state[basis_0] = cosval * cval_0 + 1.i * sinval * cval_1 * PHASE_M90ROT[(global_phase_90rot_count + bit_parity_0 * 2) % 4]; state[basis_1] = cosval * cval_1 + 1.i * sinval * cval_0 * PHASE_M90ROT[(global_phase_90rot_count + bit_parity_1 * 2) % 4]; } } void multi_qubit_Pauli_gate_Z_mask(ITYPE phase_flip_mask, CTYPE* state, ITYPE dim) { // loop varaibles const ITYPE loop_dim = dim; ITYPE state_index; #ifdef _OPENMP #pragma omp parallel for #endif for (state_index = 0; state_index < loop_dim; ++state_index) { // determine parity int bit_parity = count_population(state_index & phase_flip_mask) % 2; // set values if (bit_parity % 2 == 1) { state[state_index] *= -1; } } } void multi_qubit_Pauli_rotation_gate_Z_mask(ITYPE phase_flip_mask, double angle, CTYPE* state, ITYPE dim) { // loop variables const ITYPE loop_dim = dim; ITYPE state_index; // coefs const double cosval = cos(angle / 2); const double sinval = 
sin(angle / 2); #ifdef _OPENMP #pragma omp parallel for #endif for (state_index = 0; state_index < loop_dim; ++state_index) { // determine sign int bit_parity = count_population(state_index & phase_flip_mask) % 2; int sign = 1 - 2 * bit_parity; // set value state[state_index] *= cosval + (CTYPE)sign * 1.i * sinval; } } void multi_qubit_Pauli_gate_partial_list(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, CTYPE* state, ITYPE dim) { // create pauli mask and call function ITYPE bit_flip_mask = 0; ITYPE phase_flip_mask = 0; UINT global_phase_90rot_count = 0; UINT pivot_qubit_index = 0; get_Pauli_masks_partial_list(target_qubit_index_list, Pauli_operator_type_list, target_qubit_index_count, &bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index); if (bit_flip_mask == 0) { multi_qubit_Pauli_gate_Z_mask(phase_flip_mask, state, dim); } else { multi_qubit_Pauli_gate_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state, dim); } } void multi_qubit_Pauli_gate_whole_list(const UINT* Pauli_operator_type_list, UINT qubit_count, CTYPE* state, ITYPE dim) { // create pauli mask and call function ITYPE bit_flip_mask = 0; ITYPE phase_flip_mask = 0; UINT global_phase_90rot_count = 0; UINT pivot_qubit_index = 0; get_Pauli_masks_whole_list(Pauli_operator_type_list, qubit_count, &bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index); if (bit_flip_mask == 0) { multi_qubit_Pauli_gate_Z_mask(phase_flip_mask, state, dim); } else { multi_qubit_Pauli_gate_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, state, dim); } } void multi_qubit_Pauli_rotation_gate_partial_list(const UINT* target_qubit_index_list, const UINT* Pauli_operator_type_list, UINT target_qubit_index_count, double angle, CTYPE* state, ITYPE dim) { // create pauli mask and call function ITYPE bit_flip_mask = 0; ITYPE phase_flip_mask = 0; UINT 
global_phase_90rot_count = 0; UINT pivot_qubit_index = 0; get_Pauli_masks_partial_list(target_qubit_index_list, Pauli_operator_type_list, target_qubit_index_count, &bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index); if (bit_flip_mask == 0) { multi_qubit_Pauli_rotation_gate_Z_mask(phase_flip_mask, angle, state, dim); } else { multi_qubit_Pauli_rotation_gate_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, angle, state, dim); } } void multi_qubit_Pauli_rotation_gate_whole_list(const UINT* Pauli_operator_type_list, UINT qubit_count, double angle, CTYPE* state, ITYPE dim) { // create pauli mask and call function ITYPE bit_flip_mask = 0; ITYPE phase_flip_mask = 0; UINT global_phase_90rot_count = 0; UINT pivot_qubit_index = 0; get_Pauli_masks_whole_list(Pauli_operator_type_list, qubit_count, &bit_flip_mask, &phase_flip_mask, &global_phase_90rot_count, &pivot_qubit_index); if (bit_flip_mask == 0) { multi_qubit_Pauli_rotation_gate_Z_mask(phase_flip_mask, angle, state, dim); } else { multi_qubit_Pauli_rotation_gate_XZ_mask(bit_flip_mask, phase_flip_mask, global_phase_90rot_count, pivot_qubit_index, angle, state, dim); } }
GeneralMatrixMatrix.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_GENERAL_MATRIX_MATRIX_H
#define EIGEN_GENERAL_MATRIX_MATRIX_H

namespace Eigen {

namespace internal {

template<typename _LhsScalar, typename _RhsScalar> class level3_blocking;

/* Specialization for a row-major destination matrix => simple transposition of the product */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,RowMajor>
{
  typedef gebp_traits<RhsScalar,LhsScalar> Traits;

  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static EIGEN_STRONG_INLINE void run(
    Index rows, Index cols, Index depth,
    const LhsScalar* lhs, Index lhsStride,
    const RhsScalar* rhs, Index rhsStride,
    ResScalar* res, Index resStride,
    ResScalar alpha,
    level3_blocking<RhsScalar,LhsScalar>& blocking,
    GemmParallelInfo<Index>* info = 0)
  {
    // transpose the product such that the result is column major:
    // C = A*B (row-major C) is computed as C^T = B^T * A^T (col-major C^T),
    // which is why lhs/rhs and rows/cols are swapped in the delegated call.
    general_matrix_matrix_product<Index,
      RhsScalar, RhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateRhs,
      LhsScalar, LhsStorageOrder==RowMajor ? ColMajor : RowMajor, ConjugateLhs,
      ColMajor>
    ::run(cols,rows,depth,rhs,rhsStride,lhs,lhsStride,res,resStride,alpha,blocking,info);
  }
};

/* Specialization for a col-major destination matrix
 * => Blocking algorithm following Goto's paper */
template<
  typename Index,
  typename LhsScalar, int LhsStorageOrder, bool ConjugateLhs,
  typename RhsScalar, int RhsStorageOrder, bool ConjugateRhs>
struct general_matrix_matrix_product<Index,LhsScalar,LhsStorageOrder,ConjugateLhs,RhsScalar,RhsStorageOrder,ConjugateRhs,ColMajor>
{
  typedef gebp_traits<LhsScalar,RhsScalar> Traits;

  typedef typename ScalarBinaryOpTraits<LhsScalar, RhsScalar>::ReturnType ResScalar;
  static void run(Index rows, Index cols, Index depth,
                  const LhsScalar* _lhs, Index lhsStride,
                  const RhsScalar* _rhs, Index rhsStride,
                  ResScalar* _res, Index resStride,
                  ResScalar alpha,
                  level3_blocking<LhsScalar,RhsScalar>& blocking,
                  GemmParallelInfo<Index>* info = 0)
  {
    typedef const_blas_data_mapper<LhsScalar, Index, LhsStorageOrder> LhsMapper;
    typedef const_blas_data_mapper<RhsScalar, Index, RhsStorageOrder> RhsMapper;
    typedef blas_data_mapper<typename Traits::ResScalar, Index, ColMajor> ResMapper;
    LhsMapper lhs(_lhs,lhsStride);
    RhsMapper rhs(_rhs,rhsStride);
    ResMapper res(_res, resStride);

    Index kc = blocking.kc();                   // cache block size along the K direction
    Index mc = (std::min)(rows,blocking.mc());  // cache block size along the M direction
    Index nc = (std::min)(cols,blocking.nc());  // cache block size along the N direction

    // packing routines and the micro-kernel (block-panel product)
    gemm_pack_lhs<LhsScalar, Index, LhsMapper, Traits::mr, Traits::LhsProgress, LhsStorageOrder> pack_lhs;
    gemm_pack_rhs<RhsScalar, Index, RhsMapper, Traits::nr, RhsStorageOrder> pack_rhs;
    gebp_kernel<LhsScalar, RhsScalar, Index, ResMapper, Traits::mr, Traits::nr, ConjugateLhs, ConjugateRhs> gebp;

#ifdef EIGEN_HAS_OPENMP
    if(info)
    {
      // this is the parallel version!
      int tid = omp_get_thread_num();
      int threads = omp_get_num_threads();

      // blockA is shared by all threads (allocated by initParallelSession);
      // each thread packs only its own horizontal slice of the lhs into it.
      LhsScalar* blockA = blocking.blockA();
      eigen_internal_assert(blockA!=0);

      std::size_t sizeB = kc*nc;
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, 0);

      // For each horizontal panel of the rhs, and corresponding vertical panel of the lhs...
      for(Index k=0; k<depth; k+=kc)
      {
        const Index actual_kc = (std::min)(k+kc,depth)-k; // => rows of B', and cols of the A'

        // In order to reduce the chance that a thread has to wait for the other,
        // let's start by packing B'.
        pack_rhs(blockB, rhs.getSubMapper(k,0), actual_kc, nc);

        // Pack A_k to A' in a parallel fashion:
        // each thread packs the sub block A_k,i to A'_i where i is the thread id.

        // However, before copying to A'_i, we have to make sure that no other thread is still using it,
        // i.e., we test that info[tid].users equals 0.
        // Then, we set info[tid].users to the number of threads to mark that all other threads are going to use it.
        while(info[tid].users!=0) {}  // busy-wait until every reader of the previous k-slice is done
        info[tid].users += threads;

        pack_lhs(blockA+info[tid].lhs_start*actual_kc, lhs.getSubMapper(info[tid].lhs_start,k), actual_kc, info[tid].lhs_length);

        // Notify the other threads that the part A'_i is ready to go.
        info[tid].sync = k;

        // Computes C_i += A' * B' per A'_i
        for(int shift=0; shift<threads; ++shift)
        {
          int i = (tid+shift)%threads;

          // At this point we have to make sure that A'_i has been updated by the thread i,
          // we use testAndSetOrdered to mimic a volatile access.
          // However, no need to wait for the B' part which has been updated by the current thread!
          if (shift>0) {
            while(info[i].sync!=k) {
            }
          }

          gebp(res.getSubMapper(info[i].lhs_start, 0), blockA+info[i].lhs_start*actual_kc, blockB, info[i].lhs_length, actual_kc, nc, alpha);
        }

        // Then keep going as usual with the remaining B'
        for(Index j=nc; j<cols; j+=nc)
        {
          const Index actual_nc = (std::min)(j+nc,cols)-j;

          // pack B_k,j to B'
          pack_rhs(blockB, rhs.getSubMapper(k,j), actual_kc, actual_nc);

          // C_j += A' * B'
          gebp(res.getSubMapper(0, j), blockA, blockB, rows, actual_kc, actual_nc, alpha);
        }

        // Release all the sub blocks A'_i of A' for the current thread,
        // i.e., we simply decrement the number of users by 1
        for(Index i=0; i<threads; ++i)
#pragma omp atomic
          info[i].users -= 1;
      }
    }
    else
#endif // EIGEN_HAS_OPENMP
    {
      EIGEN_UNUSED_VARIABLE(info);

      // this is the sequential version!
      std::size_t sizeA = kc*mc;
      std::size_t sizeB = kc*nc;

      ei_declare_aligned_stack_constructed_variable(LhsScalar, blockA, sizeA, blocking.blockA());
      ei_declare_aligned_stack_constructed_variable(RhsScalar, blockB, sizeB, blocking.blockB());

      // If the whole rhs fits in one (kc x nc) block while the lhs does not fit
      // in a single mc slice, the rhs only needs to be packed on the first i2 pass.
      const bool pack_rhs_once = mc!=rows && kc==depth && nc==cols;

      // For each horizontal panel of the rhs, and corresponding panel of the lhs...
      for(Index i2=0; i2<rows; i2+=mc)
      {
        const Index actual_mc = (std::min)(i2+mc,rows)-i2;

        for(Index k2=0; k2<depth; k2+=kc)
        {
          const Index actual_kc = (std::min)(k2+kc,depth)-k2;

          // OK, here we have selected one horizontal panel of rhs and one vertical panel of lhs.
          // => Pack lhs's panel into a sequential chunk of memory (L2/L3 caching)
          // Note that this panel will be read as many times as the number of blocks in the rhs's
          // horizontal panel which is, in practice, a very low number.
          pack_lhs(blockA, lhs.getSubMapper(i2,k2), actual_kc, actual_mc);

          // For each kc x nc block of the rhs's horizontal panel...
          for(Index j2=0; j2<cols; j2+=nc)
          {
            const Index actual_nc = (std::min)(j2+nc,cols)-j2;

            // We pack the rhs's block into a sequential chunk of memory (L2 caching)
            // Note that this block will be read a very high number of times, which is equal to the number of
            // micro horizontal panel of the large rhs's panel (e.g., rows/12 times).
            if((!pack_rhs_once) || i2==0)
              pack_rhs(blockB, rhs.getSubMapper(k2,j2), actual_kc, actual_nc);

            // Everything is packed, we can now call the panel * block kernel:
            gebp(res.getSubMapper(i2, j2), blockA, blockB, actual_mc, actual_kc, actual_nc, alpha);
          }
        }
      }
    }
  }
};

/*********************************************************************************
*  Specialization of generic_product_impl for "large" GEMM, i.e.,
*  implementation of the high level wrapper to general_matrix_matrix_product
**********************************************************************************/

// Callable object binding the operands, destination, scalar factor and blocking
// together; invoked (possibly per-thread with a GemmParallelInfo) by parallelize_gemm.
template<typename Scalar, typename Index, typename Gemm, typename Lhs, typename Rhs, typename Dest, typename BlockingType>
struct gemm_functor
{
  gemm_functor(const Lhs& lhs, const Rhs& rhs, Dest& dest, const Scalar& actualAlpha, BlockingType& blocking)
    : m_lhs(lhs), m_rhs(rhs), m_dest(dest), m_actualAlpha(actualAlpha), m_blocking(blocking)
  {}

  // Sizes the blocking for num_threads and pre-allocates the shared lhs buffer.
  void initParallelSession(Index num_threads) const
  {
    m_blocking.initParallel(m_lhs.rows(), m_rhs.cols(), m_lhs.cols(), num_threads);
    m_blocking.allocateA();
  }

  // Runs the product on the row/col sub-range assigned to the caller.
  void operator() (Index row, Index rows, Index col=0, Index cols=-1, GemmParallelInfo<Index>* info=0) const
  {
    if(cols==-1)  // -1 means all columns
      cols = m_rhs.cols();

    Gemm::run(rows, cols, m_lhs.cols(),
              &m_lhs.coeffRef(row,0), m_lhs.outerStride(),
              &m_rhs.coeffRef(0,col), m_rhs.outerStride(),
              (Scalar*)&(m_dest.coeffRef(row,col)), m_dest.outerStride(),
              m_actualAlpha, m_blocking, info);
  }

  typedef typename Gemm::Traits Traits;

  protected:
    const Lhs& m_lhs;
    const Rhs& m_rhs;
    Dest& m_dest;
    Scalar m_actualAlpha;
    BlockingType& m_blocking;
};

// Primary template; FiniteAtCompileTime selects the static-buffer specialization
// when all three dimensions are known at compile time.
template<int StorageOrder, typename LhsScalar, typename RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor=1,
bool FiniteAtCompileTime = MaxRows!=Dynamic && MaxCols!=Dynamic && MaxDepth != Dynamic> class gemm_blocking_space;

// Shared base of the blocking classes: owns the pointers to the packed lhs/rhs
// buffers and the chosen mc/nc/kc cache block sizes.
template<typename _LhsScalar, typename _RhsScalar>
class level3_blocking
{
    typedef _LhsScalar LhsScalar;
    typedef _RhsScalar RhsScalar;

  protected:
    LhsScalar* m_blockA;
    RhsScalar* m_blockB;

    Index m_mc;
    Index m_nc;
    Index m_kc;

  public:

    level3_blocking()
      : m_blockA(0), m_blockB(0), m_mc(0), m_nc(0), m_kc(0)
    {}

    inline Index mc() const { return m_mc; }
    inline Index nc() const { return m_nc; }
    inline Index kc() const { return m_kc; }

    inline LhsScalar* blockA() { return m_blockA; }
    inline RhsScalar* blockB() { return m_blockB; }
};

// Fully static sizes: packed buffers live inside the object (no heap), with
// manual over-alignment fallback when static alignment is insufficient.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, true /* == FiniteAtCompileTime */>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor,
      ActualRows = Transpose ? MaxCols : MaxRows,
      ActualCols = Transpose ? MaxRows : MaxCols
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;
    enum {
      SizeA = ActualRows * MaxDepth,
      SizeB = ActualCols * MaxDepth
    };

#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
    EIGEN_ALIGN_MAX LhsScalar m_staticA[SizeA];
    EIGEN_ALIGN_MAX RhsScalar m_staticB[SizeB];
#else
    // over-allocate raw storage and align the pointer by hand in the ctor
    EIGEN_ALIGN_MAX char m_staticA[SizeA * sizeof(LhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
    EIGEN_ALIGN_MAX char m_staticB[SizeB * sizeof(RhsScalar) + EIGEN_DEFAULT_ALIGN_BYTES-1];
#endif

  public:

    gemm_blocking_space(Index /*rows*/, Index /*cols*/, Index /*depth*/, Index /*num_threads*/, bool /*full_rows = false*/)
    {
      this->m_mc = ActualRows;
      this->m_nc = ActualCols;
      this->m_kc = MaxDepth;
#if EIGEN_MAX_STATIC_ALIGN_BYTES >= EIGEN_DEFAULT_ALIGN_BYTES
      this->m_blockA = m_staticA;
      this->m_blockB = m_staticB;
#else
      this->m_blockA = reinterpret_cast<LhsScalar*>((internal::UIntPtr(m_staticA) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
      this->m_blockB = reinterpret_cast<RhsScalar*>((internal::UIntPtr(m_staticB) + (EIGEN_DEFAULT_ALIGN_BYTES-1)) & ~std::size_t(EIGEN_DEFAULT_ALIGN_BYTES-1));
#endif
    }

    // Nothing to do: buffers are static members.
    void initParallel(Index, Index, Index, Index)
    {}

    inline void allocateA() {}
    inline void allocateB() {}
    inline void allocateAll() {}
};

// Dynamic sizes: block sizes are computed at run time and the packed buffers
// are heap-allocated lazily (allocateA/allocateB) and freed in the destructor.
template<int StorageOrder, typename _LhsScalar, typename _RhsScalar, int MaxRows, int MaxCols, int MaxDepth, int KcFactor>
class gemm_blocking_space<StorageOrder,_LhsScalar,_RhsScalar,MaxRows, MaxCols, MaxDepth, KcFactor, false>
  : public level3_blocking<
      typename conditional<StorageOrder==RowMajor,_RhsScalar,_LhsScalar>::type,
      typename conditional<StorageOrder==RowMajor,_LhsScalar,_RhsScalar>::type>
{
    enum {
      Transpose = StorageOrder==RowMajor
    };
    typedef typename conditional<Transpose,_RhsScalar,_LhsScalar>::type LhsScalar;
    typedef typename conditional<Transpose,_LhsScalar,_RhsScalar>::type RhsScalar;
    typedef gebp_traits<LhsScalar,RhsScalar> Traits;

    Index m_sizeA;
    Index m_sizeB;

  public:

    gemm_blocking_space(Index rows, Index cols, Index depth, Index num_threads, bool l3_blocking)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      if(l3_blocking)
      {
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, this->m_nc, num_threads);
      }
      else  // no l3 blocking
      {
        // pass a copy so m_nc is left untouched by computeProductBlockingSizes
        Index n = this->m_nc;
        computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, this->m_mc, n, num_threads);
      }

      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void initParallel(Index rows, Index cols, Index depth, Index num_threads)
    {
      this->m_mc = Transpose ? cols : rows;
      this->m_nc = Transpose ? rows : cols;
      this->m_kc = depth;

      eigen_internal_assert(this->m_blockA==0 && this->m_blockB==0);
      Index m = this->m_mc;
      computeProductBlockingSizes<LhsScalar,RhsScalar,KcFactor>(this->m_kc, m, this->m_nc, num_threads);
      m_sizeA = this->m_mc * this->m_kc;
      m_sizeB = this->m_kc * this->m_nc;
    }

    void allocateA()
    {
      if(this->m_blockA==0)
        this->m_blockA = aligned_new<LhsScalar>(m_sizeA);
    }

    void allocateB()
    {
      if(this->m_blockB==0)
        this->m_blockB = aligned_new<RhsScalar>(m_sizeB);
    }

    void allocateAll()
    {
      allocateA();
      allocateB();
    }

    ~gemm_blocking_space()
    {
      aligned_delete(this->m_blockA, m_sizeA);
      aligned_delete(this->m_blockB, m_sizeB);
    }
};

} // end namespace internal

namespace internal {

template<typename Lhs, typename Rhs>
struct generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct>
  : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,GemmProduct> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;
  typedef typename Lhs::Scalar LhsScalar;
  typedef typename Rhs::Scalar RhsScalar;

  typedef internal::blas_traits<Lhs> LhsBlasTraits;
  typedef typename LhsBlasTraits::DirectLinearAccessType ActualLhsType;
  typedef typename internal::remove_all<ActualLhsType>::type ActualLhsTypeCleaned;

  typedef internal::blas_traits<Rhs> RhsBlasTraits;
  typedef typename RhsBlasTraits::DirectLinearAccessType ActualRhsType;
  typedef typename internal::remove_all<ActualRhsType>::type ActualRhsTypeCleaned;

  enum {
    MaxDepthAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(Lhs::MaxColsAtCompileTime,Rhs::MaxRowsAtCompileTime)
  };

  typedef generic_product_impl<Lhs,Rhs,DenseShape,DenseShape,CoeffBasedProductMode> lazyproduct;

  template<typename Dst>
  static void evalTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    // small-product heuristic: for tiny sizes the coefficient-based (lazy)
    // product beats the blocked GEMM machinery
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::evalTo(dst, lhs, rhs);
    else
    {
      dst.setZero();
      scaleAndAddTo(dst, lhs, rhs, Scalar(1));
    }
  }

  template<typename Dst>
  static void addTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::addTo(dst, lhs, rhs);
    else
      scaleAndAddTo(dst,lhs, rhs, Scalar(1));
  }

  template<typename Dst>
  static void subTo(Dst& dst, const Lhs& lhs, const Rhs& rhs)
  {
    if((rhs.rows()+dst.rows()+dst.cols())<20 && rhs.rows()>0)
      lazyproduct::subTo(dst, lhs, rhs);
    else
      scaleAndAddTo(dst, lhs, rhs, Scalar(-1));
  }

  // dst += alpha * lhs * rhs — the entry point that wires up blocking,
  // storage orders, conjugation flags and (optional) OpenMP parallelism.
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& a_lhs, const Rhs& a_rhs, const Scalar& alpha)
  {
    eigen_assert(dst.rows()==a_lhs.rows() && dst.cols()==a_rhs.cols());
    if(a_lhs.cols()==0 || a_lhs.rows()==0 || a_rhs.cols()==0)
      return;

    // strip transpose/conjugate/scalar-multiple wrappers from the operands
    typename internal::add_const_on_value_type<ActualLhsType>::type lhs = LhsBlasTraits::extract(a_lhs);
    typename internal::add_const_on_value_type<ActualRhsType>::type rhs = RhsBlasTraits::extract(a_rhs);

    // fold any scalar factors carried by the expressions into alpha
    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(a_lhs)
                               * RhsBlasTraits::extractScalarFactor(a_rhs);

    typedef internal::gemm_blocking_space<(Dest::Flags&RowMajorBit) ? RowMajor : ColMajor,LhsScalar,RhsScalar,
            Dest::MaxRowsAtCompileTime,Dest::MaxColsAtCompileTime,MaxDepthAtCompileTime> BlockingType;

    typedef internal::gemm_functor<
      Scalar, Index,
      internal::general_matrix_matrix_product<
        Index,
        LhsScalar, (ActualLhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(LhsBlasTraits::NeedToConjugate),
        RhsScalar, (ActualRhsTypeCleaned::Flags&RowMajorBit) ? RowMajor : ColMajor, bool(RhsBlasTraits::NeedToConjugate),
        (Dest::Flags&RowMajorBit) ? RowMajor : ColMajor>,
      ActualLhsTypeCleaned, ActualRhsTypeCleaned, Dest, BlockingType> GemmFunctor;

    BlockingType blocking(dst.rows(), dst.cols(), lhs.cols(), 1, true);
    internal::parallelize_gemm<(Dest::MaxRowsAtCompileTime>32 || Dest::MaxRowsAtCompileTime==Dynamic)>
        (GemmFunctor(lhs, rhs, dst, actualAlpha, blocking), a_lhs.rows(), a_rhs.cols(), a_lhs.cols(), Dest::Flags&RowMajorBit);
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_GENERAL_MATRIX_MATRIX_H
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include 
"clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class 
ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class 
VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. 
if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of expected type during expression parsing. The type is tied to /// a particular token, all functions that update or consume the type take a /// start location of the token they are looking at as a parameter. This allows /// to avoid updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this funciton, as it stores a /// function_ref, clients should make sure all calls to get() with the same /// location happen while function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store expected type. 
SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema final { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; /// A key method to reduce duplicate debug info from Sema. virtual void anchor(); ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it. bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: /// The maximum alignment, same as in llvm::Value. We duplicate them here /// because that allows us not to duplicate the constants in clang code, /// which we must to since we can't directly use the llvm constants. /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. 
static const unsigned MaxAlignmentExponent = 29; static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent; typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions CurFPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; api_notes::APINotesManager APINotes; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails. 
llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value) { if (Action == PSK_Reset) { CurrentValue = DefaultValue; CurrentPragmaLocation = PragmaLocation; return; } if (Action & PSK_Push) Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation, PragmaLocation); else if (Action & PSK_Pop) { if (!StackSlotLabel.empty()) { // If we've got a label, try to find it and jump there. 
auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) { return x.StackSlotLabel == StackSlotLabel; }); // If we found the label so pop from there. if (I != Stack.rend()) { CurrentValue = I->Value; CurrentPragmaLocation = I->PragmaLocation; Stack.erase(std::prev(I.base()), Stack.end()); } } else if (!Stack.empty()) { // We do not have a label, just pop the last entry. CurrentValue = Stack.back().Value; CurrentPragmaLocation = Stack.back().PragmaLocation; Stack.pop_back(); } } if (Action & PSK_Set) { CurrentValue = Value; CurrentPragmaLocation = PragmaLocation; } } // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action. ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. 
/// Possible values are 0, 1, and 2, which mean:
struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. 
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>, llvm::SmallPtrSet<Expr *, 4>>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; /// The index of the first FunctionScope that corresponds to the current /// context. unsigned FunctionScopesStart = 0; ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const { return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart, FunctionScopes.end()); } /// Stack containing information needed when in C++2a an 'auto' is encountered /// in a function declaration parameter type specifier in order to invent a /// corresponding template parameter in the enclosing abbreviated function /// template. This information is also present in LambdaScopeInfo, stored in /// the FunctionScopes stack. SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos; /// The index of the first InventedParameterInfo that refers to the current /// context. unsigned InventedParameterInfosStart = 0; ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const { return llvm::makeArrayRef(InventedParameterInfos.begin() + InventedParameterInfosStart, InventedParameterInfos.end()); } typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. 
std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; /// All the external declarations encoutered and used in the TU. SmallVector<VarDecl *, 4> ExternalDeclarations; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. 
/// Only contains the first declaration.
std::function<TypeResult(StringRef, StringRef, SourceLocation)> ParseTypeFromStringCallback; class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. 
class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; unsigned SavedFunctionScopesStart; unsigned SavedInventedParameterInfosStart; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride), SavedFunctionScopesStart(S.FunctionScopesStart), SavedInventedParameterInfosStart(S.InventedParameterInfosStart) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); // Any saved FunctionScopes do not refer to this context. S.FunctionScopesStart = S.FunctionScopes.size(); S.InventedParameterInfosStart = S.InventedParameterInfos.size(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; S.FunctionScopesStart = SavedFunctionScopesStart; S.InventedParameterInfosStart = SavedInventedParameterInfosStart; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Whether the AST is currently being rebuilt to correct immediate /// invocations. Immediate invocation candidates and references to consteval /// functions aren't tracked when this is set. bool RebuildingImmediateInvocation = false; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. 
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. 
SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library resides. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits> ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. 
ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. 
DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. 
SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// Expressions appearing as the LHS of a volatile assignment in this /// context. We produce a warning for these when popping the context if /// they are not discarded-value expressions nor unevaluated operands. SmallVector<Expr*, 2> VolatileAssignmentLHSs; /// Set of candidates for starting an immediate invocation. llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates; /// Set of DeclRefExprs referencing a consteval function when used in a /// context not already known to be immediately invoked. llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval; /// \brief Describes whether we are in an expression constext which we have /// to handle differently. 
enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {} bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. Also return the extra mangling decl if any. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. std::tuple<MangleNumberingContext *, Decl *> getCurrentMangleNumberContext(const DeclContext *DC); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? 
NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. 
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;

/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);

/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;

typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;

/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
    ImplicitlyRetainedSelfLocs;

/// Kinds of C++ special members.
enum CXXSpecialMember {
  CXXDefaultConstructor,
  CXXCopyConstructor,
  CXXMoveConstructor,
  CXXCopyAssignment,
  CXXMoveAssignment,
  CXXDestructor,
  CXXInvalid
};

// A (record, special-member-kind) pair packed into a pointer-sized value;
// the 3 spare pointer bits hold the CXXSpecialMember enumerator.
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
    SpecialMemberDecl;

/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

/// Kinds of defaulted comparison operator functions.
enum class DefaultedComparisonKind : unsigned char {
  /// This is not a defaultable comparison operator.
  None,
  /// This is an operator== that should be implemented as a series of
  /// subobject comparisons.
  Equal,
  /// This is an operator<=> that should be implemented as a series of
  /// subobject comparisons.
  ThreeWay,
  /// This is an operator!= that should be implemented as a rewrite in terms
  /// of a == comparison.
  NotEqual,
  /// This is an <, <=, >, or >= that should be implemented as a rewrite in
  /// terms of a <=> comparison.
  Relational,
};

/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

// NOTE(review): presumably these pull method-pool / selector data in from an
// external AST source (PCH/module) -- confirm against the definitions.
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);

/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted.
/// This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);

/// Records and restores the CurFPFeatures state on entry/exit of compound
/// statements.
class FPFeaturesStateRAII {
public:
  FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
    OldOverrides = S.FpPragmaStack.CurrentValue;
  }
  ~FPFeaturesStateRAII() {
    // Restore both the FP feature state and the pragma-stack override
    // captured at construction.
    S.CurFPFeatures = OldFPFeaturesState;
    S.FpPragmaStack.CurrentValue = OldOverrides;
  }
  FPOptionsOverride getOverrides() { return OldOverrides; }

private:
  Sema& S;
  FPOptions OldFPFeaturesState;
  FPOptionsOverride OldOverrides;
};

void addImplicitTypedef(StringRef Name, QualType T);

// True once the "stack nearly exhausted" warning has been issued, so it is
// emitted at most once per Sema instance.
bool WarnedStackExhausted = false;

public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
     TranslationUnitKind TUKind = TU_Complete,
     CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();

/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();

// Trivial accessors for the subsystems Sema references.
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }

DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }

/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);

void PrintStats() const;

/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);

/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
                                 llvm::function_ref<void()> Fn);

/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
  Sema &SemaRef;
  unsigned DiagID;

public:
  SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
    : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

  // This is a cunning lie. DiagnosticBuilder actually performs move
  // construction in its copy constructor (but due to varied uses, it's not
  // possible to conveniently express this as actual move construction). So
  // the default copy ctor here is fine, because the base class disables the
  // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
  // in that case anyway.
  SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;

  ~SemaDiagnosticBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive()) return;

    // Otherwise, we need to emit the diagnostic. First flush the underlying
    // DiagnosticBuilder data, and clear the diagnostic builder itself so it
    // won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    FlushCounts();
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }

  /// Teach operator<< to produce an object of the correct type.
  template<typename T>
  friend const SemaDiagnosticBuilder &operator<<(
      const SemaDiagnosticBuilder &Diag, const T &Value) {
    const DiagnosticBuilder &BaseDiag = Diag;
    BaseDiag << Value;
    return Diag;
  }
};

/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(DB, *this, DiagID);
}

/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

bool findMacroSpelling(SourceLocation &loc, StringRef name);

/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;

/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
                                           unsigned Index);

void emitAndClearUnusedLocalTypedefWarnings();

private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;

public:
// Emit all deferred diagnostics.
void emitDeferredDiags();

enum TUFragmentKind {
  /// The global module fragment, between 'module;' and a module-declaration.
  Global,
  /// A normal translation unit fragment. For a non-module unit, this is the
  /// entire translation unit. Otherwise, it runs from the module-declaration
  /// to the private-module-fragment (if any) or the end of the TU (if not).
  Normal,
  /// The private module fragment, between 'module :private;' and the end of
  /// the translation unit.
  Private
};

void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);

void CheckDelegatingCtorCycles();

Scope *getScopeForContext(DeclContext *Ctx);

// Push function / block / lambda / captured-region scope information used
// while parsing and analyzing a function-like body.
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();

/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);

void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
                             RecordDecl *RD, CapturedRegionKind K,
                             unsigned OpenMPCaptureLevel = 0);

/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
  Sema *Self;

public:
  explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
  void operator()(sema::FunctionScopeInfo *Scope) const;
};

using PoppedFunctionScopePtr =
    std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;

PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
                     const Decl *D = nullptr,
                     QualType BlockType = QualType());

sema::FunctionScopeInfo *getCurFunction() const {
  return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}

sema::FunctionScopeInfo *getEnclosingFunction() const;

void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();

void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();

sema::CompoundScopeInfo &getCurCompoundScope() const;

bool hasAnyUnrecoverableErrorsInThisFunction() const;

/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();

/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;

/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);

/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();

/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();

/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }

/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
                                             unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);

void ActOnComment(SourceRange Comment);

//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//

// Builders for qualified, pointer, reference, array, vector, and matrix
// types, used when forming types from declarators and template
// instantiation.
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
                            const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
                            const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
                          SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
                            SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
                        Expr *ArraySize, unsigned Quals,
                        SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
                            SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
                         SourceLocation AttrLoc);

QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
                               SourceLocation AttrLoc);

/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
                               SourceLocation AttrLoc);

bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);

bool CheckFunctionReturnType(QualType T, SourceLocation Loc);

/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes,
                           SourceLocation Loc, DeclarationName Entity,
                           const FunctionProtoType::ExtProtoInfo &EPI);

QualType BuildMemberPointerType(QualType T, QualType Class,
                                SourceLocation Loc,
                                DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
                               SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T, SourceLocation Loc);
QualType BuildWritePipeType(QualType T, SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);

TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);

/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
                                  TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
                                     SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
                                              const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
                         const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
    const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
    const FunctionProtoType *Old, SourceLocation OldLoc,
    const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
                              const PartialDiagnostic &NestedDiagID,
                              const PartialDiagnostic &NoteID,
                              const PartialDiagnostic &NoThrowDiagID,
                              const FunctionProtoType *Superset,
                              SourceLocation SuperLoc,
                              const FunctionProtoType *Subset,
                              SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
                             const PartialDiagnostic &NoteID,
                             const FunctionProtoType *Target,
                             SourceLocation TargetLoc,
                             const FunctionProtoType *Source,
                             SourceLocation SourceLoc);

TypeResult ActOnTypeName(Scope *S, Declarator &D);

/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);

/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
  TypeDiagnoser() {}

  virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
  virtual ~TypeDiagnoser() {}
};

// getPrintable overloads normalize diagnostic arguments into a form the
// diagnostic streamer understands (identity for most, source range for
// expressions and type locations).
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
  return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}

/// A TypeDiagnoser that binds a diagnostic ID plus a tuple of extra
/// arguments, streaming them (then the type) into the diagnostic.
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
  unsigned DiagID;
  std::tuple<const Ts &...> Args;

  template <std::size_t... Is>
  void emit(const SemaDiagnosticBuilder &DB,
            std::index_sequence<Is...>) const {
    // Apply all tuple elements to the builder in order.
    bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
    (void)Dummy;
  }

public:
  BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
      : TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
    assert(DiagID != 0 && "no diagnostic for type diagnoser");
  }

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
    emit(DB, std::index_sequence_for<Ts...>());
    DB << T;
  }
};

/// Do a check to make sure \p Name looks like a legal swift_name
/// attribute for the decl \p D. Raise a diagnostic if the name is invalid
/// for the given declaration.
///
/// For a function, this will validate a compound Swift name,
/// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>,
/// and the function will output the number of parameter names, and whether
/// this is a single-arg initializer.
///
/// For a type, enum constant, property, or variable declaration, this will
/// validate either a simple identifier, or a qualified
/// <code>context.identifier</code> name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation ArgLoc,
                       const IdentifierInfo *AttrName);

/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
  SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
      : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}

  void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
    const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
    this->emit(DB, std::index_sequence_for<Ts...>());
    DB << T->isSizelessType() << T;
  }
};

enum class CompleteTypeKind {
  /// Apply the normal rules for complete types. In particular,
  /// treat all sizeless types as incomplete.
  Normal,

  /// Relax the normal rules for complete types so that they include
  /// sizeless built-in types.
  AcceptSizeless,

  // FIXME: Eventually we should flip the default to Normal and opt in
  // to AcceptSizeless rather than opt out of it.
  Default = AcceptSizeless
};

private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed.
/// For example, in `&*p` where `p` is a noderef pointer, we will first parse
/// the `*p`, but need to check that `address of` is called on it. This
/// requires keeping a container of all pending expressions and checking if
/// the address of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);

bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                             CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);

// State for one module currently being parsed (see ModuleScopes below).
struct ModuleScope {
  SourceLocation BeginLoc;
  clang::Module *Module = nullptr;
  bool ModuleInterface = false;
  bool ImplicitGlobalModuleFragment = false;
  VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;

/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;

/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
  return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}

VisibleModuleSet VisibleModules;

public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
  return Entity->getOwningModule();
}

/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);

bool isModuleVisible(const Module *M, bool ModulePrivate = false);

/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
  // Fast path first; fall back to the out-of-line slow path.
  return D->isUnconditionallyVisible() || isVisibleSlow(D);
}

/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
                      llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
  return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
                               llvm::SmallVectorImpl<Module *> *Modules);

bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);

/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);

/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                          bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
  NamedDecl *Hidden;
  return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}

/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
                          llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
    const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);

/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
                                            const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
    SourceLocation Loc, const NamedDecl *D,
    ArrayRef<const NamedDecl *> Equiv);

bool isUsualDeallocationFunction(const CXXMethodDecl *FD);

bool isCompleteType(SourceLocation Loc, QualType T,
                    CompleteTypeKind Kind = CompleteTypeKind::Default) {
  // Query-only form: no diagnoser, so nothing is reported on failure.
  return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
                         TypeDiagnoser &Diagnoser) {
  return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
  return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}

// Variadic overloads bundle the diagnostic ID and its arguments into a
// (Sizeless/Bound)TypeDiagnoser before dispatching to the core overloads.
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                         const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteType(Loc, T, Diagnoser);
}

template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
                              const Ts &... Args) {
  SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  CompleteTypeKind Kind = CompleteTypeKind::Normal;
  if (T->isVLST())
    Kind = CompleteTypeKind::AcceptSizeless;
  return RequireCompleteType(Loc, T, Kind, Diagnoser);
}

void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
                             TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);

template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}

template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
                                  const Ts &... Args) {
  SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  CompleteTypeKind Kind = CompleteTypeKind::Normal;
  if (E->getType()->isVLST())
    Kind = CompleteTypeKind::AcceptSizeless;
  return RequireCompleteExprType(E, Kind, Diagnoser);
}

bool RequireLiteralType(SourceLocation Loc, QualType T,
                        TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                        const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireLiteralType(Loc, T, Diagnoser);
}

QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                           const CXXScopeSpec &SS, QualType T,
                           TagDecl *OwnedTagDecl = nullptr);

QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                           bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
                                 UnaryTransformType::UTTKind UKind,
                                 SourceLocation Loc);

//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//

/// Information about a definition whose body the parser may skip
/// (or parse-and-compare) because an equivalent prior definition exists.
struct SkipBodyInfo {
  SkipBodyInfo()
      : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
        New(nullptr) {}
  bool ShouldSkip;          // Skip parsing this body entirely.
  bool CheckSameAsPrevious; // Parse, then compare against Previous.
  NamedDecl *Previous;      // The prior definition, if any.
  NamedDecl *New;           // The new declaration being parsed.
};

DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);

void DiagnoseUseOfUnimplementedSelectors();

bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;

ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
                       Scope *S, CXXScopeSpec *SS = nullptr,
                       bool isClassName = false, bool HasTrailingDot = false,
                       ParsedType ObjectType = nullptr,
                       bool IsCtorOrDtorName = false,
                       bool WantNontrivialTypeSourceInfo = false,
                       bool IsClassTemplateDeductionContext = true,
                       IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc,
                             Scope *S, CXXScopeSpec *SS,
                             ParsedType &SuggestedType,
                             bool IsTemplateName = false);

/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
                                    SourceLocation NameLoc,
                                    bool IsTemplateTypeArg);

/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
  /// This name is not a type or template in this context, but might be
  /// something else.
  NC_Unknown,
  /// Classification failed; an error has been produced.
  NC_Error,
  /// The name has been typo-corrected to a keyword.
  NC_Keyword,
  /// The name was classified as a type.
  NC_Type,
  /// The name was classified as a specific non-type, non-template
  /// declaration. ActOnNameClassifiedAsNonType should be called to
  /// convert the declaration to an expression.
  NC_NonType,
  /// The name was classified as an ADL-only function name.
  /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
  /// result to an expression.
  NC_UndeclaredNonType,
  /// The name denotes a member of a dependent type that could not be
  /// resolved. ActOnNameClassifiedAsDependentNonType should be called to
  /// convert the result to an expression.
  NC_DependentNonType,
  /// The name was classified as an overload set, and an expression
  /// representing that overload set has been formed.
  /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
  /// expression referencing the overload set.
  NC_OverloadSet,
  /// The name was classified as a template whose specializations are types.
  NC_TypeTemplate,
  /// The name was classified as a variable template name.
  NC_VarTemplate,
  /// The name was classified as a function template name.
  NC_FunctionTemplate,
  /// The name was classified as an ADL-only function template name.
  NC_UndeclaredTemplate,
  /// The name was classified as a concept name.
  NC_Concept,
};

/// Discriminated result of ClassifyName(): pairs a NameClassificationKind
/// with the payload (type, declaration, template name, or expression)
/// appropriate to that kind. Construct via the static factory functions;
/// read the payload via the kind-checked accessors.
class NameClassification {
  NameClassificationKind Kind;
  // Payload; which member is active is determined by Kind.
  union {
    ExprResult Expr;
    NamedDecl *NonTypeDecl;
    TemplateName Template;
    ParsedType Type;
  };

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  // The keyword identifier itself is not stored; only the kind matters.
  NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}

  static NameClassification Error() { return NameClassification(NC_Error); }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification OverloadSet(ExprResult E) {
    NameClassification Result(NC_OverloadSet);
    Result.Expr = E;
    return Result;
  }

  static NameClassification NonType(NamedDecl *D) {
    NameClassification Result(NC_NonType);
    Result.NonTypeDecl = D;
    return Result;
  }

  static NameClassification UndeclaredNonType() {
    return NameClassification(NC_UndeclaredNonType);
  }

  static NameClassification DependentNonType() {
    return NameClassification(NC_DependentNonType);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification Concept(TemplateName Name) {
    NameClassification Result(NC_Concept);
    Result.Template = Name;
    return Result;
  }

  static NameClassification UndeclaredTemplate(TemplateName Name) {
    NameClassification Result(NC_UndeclaredTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  // Payload accessors; each asserts that Kind matches the stored member.
  ExprResult getExpression() const {
    assert(Kind == NC_OverloadSet);
    return Expr;
  }

  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  NamedDecl *getNonTypeDecl() const {
    assert(Kind == NC_NonType);
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
                                                 IdentifierInfo *Name,
                                                 SourceLocation NameLoc,
                                                 bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
                                        NamedDecl *Found,
                                        SourceLocation NameLoc,
                                        const Token &NextToken);
/// Act on the result of classifying a name as an overload set.
ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet);

/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
  ClassTemplate,
  FunctionTemplate,
  VarTemplate,
  AliasTemplate,
  TemplateTemplateParam,
  Concept,
  DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);

/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  if (!getLangOpts().CPlusPlus || E.isInvalid())
    return false;
  Dependent = false;
  // Non-dependent references: plausible only if no explicit template
  // arguments were already written.
  if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
    return !DRE->hasExplicitTemplateArgs();
  if (auto *ME = dyn_cast<MemberExpr>(E.get()))
    return !ME->hasExplicitTemplateArgs();
  // Dependent references: same check, but report dependence to the caller.
  Dependent = true;
  if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
    return !DSDRE->hasExplicitTemplateArgs();
  if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
    return !DSME->hasExplicitTemplateArgs();
  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
                                        SourceLocation Less,
                                        SourceLocation Greater);

Decl *ActOnDeclarator(Scope *S, Declarator &D);

NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                            MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                  DeclarationName Name, SourceLocation Loc,
                                  bool IsTemplateId);
void diagnoseIgnoredQualifiers(
    unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc,
    SourceLocation ConstQualLoc = SourceLocation(),
    SourceLocation VolatileQualLoc = SourceLocation(),
    SourceLocation RestrictQualLoc = SourceLocation(),
    SourceLocation AtomicQualLoc = SourceLocation(),
    SourceLocation UnalignedQualLoc = SourceLocation());
void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range);
bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key);

static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
                                  const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
                 const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);

/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);

void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);

private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;

public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                  TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                  TypeSourceInfo *TInfo,
                                  LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC,
                                TypedefNameDecl *D, LookupResult &Previous,
                                bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope,
                                   ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
                             MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
                                   Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                   TypeSourceInfo *TInfo,
                                   LookupResult &Previous,
                                   MultiTemplateParamsArg TemplateParamLists,
                                   bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);

/// Selects how CheckConstexprFunctionDefinition reports problems.
enum class CheckConstexprKind {
  /// Diagnose issues that are non-constant or that are extensions.
  Diagnose,
  /// Identify whether this function satisfies the formal rules for constexpr
  /// functions in the current language mode (with no extensions).
  CheckValid
};

bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
                                      CheckConstexprKind Kind);

void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
                              SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
                              SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
                              LookupResult &Previous,
                              bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
                                    QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
                                                 bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc,
                                        QualType T);
QualType adjustParameterTypeForObjCAutoRefCount(QualType T,
                                                SourceLocation NameLoc,
                                                TypeSourceInfo *TSInfo);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                            SourceLocation NameLoc, IdentifierInfo *Name,
                            QualType T, TypeSourceInfo *TSInfo,
                            StorageClass SC);
void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
                               Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
                                       SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param,
                                       Expr *DefaultArg,
                                       SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                             SourceLocation EqualLoc);

// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
  // Function parameter.
  NTCUC_FunctionParam,
  // Function return.
  NTCUC_FunctionReturn,
  // Default-initialized object.
  NTCUC_DefaultInitializedObject,
  // Variable with automatic storage duration.
  NTCUC_AutoVar,
  // Initializer expression that might copy from another object.
  NTCUC_CopyInit,
  // Assignment.
  NTCUC_Assignment,
  // Compound literal.
  NTCUC_CompoundLiteral,
  // Block capture.
  NTCUC_BlockCapture,
  // lvalue-to-rvalue conversion of volatile type.
  NTCUC_LValueToRValueVolatile,
};

/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);

// These flags are passed to checkNonTrivialCUnion (bitmask; combinable).
enum NonTrivialCUnionKind {
  NTCUK_Init = 0x1,
  NTCUK_Destruct = 0x2,
  NTCUK_Copy = 0x4,
};

/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
                           NonTrivialCUnionContext UseContext,
                           unsigned NonTrivialKind);

void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);

void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
                                      IdentifierInfo *Ident,
                                      ParsedAttributes &Attrs,
                                      SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                       ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);

/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);

void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                     SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
    FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
    SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
                              MultiTemplateParamsArg TemplateParamLists,
                              SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
                              SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);

/// Returns true if \p D is a non-null Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
  return D && isa<ObjCMethodDecl>(D);
}

/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);

/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);

void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);

/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);

/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);

/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
                                       QualType ReturnTy, NamedDecl *D);

void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc,
                            SourceLocation RParenLoc);

/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
                            SourceLocation SemiLoc);

enum class ModuleDeclKind {
  Interface,      ///< 'export module X;'
  Implementation, ///< 'module X;'
};

/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
                               SourceLocation ModuleLoc, ModuleDeclKind MDK,
                               ModuleIdPath Path, bool IsFirstDecl);

/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);

/// The parser has processed a private-module-fragment declaration that
/// begins the definition of the private module fragment of the current
/// module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
                                              SourceLocation PrivateLoc);

/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
                             SourceLocation ExportLoc,
                             SourceLocation ImportLoc, Module *M,
                             ModuleIdPath Path = {});

/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                Module *Mod);

/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
  Declaration,
  Definition,
  DefaultArgument,
  ExplicitSpecialization,
  PartialSpecialization
};

/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                           MissingImportKind MIK, bool Recover);

Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
                           SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
                            SourceLocation RBraceLoc);

/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);

/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
                                          NamedDecl *Spec);

/// Retrieve a suitable printing policy for diagnostics
/// (forwards to the static overload using this Sema's context).
PrintingPolicy getPrintingPolicy() const {
  return getPrintingPolicy(Context, PP);
}

/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                        const Preprocessor &PP);

/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);

Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                 RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
                                 MultiTemplateParamsArg TemplateParams,
                                 bool IsExplicitInstantiation,
                                 RecordDecl *&AnonRecord);

Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS,
                                  RecordDecl *Record,
                                  const PrintingPolicy &Policy);

Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                     RecordDecl *Record);

/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
  NTK_NonStruct,
  NTK_NonClass,
  NTK_NonUnion,
  NTK_NonEnum,
  NTK_Typedef,
  NTK_TypeAlias,
  NTK_Template,
  NTK_TypeAliasTemplate,
  NTK_TemplateTemplateArgument,
};

/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);

bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag,
                                  bool isDefinition, SourceLocation NewTagLoc,
                                  const IdentifierInfo *Name);

/// How a tag name is being used at its point of appearance.
enum TagUseKind {
  TUK_Reference,   // Reference to a tag:  'struct foo *X;'
  TUK_Declaration, // Fwd decl of a tag:   'struct foo;'
  TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
  TUK_Friend       // Friend declaration:  'friend struct foo;'
};

Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
               SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
               SourceLocation NameLoc, const ParsedAttributesView &Attr,
               AccessSpecifier AS, SourceLocation ModulePrivateLoc,
               MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
               bool &IsDependent, SourceLocation ScopedEnumKWLoc,
               bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
               bool IsTypeSpecifier, bool IsTemplateParamOrArg,
               SkipBodyInfo *SkipBody = nullptr);

Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
                              unsigned TagSpec, SourceLocation TagLoc,
                              CXXScopeSpec &SS, IdentifierInfo *Name,
                              SourceLocation NameLoc,
                              const ParsedAttributesView &Attr,
                              MultiTemplateParamsArg TempParamLists);

TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
                             const CXXScopeSpec &SS, IdentifierInfo *Name,
                             SourceLocation TagLoc, SourceLocation NameLoc);

void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
               IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 Declarator &D, Expr *BitfieldWidth);

FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                       Declarator &D, Expr *BitfieldWidth,
                       InClassInitStyle InitStyle, AccessSpecifier AS);

MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
                                 SourceLocation DeclStart, Declarator &D,
                                 Expr *BitfieldWidth,
                                 InClassInitStyle InitStyle,
                                 AccessSpecifier AS,
                                 const ParsedAttr &MSPropertyAttr);

FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
                          TypeSourceInfo *TInfo, RecordDecl *Record,
                          SourceLocation Loc, bool Mutable,
                          Expr *BitfieldWidth, InClassInitStyle InitStyle,
                          SourceLocation TSSL, AccessSpecifier AS,
                          NamedDecl *PrevDecl, Declarator *D = nullptr);

bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);

enum TrivialABIHandling {
  /// The triviality of a method unaffected by "trivial_abi".
  TAH_IgnoreTrivialABI,
  /// The triviality of a method affected by "trivial_abi".
  TAH_ConsiderTrivialABI
};

bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                            TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
                            bool Diagnose = false);

/// For a defaulted function, the kind of defaulted function that it is.
/// Holds either a special-member kind or a defaulted-comparison kind
/// (never both; the other field stays at its "invalid"/"none" value).
class DefaultedFunctionKind {
  CXXSpecialMember SpecialMember : 8;
  DefaultedComparisonKind Comparison : 8;

public:
  DefaultedFunctionKind()
      : SpecialMember(CXXInvalid),
        Comparison(DefaultedComparisonKind::None) {}
  DefaultedFunctionKind(CXXSpecialMember CSM)
      : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
  DefaultedFunctionKind(DefaultedComparisonKind Comp)
      : SpecialMember(CXXInvalid), Comparison(Comp) {}

  bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
  bool isComparison() const {
    return Comparison != DefaultedComparisonKind::None;
  }

  explicit operator bool() const {
    return isSpecialMember() || isComparison();
  }

  CXXSpecialMember asSpecialMember() const { return SpecialMember; }
  DefaultedComparisonKind asComparison() const { return Comparison; }

  /// Get the index of this function kind for use in diagnostics.
  unsigned getDiagnosticIndex() const {
    static_assert(CXXInvalid > CXXDestructor,
                  "invalid should have highest index");
    static_assert((unsigned)DefaultedComparisonKind::None == 0,
                  "none should be equal to zero");
    // Exactly one of the two fields is non-zero, so the sum selects it.
    return SpecialMember + (unsigned)Comparison;
  }
};

DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);

CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
  return getDefaultedFunctionKind(MD).asSpecialMember();
}

DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
  return getDefaultedFunctionKind(FD).asComparison();
}

void ActOnLastBitfield(SourceLocation DeclStart,
                       SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D,
                Expr *BitfieldWidth, tok::ObjCKeywordKind visibility);

// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
                 ArrayRef<Decl *> Fields, SourceLocation LBrac,
                 SourceLocation RBrac, const ParsedAttributesView &AttrList);

/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
                              SkipBodyInfo &SkipBody);

typedef void *SkippedDefinitionContext;

/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                     SourceLocation FinalLoc,
                                     bool IsFinalSpelledSealed,
                                     SourceLocation LBraceLoc);

/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                              SourceRange BraceRange);

void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

void ActOnObjCContainerFinishDefinition();

/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);

/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                    EnumConstantDecl *LastEnumConst,
                                    SourceLocation IdLoc, IdentifierInfo *Id,
                                    Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                            QualType EnumUnderlyingTy, bool IsFixed,
                            const EnumDecl *Prev);

/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                    SourceLocation IILoc);

Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                        SourceLocation IdLoc, IdentifierInfo *Id,
                        const ParsedAttributesView &Attrs,
                        SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
                   Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
                   const ParsedAttributesView &Attr);

/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();

/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);

/// Enter a template parameter scope, after it's been associated with a
/// particular DeclContext. Causes lookup within the scope to chain through
/// enclosing contexts in the correct order.
void EnterTemplatedContext(Scope *S, DeclContext *DC);

/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();

DeclContext *getFunctionLevelDeclContext();

/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();

/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();

/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();

/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope
/// returns true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in
/// the enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                   bool AllowInlineNamespace = false);

/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                              TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
  /// Don't merge availability attributes at all.
  AMK_None,
  /// Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,
  /// Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override,
  /// Merge availability attributes for an implementation of
  /// a protocol requirement.
  AMK_ProtocolImplementation,
};

/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
  /// The availability attribute was specified explicitly next to the
  /// declaration.
  AP_Explicit = 0,
  /// The availability attribute was applied using '#pragma clang attribute'.
  AP_PragmaClangAttribute = 1,
  /// The availability attribute for a specific platform was inferred from
  /// an availability attribute for another platform.
  AP_InferredFromOtherPlatform = 2
};

/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
                      IdentifierInfo *Platform, bool Implicit,
                      VersionTuple Introduced, VersionTuple Deprecated,
                      VersionTuple Obsoleted, bool IsUnavailable,
                      StringRef Message, bool IsStrict, StringRef Replacement,
                      AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
                        TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
                                    VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
                        StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
                                          const AttributeCommonInfo &CI,
                                          bool BestCase,
                                          MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
                            IdentifierInfo *Format, int FormatIdx,
                            int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
                              StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
                              StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
                                        const AttributeCommonInfo &CI,
                                        const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
                                    const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D, const
SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name, bool Override); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); WebAssemblyImportNameAttr *mergeImportNameAttr( Decl *D, const WebAssemblyImportNameAttr &AL); WebAssemblyImportModuleAttr *mergeImportModuleAttr( Decl *D, const WebAssemblyImportModuleAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. 
enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void 
maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. 
/// Contexts in which a converted constant expression is required; selects
/// which diagnostic wording CheckConvertedConstantExpression emits.
enum CCEKind {
  CCEK_CaseValue,   ///< Expression in a case label.
  CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
  CCEK_TemplateArg, ///< Value of a non-type template parameter.
  CCEK_NewExpr,     ///< Constant expression in a noptr-new-declarator.
  CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
  CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};

/// Check that \p From is a converted constant expression of type \p T in the
/// context \p CCE, producing the evaluated integral result in \p Value.
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            llvm::APSInt &Value, CCEKind CCE);
/// Overload producing a general APValue result (for non-integral constant
/// expression contexts).
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            APValue &Value, CCEKind CCE);

/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
///
/// Subclasses implement match() to accept candidate destination types and the
/// diagnose*/note* hooks to emit context-specific diagnostics; see
/// PerformContextualImplicitConversion below for the driver.
class ContextualImplicitConverter {
public:
  // When true, no diagnostics at all are emitted for a failed conversion.
  bool Suppress;
  // When true, the conversion itself (not the failure) is not diagnosed.
  bool SuppressConversion;

  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

  /// Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                                QualType T) = 0;

  /// Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc,
                                                   QualType T) = 0;

  /// Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(Sema &S,
                                                     SourceLocation Loc,
                                                     QualType T,
                                                     QualType ConvTy) = 0;

  /// Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S,
                                                 CXXConversionDecl *Conv,
                                                 QualType ConvTy) = 0;

  /// Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
                                                  QualType T) = 0;

  /// Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S,
                                              CXXConversionDecl *Conv,
                                              QualType ConvTy) = 0;

  /// Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(Sema &S, SourceLocation Loc,
                                                   QualType T,
                                                   QualType ConvTy) = 0;

  virtual ~ContextualImplicitConverter() {}
};

/// Specialization of ContextualImplicitConverter that accepts integral and
/// (optionally scoped) enumeration destination types, used for contexts that
/// require an integral constant expression (e.g. array bounds).
class ICEConvertDiagnoser : public ContextualImplicitConverter {
  // Whether scoped enumeration types are also acceptable destinations.
  bool AllowScopedEnumerations;

public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress,
                      bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}

  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;

  // Forwards the generic "no match" hook to the more specific
  // "not an integer" diagnostic supplied by subclasses.
  SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                        QualType T) override {
    return diagnoseNotInt(S, Loc, T);
  }

  /// Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
                                               QualType T) = 0;
};

/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

/// Kind of Objective-C container being subscripted (array vs. dictionary),
/// or OS_Error if the base type is not subscriptable.
enum ObjCSubscriptKind { OS_Array, OS_Dictionary, OS_Error };
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);

// Converts \p From for use as the object argument when accessing \p Member
// found via \p FoundDecl (qualified by \p Qualifier, if any).
ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);

// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this is a typesafe union. typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet; using ADLCallKind = CallExpr::ADLCallKind; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, bool AllowExplicitConversion = false, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, bool SuppressUserConversions = false, bool PartialOverloading = false, bool FirstArgumentIsBase = false); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false, OverloadCandidateParamOrder PO = {}); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, ConversionSequenceList EarlyConversions = None, OverloadCandidateParamOrder PO = {}); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, OverloadCandidateParamOrder PO = {}); void AddTemplateOverloadCandidate( 
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = true, ADLCallKind IsADLCandidate = ADLCallKind::NotADL, OverloadCandidateParamOrder PO = {}); bool CheckNonDependentConversions( FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, ConversionSequenceList &Conversions, bool SuppressUserConversions, CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(), Expr::Classification ObjectClassification = {}, OverloadCandidateParamOrder PO = {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& 
CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. 
/// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. /// /// Returns false if taking the address of the function is illegal. bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function, bool Complain = false, SourceLocation Loc = SourceLocation()); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult); bool resolveAndFixAddressOfSingleOverloadCandidate( ExprResult &SrcExpr, bool DoFunctionPointerConversion = false); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different 
possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc, SourceLocation RangeLoc, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true, bool CalleesAddressIsTaken=false); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass, NestedNameSpecifierLoc NNSLoc, DeclarationNameInfo DNI, const UnresolvedSetImpl &Fns, bool PerformADL = true); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *input, bool RequiresADL = true); void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet, OverloadedOperatorKind Op, const UnresolvedSetImpl &Fns, ArrayRef<Expr *> Args, bool RequiresADL = true); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, bool RequiresADL = true, bool AllowRewrittenCandidates = true, FunctionDecl *DefaultedFn = nullptr); ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS, FunctionDecl *DefaultedFn); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg 
Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. 
All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up a name following ~ in a destructor name. This is an ordinary /// lookup, but prefers tags to typedefs. LookupDestructorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. 
This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. 
LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState &&other) noexcept; TypoExprState &operator=(TypoExprState &&other) noexcept; }; /// The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// Creates a new TypoExpr AST node. TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, SourceLocation TypoLoc); // The set of known/encountered (unique, canonicalized) NamespaceDecls. // // The boolean value will be true to indicate that the namespace was loaded // from an AST/PCH file, or false otherwise. llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces; /// Whether we have already loaded known namespaces from an extenal /// source. 
bool LoadedExternalKnownNamespaces; /// Helper for CorrectTypo and CorrectTypoDelayed used to create and /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction /// should be skipped entirely. std::unique_ptr<TypoCorrectionConsumer> makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, DeclContext *MemberContext, bool EnteringContext, const ObjCObjectPointerType *OPT, bool ErrorRecovery); public: const TypoExprState &getTypoExprState(TypoExpr *TE) const; /// Clears the state of the given TypoExpr. void clearDelayedTypo(TypoExpr *TE); /// Look up a name, looking for a single declaration. Return /// null if the results were absent, ambiguous, or overloaded. /// /// It is preferable to use the elaborated form and explicitly handle /// ambiguity and overloaded. NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupBuiltin(LookupResult &R); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl 
*LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); /// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs. enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl, bool Final = false); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. 
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param RecoverUncorrectedTypos If true, when typo correction fails, it /// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr( Expr *E, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr( ExprResult ER, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? 
ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} /// Attempts to produce a RecoveryExpr after some AST node cannot be created. ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End, ArrayRef<Expr *> SubExprs, QualType T = QualType()); ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction( FunctionDecl *FD); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); // Helper for delayed processing of attributes. 
void ProcessDeclAttributeDelayed(Decl *D, const ParsedAttributesView &AttrList); void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Map any API notes provided for this declaration to attributes on the /// declaration. /// /// Triggered by declaration-attribute processing. void ProcessAPINotes(Decl *D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value); bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckAttrTarget(const ParsedAttr &CurrAttr); bool CheckAttrNoArgs(const ParsedAttr &CurrAttr); bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceModel SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor, SourceLocation Loc); // Check if there is an explicit attribute, but only look through parens. 
// The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type through some means not written in source (e.g. API notes). /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param diagLoc The location to use for diagnostics. /// /// \param allowArrayTypes Whether to accept nullability specifiers on an /// array type (e.g., because it will decay to a pointer). /// /// \param overrideExisting Whether to override an existing, locally-specified /// nullability specifier rather than complaining about the conflict. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkImplicitNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation diagLoc, bool allowArrayTypes, bool overrideExisting); /// Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, const ParsedAttributesView &Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. 
void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implelementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl, ObjCInterfaceDecl *IDecl, SourceLocation AtEnd); void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. 
void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, unsigned &Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, SourceLocation GetterNameLoc, Selector SetterSel, SourceLocation SetterNameLoc, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, QualType T, TypeSourceInfo *TSI, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. 
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                     ObjCInterfaceDecl* IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseMissingDesignatedInitOverrides(
                                        const ObjCImplementationDecl *ImplD,
                                        const ObjCInterfaceDecl *IFD);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};

/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                const ObjCMethodDecl *PrevMethod,
                                MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in an interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                const SelectorSet &ClsMap,
                                SelectorSet &InsMapSeen,
                                SelectorSet &ClsMapSeen,
                                ObjCImplDecl* IMPDecl,
                                ObjCContainerDecl* IDecl,
                                bool &IncompleteImpl,
                                bool ImmediateClass,
                                bool WarnCategoryMethodImpl=false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass,
                                         bool instance);

public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first, if none is found, and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, function returns false; otherwise, it
/// returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
                                   SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                   bool InstanceFirst, bool CheckTheOther,
                                   const ObjCObjectType *TypeBound = nullptr);

bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                               SourceRange R, bool receiverIdOrClass,
                               SmallVectorImpl<ObjCMethodDecl*>& Methods);

void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                   Selector Sel, SourceRange R,
                                   bool receiverIdOrClass);

private:
/// - Returns a selector which best matches given argument list or
/// nullptr if none could be found
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                 bool IsInstance,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  // Remember the (identifier, location) pair that failed to correct, so
  // later correction attempts for the same occurrence can be skipped.
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                          QualType ObjectType=QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                       SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// Holds an Expr produced by the MakeFullExpr* helpers below (which run
/// ActOnFinishFullExpr on it before wrapping); only Sema can construct a
/// non-empty instance.
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }

  FullExprArg(Sema &actions) : E(nullptr) { }

  ExprResult release() {
    return E;
  }

  Expr *get() const { return E; }

  Expr *operator->() {
    return E;
  }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ?
Arg->getExprLoc() : SourceLocation());
}

FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}

FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();

StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                         bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                             ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
  Sema &S;
  // Set to false by disable(); guards the PopFunctionScopeInfo call in the
  // destructor.
  bool Active;
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }
  void disable() { Active = false; }
};

StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
                         SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
                         SourceLocation DotDotDotLoc, ExprResult RHS,
                         SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                            SourceLocation ColonLoc,
                            Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                          SourceLocation ColonLoc, Stmt *SubStmt);

StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                               ArrayRef<const Attr*> Attrs,
                               Stmt *SubStmt);

class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                       SourceLocation LParenLoc, Stmt *InitStmt,
                       ConditionResult Cond, SourceLocation RParenLoc,
                       Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                       SourceLocation LParenLoc, Stmt *InitStmt,
                       ConditionResult Cond, SourceLocation RParenLoc,
                       Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                  SourceLocation LParenLoc, Stmt *InitStmt,
                                  ConditionResult Cond,
                                  SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
                                 Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
                          ConditionResult Cond, SourceLocation RParenLoc,
                          Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                       SourceLocation WhileLoc, SourceLocation CondLParen,
                       Expr *Cond, SourceLocation CondRParen);

StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation
LParenLoc, Stmt *First, ConditionResult Second, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void 
ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl 
*BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. 
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

/// Begin a declaration whose diagnostics should be delayed: pushes the given
/// pool onto DelayedDiagnostics. Paired with PopParsingDeclaration.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

typedef ProcessingContextState ParsingClassState;
/// Enter a class being parsed; bumps ParsingClassDepth and suspends
/// diagnostic delaying. Paired with PopParsingClass.
ParsingClassState PushParsingClass() {
  ParsingClassDepth++;
  return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
  ParsingClassDepth--;
  DelayedDiagnostics.popUndelayed(state);
}

void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                const ObjCInterfaceDecl *UnknownObjCClass,
                                bool ObjCPropertyAccess,
                                bool AvoidPartialAvailabilityChecks = false,
                                ObjCInterfaceDecl *ClassReceiver = nullptr);

bool makeUnavailableInSystemHeader(SourceLocation loc,
                                   UnavailableAttr::ImplicitReason reason);

/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid); bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs, const ObjCInterfaceDecl *UnknownObjCClass = nullptr, bool ObjCPropertyAccess = false, bool AvoidPartialAvailabilityChecks = false, ObjCInterfaceDecl *ClassReciever = nullptr); void NoteDeletedFunction(FunctionDecl *FD); void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext( ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, ExpressionEvaluationContextRecord::ExpressionKind Type = ExpressionEvaluationContextRecord::EK_Other); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult CheckUnevaluatedOperand(Expr *E); void CheckUnusedVolatileAssignment(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. // // MightBeOdrUse indicates whether the use could possibly be an odr-use, and // should usually be true. 
This only needs to be set to false if the lack of // odr-use cannot be determined from the current context (for instance, // because the name denotes a virtual function and was written without an // explicit nested-name-specifier). void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. 
/// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. 
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Try to convert an expression \p E to type \p Ty. Returns the result of the /// conversion. ExprResult tryConvertExprToType(Expr *E, QualType Ty); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, 
const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr( const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, UnresolvedLookupExpr *AsULE = nullptr); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult 
BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); ExprResult BuildUniqueStableName(SourceLocation Loc, TypeSourceInfo *Operand); ExprResult BuildUniqueStableName(SourceLocation Loc, Expr *E); ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, ParsedType Ty); ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, Expr *E); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult 
CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx, Expr *ColumnIdx, SourceLocation RBLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length, Expr *Stride, SourceLocation RBLoc); ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc, ArrayRef<Expr *> Dims, ArrayRef<SourceRange> Brackets); /// Data structure for iterator expression. struct OMPIteratorData { IdentifierInfo *DeclIdent = nullptr; SourceLocation DeclIdentLoc; ParsedType Type; OMPIteratorExpr::IteratorRange Range; SourceLocation AssignLoc; SourceLocation ColonLoc; SourceLocation SecColonLoc; }; ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc, ArrayRef<OMPIteratorData> Data); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * 
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); enum class AtomicArgumentOrder { API, AST }; ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, SourceLocation RParenLoc, MultiExprArg Args, AtomicExpr::AtomicOp Op, AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation 
RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation EqualOrColonLoc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, UnresolvedSetImpl &Functions); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". 
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc, unsigned TemplateDepth); // Handle the final expression in a statement expression. ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. 
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. 
^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: enum class ComparisonCategoryUsage { /// The '<=>' operator was used in an expression and a builtin operator /// was selected. OperatorInExpression, /// A defaulted 'operator<=>' needed the comparison category. This /// typically only applies to 'std::strong_ordering', due to the implicit /// fallback return value. 
DefaultedOperator,
};

/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
                                     SourceLocation Loc,
                                     ComparisonCategoryUsage Usage);

/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);

/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);

/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);

Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
                          SourceLocation NamespcLoc, CXXScopeSpec &SS,
                          SourceLocation IdentLoc,
                          IdentifierInfo *NamespcName,
                          const ParsedAttributesView &AttrList);

void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);

Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc,
                             SourceLocation AliasLoc, IdentifierInfo *Alias,
                             CXXScopeSpec &SS, SourceLocation IdentLoc,
                             IdentifierInfo *Ident);

void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);

bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
                          const LookupResult &PreviousDecls,
                          UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
                                      NamedDecl *Target,
                                      UsingShadowDecl *PrevDecl);

bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
                                 bool HasTypenameKeyword,
                                 const CXXScopeSpec &SS,
                                 SourceLocation NameLoc,
                                 const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
                             const CXXScopeSpec &SS,
                             const DeclarationNameInfo &NameInfo,
                             SourceLocation NameLoc);

NamedDecl *BuildUsingDeclaration(
    Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
    bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
    DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
    const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
                              ArrayRef<NamedDecl *> Expansions);

bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);

/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
                          ConstructorUsingShadowDecl *DerivedShadow);

Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
                            SourceLocation UsingLoc,
                            SourceLocation TypenameLoc, CXXScopeSpec &SS,
                            UnqualifiedId &Name, SourceLocation EllipsisLoc,
                            const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
                            MultiTemplateParamsArg TemplateParams,
                            SourceLocation UsingLoc, UnqualifiedId &Name,
                            const ParsedAttributesView &AttrList,
                            TypeResult Type, Decl *DeclFromDeclSpec);

/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
/// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. 
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
      : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    // noexcept does not exist before C++11; fall back to throw().
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(!isComputedNoexcept(ComputedEST) &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E) { CalledStmt(E); }

  /// Integrate an invoked statement into the collected data.
  void CalledStmt(Stmt *S);

  /// Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains "any"
      ESI.Type = EST_NoexceptFalse;
      ESI.NoexceptExpr =
          Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get();
    }
    return ESI;
  }
};

/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                         CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);

/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. 
/// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. 
/// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. 
bool isImplicitlyDeleted(FunctionDecl *FD);

/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);

/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);

/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);

bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
                             MultiExprArg ArgsPtr,
                             SourceLocation Loc,
                             SmallVectorImpl<Expr*> &ConvertedArgs,
                             bool AllowExplicit = false,
                             bool IsListInitialization = false);

ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
                                        SourceLocation NameLoc,
                                        IdentifierInfo &Name);

ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
                              Scope *S, CXXScopeSpec &SS,
                              bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
                             IdentifierInfo &II, SourceLocation NameLoc,
                             Scope *S, CXXScopeSpec &SS,
                             ParsedType ObjectType,
                             bool EnteringContext);

ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
                                        ParsedType ObjectType);

// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse /// {dynamic,static,reinterpret,const,addrspace}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). 
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
                            tok::TokenKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
                            SourceLocation LParenLoc, Expr *LHS,
                            BinaryOperatorKind Operator,
                            SourceLocation EllipsisLoc, Expr *RHS,
                            SourceLocation RParenLoc,
                            Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
                                 BinaryOperatorKind Operator);

//// ActOnCXXThis -  Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);

/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);

/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();

/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;

/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  QualType OldCXXThisTypeOverride;
  bool Enabled;

public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);

  ~CXXThisScopeRAII();
};

/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
/// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. 
/// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. 
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
                               SourceLocation KWLoc,
                               ParsedType LhsTy,
                               Expr *DimExpr,
                               SourceLocation RParen);

ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
                               SourceLocation KWLoc,
                               TypeSourceInfo *TSInfo,
                               Expr *DimExpr,
                               SourceLocation RParen);

/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
                                SourceLocation KWLoc,
                                Expr *Queried,
                                SourceLocation RParen);

ExprResult BuildExpressionTrait(ExpressionTrait OET,
                                SourceLocation KWLoc,
                                Expr *Queried,
                                SourceLocation RParen);

ExprResult ActOnStartCXXMemberReference(Scope *S,
                                        Expr *Base,
                                        SourceLocation OpLoc,
                                        tok::TokenKind OpKind,
                                        ParsedType &ObjectType,
                                        bool &MayBePseudoDestructor);

ExprResult BuildPseudoDestructorExpr(Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     const CXXScopeSpec &SS,
                                     TypeSourceInfo *ScopeType,
                                     SourceLocation CCLoc,
                                     SourceLocation TildeLoc,
                                     PseudoDestructorTypeStorage DestroyedType);

ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     CXXScopeSpec &SS,
                                     UnqualifiedId &FirstTypeName,
                                     SourceLocation CCLoc,
                                     SourceLocation TildeLoc,
                                     UnqualifiedId &SecondTypeName);

ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     SourceLocation TildeLoc,
                                     const DeclSpec& DS);

/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);

MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
                               bool BoundToLvalueReference);

ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
  // Forward to the location-taking overload, using the expression's own
  // location when one is available.
  return ActOnFinishFullExpr(
      Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
                               bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);

// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);

DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
                                bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);

/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);

/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
                              SourceLocation ColonColonLoc, CXXScopeSpec &SS);

bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
                                     bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);

/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
  /// The type of the object, if we're parsing nested-name-specifier in
  /// a member access expression.
  ParsedType ObjectType;

  /// The identifier preceding the '::'.
  IdentifierInfo *Identifier;

  /// The location of the identifier.
  SourceLocation IdentifierLoc;

  /// The location of the '::'.
  SourceLocation CCLoc;

  /// Creates info object for the most typical case.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc,
                     ParsedType ObjectType = ParsedType())
      : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
        CCLoc(ColonColonLoc) {
  }

  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc, QualType ObjectType)
      : ObjectType(ParsedType::make(ObjectType)), Identifier(II),
        IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
  }
};

bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
                                  NestedNameSpecInfo &IdInfo);

bool BuildCXXNestedNameSpecifier(Scope *S,
                                 NestedNameSpecInfo &IdInfo,
                                 bool EnteringContext,
                                 CXXScopeSpec &SS,
                                 NamedDecl *ScopeLookupResult,
                                 bool ErrorRecoveryLookup,
                                 bool *IsCorrectedToColon = nullptr,
                                 bool OnlyNamespace = false);

/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed.  The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. 
/// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. 
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. 
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  // Thin wrapper: anything other than copy-init is treated as direct-init.
  return ParsedType::make(buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, None, Id,
      InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
    Expr *&Init);

/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
                                        QualType InitCaptureType,
                                        SourceLocation EllipsisLoc,
                                        IdentifierInfo *Id,
                                        unsigned InitStyle, Expr *Init);

/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);

/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);

/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
                                              ArrayRef<NamedDecl *> TParams,
                                              SourceLocation RAngleLoc);

/// Introduce the lambda parameters into scope.
void addLambdaParameters(
    ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
    CXXMethodDecl *CallOperator, Scope *CurScope);

/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);

/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. 
void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); /// Check whether the given expression is a valid constraint expression. /// A diagnostic is emitted if it is not, false is returned, and /// PossibleNonPrimary will be set to true if the failure might be due to a /// non-primary expression being used as an atomic constraint. bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(), bool *PossibleNonPrimary = nullptr, bool IsTrailingRequiresClause = false); private: /// Caches pairs of template-like decls whose associated constraints were /// checked for subsumption and whether or not the first's constraints did in /// fact subsume the second's. llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache; /// Caches the normalized associated constraints of declarations (concepts or /// constrained declarations). If an error occurred while normalizing the /// associated constraints of the template or concept, nullptr will be cached /// here. 
// Cache of normalized associated constraints, keyed by declaration.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache;

// Uniqued satisfaction results, folded per ASTContext.
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
    SatisfactionCache;

public:
const NormalizedConstraint *getNormalizedAssociatedConstraints(
    NamedDecl *ConstrainedDecl,
    ArrayRef<const Expr *> AssociatedConstraints);

/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result of true if D1 is
/// at least as constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
                            NamedDecl *D2, ArrayRef<const Expr *> AC2,
                            bool &Result);

/// If D1 was not at least as constrained as D2, but would've been if a pair
/// of atomic constraints involved had been declared in a concept and not
/// repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(
    NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2,
    ArrayRef<const Expr *> AC2);

/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
    const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
    ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange,
    ConstraintSatisfaction &Satisfaction);

/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
                                 ConstraintSatisfaction &Satisfaction);

/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
                              ConstraintSatisfaction &Satisfaction,
                              SourceLocation UsageLoc = SourceLocation());

/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);

// Build an ObjC subscript expression, resolved to the given getter/setter.
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
                                        Expr *IndexExpr,
                                        ObjCMethodDecl *getterMethod,
                                        ObjCMethodDecl *setterMethod);

ExprResult BuildObjCDictionaryLiteral(
    SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements);

// Build an @encode expression from an already-resolved type.
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
                                     TypeSourceInfo *EncodedTypeInfo,
                                     SourceLocation RParenLoc);

ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
                                  CXXConversionDecl *Method,
                                  bool HadMultipleCandidates);

// Parser entry point for @encode; converts the parsed type then builds.
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
                                     SourceLocation EncodeLoc,
                                     SourceLocation LParenLoc, ParsedType Ty,
                                     SourceLocation RParenLoc);

/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc,
                                       SourceLocation SelLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation RParenLoc,
                                       bool WarnMultipleSelectors);

/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo *ProtocolName,
                                       SourceLocation AtLoc,
                                       SourceLocation ProtoLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation ProtoIdLoc,
                                       SourceLocation RParenLoc);

//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc,
                                     Expr *LangStr,
                                     SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec,
                                      SourceLocation RBraceLoc);

//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
                        const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);

bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
                          SourceLocation ColonLoc,
                          const ParsedAttributesView &Attrs);

NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
                                    Declarator &D,
                                    MultiTemplateParamsArg TemplateParameterLists,
                                    Expr *BitfieldWidth,
                                    const VirtSpecifiers &VS,
                                    InClassInitStyle InitStyle);

void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
                                            SourceLocation EqualLoc,
                                            Expr *Init);

// Parenthesized mem-initializer, e.g. "Base(args)".
MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S,
                                  CXXScopeSpec &SS,
                                  IdentifierInfo *MemberOrBase,
                                  ParsedType TemplateTypeTy,
                                  const DeclSpec &DS, SourceLocation IdLoc,
                                  SourceLocation LParenLoc,
                                  ArrayRef<Expr *> Args,
                                  SourceLocation RParenLoc,
                                  SourceLocation EllipsisLoc);

// Braced-init-list mem-initializer, e.g. "Base{args}".
MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S,
                                  CXXScopeSpec &SS,
                                  IdentifierInfo *MemberOrBase,
                                  ParsedType TemplateTypeTy,
                                  const DeclSpec &DS, SourceLocation IdLoc,
                                  Expr *InitList,
                                  SourceLocation EllipsisLoc);

MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S,
                                  CXXScopeSpec &SS,
                                  IdentifierInfo *MemberOrBase,
                                  ParsedType TemplateTypeTy,
                                  const DeclSpec &DS, SourceLocation IdLoc,
                                  Expr *Init,
                                  SourceLocation EllipsisLoc);

MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init,
                                     SourceLocation IdLoc);

MemInitResult BuildBaseInitializer(QualType BaseType,
                                   TypeSourceInfo *BaseTInfo, Expr *Init,
                                   CXXRecordDecl *ClassDecl,
                                   SourceLocation EllipsisLoc);

MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init,
                                         CXXRecordDecl *ClassDecl);

bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
                              CXXCtorInitializer *Initializer);

bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
                         ArrayRef<CXXCtorInitializer *> Initializers = None);

void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);

/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
                                            CXXRecordDecl *Record);

/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
    SourceLocation Location, CXXRecordDecl *ClassDecl,
    llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);

/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
                                    CXXDestructorDecl *Dtor);

/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;

/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;

/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;

/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();

/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
                    bool DefinitionRequired = false);

/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
                                           const CXXRecordDecl *RD);

/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc,
                                  const CXXRecordDecl *RD,
                                  bool ConstexprOnly = false);

/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();

void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);

void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc,
                          ArrayRef<CXXCtorInitializer*> MemInits,
                          bool AnyErrors);

/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);

void referenceDLLExportedClassMethods();

void propagateDLLAttrToBaseClassTemplate(
    CXXRecordDecl *Class, Attr *ClassAttr,
    ClassTemplateSpecializationDecl *BaseTemplateSpec,
    SourceLocation BaseLoc);

/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator. \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND,
                              CXXRecordDecl *UnderlyingRecord);

/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);

/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);

void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);

/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);

void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
                                       Decl *TagDecl, SourceLocation LBrac,
                                       SourceLocation RBrac,
                                       const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();

void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
                                   llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
                              CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();

Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                   Expr *AssertExpr,
                                   Expr *AssertMessageExpr,
                                   SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                   Expr *AssertExpr,
                                   StringLiteral *AssertMessageExpr,
                                   SourceLocation RParenLoc,
                                   bool Failed);

FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
                                SourceLocation FriendLoc,
                                TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
                          MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
                                   MultiTemplateParamsArg TemplateParams);

QualType CheckConstructorDeclarator(Declarator &D, QualType R,
                                    StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
                                   StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
                               StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
                                   StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);

void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);

bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
                                           CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();

bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
                                        DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
                                       FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
                               DefaultedComparisonKind DCK);

//===--------------------------------------------------------------------===//
// C++ Derived Classes
//

/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
                                     SourceRange SpecifierRange,
                                     bool Virtual, AccessSpecifier Access,
                                     TypeSourceInfo *TInfo,
                                     SourceLocation EllipsisLoc);

BaseResult ActOnBaseSpecifier(Decl *classdecl,
                              SourceRange SpecifierRange,
                              ParsedAttributes &Attrs,
                              bool Virtual, AccessSpecifier Access,
                              ParsedType basetype,
                              SourceLocation BaseLoc,
                              SourceLocation EllipsisLoc);

bool AttachBaseSpecifiers(CXXRecordDecl *Class,
                          MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
                         MutableArrayRef<CXXBaseSpecifier *> Bases);

bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
                   CXXBasePaths &Paths);

// FIXME: I don't like this name.
/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
/// function overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
                                            const CXXMethodDecl *Old);

//===--------------------------------------------------------------------===//
// C++ Access Control
//

enum AccessResult {
  AR_accessible,
  AR_inaccessible,
  AR_dependent,
  AR_delayed
};

bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
                              NamedDecl *PrevMemberDecl,
                              AccessSpecifier LexicalAS);

AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
                                   SourceRange PlacementRange,
                                   CXXRecordDecl *NamingClass,
                                   DeclAccessPair FoundDecl,
                                   bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    DeclAccessPair FoundDecl,
                                    const InitializedEntity &Entity,
                                    bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    DeclAccessPair FoundDecl,
                                    const InitializedEntity &Entity,
                                    const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
                                   CXXDestructorDecl *Dtor,
                                   const PartialDiagnostic &PDiag,
                                   QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
                               CXXRecordDecl *NamingClass,
                               DeclAccessPair Found);
AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
                                                CXXRecordDecl *DecomposedClass,
                                                DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
                                       Expr *ObjectExpr,
                                       Expr *ArgExpr,
                                       DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
                                        DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
                                  QualType Base, QualType Derived,
                                  const CXXBasePath &Path,
                                  unsigned DiagID,
                                  bool ForceCheck = false,
                                  bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
                        QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
                                   DeclAccessPair Found,
                                   QualType ObjectType,
                                   SourceLocation Loc,
                                   const PartialDiagnostic &Diag);
// Convenience overload: no location, default diagnostic.
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
                                   DeclAccessPair Found,
                                   QualType ObjectType) {
  return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
                                       SourceLocation(), PDiag());
}

void HandleDependentAccessCheck(const DependentDiagnostic &DD,
                                const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
                                 const MultiLevelTemplateArgumentList &TemplateArgs);

void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;

enum AbstractDiagSelID {
  AbstractNone = -1,
  AbstractReturnType,
  AbstractParamType,
  AbstractVariableType,
  AbstractFieldType,
  AbstractIvarType,
  AbstractSynthesizedIvarType,
  AbstractArrayType
};

bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                            TypeDiagnoser &Diagnoser);
// Convenience wrapper: builds a BoundTypeDiagnoser from a diagnostic ID and
// its arguments, then delegates to the TypeDiagnoser overload above.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            const Ts &...Args) {
  BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
  return RequireNonAbstractType(Loc, T, Diagnoser);
}

void DiagnoseAbstractType(const CXXRecordDecl *RD);

//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//

bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);

bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);

//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true,
                                   bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
                                   bool AllowFunctionTemplates = true,
                                   bool AllowDependent = true,
                                   bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
///        considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
///        name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
                                 bool AllowFunctionTemplates = true,
                                 bool AllowDependent = true);

enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
  /// Template name is required if TemplateKWLoc is valid.
  RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
      : TemplateKW(TemplateKWLoc) {}
  /// Template name is unconditionally required.
  RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}

  SourceLocation getTemplateKeywordLoc() const {
    return TemplateKW.getValueOr(SourceLocation());
  }
  bool hasTemplateKeyword() const {
    return getTemplateKeywordLoc().isValid();
  }
  // Required when constructed with the tag (TemplateKW is empty) or when a
  // valid 'template' keyword location was supplied.
  bool isRequired() const { return TemplateKW != SourceLocation(); }
  explicit operator bool() const { return isRequired(); }

private:
  llvm::Optional<SourceLocation> TemplateKW;
};

enum class AssumedTemplateKind {
  /// This is not assumed to be a template name.
  None,
  /// This is assumed to be a template name because lookup found nothing.
  FoundNothing,
  /// This is assumed to be a template name because lookup found one or more
  /// functions (but no function templates).
  FoundFunctions,
};
bool LookupTemplateName(
    LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
    bool EnteringContext, bool &MemberOfUnknownSpecialization,
    RequiredTemplateKind RequiredTemplate = SourceLocation(),
    AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);

TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS,
                                bool hasTemplateKeyword,
                                const UnqualifiedId &Name,
                                ParsedType ObjectType,
                                bool EnteringContext,
                                TemplateTy &Template,
                                bool &MemberOfUnknownSpecialization,
                                bool Disambiguation = false);

/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
                                     TemplateNameKind &TNK,
                                     SourceLocation NameLoc,
                                     IdentifierInfo *&II);

bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
                                      SourceLocation NameLoc,
                                      bool Diagnose = true);

/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation 
EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template<int X>, this would return an expression template /// argument referencing X. 
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); /// Get the specialization of the given variable template corresponding to /// the specified argument list, or a null-but-valid result if the arguments /// are dependent. DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); /// Form a reference to the specialization of the given variable template /// corresponding to the specified argument list, or a null-but-valid result /// if the arguments are dependent. 
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation 
PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// The template argument was deduced from an array bound /// via template argument deduction. 
CTAK_DeducedFromArrayBound
};

bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg,
                           NamedDecl *Template, SourceLocation TemplateLoc,
                           SourceLocation RAngleLoc, unsigned ArgumentPackIndex,
                           SmallVectorImpl<TemplateArgument> &Converted,
                           CheckTemplateArgumentKind CTAK = CTAK_Specified);

/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted, bool UpdateArgsWithConversions = true, bool *ConstraintsNotSatisfied = nullptr); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param, TemplateParameterList *Params, TemplateArgumentLoc &Arg); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. 
/// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation 
*TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// An arbitrary expression. 
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint,
/// A requirement in a requires-expression.
UPPC_Requirement,
};

/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
                                      UnexpandedParameterPackContext UPPC,
                                      ArrayRef<UnexpandedParameterPack> Unexpanded);

/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                     UnexpandedParameterPackContext UPPC);

/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
                       UnexpandedParameterPackContext UPPC = UPPC_Expression);

/// If the given requires-expression contains an unexpanded reference to one
/// of its own parameter packs, diagnose the error.
///
/// \param RE The requires-expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);

/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
                                     UnexpandedParameterPackContext UPPC);

/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
                                     UnexpandedParameterPackContext UPPC);

/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. 
/// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. 
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. 
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);

//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//

/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
                             bool AdjustExceptionSpec = false);

/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
  /// Template argument deduction was successful.
  TDK_Success = 0,
  /// The declaration was invalid; do nothing.
  TDK_Invalid,
  /// Template argument deduction exceeded the maximum template
  /// instantiation depth (which has already been diagnosed).
  TDK_InstantiationDepth,
  /// Template argument deduction did not deduce a value
  /// for every template parameter.
  TDK_Incomplete,
  /// Template argument deduction did not deduce a value for every
  /// expansion of an expanded template parameter pack.
  TDK_IncompletePack,
  /// Template argument deduction produced inconsistent
  /// deduced values for the given template parameter.
  TDK_Inconsistent,
  /// Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,
  /// Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,
  /// After substituting deduced template arguments, a dependent
  /// parameter type did not match the corresponding argument.
  TDK_DeducedMismatch,
  /// After substituting deduced template arguments, an element of
  /// a dependent parameter type did not match the corresponding element
  /// of the corresponding argument (when deducing from an initializer list).
  TDK_DeducedMismatchNested,
  /// A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,
  /// When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,
  /// When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,
  /// The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,
  /// Checking non-dependent argument conversions failed.
  TDK_NonDependentConversionFailure,
  /// The deduced arguments did not satisfy the constraints associated
  /// with the template.
  TDK_ConstraintsNotSatisfied,
  /// Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure,
  /// CUDA Target attributes do not match.
  TDK_CUDATargetMismatch
};

/// Deduce template arguments for a class template partial specialization
/// against the given template argument list.
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

/// Deduce template arguments for a variable template partial specialization
/// against the given template argument list.
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

/// Substitute the explicitly-specified template arguments into the
/// function template's parameter types before deduction proper runs.
TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);

/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
  OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
                  unsigned ArgIdx, QualType OriginalArgType)
      : OriginalParamType(OriginalParamType),
        DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
        OriginalArgType(OriginalArgType) {}

  QualType OriginalParamType;
  bool DecomposedParam;
  unsigned ArgIdx;
  QualType OriginalArgType;
};

/// Complete function template argument deduction, producing the deduced
/// function specialization from the set of deduced template arguments.
TemplateDeductionResult FinishTemplateArgumentDeduction(
    FunctionTemplateDecl *FunctionTemplate,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
    sema::TemplateDeductionInfo &Info,
    SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
    bool PartialOverloading = false,
    llvm::function_ref<bool()> CheckNonDependent = []{ return false; });

/// Deduce template arguments for a function template from a set of
/// call arguments.
TemplateDeductionResult DeduceTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
    FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
    bool PartialOverloading,
    llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);

/// Deduce template arguments for a function template against a target
/// function type (e.g., when taking the address of a function template).
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        TemplateArgumentListInfo *ExplicitTemplateArgs,
                        QualType ArgFunctionType,
                        FunctionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info,
                        bool IsAddressOfFunction = false);

/// Deduce template arguments for a conversion function template against
/// the type being converted to.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        QualType ToType,
                        CXXConversionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info);

/// Deduce template arguments for a function template when there is
/// nothing to deduce against, i.e., only explicit arguments apply.
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        TemplateArgumentListInfo *ExplicitTemplateArgs,
                        FunctionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info,
                        bool IsAddressOfFunction = false);

/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
                                        QualType Replacement);

/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
                                          QualType Replacement);

/// Result type of DeduceAutoType.
enum DeduceAutoResult {
  DAR_Succeeded,
  DAR_Failed,
  DAR_FailedAlreadyDiagnosed
};

DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
               Optional<unsigned> DependentDeductionDepth = None,
               bool IgnoreConstraints = false);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
               Optional<unsigned> DependentDeductionDepth = None,
               bool IgnoreConstraints = false);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
                      bool Diagnose = true);

/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
                                    SourceLocation Loc);

QualType DeduceTemplateSpecializationFromInitializer(
    TypeSourceInfo *TInfo, const InitializedEntity &Entity,
    const InitializationKind &Kind, MultiExprArg Init);

QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
                                      QualType Type, TypeSourceInfo *TSI,
                                      SourceRange Range, bool DirectInit,
                                      Expr *Init);

TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;

bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
                                      SourceLocation ReturnLoc,
                                      Expr *&RetExpr, AutoType *AT);

FunctionTemplateDecl *getMoreSpecializedTemplate(
    FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
    TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
    unsigned NumCallArguments2, bool Reversed = false);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
                   TemplateSpecCandidateSet &FailedCandidates,
                   SourceLocation Loc, const PartialDiagnostic &NoneDiag,
                   const PartialDiagnostic &AmbigDiag,
                   const PartialDiagnostic &CandidateDiag,
                   bool Complain = true, QualType TargetType = QualType());

ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
    ClassTemplatePartialSpecializationDecl *PS1,
    ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);

bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
                                  sema::TemplateDeductionInfo &Info);

VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
    VarTemplatePartialSpecializationDecl *PS1,
    VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);

bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
                                  sema::TemplateDeductionInfo &Info);

bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
    TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);

void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
                                unsigned Depth, llvm::SmallBitVector &Used);
void
MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
                           bool OnlyDeduced, unsigned Depth,
                           llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
    const FunctionTemplateDecl *FunctionTemplate,
    llvm::SmallBitVector &Deduced) {
  return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(
    ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate,
    llvm::SmallBitVector &Deduced);

//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//

MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
                             const TemplateArgumentList *Innermost = nullptr,
                             bool RelativeToPrimary = false,
                             const FunctionDecl *Pattern = nullptr);

/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
  /// The kind of template instantiation we are performing
  enum SynthesisKind {
    /// We are instantiating a template declaration. The entity is
    /// the declaration we're instantiating (e.g., a CXXRecordDecl).
    TemplateInstantiation,

    /// We are instantiating a default argument for a template
    /// parameter. The Entity is the template parameter whose argument is
    /// being instantiated, the Template is the template, and the
    /// TemplateArgs/NumTemplateArguments provide the template arguments as
    /// specified.
    DefaultTemplateArgumentInstantiation,

    /// We are instantiating a default argument for a function.
    /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
    /// provides the template arguments as specified.
    DefaultFunctionArgumentInstantiation,

    /// We are substituting explicit template arguments provided for
    /// a function template. The entity is a FunctionTemplateDecl.
    ExplicitTemplateArgumentSubstitution,

    /// We are substituting template argument determined as part of
    /// template argument deduction for either a class template
    /// partial specialization or a function template. The
    /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
    /// a TemplateDecl.
    DeducedTemplateArgumentSubstitution,

    /// We are substituting prior template arguments into a new
    /// template parameter. The template parameter itself is either a
    /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
    PriorTemplateArgumentSubstitution,

    /// We are checking the validity of a default template argument that
    /// has been used when naming a template-id.
    DefaultTemplateArgumentChecking,

    /// We are computing the exception specification for a defaulted special
    /// member function.
    ExceptionSpecEvaluation,

    /// We are instantiating the exception specification for a function
    /// template which was deferred until it was needed.
    ExceptionSpecInstantiation,

    /// We are instantiating a requirement of a requires expression.
    RequirementInstantiation,

    /// We are checking the satisfaction of a nested requirement of a requires
    /// expression.
    NestedRequirementConstraintsCheck,

    /// We are declaring an implicit special member function.
    DeclaringSpecialMember,

    /// We are declaring an implicit 'operator==' for a defaulted
    /// 'operator<=>'.
    DeclaringImplicitEqualityComparison,

    /// We are defining a synthesized function (such as a defaulted special
    /// member).
    DefiningSynthesizedFunction,

    // We are checking the constraints associated with a constrained entity or
    // the constraint expression of a concept. This includes the checks that
    // atomic constraints have the type 'bool' and that they can be constant
    // evaluated.
    ConstraintsCheck,

    // We are substituting template arguments into a constraint expression.
    ConstraintSubstitution,

    // We are normalizing a constraint expression.
    ConstraintNormalization,

    // We are substituting into the parameter mapping of an atomic constraint
    // during normalization.
    ParameterMappingSubstitution,

    /// We are rewriting a comparison operator in terms of an operator<=>.
    RewritingOperatorAsSpaceship,

    /// We are initializing a structured binding.
    InitializingStructuredBinding,

    /// We are marking a class as __dllexport.
    MarkingClassDllexported,

    /// Added for Template instantiation observation.
    /// Memoization means we are _not_ instantiating a template because
    /// it is already instantiated (but we entered a context where we
    /// would have had to if it was not already instantiated).
    Memoization
  } Kind;

  /// Was the enclosing context a non-instantiation SFINAE context?
  bool SavedInNonInstantiationSFINAEContext;

  /// The point of instantiation or synthesis within the source code.
  SourceLocation PointOfInstantiation;

  /// The entity that is being synthesized.
  Decl *Entity;

  /// The template (or partial specialization) in which we are
  /// performing the instantiation, for substitutions of prior template
  /// arguments.
  NamedDecl *Template;

  /// The list of template arguments we are substituting, if they
  /// are not part of the entity.
  const TemplateArgument *TemplateArgs;

  // FIXME: Wrap this union around more members, or perhaps store the
  // kind-specific members in the RAII object owning the context.
  union {
    /// The number of template arguments in TemplateArgs.
    unsigned NumTemplateArgs;

    /// The special member being declared or defined.
    CXXSpecialMember SpecialMember;
  };

  ArrayRef<TemplateArgument> template_arguments() const {
    assert(Kind != DeclaringSpecialMember);
    return {TemplateArgs, NumTemplateArgs};
  }

  /// The template deduction info object associated with the
  /// substitution or checking of explicit or deduced template arguments.
  sema::TemplateDeductionInfo *DeductionInfo;

  /// The source range that covers the construct that cause
  /// the instantiation, e.g., the template-id that causes a class
  /// template instantiation.
  SourceRange InstantiationRange;

  CodeSynthesisContext()
      : Kind(TemplateInstantiation),
        SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
        Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
        DeductionInfo(nullptr) {}

  /// Determines whether this template is an actual instantiation
  /// that should be counted toward the maximum instantiation depth.
  bool isInstantiationRecord() const;
};

/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;

/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();

/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;

/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;

/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;

/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// This callback is used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
    TemplateInstCallbacks;

/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;

/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int OldSubstitutionIndex;

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
  }
};

// NOTE(review): this friend names ArgumentPackSubstitutionRAII, not the
// ArgumentPackSubstitutionIndexRAII class declared above — confirm the
// name is intentional.
friend class ArgumentPackSubstitutionRAII;

/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
    SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;

/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  struct ExceptionSpecification {};
  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        CodeSynthesisContext::SynthesisKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template declaration.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are instantiating a default argument for a function
  /// parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template, NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template, TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template, NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  struct ConstraintsCheck {};
  /// \brief Note that we are checking the constraints associated with some
  /// constrained entity (a concept declaration or a template with associated
  /// constraints).
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintsCheck, NamedDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  struct ConstraintSubstitution {};
  /// \brief Note that we are checking a constraint expression associated
  /// with a template declaration or as part of the satisfaction check of a
  /// concept.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintSubstitution, NamedDecl *Template,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange);

  struct ConstraintNormalization {};
  /// \brief Note that we are normalizing a constraint expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintNormalization, NamedDecl *Template,
                        SourceRange InstantiationRange);

  struct ParameterMappingSubstitution {};
  /// \brief Note that we are substituting into the parameter mapping of an
  /// atomic constraint during constraint normalization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParameterMappingSubstitution, NamedDecl *Template,
                        SourceRange InstantiationRange);

  /// \brief Note that we are substituting template arguments into a part of
  /// a requirement of a requires expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::Requirement *Req,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are checking the satisfaction of the constraint
  /// expression inside of a nested requirement.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::NestedRequirement *Req, ConstraintsCheck,
                        SourceRange InstantiationRange = SourceRange());

  /// Note that we have finished instantiating this template.
  void Clear();

  ~InstantiatingTemplate() { Clear(); }

  /// Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }

  /// Determine whether we are already instantiating this
  /// specialization in some surrounding active instantiation.
  bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
  Sema &SemaRef;
  bool Invalid;
  bool AlreadyInstantiating;
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  InstantiatingTemplate(
      Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = None,
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  InstantiatingTemplate(const InstantiatingTemplate&) = delete;
  InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete;
};

void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();

/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
  return CodeSynthesisContexts.size() > NonInstantiationEntries;
}

void PrintContextStack() {
  if (!CodeSynthesisContexts.empty() &&
      CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();

void PrintPragmaAttributeInstantiationPoint();

/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  return ExprEvalContexts.back().isUnevaluated();
}

/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;
  bool PrevLastDiagnosticIgnored;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
        PrevInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        PrevLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext =
        PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        PrevLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};

/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  bool PrevDisableTypoCorrection;

public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }
  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};

/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;

/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;

/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;

typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;

/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;

/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations; /// Queue of implicit template instantiations that cannot be performed /// eagerly. SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations; class GlobalEagerInstantiationScope { public: GlobalEagerInstantiationScope(Sema &S, bool Enabled) : S(S), Enabled(Enabled) { if (!Enabled) return; SavedPendingInstantiations.swap(S.PendingInstantiations); SavedVTableUses.swap(S.VTableUses); } void perform() { if (Enabled) { S.DefineUsedVTables(); S.PerformPendingInstantiations(); } } ~GlobalEagerInstantiationScope() { if (!Enabled) return; // Restore the set of pending vtables. assert(S.VTableUses.empty() && "VTableUses should be empty before it is discarded."); S.VTableUses.swap(SavedVTableUses); // Restore the set of pending implicit instantiations. if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) { assert(S.PendingInstantiations.empty() && "PendingInstantiations should be empty before it is discarded."); S.PendingInstantiations.swap(SavedPendingInstantiations); } else { // Template instantiations in the PCH may be delayed until the TU. S.PendingInstantiations.swap(SavedPendingInstantiations); S.PendingInstantiations.insert(S.PendingInstantiations.end(), SavedPendingInstantiations.begin(), SavedPendingInstantiations.end()); } } private: Sema &S; SmallVector<VTableUse, 16> SavedVTableUses; std::deque<PendingImplicitInstantiation> SavedPendingInstantiations; bool Enabled; }; /// The queue of implicit template instantiations that are required /// and must be performed within the current local scope. /// /// This queue is only used for member functions of local classes in /// templates, which must be instantiated in the same scope as their /// enclosing function, so that they can reference function-local /// types, static variables, enumerators, etc. 
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

/// RAII scope that isolates the queue of local implicit instantiations.
///
/// The constructor stashes the current PendingLocalImplicitInstantiations
/// queue; perform() flushes what accumulated inside the scope; the
/// destructor asserts the scope's queue is empty again and restores the
/// saved queue.
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  // Drain only the local-instantiation queue populated inside this scope.
  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};

/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  // Dense buffer of per-parameter infos; entries skipped by set() are
  // filled with value-initialized (default) infos via resize().
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  // True once any stored info differs from the default-constructed value.
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  /// Indices must be set in strictly increasing order; gaps are filled
  /// with default-constructed infos.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index);
    Infos.push_back(info);

    if (!HasInteresting)
      HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up, or null when every entry is
  /// the default (nothing "interesting" to record).
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (!HasInteresting) return nullptr;
    Infos.resize(numParams);
    return Infos.data();
  }
};

void PerformPendingInstantiations(bool LocalOnly = false);

TypeSourceInfo *SubstType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity,
                          bool AllowDeducedTST = false);

QualType SubstType(QualType T,
                   const MultiLevelTemplateArgumentList &TemplateArgs,
                   SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstType(TypeLoc TL,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstFunctionDeclType(
    TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs,
    SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext,
    Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
                        const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
                        FunctionProtoType::ExceptionSpecInfo &ESI,
                        SmallVectorImpl<QualType> &ExceptionStorage,
                        const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *
SubstParmVarDecl(ParmVarDecl *D,
                 const MultiLevelTemplateArgumentList &TemplateArgs,
                 int indexAdjustment, Optional<unsigned> NumExpansions,
                 bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
                    const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
                    const MultiLevelTemplateArgumentList &TemplateArgs,
                    SmallVectorImpl<QualType> &ParamTypes,
                    SmallVectorImpl<ParmVarDecl *> *OutParams,
                    ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
/// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='. 
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, 
CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef<TemplateArgument> TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl 
*CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. 
enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc, Decl *const 
*ProtoRefNames, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryInterface( SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *CategoryName, SourceLocation CategoryLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc, const ParsedAttributesView &AttrList); Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc, const ParsedAttributesView &AttrList); DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls); DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc, IdentifierInfo **IdentList, SourceLocation *IdentLocs, ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts); DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc, ArrayRef<IdentifierLocPair> IdentList, const ParsedAttributesView &attrList); void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer, ArrayRef<IdentifierLocPair> ProtocolId, SmallVectorImpl<Decl *> &Protocols); void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId, SourceLocation ProtocolLoc, IdentifierInfo *TypeArgId, SourceLocation TypeArgLoc, bool SelectProtocolFirst = false); /// Given a list of identifiers (and their locations), resolve the /// names to either Objective-C protocol qualifiers or type /// arguments, as appropriate. 
void actOnObjCTypeArgsOrProtocolQualifiers( Scope *S, ParsedType baseType, SourceLocation lAngleLoc, ArrayRef<IdentifierInfo *> identifiers, ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc, bool warnOnIncompleteProtocols); /// Build a an Objective-C protocol-qualified 'id' type where no /// base type was specified. TypeResult actOnObjCProtocolQualifierType( SourceLocation lAngleLoc, ArrayRef<Decl *> protocols, ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc); /// Build a specialized and/or protocol-qualified Objective-C type. TypeResult actOnObjCTypeArgsAndProtocolQualifiers( Scope *S, SourceLocation Loc, ParsedType BaseType, SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc); /// Build an Objective-C type parameter type. QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Build an Objective-C object pointer type. QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. 
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). 
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. 
ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); 
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
ResultTypeCompatibilityKind checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method, const ObjCInterfaceDecl *CurrentClass); void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method, ObjCMethodDecl *overridden); void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod, ObjCInterfaceDecl *CurrentClass, ResultTypeCompatibilityKind RTC); enum PragmaOptionsAlignKind { POAK_Native, // #pragma options align=native POAK_Natural, // #pragma options align=natural POAK_Packed, // #pragma options align=packed POAK_Power, // #pragma options align=power POAK_Mac68k, // #pragma options align=mac68k POAK_Reset // #pragma options align=reset }; /// ActOnPragmaClangSection - Called on well formed \#pragma clang section void ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action, PragmaClangSectionKind SecKind, StringRef SecName); /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align. void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind, SourceLocation PragmaLoc); /// ActOnPragmaPack - Called on well formed \#pragma pack(...). void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action, StringRef SlotLabel, Expr *Alignment); enum class PragmaPackDiagnoseKind { NonDefaultStateAtInclude, ChangedStateAtExit }; void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind, SourceLocation IncludeLoc); void DiagnoseUnterminatedPragmaPack(); /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off]. void ActOnPragmaMSStruct(PragmaMSStructKind Kind); /// ActOnPragmaMSComment - Called on well formed /// \#pragma comment(kind, "arg"). void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind, StringRef Arg); /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma /// pointers_to_members(representation method[, general purpose /// representation]). 
void ActOnPragmaMSPointersToMembers(
    LangOptions::PragmaMSPointersToMembersKind Kind,
    SourceLocation PragmaLoc);

/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
                           SourceLocation PragmaLoc, MSVtorDispMode Value);

// Kinds of MS segment pragmas handled by the *_seg / section pragmas below.
enum PragmaSectionKind {
  PSK_DataSeg,
  PSK_BSSSeg,
  PSK_ConstSeg,
  PSK_CodeSeg,
};

bool UnifySection(StringRef SectionName, int SectionFlags,
                  DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName, int SectionFlags,
                  SourceLocation PragmaSectionLocation);

/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                      PragmaMsStackAction Action,
                      llvm::StringRef StackSlotLabel,
                      StringLiteral *SegmentName,
                      llvm::StringRef PragmaName);

/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags,
                          StringLiteral *SegmentName);

/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                          StringLiteral *SegmentName);

/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);

/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
                               StringRef Value);

/// Are precise floating point semantics currently enabled?
/// True only when none of the fast-math style relaxations (reassociation,
/// no-signed-zero, reciprocal, approximate functions) is in effect.
bool isPreciseFPEnabled() {
  return !CurFPFeatures.getAllowFPReassociate() &&
         !CurFPFeatures.getNoSignedZero() &&
         !CurFPFeatures.getAllowReciprocal() &&
         !CurFPFeatures.getAllowApproxFunc();
}

/// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
                             PragmaFloatControlKind Value);

/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier, Scope *curScope,
                       SourceLocation PragmaLoc);

/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT and /// \#pragma clang fp contract void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC); /// Called on well formed /// \#pragma clang fp reassociate void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled); /// ActOnPragmaFenvAccess - Called on well formed /// \#pragma STDC FENV_ACCESS void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled); /// Called to set rounding mode for floating point operations. void setRoundingMode(SourceLocation Loc, llvm::RoundingMode); /// Called to set exception behavior for floating point operations. void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// a the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. 
void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute, SourceLocation PragmaLoc, attr::ParsedSubjectMatchRuleSet Rules); void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Called on well-formed '\#pragma clang attribute pop'. void ActOnPragmaAttributePop(SourceLocation PragmaLoc, const IdentifierInfo *Namespace); /// Adds the attributes that have been specified using the /// '\#pragma clang attribute push' directives to the given declaration. void AddPragmaAttributes(Scope *S, Decl *D); void DiagnoseUnterminatedPragmaAttribute(); /// Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". 
SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, bool IsPackExpansion); void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E, Expr *OE); /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular /// declaration. void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ParamExpr); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E); /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular /// declaration. void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI, Expr *MaxThreads, Expr *MinBlocks); /// AddModeAttr - Adds a mode attribute to a particular declaration. 
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name, bool InInstantiation = false); void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI, ParameterABI ABI); enum class RetainOwnershipKind {NS, CF, OS}; void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI, RetainOwnershipKind K, bool IsTemplateInstantiation); /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size /// attribute to a particular declaration. void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); /// addAMDGPUWavePersEUAttr - Adds an amdgpu_waves_per_eu attribute to a /// particular declaration. void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI, Expr *Min, Expr *Max); bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type); //===--------------------------------------------------------------------===// // C++ Coroutines TS // bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc, StringRef Keyword); ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E); StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E); ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E, UnresolvedLookupExpr* Lookup); ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E); StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E, bool IsImplicit = false); StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs); bool buildCoroutineParameterMoves(SourceLocation Loc); VarDecl *buildCoroutinePromise(SourceLocation Loc); void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body); ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc, SourceLocation FuncLoc); /// Check that the expression co_await promise.final_suspend() shall not be /// potentially-throwing. 
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);

//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
// Name of the OpenCL extension currently being enabled by pragma, if any.
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
  return CurrOpenCLExtension;
}

/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);

/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);

/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);

void setCurrentOpenCLExtension(llvm::StringRef Ext) {
  CurrOpenCLExtension = std::string(Ext);
}

/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);

/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);

/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);

/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);

bool isOpenCLDisabledDecl(Decl *FD);

/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);

/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);

//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
// Opaque stack of data-sharing attributes; managed via
// Init/DestroyDataSharingAttributesStack below.
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op,
                                                 OpenMPClauseKind CKind,
                                                 bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;

/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
                                  unsigned Level) const;

/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;

/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();

/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);

/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
                                   MapT &Map, unsigned Selector = 0,
                                   SourceRange SrcRange = SourceRange());

/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
  /// The associated OpenMP context selector.
  OMPTraitInfo *TI;

  /// The associated OpenMP context selector mangling.
  std::string NameSuffix;

  OMPDeclareVariantScope(OMPTraitInfo &TI);
};

/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;

/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope.
FunctionDecl *
ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
                                                          Declarator &D);

/// Register \p FD as specialization of \p BaseFD in the current `omp
/// begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
    FunctionDecl *FD, FunctionDecl *BaseFD);

public:
/// Can we exit a scope at the moment.
bool isInOpenMPDeclareVariantScope() {
  return !OMPDeclareVariantScopes.empty();
}

/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
                           SourceLocation LParenLoc, MultiExprArg ArgExprs,
                           SourceLocation RParenLoc, Expr *ExecConfig);

/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);

/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();

/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
    const FunctionDecl *OldFD, const FunctionDecl *NewFD,
    const PartialDiagnostic &NoProtoDiagID,
    const PartialDiagnosticAt &NoteCausedDiagIDAt,
    const PartialDiagnosticAt &NoSupportDiagIDAt,
    const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
    bool ConstexprSupported, bool CLinkageMayDiffer);

/// Function tries to capture lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);

/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
                           unsigned OpenMPCaptureLevel) const;

/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
                              unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
                                 ExprObjectKind OK, SourceLocation Loc);

/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();

/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();

/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
                                     unsigned CapLevel) const;

/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);

/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
                                unsigned CaptureLevel) const;

/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of nested OpenMP construct for that
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
                                unsigned CaptureLevel) const;

ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
                                                  Expr *Op);

/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
                         const DeclarationNameInfo &DirName, Scope *CurScope,
                         SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);

/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                   const DeclarationNameInfo &Id,
                                   OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
                                     SourceLocation Loc,
                                     ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
                                                ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
                                            ArrayRef<Expr *> VarList,
                                            ArrayRef<OMPClause *> Clauses,
                                            DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
                                            ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
                                      ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
                                         TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
    Scope *S, DeclContext *DC, DeclarationName Name,
    ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
    AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
                                               VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
    Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);

/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
                                      TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective(
    Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
    SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
    Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses,
    Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S,
                                                    QualType MapperType,
                                                    SourceLocation StartLoc,
                                                    DeclarationName VN);
bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const;
const ValueDecl *getOpenMPDeclareMapperVarName() const;

/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                              const DeclarationNameInfo &Id,
                              NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
                                  OMPDeclareTargetDeclAttr::MapTypeTy MT,
                                  OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
                                 SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
                                   const FunctionDecl *Callee,
                                   SourceLocation Loc);
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
  return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;

/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);

/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);

/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
    OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
    OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
    Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
// Map of variables to the expression from which they inherit an implicit
// data-sharing attribute, passed to the loop-directive handlers below.
using VarsWithInheritedDSAType =
    llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                         SourceLocation StartLoc, SourceLocation EndLoc,
                         VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                        SourceLocation StartLoc, SourceLocation EndLoc,
                        VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                            SourceLocation StartLoc, SourceLocation EndLoc,
                            VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
                                        ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
                                              Stmt *AStmt,
                                              SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                                Stmt *AStmt,
                                                SourceLocation StartLoc,
                                                SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
                                    Stmt *AStmt, SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
                                         Stmt *AStmt, SourceLocation StartLoc,
                                         SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
                                      SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
                                       Stmt *AStmt, SourceLocation StartLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
                                               SourceLocation StartLoc,
                                               SourceLocation EndLoc,
                                               Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
                                              SourceLocation StartLoc,
                                              SourceLocation EndLoc,
                                              Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
                                              Stmt *AStmt,
                                              SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                     Stmt *AStmt, SourceLocation StartLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
                                      SourceLocation EndLoc,
                                      OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
                                      SourceLocation StartLoc,
                                      SourceLocation EndLoc,
                                      OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                             SourceLocation StartLoc, SourceLocation EndLoc,
                             VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                               SourceLocation StartLoc, SourceLocation EndLoc,
                               VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
                                            SourceLocation StartLoc,
                                            SourceLocation EndLoc,
                                            Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                               SourceLocation StartLoc, SourceLocation EndLoc,
                               VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                           Stmt *AStmt,
                                           SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
    ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
    SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
                               SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
                           OpenMPLinearClauseKind LinKind, QualType Type,
                           bool IsDeclareSimd = false);

/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
    DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
    Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
    ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
    ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);

/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which declare variant directive is
/// applied to.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>> checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef, OMPTraitInfo &TI, SourceRange SR); /// Called on well-formed '\#pragma omp declare variant' after parsing of /// the associated method/function. /// \param FD Function declaration to which declare variant directive is /// applied to. /// \param VariantRef Expression that references the variant function, which /// must be used instead of the original one, specified in \p DG. /// \param TI The context traits associated with the function variant. void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef, OMPTraitInfo &TI, SourceRange SR); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocator' clause. OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'final' clause. OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'simdlen' clause. OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'collapse' clause. 
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'ordered' clause. OMPClause * ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc, SourceLocation LParenLoc = SourceLocation(), Expr *NumForLoops = nullptr); /// Called on well-formed 'grainsize' clause. OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'num_tasks' clause. OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'hint' clause. OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'detach' clause. OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'order' clause. OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. 
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause( OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc, SourceLocation EndLoc); /// Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause( OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); /// Called on well-formed 'acq_rel' clause. 
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause( OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPVarListClause( OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr, const OMPVarListLocTy &Locs, SourceLocation ColonLoc, CXXScopeSpec &ReductionOrMapperIdScopeSpec, DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier, ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit, SourceLocation ExtraModifierLoc, ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc); /// Called on well-formed 'inclusive' clause. OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'exclusive' clause. OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'allocate' clause. OMPClause * ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'lastprivate' clause. OMPClause *ActOnOpenMPLastprivateClause( ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'shared' clause. 
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'reduction' clause. OMPClause *ActOnOpenMPReductionClause( ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'task_reduction' clause. OMPClause *ActOnOpenMPTaskReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'in_reduction' clause. OMPClause *ActOnOpenMPInReductionClause( ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId, ArrayRef<Expr *> UnresolvedReductions = llvm::None); /// Called on well-formed 'linear' clause. OMPClause * ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind, SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'copyprivate' clause. 
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depobj' pseudo clause. OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'device' clause. OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *Device, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation EndLoc); /// Called on well-formed 'map' clause. OMPClause * ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers, ArrayRef<SourceLocation> MapTypeModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, OpenMPMapClauseKind MapType, bool IsMapTypeImplicit, SourceLocation MapLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'num_teams' clause. OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'thread_limit' clause. OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'priority' clause. 
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Called on well-formed 'dist_schedule' clause. OMPClause *ActOnOpenMPDistScheduleClause( OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// Called on well-formed 'defaultmap' clause. OMPClause *ActOnOpenMPDefaultmapClause( OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KindLoc, SourceLocation EndLoc); /// Called on well-formed 'to' clause. OMPClause * ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'from' clause. OMPClause * ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, CXXScopeSpec &MapperIdScopeSpec, DeclarationNameInfo &MapperId, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs, ArrayRef<Expr *> UnresolvedMappers = llvm::None); /// Called on well-formed 'use_device_ptr' clause. OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'use_device_addr' clause. OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'is_device_ptr' clause. OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs); /// Called on well-formed 'nontemporal' clause. 
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);

/// Data for list of allocators.
struct UsesAllocatorsData {
  /// Allocator.
  Expr *Allocator = nullptr;
  /// Allocator traits.
  Expr *AllocatorTraits = nullptr;
  /// Locations of '(' and ')' symbols.
  SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc,
                                          ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation ColonLoc,
                                     SourceLocation EndLoc, Expr *Modifier,
                                     ArrayRef<Expr *> Locators);

/// The kind of conversion being performed.
enum CheckedConversionKind {
  /// An implicit conversion.
  CCK_ImplicitConversion,
  /// A C-style cast.
  CCK_CStyleCast,
  /// A functional-style cast.
  CCK_FunctionalCast,
  /// A cast other than a C-style cast.
  CCK_OtherCast,
  /// A conversion for an operand of a builtin overloaded operator.
  CCK_ForBuiltinOverloadedOp
};

/// Returns true for the conversion kinds written with explicit cast syntax
/// (C-style, functional-style, and other casts); false for implicit
/// conversions and conversions for builtin overloaded operators.
static bool isCast(CheckedConversionKind CCK) {
  return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
         CCK == CCK_OtherCast;
}

/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                             ExprValueKind VK = VK_RValue,
                             const CXXCastPath *BasePath = nullptr,
                             CheckedConversionKind CCK
                                = CCK_ImplicitConversion);

/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This function is a no-op if the operand has a function type // or an array type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. 
ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collector argument expressions for various /// form of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); /// Context in which we're performing a usual arithmetic conversion. enum ArithConvKind { /// An arithmetic operation. ACK_Arithmetic, /// A bitwise operation. ACK_BitwiseOp, /// A comparison. ACK_Comparison, /// A conditional (?:) operator. ACK_Conditional, /// A compound assignment expression. 
ACK_CompAssign, }; // UsualArithmeticConversions - performs the UsualUnaryConversions on it's // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, ArithConvKind ACK); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointers types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatibleFunctionPointer - The assignment is between two function /// pointers types that are not compatible, but we accept them as an /// extension. IncompatibleFunctionPointer, /// IncompatiblePointerSign - The assignment is between two pointers types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. 
IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointers types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. 
/// This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
                              SourceLocation Loc,
                              QualType DstType, QualType SrcType,
                              Expr *SrcExpr, AssignmentAction Action,
                              bool *Complained = nullptr);

/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                       bool AllowMask) const;

/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                            Expr *SrcExpr);

/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                             QualType LHSType,
                                             QualType RHSType);

/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                             ExprResult &RHS,
                                             CastKind &Kind,
                                             bool ConvertRHS = true);

/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
///        for assignability. If a diagnostic is produced, \p RHS will be
///        set to ExprError(). Note that this function may still return
///        without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
///        in an audited Core Foundation API and does not need to be checked
///        for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). 
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
    Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc,
    QualType CompoundType);

// Check ++/-- applied to a pseudo-object operand.
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
                                   UnaryOperatorKind Opcode, Expr *Op);
// Check a (possibly compound) assignment whose LHS is a pseudo-object.
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
                                       BinaryOperatorKind Opcode,
                                       Expr *LHS, Expr *RHS);
// Check an rvalue use of a pseudo-object expression.
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);

QualType CheckConditionalOperands( // C99 6.5.15
    ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
    ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
    ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
    ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
                                        ExprResult &RHS,
                                        SourceLocation QuestionLoc);

QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
                                  bool ConvertArgs = true);
/// Convenience overload: unwraps the ExprResults, delegates to the Expr*&
/// overload above, and stores the (possibly converted) operands back into
/// \p E1 and \p E2 before returning the composite type.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool ConvertArgs = true) {
  Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
  QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp,
                                                ConvertArgs);
  E1 = E1Tmp;
  E2 = E2Tmp;
  return Composite;
}

QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation QuestionLoc);

bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                SourceLocation QuestionLoc);

void DiagnoseAlwaysNonNullPointer(Expr *E,
                                  Expr::NullPointerConstantKind NullType,
                                  bool IsEqual, SourceRange Range);

/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); /// Type checking for matrix binary operators. QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. struct ReferenceConversionsScope { /// The conversions that would be performed on an lvalue of type T2 when /// binding a reference of type T1 to it, as determined when evaluating /// whether T1 is reference-compatible with T2. 
enum ReferenceConversions { Qualification = 0x1, NestedQualification = 0x2, Function = 0x4, DerivedToBase = 0x8, ObjC = 0x10, ObjCLifetime = 0x20, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime) }; }; using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, ReferenceConversions *Conv = nullptr); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there are no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. 
// returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. 
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. 
void EmitRelatedResultTypeNoteForReturn(QualType destType);

/// The result of analyzing the condition of an 'if', 'while', 'for', 'do'
/// or 'switch' (see ConditionKind below): the optional condition variable,
/// the condition expression, and — for constexpr conditions whose value is
/// not dependent — the eagerly evaluated compile-time value.
class ConditionResult {
  Decl *ConditionVar;    // The condition variable declaration, if any.
  FullExprArg Condition; // The condition expression itself.
  bool Invalid;          // True if the condition was erroneous.
  bool HasKnownValue;    // True if KnownValue below is meaningful.
  bool KnownValue;       // The evaluated boolean value, when known.

  friend class Sema;

  // Constructed only by Sema. For a constexpr condition with a
  // non-value-dependent expression, evaluate it now so getKnownValue()
  // can report the result.
  ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
                  bool IsConstexpr)
      : ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
        HasKnownValue(IsConstexpr && Condition.get() &&
                      !Condition.get()->isValueDependent()),
        KnownValue(HasKnownValue &&
                   !!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
  explicit ConditionResult(bool Invalid)
      : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
        HasKnownValue(false), KnownValue(false) {}

public:
  ConditionResult() : ConditionResult(false) {}

  bool isInvalid() const { return Invalid; }

  /// Retrieve the condition variable (may be null) and the condition
  /// expression.
  std::pair<VarDecl *, Expr *> get() const {
    return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
                          Condition.get());
  }

  /// The compile-time boolean value of the condition, or None when the
  /// value is not known (non-constexpr or value-dependent condition).
  llvm::Optional<bool> getKnownValue() const {
    if (!HasKnownValue)
      return None;
    return KnownValue;
  }
};

/// Convenience factory for an invalid ConditionResult.
static ConditionResult ConditionError() { return ConditionResult(true); }

enum class ConditionKind {
  Boolean,     ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
  ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
  Switch       ///< An integral condition for a 'switch' statement.
};

ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr,
                               ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
                                       SourceLocation StmtLoc,
                                       ConditionKind CK);

DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);

ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc, ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
/// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explict specifier. /// Returns true if the explicit specifier is now resolved. bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. 
class VerifyICEDiagnoser {
public:
  bool Suppress; // When set, callbacks are constructed in "suppress" mode.

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) {}

  /// Called when the expression is not an integer constant expression.
  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc,
                              SourceRange SR) = 0;
  /// Called for the folding case — presumably when the expression is only
  /// acceptable via constant folding (see AllowFold below); confirm at the
  /// call sites in SemaExpr.
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() {}
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr);

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct, Expr *BitWidth,
                          bool *ZeroWidth = nullptr);

private:
  // Nesting count for the pragma that forces functions to be
  // __host__ __device__; see Push/PopForceCUDAHostDevice below.
  unsigned ForceCUDAHostDeviceDepth = 0;

public:
  /// Increments our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. So long as this count is greater
  /// than zero, all functions encountered will be __host__ __device__.
  void PushForceCUDAHostDevice();

  /// Decrements our count of the number of times we've seen a pragma forcing
  /// functions to be __host__ __device__. Returns false if the count is 0
  /// before incrementing, so you can emit an error.
  bool PopForceCUDAHostDevice();

  /// Diagnostics that are emitted only if we discover that the given function
  /// must be codegen'ed. Because handling these correctly adds overhead to
  /// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
               std::vector<PartialDiagnosticAt>>
    DeviceDeferredDiags;

/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
  CanonicalDeclPtr<FunctionDecl> FD;
  SourceLocation Loc;
};

/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;

/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
               /* Caller = */ FunctionDeclAndLoc>
    DeviceKnownEmittedFns;

/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
  enum Kind {
    /// Emit no diagnostics.
    K_Nop,
    /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
    K_Immediate,
    /// Emit the diagnostic immediately, and, if it's a warning or error, also
    /// emit a call stack showing how this function can be reached by an a
    /// priori known-emitted function.
    K_ImmediateWithCallStack,
    /// Create a deferred diagnostic, which is emitted only if the function
    /// it's attached to is codegen'ed. Also emit a call stack as with
    /// K_ImmediateWithCallStack.
    K_Deferred
  };

  DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
                    FunctionDecl *Fn, Sema &S);
  DeviceDiagBuilder(DeviceDiagBuilder &&D);
  DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
  ~DeviceDiagBuilder();

  /// Convertible to bool: True if we immediately emitted an error, false if
  /// we didn't emit an error or we created a deferred error.
  ///
  /// Example usage:
  ///
  ///   if (DeviceDiagBuilder(...) << foo << bar)
  ///     return ExprError();
  ///
  /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
  /// want to use these instead of creating a DeviceDiagBuilder yourself.
  operator bool() const { return ImmediateDiag.hasValue(); }

  /// Stream a value into either the immediate diagnostic or, when the
  /// diagnostic was deferred, into the partial diagnostic stored in
  /// Sema::DeviceDeferredDiags for the attached function.
  template <typename T>
  friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
                                             const T &Value) {
    if (Diag.ImmediateDiag.hasValue())
      *Diag.ImmediateDiag << Value;
    else if (Diag.PartialDiagId.hasValue())
      Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
          << Value;
    return Diag;
  }

private:
  Sema &S;            // Sema instance the diagnostic is reported through.
  SourceLocation Loc; // Location the diagnostic is associated with.
  unsigned DiagID;    // The diagnostic's ID.
  FunctionDecl *Fn;   // Function a deferred diagnostic is attached to.
  bool ShowCallStack; // Whether a call stack accompanies the diagnostic
                      // (see K_ImmediateWithCallStack / K_Deferred).

  // Invariant: At most one of these Optionals has a value.
  // FIXME: Switch these to a Variant once that exists.
  llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
  llvm::Optional<unsigned> PartialDiagId;
};

/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
///   diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. 
/// if (diagIfOpenMPHostode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); /// Check if the expression is allowed to be used in expressions for the /// offloading devices. void checkDeviceDecl(const ValueDecl *D, SourceLocation Loc); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D); // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions. CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. 
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); /// May add implicit CUDAConstantAttr attribute to VD, depending on VD /// and current compilation settings. void MaybeAddCUDAConstantAttr(VarDecl *VD); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas by default is host device function unless it has explicit /// host or device attribute. 
void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with highest calling priority /// from \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p Var satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p Var. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. 
void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. 
PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. 
\p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void 
CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} //===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const 
Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg, bool WantCDE); 
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, int ArgNum); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinComplex(CallExpr *TheCall); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. 
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); // Matrix builtin handling. 
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
                                      ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
                                            ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
                                             ExprResult CallResult);

public:
  /// The flavors of format string understood by format-string checking.
  enum FormatStringType {
    FST_Scanf,
    FST_Printf,
    FST_NSString,
    FST_Strftime,
    FST_Strfmon,
    FST_Kprintf,
    FST_FreeBSDKPrintf,
    FST_OSTrace,
    FST_OSLog,
    FST_Unknown
  };
  static FormatStringType GetFormatStringType(const FormatAttr *Format);

  bool FormatStringHasSArg(const StringLiteral *FExpr);

  static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);

private:
  bool CheckFormatArguments(const FormatAttr *Format,
                            ArrayRef<const Expr *> Args, bool IsCXXMember,
                            VariadicCallType CallType, SourceLocation Loc,
                            SourceRange Range,
                            llvm::SmallBitVector &CheckedVarArgs);
  bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg,
                            unsigned format_idx, unsigned firstDataArg,
                            FormatStringType Type, VariadicCallType CallType,
                            SourceLocation Loc, SourceRange range,
                            llvm::SmallBitVector &CheckedVarArgs);
  void CheckAbsoluteValueFunction(const CallExpr *Call,
                                  const FunctionDecl *FDecl);
  void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
  void CheckMemaccessArguments(const CallExpr *Call, unsigned BId,
                               IdentifierInfo *FnName);
  void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName);
  void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName);
  void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
                          SourceLocation ReturnLoc, bool isObjCMethod = false,
                          const AttrVec *Attrs = nullptr,
                          const FunctionDecl *FD = nullptr);

public:
  void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);

private:
  void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
  void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
  void CheckForIntOverflow(Expr *E);
  void CheckUnsequencedOperations(const Expr *E);

  /// Perform semantic checks on a completed expression. This will either
  /// be a full-expression or a default argument expression.
  void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
                          bool IsConstexpr = false);

  void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
                                   Expr *Init);

  /// Check if there is a field shadowing.
  void CheckShadowInheritedFields(const SourceLocation &Loc,
                                  DeclarationName FieldName,
                                  const CXXRecordDecl *RD,
                                  bool DeclIsField = true);

  /// Check if the given expression contains 'break' or 'continue'
  /// statement that produces control flow different from GCC.
  void CheckBreakContinueBinding(Expr *E);

  /// Check whether receiver is mutable ObjC container which
  /// attempts to add itself into the container
  void CheckObjCCircularContainer(ObjCMessageExpr *Message);

  void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
  void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
                                 bool DeleteWasArrayForm);

public:
  /// Register a magic integral constant to be used as a type tag.
  void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
                                  uint64_t MagicValue, QualType Type,
                                  bool LayoutCompatible, bool MustBeNull);

  /// The type (plus checking flags) associated with a registered type-tag
  /// magic value; see RegisterTypeTagForDatatype above.
  struct TypeTagData {
    TypeTagData() {}

    TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull)
        : Type(Type), LayoutCompatible(LayoutCompatible),
          MustBeNull(MustBeNull) {}

    QualType Type;

    /// If true, \c Type should be compared with other expression's types for
    /// layout-compatibility.
    unsigned LayoutCompatible : 1;
    unsigned MustBeNull : 1;
  };

  /// A pair of ArgumentKind identifier and magic value. This uniquely
  /// identifies the magic value.
  typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;

private:
  /// A map from magic value to type information.
  std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
      TypeTagForDatatypeMagicValues;

  /// Perform checks on a call of a function with argument_with_type_tag
  /// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; bool isCFError(RecordDecl *D); /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. 
Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// Determine the number of levels of enclosing template parameters. This is /// only usable while parsing. Note that this does not include dependent /// contexts in which no template parameters have yet been declared, such as /// in a terse function template or generic lambda before the first 'auto' is /// encountered. unsigned getTemplateDepth(Scope *S) const; /// To be used for checking whether the arguments being passed to /// function exceeds the number of parameters expected for it. static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. 
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: int ParsingClassDepth = 0; class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. 
void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurLexicalContext is a kernel function or it is known that the /// function will be emitted for the device, emits the diagnostics /// immediately. /// - If CurLexicalContext is a function and we are compiling /// for the device, but we don't know that this function will be codegen'ed /// for devive yet, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// Diagnose __float128 type usage only from SYCL device code if the current /// target doesn't support it /// if (!S.Context.getTargetInfo().hasFloat128Type() && /// S.getLangOpts().SYCLIsDevice) /// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128"; DeviceDiagBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Check whether we're allowed to call Callee from the current context. 
  ///
  /// - If the call is never allowed in a semantically-correct program
  ///   emits an error and returns false.
  ///
  /// - If the call is allowed in semantically-correct programs, but only if
  ///   it's never codegen'ed, creates a deferred diagnostic to be emitted if
  ///   and when the caller is codegen'ed, and returns true.
  ///
  /// - Otherwise, returns true without emitting any diagnostics.
  ///
  /// Adds Callee to DeviceCallGraph if we don't know if its caller will be
  /// codegen'ed yet.
  bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
};

/// RAII object that enters a new expression evaluation context.
///
/// On construction it pushes an ExpressionEvaluationContext onto the Sema
/// context stack (unless told not to); the destructor pops it again, so the
/// context is balanced across any exit path of the enclosing scope.
class EnterExpressionEvaluationContext {
  Sema &Actions;
  // Tracks whether a context was actually pushed, so the destructor only
  // pops when the constructor pushed one.
  bool Entered = true;

public:
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Decl *LambdaContextDecl = nullptr,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other,
      bool ShouldEnter = true)
      : Actions(Actions), Entered(ShouldEnter) {
    if (Entered)
      Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                              ExprContext);
  }

  // Variant that reuses the lambda context declaration of the enclosing
  // evaluation context (selected by the ReuseLambdaContextDecl_t tag).
  // Note: this constructor always pushes, and leaves Entered at its
  // default of true so the destructor pops.
  EnterExpressionEvaluationContext(
      Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
      Sema::ReuseLambdaContextDecl_t,
      Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
          Sema::ExpressionEvaluationContextRecord::EK_Other)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(
        NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
  }

  enum InitListTag { InitList };
  // Variant used when entering the body of a braced-init-list; it only
  // pushes an UnevaluatedList context in the narrow case described below.
  EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
                                   bool ShouldEnter = true)
      : Actions(Actions), Entered(false) {
    // In C++11 onwards, narrowing checks are performed on the contents of
    // braced-init-lists, even when they occur within unevaluated operands.
    // Therefore we still need to instantiate constexpr functions used in such
    // a context.
    if (ShouldEnter && Actions.isUnevaluatedContext() &&
        Actions.getLangOpts().CPlusPlus11) {
      Actions.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::UnevaluatedList);
      Entered = true;
    }
  }

  ~EnterExpressionEvaluationContext() {
    if (Entered)
      Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    // Combine the canonical-decl hash with the raw source location so two
    // mentions of the same function at different locations hash differently.
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};
} // namespace llvm

#endif
core_ztslqt.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @precisions normal z -> c d s
 *
 **/

#include "core_blas.h"
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

#include <omp.h>

#undef REAL
#define COMPLEX

/***************************************************************************//**
 *
 * @ingroup core_tslqt
 *
 *  Computes an LQ factorization of a rectangular matrix
 *  formed by coupling side-by-side a complex m-by-m
 *  lower triangular tile A1 and a complex m-by-n tile A2:
 *
 *    | A1 A2 | = L * Q
 *
 *  The tile Q is represented as a product of elementary reflectors
 *
 *    Q = H(k)^H . . . H(2)^H H(1)^H, where k = min(m,n).
 *
 *  Each H(i) has the form
 *
 *    H(i) = I - tau * v * v^H
 *
 *  where tau is a complex scalar, and v is a complex vector with
 *  v(1:i-1) = 0 and v(i) = 1; v(i+1:n)^H is stored on exit in
 *  A2(i,1:n), and tau in tau(i).
 *
 *******************************************************************************
 *
 * @param[in] m
 *          The number of rows of the tile A1 and A2. m >= 0.
 *          The number of columns of the tile A1.
 *
 * @param[in] n
 *          The number of columns of the tile A2. n >= 0.
 *
 * @param[in] ib
 *          The inner-blocking size. ib >= 0.
 *
 * @param[in,out] A1
 *          On entry, the m-by-m tile A1.
 *          On exit, the elements on and below the diagonal of the array
 *          contain the m-by-m lower trapezoidal tile L;
 *          the elements above the diagonal are not referenced.
 *
 * @param[in] lda1
 *          The leading dimension of the array A1. lda1 >= max(1,m).
 *
 * @param[in,out] A2
 *          On entry, the m-by-n tile A2.
 *          On exit, all the elements with the array tau, represent
 *          the unitary tile Q as a product of elementary reflectors
 *          (see Further Details).
 *
 * @param[in] lda2
 *          The leading dimension of the tile A2. lda2 >= max(1,m).
 *
 * @param[out] T
 *          The ib-by-m triangular factor T of the block reflector.
 *          T is upper triangular by block (economic storage);
 *          The rest of the array is not referenced.
 *
 * @param[in] ldt
 *          The leading dimension of the array T. ldt >= ib.
 *
 * @param tau
 *          Auxiliary workspace array of length m.
 *
 * @param work
 *          Auxiliary workspace array of length ib*m.
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 ******************************************************************************/
int core_ztslqt(int m, int n, int ib,
                plasma_complex64_t *A1, int lda1,
                plasma_complex64_t *A2, int lda2,
                plasma_complex64_t *T, int ldt,
                plasma_complex64_t *tau, plasma_complex64_t *work)
{
    // Check input arguments.
    if (m < 0) {
        coreblas_error("illegal value of m");
        return -1;
    }
    if (n < 0) {
        coreblas_error("illegal value of n");
        return -2;
    }
    if (ib < 0) {
        coreblas_error("illegal value of ib");
        return -3;
    }
    if (A1 == NULL) {
        coreblas_error("NULL A1");
        return -4;
    }
    if (lda1 < imax(1, m) && m > 0) {
        coreblas_error("illegal value of lda1");
        return -5;
    }
    if (A2 == NULL) {
        coreblas_error("NULL A2");
        return -6;
    }
    if (lda2 < imax(1, m) && m > 0) {
        coreblas_error("illegal value of lda2");
        return -7;
    }
    if (T == NULL) {
        coreblas_error("NULL T");
        return -8;
    }
    if (ldt < imax(1, ib) && ib > 0) {
        coreblas_error("illegal value of ldt");
        return -9;
    }
    if (tau == NULL) {
        coreblas_error("NULL tau");
        return -10;
    }
    if (work == NULL) {
        coreblas_error("NULL work");
        return -11;
    }

    // quick return
    if (m == 0 || n == 0 || ib == 0)
        return PlasmaSuccess;

    static plasma_complex64_t zone  = 1.0;
    static plasma_complex64_t zzero = 0.0;

    // Blocked LQ sweep: factor rows in panels of at most ib rows.
    for (int ii = 0; ii < m; ii += ib) {
        int sb = imin(m-ii, ib);
        for (int i = 0; i < sb; i++) {
            // Generate elementary reflector H(ii*ib+i) to annihilate
            // A(ii*ib+i,ii*ib+i:n).
            // zlarfg expects the reflector vector (not its conjugate), so in
            // the complex case the row is conjugated first and un-conjugated
            // again after T has been accumulated below.
#ifdef COMPLEX
            LAPACKE_zlacgv_work(n, &A2[ii+i], lda2);
            LAPACKE_zlacgv_work(1, &A1[lda1*(ii+i)+ii+i], lda1);
#endif
            LAPACKE_zlarfg_work(n+1, &A1[lda1*(ii+i)+ii+i], &A2[ii+i], lda2,
                                &tau[ii+i]);

            plasma_complex64_t alpha = -(tau[ii+i]);
            if (ii+i+1 < m) {
                // Apply H(ii+i) to the remaining rows of the current panel,
                // A(ii+i+1:ii+sb-1, :), from the right:
                // work := A1 column part + A2 * v, then rank-1 update.
                cblas_zcopy(sb-i-1, &A1[lda1*(ii+i)+(ii+i+1)], 1, work, 1);

                cblas_zgemv(CblasColMajor, (CBLAS_TRANSPOSE)PlasmaNoTrans,
                            sb-i-1, n,
                            CBLAS_SADDR(zone), &A2[ii+i+1], lda2,
                            &A2[ii+i], lda2,
                            CBLAS_SADDR(zone), work, 1);

                cblas_zaxpy(sb-i-1, CBLAS_SADDR(alpha), work, 1,
                            &A1[lda1*(ii+i)+ii+i+1], 1);

                cblas_zgerc(CblasColMajor,
                            sb-i-1, n,
                            CBLAS_SADDR(alpha), work, 1,
                            &A2[ii+i], lda2,
                            &A2[ii+i+1], lda2);
            }
            // Calculate T: new column T(0:i-1, ii+i) = alpha * A2(panel)^... * v,
            // then multiply by the leading i-by-i upper triangle of T.
            cblas_zgemv(CblasColMajor, (CBLAS_TRANSPOSE)PlasmaNoTrans,
                        i, n,
                        CBLAS_SADDR(alpha), &A2[ii], lda2,
                        &A2[ii+i], lda2,
                        CBLAS_SADDR(zzero), &T[ldt*(ii+i)], 1);
#ifdef COMPLEX
            // Undo the conjugation applied before zlarfg.
            LAPACKE_zlacgv_work(n, &A2[ii+i], lda2);
            LAPACKE_zlacgv_work(1, &A1[lda1*(ii+i)+ii+i], lda1);
#endif
            cblas_ztrmv(CblasColMajor,
                        (CBLAS_UPLO)PlasmaUpper,
                        (CBLAS_TRANSPOSE)PlasmaNoTrans,
                        (CBLAS_DIAG)PlasmaNonUnit,
                        i, &T[ldt*ii], ldt,
                        &T[ldt*(ii+i)], 1);

            T[ldt*(ii+i)+i] = tau[ii+i];
        }
        // Apply the block reflector of this panel to the trailing rows.
        if (m > ii+sb) {
            core_ztsmlq(PlasmaRight, Plasma_ConjTrans,
                        m-(ii+sb), sb, m-(ii+sb), n, ib, ib,
                        &A1[lda1*ii+ii+sb], lda1,
                        &A2[ii+sb], lda2,
                        &A2[ii], lda2,
                        &T[ldt*ii], ldt,
                        work, lda1);
        }
    }

    return PlasmaSuccess;
}

/******************************************************************************/
// OpenMP task wrapper for core_ztslqt(): declares data dependences on the
// tiles and dispatches the kernel with per-thread workspace.
void core_omp_ztslqt(int m, int n, int ib,
                     plasma_complex64_t *A1, int lda1,
                     plasma_complex64_t *A2, int lda2,
                     plasma_complex64_t *T, int ldt,
                     plasma_workspace_t work,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(inout:A1[0:lda1*m]) \
                     depend(inout:A2[0:lda2*n]) \
                     depend(out:T[0:ib*m]) // T should be mxib, but is stored
                                           // as ibxm
    {
        if (sequence->status == PlasmaSuccess) {
            // Prepare workspaces.
            // tau occupies the first m entries of this thread's workspace;
            // the ib*m work area for the kernel follows at tau+m.
            // NOTE(review): assumes work.spaces[tid] holds at least m + ib*m
            // entries, matching the tau/work sizes documented on
            // core_ztslqt() -- confirm against the workspace allocation.
            int tid = omp_get_thread_num();
            plasma_complex64_t *tau = ((plasma_complex64_t*)work.spaces[tid]);

            // Call the kernel.
            int info = core_ztslqt(m, n, ib,
                                   A1, lda1,
                                   A2, lda2,
                                   T, ldt,
                                   tau,
                                   tau+m);

            if (info != PlasmaSuccess) {
                plasma_error("core_ztslqt() failed");
                plasma_request_fail(sequence, request, PlasmaErrorInternal);
            }
        }
    }
}
transformermain.h
#if ! defined TRANSFORMERMAIN_H #define TRANSFORMERMAIN_H #include <string> #include <vector> #include <ios> #include <stdexcept> #include "../common/hash_map_includer.h" #include <algorithm> #include <boost/lexical_cast.hpp> #include <boost/dynamic_bitset.hpp> #include <boost/format.hpp> #include <boost/optional/optional.hpp> #include <boost/pool/pool_alloc.hpp> #include "rawclonepairdata.h" #include "shapedfragmentcalculator.h" #include "metricmain.h" #include "../common/datastructureonfile.h" #include "../threadqueue/threadqueue.h" #pragma pack(push, 1) struct id_and_stop { boost::uint64_t id; bool stop; public: id_and_stop(const id_and_stop &right) : id(right.id), stop(right.stop) { } id_and_stop() : id(0), stop(false) { } id_and_stop(boost::uint64_t id_, bool stop_) : id(id_), stop(stop_) { } }; #pragma pack(pop) class TransformerMain { private: std:: string outputFile; std:: string inputFile; bool optionVerbose; bool optionRecalculateTks; int shapingLevel; // =1 easy shaper, =2 soft shaper, =3 hard shaper onfile::Array<id_and_stop> cloneIDTransformTable; ThreadFunction threadFunction; public: static int do_removingNonmaximalClonePairs(const std:: string &input, const std:: string &output, bool verbose) { NonmaximalPairRemover remover; rawclonepair::RawClonePairFileTransformer trans; if (! trans.filterFileByFile(output, input, &remover)) { std:: cerr << "error: " << trans.getErrorMessage() << std:: endl; return 1; } if (verbose) { std:: cerr << "> count of clone pairs removed by non-maximal clone pair filter: " << remover.getCountOfRemovedClonePairs() << std:: endl; } return 0; } static int do_id_transformation(const std:: string &input, const std:: string &output, bool verbose) { TransformerMain obj; obj.inputFile = input; obj.outputFile = output; obj.optionVerbose = verbose; obj.optionRecalculateTks = false; obj.shapingLevel = 0; { std:: string temp_file = ::make_temp_file_on_the_same_directory(obj.outputFile, "ccfxshaper1", ".tmp"); if (! 
obj.cloneIDTransformTable.create(temp_file, true)) { std:: cerr << "error: can't create a temporary file (5)" << std:: endl; return 1; } } { std:: string tempFileForShapedFragments = ::make_temp_file_on_the_same_directory(output, "ccfxshaper2", ".tmp"); DumShaper shaper(&obj); { rawclonepair::RawClonePairFileTransformer trans; if (! trans.filterFileByFile(tempFileForShapedFragments, input, &shaper)) { std:: cerr << "error: " << trans.getErrorMessage() << std:: endl; return 1; } } { IDTransformer idtransformer(&obj); rawclonepair::RawClonePairFileTransformer trans; if (! trans.filterFileByFile(output, tempFileForShapedFragments, &idtransformer)) { std:: cerr << "error: " << trans.getErrorMessage() << std:: endl; return 1; } } if (obj.optionVerbose) { std:: cerr << "> count of clone pairs removed by block shaper: " << shaper.getCountOfRemovedClonePairs() << std:: endl; } ::remove(tempFileForShapedFragments.c_str()); } std:: string temp_file = obj.cloneIDTransformTable.getFilePath(); obj.cloneIDTransformTable.close(); ::remove(temp_file.c_str()); return 0; } static int do_shaper(const std:: string &input, const std:: string &output, int shaping_level, bool recalculate_tks, bool verbose) { TransformerMain obj; obj.inputFile = input; obj.outputFile = output; obj.optionVerbose = verbose; obj.optionRecalculateTks = recalculate_tks; obj.shapingLevel = shaping_level; if (obj.shapingLevel >= 2) { std:: string temp_file = ::make_temp_file_on_the_same_directory(obj.outputFile, "ccfxshaper1", ".tmp"); if (! 
obj.cloneIDTransformTable.create(temp_file, true)) { std:: cerr << "error: can't create a temporary file (6)" << std:: endl; return 1; } } int r = obj.do_shaper(); if (r != 0) { return r; } std:: string temp_file = obj.cloneIDTransformTable.getFilePath(); obj.cloneIDTransformTable.close(); ::remove(temp_file.c_str()); return r; } static int do_majoritarianShaper(const std:: string &input, const std:: string &output, bool verbose, size_t maxTrimming) { MajoritarianCalculator calculator; calculator.setMaxTrim(maxTrimming, maxTrimming); //calculator.setAppVersionChecker(APPVERSION[0], APPVERSION[1]); calculator.calc(input); Trimmer remover; remover.attachTrimmerTable(&calculator.refTrimmerTable()); rawclonepair::RawClonePairFileTransformer trans; if (! trans.filterFileByFile(output, input, &remover)) { std:: cerr << "error: " << trans.getErrorMessage() << std:: endl; return 1; } if (verbose) { std:: cerr << "> count of clone pairs removed by majoritarian shaper: " << remover.getCountOfRemovedClonePairs() << std:: endl; } return 0; } TransformerMain() : optionVerbose(false), optionRecalculateTks(false), shapingLevel(2) { } int main(const std::vector<std::string> &argv) { assert(argv.size() >= 2); if (argv.size() <= 2 || argv[2] == "-h" || argv[2] == "--help") { std::cout << "Usage: ccfx T in.ccfxd OPTIONS -o out.ccfxd COMMAND" "\n" " Transforms clone data." "\n" "Command" "\n" " -s 2: applies soft shaper." "\n" " -s 3: applies hard shaper." "\n" "Options" "\n" //" -n dir: specify directory where preprocessed files are created." "\n" " --threads=number: max working threads (0)." "\n" ; return 0; } int shaping_level = 0; for (size_t i = 2; i < argv.size(); ++i) { std:: string argi = argv[i]; if (boost::starts_with(argi, "-")) { if (argi == "-o") { if (! 
(i + 1 < argv.size())) { std:: cerr << "error: option -o requires an argument" << std:: endl; return 1; } outputFile = argv[i + 1]; ++i; } else if (argi == "-v") { optionVerbose = true; } else if (argi == "-s") { if (shaping_level != 0) { std:: cerr << "error: command -s specified twice" << std:: endl; return 1; } if (! (i + 1 < argv.size())) { std:: cerr << "error: command -n requres an argument" << std:: endl; return 1; } try { int value = boost::lexical_cast<int>(argv[i + 1]); if (! (2 <= value && value <= 3)) { std:: cerr << "error: range error for command -s" << std:: endl; return 1; } shaping_level = value; } catch(boost::bad_lexical_cast &) { std:: cerr << "error: invalid argument is given to command -s" << std:: endl; return 1; } ++i; } //else if (argi == "-n") { // if (i + 1 < argv.size()) { // this->rawReader.addPreprocessFileDirectory(argv[i + 1]); // ++i; // } // else { // std:: cerr << "error: command -s requres an argument" << std:: endl; // return 1; // } //} else { std::pair<int, std::string> r = threadFunction.scanOption(argi, (i + 1 < argv.size()) ? 
argv[i + 1] : std::string()); if (r.first > 0) { i += r.first - 1; } else if (r.first < 0) { std::cerr << "error: " << r.second << std::endl; return 1; } else { std:: cerr << "error: unknown option '" << argi << "'" << std:: endl; return 1; } } } else { if (inputFile.empty()) { inputFile = argi; force_extension(&inputFile, pairBinaryDiploidExtension, ".tmp"); } else { std:: cerr << "error: too many command-line arguments" << std:: endl; return 1; } } } if (inputFile.empty()) { inputFile = "a" + pairBinaryDiploidExtension; } if (outputFile.empty()) { std:: cerr << "error: no output file is given" << std:: endl; return 1; } if (optionVerbose) { std::cerr << "> " << threadFunction.getVerboseMessage() << std::endl; } threadFunction.applyToSystem(); if (shaping_level == 0) { std:: cerr << "error: no command is given" << std:: endl; return 1; } shapingLevel = shaping_level; if (shapingLevel >= 2) { std:: string temp_file = ::make_temp_file_on_the_same_directory(outputFile, "ccfxshaper1", ".tmp"); if (! 
cloneIDTransformTable.create(temp_file, true)) { std:: cerr << "error: can't create a temporary file (7)" << std:: endl; return 1; } } optionRecalculateTks = true; int r = do_shaper(); if (r != 0) { return r; } std:: string temp_file = cloneIDTransformTable.getFilePath(); cloneIDTransformTable.close(); ::remove(temp_file.c_str()); return r; } private: rawclonepair::RawClonePairFileAccessor accessor; std:: string errorMessage; #if defined USE_BOOST_POOL typedef std:: map<rawclonepair::RawFileBeginEnd, std::vector<boost::uint64_t>, std::less<rawclonepair::RawFileBeginEnd>, boost::fast_pool_allocator<std::pair<rawclonepair::RawFileBeginEnd, std::vector<boost::uint64_t> > > > EquivalentTable; #else typedef std:: map<rawclonepair::RawFileBeginEnd, std::vector<boost::uint64_t> > EquivalentTable; #endif class ShaperError : public std:: runtime_error { public: ShaperError() : runtime_error("") { } }; static bool less_wo_reference(const rawclonepair::RawClonePair &a, const rawclonepair::RawClonePair &b) { if (a.left.file < b.left.file) { return true; } else if (a.left.file == b.left.file) { if (a.right.file < b.right.file) { return true; } else if (a.right.file == b.right.file) { if (a.left.begin < b.left.begin) { return true; } else if (a.left.begin == b.left.begin) { if (a.left.end < b.left.end) { return true; } else if (a.left.end == b.left.end) { if (a.right.begin < b.right.begin) { return true; } else if (a.right.begin == b.right.begin) { if (a.right.end < b.right.end) { return true; } else if (a.right.end == b.right.end) { NULL; } } } } } } return false; } static bool equal_wo_reference(const rawclonepair::RawClonePair &a, const rawclonepair::RawClonePair &b) { return a.left.file == b.left.file && a.right.file == b.right.file && a.left.begin == b.left.begin && a.left.end == b.left.end && a.right.begin == b.right.begin && a.right.end == b.right.end; } static void build_id_transfom_table_from_equivs( std::vector<std::pair<boost::uint64_t, boost::uint64_t> > *pIdTrans, 
std::vector<std::vector<boost::uint64_t> > *pEquivs) { std::vector<std::vector<boost::uint64_t> > &equivs = *pEquivs; for (size_t i = 0; i < equivs.size(); ++i) { std::vector<boost::uint64_t> &equiv = equivs[i]; std::sort(equiv.begin(), equiv.end()); std::vector<boost::uint64_t>::iterator endPos = std::unique(equiv.begin(), equiv.end()); equiv.erase(endPos, equiv.end()); } std::vector<size_t> heads; heads.resize(equivs.size(), 0); while (true) { // remove empty equiv { size_t size = 0; size_t j = 0; while (true) { while (j < equivs.size() && equivs[j].empty()) { ++j; } if (j == equivs.size()) { // not found break; // while true } if (j != size) { assert(equivs[size].empty()); assert(! equivs[j].empty()); equivs[size].swap(equivs[j]); std::swap(heads[size], heads[j]); } ++j; ++size; } equivs.resize(size); heads.resize(size); } // find the smallest head elemnt in equivs std::vector<size_t> smallests; bool allDone = true; for (size_t i = 0; i < heads.size(); ++i) { if (heads[i] < equivs[i].size()) { if (smallests.empty()) { smallests.push_back(i); } else { size_t smallest0 = smallests[0]; boost::uint64_t ei = equivs[i][heads[i]]; boost::uint64_t esi = equivs[smallest0][heads[smallest0]]; if (ei < esi) { smallests.clear(); smallests.push_back(i); } else if (ei == esi) { smallests.push_back(i); } } allDone = false; } } if (allDone) { break; // while true } // marge equivs that has same (the smallest head) element if (smallests.size() >= 2) { size_t smallest0 = smallests[0]; for (size_t i = 1; i < smallests.size(); ++i) { size_t smallesti = smallests[i]; //assert(equivs[smallesti][heads[smallesti]] == equivs[smallest0][heads[smallest0]]); equivs[smallest0].insert(equivs[smallest0].end(), equivs[smallesti].begin(), equivs[smallesti].end()); equivs[smallesti].clear(); } std::vector<boost::uint64_t> &eqs = equivs[smallest0]; boost::uint64_t value = eqs[heads[smallest0]]; std::sort(eqs.begin(), eqs.end()); std::vector<boost::uint64_t>::iterator endPos = 
std::unique(eqs.begin(), eqs.end()); equivs[smallest0].resize(endPos - eqs.begin()); size_t j = heads[smallest0]; while (eqs[j] != value) { ++j; assert(j < eqs.size()); } heads[smallest0] = j; } assert(smallests.size() >= 1); heads[smallests[0]] += 1; } std::vector<std::pair<boost::uint64_t, boost::uint64_t> > &idTrans = *pIdTrans; idTrans.clear(); for (std::vector<std::vector<boost::uint64_t> >::const_iterator i = equivs.begin(); i != equivs.end(); ++i) { const std::vector<boost::uint64_t> &equiv = *i; for (size_t j = 0; j < equiv.size(); ++j) { assert(equiv[0] <= equiv[j]); idTrans.push_back(std::pair<boost::uint64_t, boost::uint64_t>(equiv[j], equiv[0])); } } std::sort(idTrans.begin(), idTrans.end()); } friend class DumShaper; class DumShaper : public rawclonepair::RawClonePairFileTransformer::FilterFileByFile { private: TransformerMain &base; long long countOfRemovedClonePairs; public: DumShaper(TransformerMain *pBase) : base(*pBase), countOfRemovedClonePairs(0) { } bool isValidFileID(int fileID) { return true; // will not do filtering by fileID } bool isValidCloneID(boost::uint64_t cloneID) { return true; // will not do filtering by cloneID } void transformPairs(std:: vector<rawclonepair::RawClonePair> *pPairs) { std:: vector<rawclonepair::RawClonePair> &pairs = *pPairs; if (pairs.empty()) { return; } const rawclonepair::RawClonePair &p0 = pairs[0]; boost::int32_t leftFileID = p0.left.file; // determin the smallest clone id for each fragment std::vector<std::pair<boost::uint64_t, boost::uint64_t> > idTrans; { EquivalentTable equivalents; for (size_t i = 0; i < pairs.size(); ++i) { const rawclonepair::RawClonePair &pair = pairs[i]; if (pair.left.end - pair.left.begin > 0) { equivalents[pair.left].push_back(pair.reference); } } std::vector<std::vector<boost::uint64_t> > equivs; equivs.reserve(equivalents.size()); for (EquivalentTable::iterator i = equivalents.begin(); i != equivalents.end(); ++i) { equivs.resize(equivs.size() + 1); equivs.back().swap(i->second); 
} build_id_transfom_table_from_equivs(&idTrans, &equivs); } if (! idTrans.empty()) { const std::pair<boost::uint64_t, boost::uint64_t> &idTransLast = idTrans.back(); if (! (idTransLast.first < base.cloneIDTransformTable.size())) { unsigned long long curSize = base.cloneIDTransformTable.size(); std::vector<id_and_stop> newItems; newItems.reserve(idTransLast.first + 1 - curSize); for (unsigned long long i = curSize; i < idTransLast.first + 1; ++i) { newItems.push_back(id_and_stop(i, true)); } base.cloneIDTransformTable.extend(&newItems[0], newItems.size()); assert(base.cloneIDTransformTable.size() == idTransLast.first + 1); } } for (size_t i = 0; i < idTrans.size(); ++i) { const std::pair<boost::uint64_t, boost::uint64_t> &idTransI = idTrans[i]; boost::uint64_t fromID = idTransI.first; boost::uint64_t toID = idTransI.second; assert(toID <= fromID); id_and_stop r; base.cloneIDTransformTable.get(&r, fromID); assert(r.id <= fromID); if (toID < r.id) { base.cloneIDTransformTable.set(fromID, id_and_stop(toID, false)); base.cloneIDTransformTable.set(r.id, id_and_stop(toID, false)); } else if (toID > r.id) { base.cloneIDTransformTable.set(toID, id_and_stop(r.id, false)); } } std:: sort(pairs.begin(), pairs.end(), less_wo_reference); std:: vector<rawclonepair::RawClonePair>::iterator endPos = std::unique(pairs.begin(), pairs.end(), equal_wo_reference); countOfRemovedClonePairs += pairs.end() - endPos; pairs.resize(endPos - pairs.begin()); } public: long long getCountOfRemovedClonePairs() const { return countOfRemovedClonePairs; } }; friend class Shaper; class Shaper : public rawclonepair::RawClonePairFileTransformer::FilterFileByFile { private: TransformerMain &base; size_t tksValue; long long countOfRemovedClonePairs; PreprocessedFileReader scannotner; ThreadQueue<std::vector<std::pair<boost::uint64_t, boost::uint64_t> > *> *pQueIdTrans; boost::thread *pEaterIdTrans; public: virtual ~Shaper() { join(); delete pQueIdTrans; } Shaper(TransformerMain *pBase) : base(*pBase), 
tksValue(0), countOfRemovedClonePairs(0), pQueIdTrans(NULL), pEaterIdTrans(NULL) { pQueIdTrans = new ThreadQueue<std::vector<std::pair<boost::uint64_t, boost::uint64_t> > *>(10); pEaterIdTrans = new boost::thread(boost::bind(&Shaper::idtrans_reflect_to_base, this, pQueIdTrans)); } public: void join() { if (pEaterIdTrans != NULL) { (*pQueIdTrans).push(NULL); (*pEaterIdTrans).join(); // thread object deletion at the below seems cause error at runtime. why? //delete pEaterIdTrans; } } private: void idtrans_reflect_to_base(ThreadQueue<std::vector<std::pair<boost::uint64_t, boost::uint64_t> > *> *pQueIdTrans) { std::vector<std::pair<boost::uint64_t, boost::uint64_t> > *pIdTrans; while ((pIdTrans = (*pQueIdTrans).pop()) != NULL) { std::vector<std::pair<boost::uint64_t, boost::uint64_t> > &idTrans = *pIdTrans; if (! idTrans.empty()) { const std::pair<boost::uint64_t, boost::uint64_t> &idTransLast = idTrans.back(); if (! (idTransLast.first < base.cloneIDTransformTable.size())) { unsigned long long curSize = base.cloneIDTransformTable.size(); std::vector<id_and_stop> newItems; newItems.reserve(idTransLast.first + 1 - curSize); for (unsigned long long i = curSize; i < idTransLast.first + 1; ++i) { newItems.push_back(id_and_stop(i, true)); } base.cloneIDTransformTable.extend(&newItems[0], newItems.size()); assert(base.cloneIDTransformTable.size() == idTransLast.first + 1); } } for (size_t i = 0; i < idTrans.size(); ++i) { const std::pair<boost::uint64_t, boost::uint64_t> &idTransI = idTrans[i]; boost::uint64_t fromID = idTransI.first; boost::uint64_t toID = idTransI.second; assert(toID <= fromID); id_and_stop r; base.cloneIDTransformTable.get(&r, fromID); assert(r.id <= fromID); if (toID < r.id) { base.cloneIDTransformTable.set(fromID, id_and_stop(toID, false)); base.cloneIDTransformTable.set(r.id, id_and_stop(toID, false)); } else if (toID > r.id) { base.cloneIDTransformTable.set(toID, id_and_stop(r.id, false)); } } delete pIdTrans; } } public: void setRawReader(const 
PreprocessedFileRawReader &rawReader_) { scannotner.setRawReader(rawReader_); } bool isValidFileID(int fileID) { return true; // will not do filtering by fileID } bool isValidCloneID(boost::uint64_t cloneID) { return true; // will not do filtering by cloneID } void transformPairs(std:: vector<rawclonepair::RawClonePair> *pPairs) { assert(2 <= base.shapingLevel && base.shapingLevel <= 3); const std:: vector<rawclonepair::RawClonePair> &pairs = *pPairs; if (pairs.empty()) { return; } const rawclonepair::RawClonePair &p0 = pairs[0]; boost::int32_t leftFileID = p0.left.file; std:: string fileName; size_t fileLength; base.accessor.getFileDescription(leftFileID, &fileName, &fileLength); fileName = INNER2SYS(fileName); std::vector<std::string> p = base.accessor.getOptionValues(PREPROCESSED_FILE_POSTFIX); std::string postfix = (! p.empty()) ? p.back() : ("." + base.accessor.getPreprocessScript() + ".ccfxprep"); std:: vector<ccfx_token_t> seq; if (! getPreprocessedSequenceOfFile(&seq, fileName, postfix, &scannotner, &base.errorMessage)) { throw ShaperError(); } assert(seq.size() == fileLength + 1); shaper::ShapedFragmentsCalculator<ccfx_token_t> shaper; shaper.setParens(scannotner.refParens()); shaper.setPrefixes(scannotner.refPrefixes()); shaper.setSuffixes(scannotner.refSuffixes()); //std:: cout << std:: endl; //for (size_t i = 0; i < pairs.size(); ++i) { // const rawclonepair::RawClonePair &pair = pairs[i]; // std:: cout << pair.left.file << "." << pair.left.begin << "-" << pair.left.end << ", " << pair.right.file << "." << pair.right.begin << "-" << pair.right.end << ", " << pair.reference << std:: endl; //} const int shift_by_first_zero = 1; size_t minimumLength = 0; std::vector<std::string> minLenValue = base.accessor.getOptionValues("b"); if (! 
minLenValue.empty()) { try { minimumLength = boost::lexical_cast<int, std::string>(minLenValue.back()); } catch(boost::bad_lexical_cast &) { // do nothing } } // calc a set of shaped fragments from the left-side of the code fragments of clone pairs std:: vector<rawclonepair::RawFileBeginEnd> shapedLeftFragments; shapedLeftFragments.resize(pairs.size()); #pragma omp parallel for for (int i = 0; i < pairs.size(); ++i) { const rawclonepair::RawClonePair &pair = pairs[i]; if (i > 0 && pair.left == pairs[i - 1].left) { // do nothing } else { rawclonepair::RawFileBeginEnd &f = shapedLeftFragments[i]; f = to_shaped_fragment(pair.left, seq, &shaper); } } // determin the smallest clone id for each shaped fragment std::vector<std::pair<boost::uint64_t, boost::uint64_t> > *pIdTrans = new std::vector<std::pair<boost::uint64_t, boost::uint64_t> >(); std::vector<std::pair<boost::uint64_t, boost::uint64_t> > &idTrans = *pIdTrans; { EquivalentTable equivalents; for (size_t i = 0; i < pairs.size(); ++i) { const rawclonepair::RawClonePair &pair = pairs[i]; if (i > 0 && pair.left == pairs[i - 1].left) { shapedLeftFragments[i] = shapedLeftFragments[i - 1]; } const rawclonepair::RawFileBeginEnd &f = shapedLeftFragments[i]; if (f.end - f.begin > 0) { std::vector<boost::uint64_t> &e = equivalents[f]; if (e.empty() || e.back() != pair.reference) { e.push_back(pair.reference); } } } std::vector<std::vector<boost::uint64_t> > equivs; equivs.reserve(equivalents.size()); for (EquivalentTable::iterator i = equivalents.begin(); i != equivalents.end(); ++i) { equivs.resize(equivs.size() + 1); equivs.back().swap(i->second); } build_id_transfom_table_from_equivs(&idTrans, &equivs); } std:: vector<rawclonepair::RawClonePair> shapedPairs; { // make shaped pair, that is, determin left and right code fragments and add the clone-set id of the shortest original code fragment shapedPairs.reserve(pairs.size()); for (size_t i = 0; i < pairs.size(); ++i) { const rawclonepair::RawFileBeginEnd &f = 
shapedLeftFragments[i]; if (f.end - f.begin > 0) { const rawclonepair::RawClonePair &pair = pairs[i]; shapedPairs.resize(shapedPairs.size() + 1); rawclonepair::RawClonePair &p = shapedPairs.back(); p.left = f; p.right = pair.right; p.right.begin += f.begin - pair.left.begin; p.right.end -= pair.left.end - f.end; { std::vector<std::pair<boost::uint64_t, boost::uint64_t> >::const_iterator chk = std::lower_bound(idTrans.begin(), idTrans.end(), std::pair<boost::uint64_t, boost::uint64_t>(pair.reference, 0)); assert(chk != idTrans.end()); assert(chk->first == pair.reference); } p.reference = pair.reference; } else { ++countOfRemovedClonePairs; } } } (*pQueIdTrans).push(pIdTrans); std:: sort(shapedPairs.begin(), shapedPairs.end(), less_wo_reference); std:: vector<rawclonepair::RawClonePair>::iterator endPos = std::unique(shapedPairs.begin(), shapedPairs.end(), equal_wo_reference); countOfRemovedClonePairs += shapedPairs.end() - endPos; shapedPairs.erase(endPos, shapedPairs.end()); (*pPairs).swap(shapedPairs); } void filterOptions(std::vector<std::pair<std::string/* name */, std::string/* value */> > *pOptions) { std::vector<std::pair<std::string/* name */, std::string/* value */> > &table = *pOptions; for (size_t i = 0; i < table.size(); ++i) { const std::string &name = table[i].first; std::string &value = table[i].second; if (name == "s") { value = (boost::format("%d") % base.shapingLevel).str(); } else if (name == "t") { try { tksValue = boost::lexical_cast<int, std::string>(value); } catch(boost::bad_lexical_cast &) { // do nothing } } } } public: long long getCountOfRemovedClonePairs() const { return countOfRemovedClonePairs; } private: rawclonepair::RawFileBeginEnd to_shaped_fragment(const rawclonepair::RawFileBeginEnd &leftCode, const std:: vector<ccfx_token_t> &seq, shaper::ShapedFragmentsCalculator<ccfx_token_t> *pShaper) { const int shift_by_first_zero = 1; size_t minimumLength = 0; std::vector<std::string> minLenValue = base.accessor.getOptionValues("b"); if (! 
minLenValue.empty()) { try { minimumLength = boost::lexical_cast<int, std::string>(minLenValue.back()); } catch(boost::bad_lexical_cast &) { // do nothing } } shaper::ShapedFragmentsCalculator<ccfx_token_t> &shaper = *pShaper; rawclonepair::RawFileBeginEnd shapedLeft = leftCode; shapedLeft.end = shapedLeft.begin; // make it zero length // extract shaped code fragments from the left code fragment std:: vector<shaper::ShapedFragmentPosition> fragmentsLargerThanThreshold; { std:: vector<shaper::ShapedFragmentPosition> fragments; assert(leftCode.end + shift_by_first_zero <= seq.size()); shaper.calc(&fragments, seq, leftCode.begin + shift_by_first_zero, leftCode.end + shift_by_first_zero, base.shapingLevel == 2 ? shaper::HAT_FRAGMENT : shaper::CAP_FRAGMENT); fragmentsLargerThanThreshold.reserve(fragments.size()); for (size_t j = 0; j < fragments.size(); ++j) { const shaper::ShapedFragmentPosition &p = fragments[j]; if (p.end - p.begin >= minimumLength) { fragmentsLargerThanThreshold.push_back(p); } } } // merge the shaped code fragments included the left code fragment, into a fragment if (! 
fragmentsLargerThanThreshold.empty()) { const shaper::ShapedFragmentPosition &p0 = fragmentsLargerThanThreshold[0]; size_t minBegin = p0.begin; size_t maxEnd = p0.end; for (size_t j = 1; j < fragmentsLargerThanThreshold.size(); ++j) { const shaper::ShapedFragmentPosition &p = fragmentsLargerThanThreshold[j]; if (p.begin < minBegin) { minBegin = p.begin; } if (p.end > maxEnd) { maxEnd = p.end; } } assert(shift_by_first_zero <= minBegin && minBegin <= maxEnd); shapedLeft.begin = minBegin - shift_by_first_zero; shapedLeft.end = maxEnd - shift_by_first_zero; assert(shapedLeft.file == leftCode.file); assert(leftCode.begin <= shapedLeft.begin && shapedLeft.end <= leftCode.end); } if (base.optionRecalculateTks && tksValue >= 1) { size_t tks = metrics::calcTKS(seq, shapedLeft.begin + shift_by_first_zero, shapedLeft.end + shift_by_first_zero); if (tks < tksValue) { shapedLeft.end = shapedLeft.begin; // make it zero length } } return shapedLeft; } }; friend class IDTransformer; class IDTransformer : public rawclonepair::RawClonePairFileTransformer::FilterFileByFile { private: TransformerMain &base; public: IDTransformer(TransformerMain *pBase) : base(*pBase) { } bool isValidFileID(int fileID) { return true; // will not do filtering by fileID } bool isValidCloneID(boost::uint64_t cloneID) { return true; // will not do filtering by cloneID } void transformPairs(std:: vector<rawclonepair::RawClonePair> *pPairs) { std:: vector<rawclonepair::RawClonePair> &pairs = *pPairs; HASH_MAP<boost::uint64_t, boost::uint64_t> idTransCache; for (size_t i = 0; i < pairs.size(); ++i) { rawclonepair::RawClonePair &pair = pairs[i]; HASH_MAP<boost::uint64_t, boost::uint64_t>::iterator j = idTransCache.find(pair.reference); if (j == idTransCache.end()) { if (pair.reference < base.cloneIDTransformTable.size()) { boost::uint64_t stopID = getStopID(pair.reference); idTransCache[pair.reference] = stopID; pair.reference = stopID; } else { idTransCache[pair.reference] = pair.reference; //pair.reference 
= pair.reference; } } else { pair.reference = j->second; } } } private: boost::uint64_t getStopID(boost::uint64_t id) { id_and_stop r; base.cloneIDTransformTable.get(&r, id); assert(r.id <= id); if (r.stop || r.id == id) { return r.id; } else { boost::uint64_t stopID = getStopID(r.id); base.cloneIDTransformTable.set(id, id_and_stop(stopID, true)); return stopID; } } }; class NonmaximalPairRemover : public rawclonepair::RawClonePairFileTransformer::FilterFileByFile { private: long long countOfRemovedClonePairs; public: NonmaximalPairRemover() : countOfRemovedClonePairs(0) { } public: long long getCountOfRemovedClonePairs() const { return countOfRemovedClonePairs; } public: bool isValidFileID(int fileID) { return true; // will not do filtering by fileID } bool isValidCloneID(boost::uint64_t cloneID) { return true; // will not do filtering by cloneID } void transformPairs(std:: vector<rawclonepair::RawClonePair> *pPairs) { std:: vector<rawclonepair::RawClonePair> &pairs = *pPairs; // must be sorted boost::dynamic_bitset<> nonmaximal; nonmaximal.resize(pairs.size(), false); size_t i = 0; while (i < pairs.size()) { const rawclonepair::RawClonePair &pairI = pairs[i]; size_t j = i + 1; while (j < pairs.size() && pairs[j].right.file == pairI.right.file) { assert(pairs[j].left.file == pairI.left.file); ++j; } { size_t begin = i; size_t end = j; for (size_t k = begin; k < end; ++k) { if (nonmaximal.test(k)) { continue; // k } const rawclonepair::RawClonePair &pairK = pairs[k]; for (size_t m = k + 1; m < end; ++m) { if (nonmaximal.test(m)) { continue; // m } const rawclonepair::RawClonePair &pairM = pairs[m]; if (pairM.left.end - pairM.left.begin < pairK.left.end - pairK.left.begin) { if (pairK.left.begin <= pairM.left.begin && pairM.left.end <= pairK.left.end && pairK.right.begin <= pairM.right.begin && pairM.right.end <= pairK.right.end) { nonmaximal.set(m, true); } } else { if (pairM.left.begin <= pairK.left.begin && pairK.left.end <= pairM.left.end && pairM.right.begin <= 
pairK.right.begin && pairK.right.end <= pairM.right.end) { nonmaximal.set(k, true); } } } } } i = j; } std:: vector<rawclonepair::RawClonePair> temp; temp.reserve(nonmaximal.size() - nonmaximal.count()); for (i = 0; i < pairs.size(); ++i) { if (! nonmaximal.test(i)) { temp.push_back(pairs[i]); } } pairs.swap(temp); countOfRemovedClonePairs += nonmaximal.count(); } }; int do_shaper() { if (optionVerbose) { switch (shapingLevel) { case 2: std:: cerr << "> applying soft block shaper" << std:: endl; break; case 3: std:: cerr << "> applying hard block shaper" << std:: endl; break; default: assert(false); break; } } if (! accessor.open(inputFile, rawclonepair::RawClonePairFileAccessor::FILEDATA)) { std:: cerr << "error: " << accessor.getErrorMessage() << std:: endl; return 1; } boost::int32_t curShapingLevel = -1; std::vector<std::string> sl = accessor.getOptionValues("s"); if (! sl.empty()) { curShapingLevel = boost::lexical_cast<int, std::string>(sl.back()); } if (shapingLevel < curShapingLevel) { std:: cerr << "error: wrong level for block shaper" << std:: endl; return 1; } std::vector<std::string> prepDirs = accessor.getOptionValues("n"); for (std::vector<std::string>::iterator pi = prepDirs.begin(); pi != prepDirs.end(); ++pi) { *pi = INNER2SYS(*pi); } PreprocessedFileRawReader rawReader; rawReader.setPreprocessFileDirectories(prepDirs); std:: string tempFileForShapedFragments = ::make_temp_file_on_the_same_directory(outputFile, "ccfxshaper2", ".tmp"); boost::int64_t removedPairCount = 0; Shaper shaper(this); // debug, moved out from the below block { shaper.setRawReader(rawReader); try { rawclonepair::RawClonePairFileTransformer trans; if (! 
trans.filterFileByFile(tempFileForShapedFragments, inputFile, &shaper)) { std:: cerr << "error: " << trans.getErrorMessage() << std:: endl; return 1; } } catch (ShaperError &) { std:: cerr << "error: " << errorMessage << std:: endl; return 1; } removedPairCount = shaper.getCountOfRemovedClonePairs(); shaper.join(); // this thread uses the data generated by the sub-thread that has been forked by the "shaper" } { IDTransformer idtransformer(this); rawclonepair::RawClonePairFileTransformer trans; if (! trans.filterFileByFile(outputFile, tempFileForShapedFragments, &idtransformer)) { std:: cerr << "error: " << trans.getErrorMessage() << std:: endl; return 1; } } if (optionVerbose) { std:: cerr << "> count of clone pairs removed by block shaper: " << removedPairCount << std:: endl; } ::remove(tempFileForShapedFragments.c_str()); return 0; } struct TrimDown { public: boost::uint64_t targetID; std::pair<size_t, size_t> trimming; public: TrimDown() : targetID(0), trimming(0, 0) { } TrimDown(const TrimDown &right) : targetID(right.targetID), trimming(right.trimming) { } TrimDown(boost::uint64_t targetID_, size_t headTrimming, size_t tailTrimming) : targetID(targetID_), trimming(headTrimming, tailTrimming) { } public: inline size_t length() const { return trimming.first + trimming.second; } bool operator<(const TrimDown &right) const { if (targetID < right.targetID) { return true; } else if (targetID == right.targetID) { if (trimming < right.trimming) { return true; } else if (trimming == right.trimming) { } } return false; } bool operator==(const TrimDown &right) const { return targetID == right.targetID && trimming == right.trimming; } bool isTruelyIncludedBy(const TrimDown &right) const { return trimming.first <= right.trimming.first && trimming.second <= right.trimming.second && trimming != right.trimming; } }; class MajoritarianCalculator { private: HASH_MAP<boost::uint64_t, TrimDown> trimmerTable; std::pair<size_t, size_t> trimmingMaxes; public: 
MajoritarianCalculator() : trimmerTable(), trimmingMaxes(0, 0) { } public: void setMaxTrim(size_t headTrimmingMax, size_t tailTrimmingMax) { trimmingMaxes.first = headTrimmingMax; trimmingMaxes.second = tailTrimmingMax; } const HASH_MAP<boost::uint64_t, TrimDown> &refTrimmerTable() const { return trimmerTable; } void calc(const std::string &input) { trimmerTable.clear(); HASH_MAP<boost::uint64_t, std::vector<TrimDown> > trimDownTable; rawclonepair::RawClonePairFileAccessor accessor; accessor.open(input, rawclonepair::RawClonePairFileAccessor::FILEDATA | rawclonepair::RawClonePairFileAccessor::CLONEDATA); std::vector<int> fileIDs; accessor.getFiles(&fileIDs); if (fileIDs.size() > 0) { size_t fiPrefetched = 0; int fileIDPrefetched = fileIDs[fiPrefetched]; std::vector<rawclonepair::RawClonePair> clonePairsPrefetched; accessor.getRawClonePairsOfFile(fileIDPrefetched, &clonePairsPrefetched); for (size_t fi = 0; fi < fileIDs.size(); ++fi) { int fileID = fileIDs[fi]; assert(fi == fiPrefetched); std::vector<rawclonepair::RawClonePair> clonePairs; clonePairs.swap(clonePairsPrefetched); #pragma omp parallel sections { #pragma omp section { accumTrimDownTable(clonePairs, &trimDownTable); } #pragma omp section { if (++fiPrefetched < fileIDs.size()) { fileIDPrefetched = fileIDs[fiPrefetched]; accessor.getRawClonePairsOfFile(fileIDPrefetched, &clonePairsPrefetched); } } } // end #pragma omp sections } } for (HASH_MAP<boost::uint64_t, std::vector<TrimDown> >::iterator i = trimDownTable.begin(); i != trimDownTable.end(); ++i) { boost::uint64_t src = i->first; std::vector<TrimDown> &ts = i->second; //for (size_t j = 0; j < ts.size(); ++j) { // const TrimDown &t = ts[j]; // std::cout << (boost::format("targetid = %d, head = %d, tail = %d") % (int)t.targetID % (int)t.trimming.first % (int) t.trimming.second) << std::endl; //} if (ts.size() == 1) { TrimDown composed = ts[0]; HASH_MAP<boost::uint64_t, std::vector<TrimDown> >::const_iterator j; while ((j = 
trimDownTable.find(composed.targetID)) != trimDownTable.end() && j->second.size() == 1) { const TrimDown t = j->second[0]; composed.targetID = t.targetID; composed.trimming.first += t.trimming.first; composed.trimming.second += t.trimming.second; } ts[0] = composed; } } for (HASH_MAP<boost::uint64_t, std::vector<TrimDown> >::const_iterator i = trimDownTable.begin(); i != trimDownTable.end(); ++i) { boost::uint64_t src = i->first; const std::vector<TrimDown> &ts = i->second; if (ts.size() == 1) { trimmerTable[src] = ts[0]; } } } private: void accumTrimDownTable(const std::vector<rawclonepair::RawClonePair> &pairs, HASH_MAP<boost::uint64_t, std::vector<TrimDown> > *pTrimDownTable) { HASH_MAP<boost::uint64_t, std::vector<TrimDown> > &trimDownTable = *pTrimDownTable; for (size_t i = 0; i < pairs.size(); ++i) { const rawclonepair::RawClonePair &pairI = pairs[i]; for (size_t j = i + 1; j < pairs.size(); ++j) { const rawclonepair::RawClonePair &pairJ = pairs[j]; if (pairI.left == pairJ.left) { // do nothing } else if (pairI.left.begin <= pairJ.left.begin && pairJ.left.end <= pairI.left.end && pairJ.reference != pairI.reference) { assert(pairJ.left.file == pairI.left.file); size_t ht = pairJ.left.begin - pairI.left.begin; size_t tt = pairI.left.end - pairJ.left.end; if (ht < trimmingMaxes.first && tt < trimmingMaxes.second) { std::vector<TrimDown> &trimDowns = trimDownTable[pairI.reference]; add_and_remove(&trimDowns, TrimDown(pairJ.reference, ht, tt)); } } else if (pairJ.left.begin <= pairI.left.begin && pairI.left.end <= pairJ.left.end && pairJ.reference != pairI.reference) { assert(pairJ.left.file == pairI.left.file); size_t ht = pairI.left.begin - pairJ.left.begin; size_t tt = pairJ.left.end - pairI.left.end; if (ht < trimmingMaxes.first && tt < trimmingMaxes.second) { std::vector<TrimDown> &trimDowns = trimDownTable[pairJ.reference]; add_and_remove(&trimDowns, TrimDown(pairI.reference, ht, tt)); } } } } } static void add_and_remove(std::vector<TrimDown> *pTrimDowns, 
const TrimDown &newOne) { std::vector<TrimDown> &trimDowns = *pTrimDowns; bool newOneReplaceSomething = false; for (std::vector<TrimDown>::const_iterator i = trimDowns.begin(); i != trimDowns.end(); ++i) { if (newOne == *i || newOne.isTruelyIncludedBy(*i)) { return; // newOne is included by one of (*pTrimDowns), so do not add newOne to the set. } if ((*i).isTruelyIncludedBy(newOne)) { newOneReplaceSomething = true; } } trimDowns.push_back(newOne); if (newOneReplaceSomething) { std::vector<TrimDown> r; for (std::vector<TrimDown>::const_iterator i = trimDowns.begin(); i != trimDowns.end(); ++i) { if (! (*i).isTruelyIncludedBy(newOne)) { r.push_back(*i); } } trimDowns.swap(r); } } }; class Trimmer : public rawclonepair::RawClonePairFileTransformer::FilterFileByFile { private: long long countOfRemovedClonePairs; const HASH_MAP<boost::uint64_t, TrimDown> *pTrimmerTable; public: Trimmer() : countOfRemovedClonePairs(0) { } public: long long getCountOfRemovedClonePairs() const { return countOfRemovedClonePairs; } void attachTrimmerTable(const HASH_MAP<boost::uint64_t, TrimDown> *pTrimmerTable_) { pTrimmerTable = pTrimmerTable_; } public: bool isValidFileID(int fileID) { return true; // will not do filtering by fileID } bool isValidCloneID(boost::uint64_t cloneID) { return true; // will not do filtering by cloneID } void transformPairs(std:: vector<rawclonepair::RawClonePair> *pPairs) { std:: vector<rawclonepair::RawClonePair> &pairs = *pPairs; // must be sorted const HASH_MAP<boost::uint64_t, TrimDown> &trimmerTable = *pTrimmerTable; #pragma omp parallel for for (int i = 0; i < pairs.size(); ++i) { rawclonepair::RawClonePair &pair = pairs[i]; HASH_MAP<boost::uint64_t, TrimDown>::const_iterator j = trimmerTable.find(pair.reference); if (j != trimmerTable.end()) { const TrimDown &td = j->second; assert(pair.left.end - pair.left.begin >= td.length()); assert(pair.right.end - pair.right.begin >= td.length()); pair.left.begin += td.trimming.first; pair.left.end -= 
td.trimming.second; pair.right.begin += td.trimming.first; pair.right.end -= td.trimming.second; pair.reference = td.targetID; } } size_t preSize = pairs.size(); std::sort(pairs.begin(), pairs.end()); std:: vector<rawclonepair::RawClonePair>::iterator endi = std::unique(pairs.begin(), pairs.end()); pairs.resize(std::distance(pairs.begin(), endi)); size_t postSize = pairs.size(); countOfRemovedClonePairs += preSize - postSize; } }; }; #endif // TRANSFORMERMAIN_H
GB_binop__gt_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__gt_int32) // A.*B function (eWiseMult): GB (_AemultB_08__gt_int32) // A.*B function (eWiseMult): GB (_AemultB_02__gt_int32) // A.*B function (eWiseMult): GB (_AemultB_04__gt_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_int32) // A*D function (colscale): GB (_AxD__gt_int32) // D*A function (rowscale): GB (_DxB__gt_int32) // C+=B function (dense accum): GB (_Cdense_accumB__gt_int32) // C+=b function (dense accum): GB (_Cdense_accumb__gt_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_int32) // C=scalar+B GB (_bind1st__gt_int32) // C=scalar+B' GB (_bind1st_tran__gt_int32) // C=A+scalar GB (_bind2nd__gt_int32) // C=A'+scalar GB (_bind2nd_tran__gt_int32) // C type: bool // A type: int32_t // A pattern? 0 // B type: int32_t // B pattern? 
0 // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x > y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GT || GxB_NO_INT32 || GxB_NO_GT_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__gt_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__gt_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__gt_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__gt_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; 
// NOTE(review): machine-generated GraphBLAS FactoryKernel code for the
// GrB_GT_INT32 binary operator: z = (x > y) with x,y int32_t and z bool.
// Each kernel casts the matrices' value arrays to their concrete types and
// then #includes a shared template that performs the actual work.
// Presumably produced by the GB_binop code generator -- do not hand-edit
// the template contract; regenerate instead (TODO confirm generator source).

// (tail of the preceding colscale kernel, C = A*D; its head lies before this chunk)
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__gt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // C holds the comparator's boolean output
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__gt_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // for eWiseUnion, alpha/beta substitute for entries present in only
    // one of A or B; they are only read when is_eWiseUnion is true
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__gt_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__gt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__gt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__gt_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__gt_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb (GBB handles Bb == NULL)
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x > bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__gt_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab (GBB handles Ab == NULL)
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij > y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (x > aij) ;                       \
}

GrB_Info GB (_bind1st_tran__gt_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template inclusion
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = (aij > y) ;                       \
}

GrB_Info GB (_bind2nd_tran__gt_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ===== file boundary: subopt.c (ViennaRNA Package) ===== */
/*
 * suboptimal folding - Stefan Wuchty, Walter Fontana & Ivo Hofacker
 *
 * Vienna RNA package
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <ctype.h>
#include <string.h>
#include <math.h>

#include "ViennaRNA/fold.h"
#include "ViennaRNA/constraints/hard.h"
#include "ViennaRNA/constraints/soft.h"
#include "ViennaRNA/utils/basic.h"
#include "ViennaRNA/utils/strings.h"
#include "ViennaRNA/params/default.h"
#include "ViennaRNA/fold_vars.h"
#include "ViennaRNA/datastructures/lists.h"
#include "ViennaRNA/eval.h"
#include "ViennaRNA/params/basic.h"
#include "ViennaRNA/loops/all.h"
#include "ViennaRNA/cofold.h"
#include "ViennaRNA/gquad.h"
#include "ViennaRNA/alphabet.h"
#include "ViennaRNA/subopt.h"

/* constraint-closure helpers are textually included (.inc), not linked */
#include "ViennaRNA/loops/external_hc.inc"
#include "ViennaRNA/loops/hairpin_hc.inc"
#include "ViennaRNA/loops/internal_hc.inc"
#include "ViennaRNA/loops/multibranch_hc.inc"
#include "ViennaRNA/loops/external_sc.inc"
#include "ViennaRNA/loops/hairpin_sc.inc"
#include "ViennaRNA/loops/internal_sc.inc"
#include "ViennaRNA/loops/multibranch_sc.inc"

/* hack */
#include "ViennaRNA/color_output.inc"

#ifdef _OPENMP
#include <omp.h>
#endif

#ifdef __GNUC__
# define INLINE inline
#else
# define INLINE
#endif

/* pre-C99-style boolean constants used throughout this file */
#define true 1
#define false 0

/*
 * Bundle of prepared hard- (hc) and soft- (sc) constraint evaluators and
 * their closure data, set up once per call in init_constraint_helpers()
 * and threaded through every scan_* routine.
 */
typedef struct {
  struct hc_ext_def_dat hc_dat_ext;
  vrna_callback_hc_evaluate *hc_eval_ext;
  struct hc_hp_def_dat hc_dat_hp;
  vrna_callback_hc_evaluate *hc_eval_hp;
  struct hc_int_def_dat hc_dat_int;
  eval_hc *hc_eval_int;
  struct hc_mb_def_dat hc_dat_mb;
  vrna_callback_hc_evaluate *hc_eval_mb;
  struct sc_f5_dat sc_dat_ext;
  struct sc_hp_dat sc_dat_hp;
  struct sc_int_dat sc_dat_int;
  struct sc_mb_dat sc_dat_mb;
} constraint_helpers;

/**
 * @brief Sequence interval stack element used in subopt.c
 */
typedef struct INTERVAL {
  int i;          /* interval start (1-based) */
  int j;          /* interval end */
  int array_flag; /* which DP array to backtrack in -- see scan_interval() */
} INTERVAL;

/* one partial structure on the backtracking stack */
typedef struct {
  char *structure;    /* partial dot-bracket string */
  LIST *Intervals;    /* intervals still to be backtracked */
  int partial_energy; /* energy of the already-fixed elements (dcal/mol) */
  int is_duplex;
  /* int best_energy; */ /* best attainable energy */
} STATE;

/* per-invocation working set of vrna_subopt_cb() */
typedef struct {
  LIST *Intervals;
  LIST *Stack;
  int nopush;
} subopt_env;

/* closure data for the old (pre-callback) subopt interface */
struct old_subopt_dat {
  unsigned long max_sol;               /* current capacity of SolutionList */
  unsigned long n_sol;                 /* number of solutions stored */
  vrna_subopt_solution_t *SolutionList;
  FILE *fp;
  int cp;                              /* cut point for cofold output */
};

/*
 * #################################
 * # GLOBAL VARIABLES              #
 * #################################
 */
PUBLIC int subopt_sorted = 0;           /* output sorted by energy */
PUBLIC int density_of_states[MAXDOS + 1];
PUBLIC double print_energy = 9999;      /* printing threshold for use with logML */

/*
 * #################################
 * # PRIVATE VARIABLES             #
 * #################################
 */

/* some backward compatibility stuff */
PRIVATE int backward_compat = 0;
PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL;

#ifdef _OPENMP
/* each OpenMP thread keeps its own backward-compat state */
#pragma omp threadprivate(backward_compat_compound, backward_compat)
#endif

/*
 * #################################
 * # PRIVATE FUNCTION DECLARATIONS #
 * #################################
 */
PRIVATE void init_constraint_helpers(vrna_fold_compound_t *fc,
                                     constraint_helpers *d);


PRIVATE void free_constraint_helpers(constraint_helpers *d);


#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY
PRIVATE vrna_subopt_solution_t *wrap_subopt(char *seq,
                                            char *structure,
                                            vrna_param_t *parameters,
                                            int delta,
                                            int is_constrained,
                                            int is_circular,
                                            FILE *fp);


#endif

PRIVATE void make_pair(int i,
                       int j,
                       STATE *state);


/* mark a gquadruplex in the resulting dot-bracket structure */
PRIVATE void make_gquad(int i,
                        int L,
                        int l[3],
                        STATE *state);


PRIVATE INTERVAL *make_interval(int i,
                                int j,
                                int ml);


PRIVATE STATE *make_state(LIST *Intervals,
                          char *structure,
                          int partial_energy,
                          int is_duplex,
                          int length);


PRIVATE STATE *copy_state(STATE *state);


PRIVATE void print_state(STATE *state);


PRIVATE void UNUSED print_stack(LIST *list);


PRIVATE LIST *make_list(void);


PRIVATE void push(LIST *list,
                  void *data);


PRIVATE void *pop(LIST *list);


PRIVATE int best_attainable_energy(vrna_fold_compound_t *fc,
                                   STATE *state);


PRIVATE void scan_interval(vrna_fold_compound_t *fc,
                           int i,
int j, int array_flag, int threshold, STATE *state, subopt_env *env, constraint_helpers *constraints_dat); PRIVATE INLINE void scan_mb(vrna_fold_compound_t *fc, int i, int j, int threshold, STATE *state, subopt_env *env, constraint_helpers *constraints_dat); PRIVATE INLINE void scan_m1(vrna_fold_compound_t *fc, int i, int j, int array_flag, int threshold, STATE *state, subopt_env *env, constraint_helpers *constraints_dat); PRIVATE INLINE void scan_pair(vrna_fold_compound_t *fc, int i, int j, int threshold, STATE *state, subopt_env *env, constraint_helpers *constraints_dat); PRIVATE INLINE void scan_ext(vrna_fold_compound_t *fc, int i, int j, int threshold, STATE *state, subopt_env *env, constraint_helpers *constraints_dat); PRIVATE INLINE void scan_circular(vrna_fold_compound_t *fc, int i, int j, int threshold, STATE *state, subopt_env *env, constraint_helpers *constraints_dat); PRIVATE INLINE void scan_fms5(vrna_fold_compound_t *fc, unsigned int i, unsigned int strand, int threshold, STATE *state, subopt_env *env, constraint_helpers *constraints_dat); PRIVATE INLINE void scan_fms3(vrna_fold_compound_t *fc, unsigned int i, unsigned int strand, int threshold, STATE *state, subopt_env *env, constraint_helpers *constraints_dat); PRIVATE INLINE void scan_gquad(vrna_fold_compound_t *fc, int i, int j, int threshold, STATE *state, subopt_env *env, constraint_helpers *constraints_dat); PRIVATE void free_interval_node(INTERVAL *node); PRIVATE void free_state_node(STATE *node); PRIVATE void push_back(LIST *Stack, STATE *state); PRIVATE char * get_structure(STATE *state); PRIVATE int compare(const void *a, const void *b); PRIVATE int compare_en(const void *a, const void *b); PRIVATE void make_output(vrna_subopt_solution_t *SL, int cp, FILE *fp); PRIVATE void repeat(vrna_fold_compound_t *fc, int i, int j, STATE *state, int part_energy, int temp_energy, int best_energy, int threshold, subopt_env *env, constraint_helpers *constraints_dat); PRIVATE void 
repeat_gquad(vrna_fold_compound_t *fc, int i, int j, STATE *state, int part_energy, int temp_energy, int best_energy, int threshold, subopt_env *env, constraint_helpers *constraints_dat); PRIVATE void old_subopt_print(const char *structure, float energy, void *data); PRIVATE void old_subopt_store(const char *structure, float energy, void *data); PRIVATE void old_subopt_store_compressed(const char *structure, float energy, void *data); /* ################################# # BEGIN OF FUNCTION DEFINITIONS # ################################# */ PUBLIC vrna_subopt_solution_t * vrna_subopt(vrna_fold_compound_t *fc, int delta, int sorted, FILE *fp) { struct old_subopt_dat data; vrna_subopt_callback *cb; data.SolutionList = NULL; data.max_sol = 128; data.n_sol = 0; data.fp = fp; data.cp = fc->cutpoint; if (fc) { /* SolutionList stores the suboptimal structures found */ data.SolutionList = (vrna_subopt_solution_t *)vrna_alloc(data.max_sol * sizeof(vrna_subopt_solution_t)); /* end initialize ------------------------------------------------------- */ if (fp) { float min_en; char *SeQ, *energies = NULL; min_en = vrna_mfe(fc, NULL); SeQ = vrna_cut_point_insert(fc->sequence, fc->cutpoint); energies = vrna_strdup_printf(" %6.2f %6.2f", min_en, (float)delta / 100.); print_structure(fp, SeQ, energies); free(SeQ); free(energies); vrna_mx_mfe_free(fc); } cb = old_subopt_store; if (fp) cb = (sorted) ? old_subopt_store_compressed : old_subopt_print; /* call subopt() */ vrna_subopt_cb(fc, delta, cb, (void *)&data); if (sorted) { /* sort structures by energy */ if (data.n_sol > 0) { int (*compare_fun)(const void *a, const void *b); switch (sorted) { case VRNA_SORT_BY_ENERGY_ASC: compare_fun = compare_en; break; default: /* a.k.a. 
VRNA_SORT_BY_ENERGY_LEXICOGRAPHIC_ASC */ compare_fun = compare; break; } qsort(data.SolutionList, data.n_sol - 1, sizeof(vrna_subopt_solution_t), compare_fun); } if (fp) make_output(data.SolutionList, fc->cutpoint, fp); } if (fp) { /* we've printed everything -- free solutions */ vrna_subopt_solution_t *sol; for (sol = data.SolutionList; sol->structure != NULL; sol++) free(sol->structure); free(data.SolutionList); data.SolutionList = NULL; } } return data.SolutionList; } PUBLIC void vrna_subopt_cb(vrna_fold_compound_t *fc, int delta, vrna_subopt_callback *cb, void *data) { subopt_env *env; STATE *state; INTERVAL *interval; unsigned int *so, *ss; int maxlevel, count, partial_energy, old_dangles, logML, dangle_model, length, circular, threshold; double structure_energy, min_en, eprint; char *struc, *structure; float correction; vrna_param_t *P; vrna_md_t *md; int minimal_energy; int Fc; int *f5; constraint_helpers constraints_dat; vrna_fold_compound_prepare(fc, VRNA_OPTION_MFE); length = fc->length; so = fc->strand_order; ss = fc->strand_start; P = fc->params; md = &(P->model_details); /* * do mfe folding to get fill arrays and get ground state energy * in case dangles is neither 0 or 2, set dangles=2 while folding */ circular = md->circ; logML = md->logML; old_dangles = dangle_model = md->dangles; if (md->uniq_ML != 1) /* failsafe mechanism to enforce valid fM1 array */ md->uniq_ML = 1; /* temporarily set dangles to 2 if necessary */ if ((md->dangles != 0) && (md->dangles != 2)) md->dangles = 2; struc = (char *)vrna_alloc(sizeof(char) * (length + 1)); min_en = vrna_mfe(fc, struc); /* restore dangle model */ md->dangles = old_dangles; /* re-evaluate in case we're using logML etc */ min_en = vrna_eval_structure(fc, struc); f5 = fc->matrices->f5; Fc = fc->matrices->Fc; free(struc); eprint = print_energy + min_en; correction = (min_en < 0) ? 
-0.1 : 0.1; /* Initialize ------------------------------------------------------------ */ init_constraint_helpers(fc, &constraints_dat); maxlevel = 0; count = 0; partial_energy = 0; /* Initialize the stack ------------------------------------------------- */ minimal_energy = (circular) ? Fc : f5[length]; threshold = minimal_energy + delta; if (threshold >= INF) { vrna_message_warning("Energy range too high, limiting to reasonable value"); threshold = INF - EMAX; } /* init env data structure */ env = (subopt_env *)vrna_alloc(sizeof(subopt_env)); env->Stack = NULL; env->nopush = true; env->Stack = make_list(); /* anchor */ env->Intervals = make_list(); /* initial state: */ interval = make_interval(1, length, 0); /* interval [1,length,0] */ push(env->Intervals, interval); env->nopush = false; state = make_state(env->Intervals, NULL, partial_energy, 0, length); /* state->best_energy = minimal_energy; */ push(env->Stack, state); env->nopush = false; /* end initialize ------------------------------------------------------- */ while (1) { /* forever, til nothing remains on stack */ maxlevel = (env->Stack->count > maxlevel ? env->Stack->count : maxlevel); if (LST_EMPTY(env->Stack)) { /* * we are done! clean up and quit * fprintf(stderr, "maxlevel: %d\n", maxlevel); */ lst_kill(env->Stack, free_state_node); cb(NULL, 0, data); /* NULL (last time to call callback function */ break; } /* pop the last element ---------------------------------------------- */ state = pop(env->Stack); /* current state to work with */ if (LST_EMPTY(state->Intervals)) { int e; /* state has no intervals left: we got a solution */ count++; structure = get_structure(state); structure_energy = state->partial_energy / 100.; #ifdef CHECK_ENERGY structure_energy = vrna_eval_structure(fc, structure); if (!logML) { if ((double)(state->partial_energy / 100.) 
!= structure_energy) { vrna_message_error("%s %6.2f %6.2f", structure, state->partial_energy / 100., structure_energy); exit(1); } } #endif if (logML || (dangle_model == 1) || (dangle_model == 3)) /* recalc energy */ structure_energy = vrna_eval_structure(fc, structure); e = (int)((structure_energy - min_en) * 10. - correction); /* avoid rounding errors */ if (e > MAXDOS) e = MAXDOS; density_of_states[e]++; if (structure_energy <= eprint) { char *outstruct = vrna_cut_point_insert(structure, (fc->strands > 1) ? ss[so[1]] : -1); cb((const char *)outstruct, structure_energy, data); free(outstruct); } free(structure); } else { /* get (and remove) next interval of state to analyze */ interval = pop(state->Intervals); scan_interval(fc, interval->i, interval->j, interval->array_flag, threshold, state, env, &constraints_dat); free_interval_node(interval); /* free the current interval */ } free_state_node(state); /* free the current state */ } /* end of while (1) */ /* cleanup memory */ free_constraint_helpers(&constraints_dat); free(env); } /* ##################################### # BEGIN OF STATIC HELPER FUNCTIONS # ##################################### */ PRIVATE void init_constraint_helpers(vrna_fold_compound_t *fc, constraint_helpers *d) { /* hard constraints first */ d->hc_eval_ext = prepare_hc_ext_def(fc, &(d->hc_dat_ext)); d->hc_eval_hp = prepare_hc_hp_def(fc, &(d->hc_dat_hp)); d->hc_eval_int = prepare_hc_int_def(fc, &(d->hc_dat_int)); d->hc_eval_mb = prepare_hc_mb_def(fc, &(d->hc_dat_mb)); init_sc_f5(fc, &(d->sc_dat_ext)); init_sc_hp(fc, &(d->sc_dat_hp)); init_sc_int(fc, &(d->sc_dat_int)); init_sc_mb(fc, &(d->sc_dat_mb)); } PRIVATE void free_constraint_helpers(constraint_helpers *d) { /* currently only required for comparative folding soft constraints, but here for consistency reasons */ free_sc_f5(&(d->sc_dat_ext)); free_sc_hp(&(d->sc_dat_hp)); free_sc_int(&(d->sc_dat_int)); free_sc_mb(&(d->sc_dat_mb)); } /* * 
---------------------------------------------------------------------------
 * List routines--------------------------------------------------------------
 *---------------------------------------------------------------------------
 */

/* write the base pair (i,j) into the state's dot-bracket string (1-based) */
PRIVATE void
make_pair(int i,
          int j,
          STATE *state)
{
  state->structure[i - 1] = '(';
  state->structure[j - 1] = ')';
}


/* mark a G-quadruplex of layer size L with linker lengths l[0..2] using '+' */
PRIVATE void
make_gquad(int i,
           int L,
           int l[3],
           STATE *state)
{
  int x;

  for (x = 0; x < L; x++) {
    state->structure[i - 1 + x] = '+';
    state->structure[i - 1 + x + L + l[0]] = '+';
    state->structure[i - 1 + x + 2 * L + l[0] + l[1]] = '+';
    state->structure[i - 1 + x + 3 * L + l[0] + l[1] + l[2]] = '+';
  }
}


/* allocate a new interval list node [i,j] tagged with its DP-array flag */
PRIVATE INTERVAL *
make_interval(int i,
              int j,
              int array_flag)
{
  INTERVAL *interval;

  interval = lst_newnode(sizeof(INTERVAL));
  interval->i = i;
  interval->j = j;
  interval->array_flag = array_flag;
  return interval;
}


PRIVATE void
free_interval_node(INTERVAL *node)
{
  lst_freenode(node);
}


/* release a state, including its structure string and interval list */
PRIVATE void
free_state_node(STATE *node)
{
  free(node->structure);
  if (node->Intervals)
    lst_kill(node->Intervals, lst_freenode);

  lst_freenode(node);
}


/*
 * Create a new state.  Takes ownership of the given Intervals list and
 * structure string; when structure is NULL, an all-'.' string of the given
 * length is allocated instead.
 */
PRIVATE STATE *
make_state(LIST *Intervals,
           char *structure,
           int partial_energy,
           int is_duplex,
           int length)
{
  STATE *state;

  state = lst_newnode(sizeof(STATE));

  if (Intervals)
    state->Intervals = Intervals;
  else
    state->Intervals = lst_init();

  if (structure) {
    state->structure = structure;
  } else {
    int i;
    state->structure = (char *)vrna_alloc(length + 1);
    for (i = 0; i < length; i++)
      state->structure[i] = '.';
  }

  state->partial_energy = partial_energy;

  return state;
}


/* deep copy of a state: interval list and structure string are duplicated */
PRIVATE STATE *
copy_state(STATE *state)
{
  STATE *new_state;
  void *after;
  INTERVAL *new_interval, *next;

  new_state = lst_newnode(sizeof(STATE));
  new_state->Intervals = lst_init();
  new_state->partial_energy = state->partial_energy;
  /* new_state->best_energy = state->best_energy; */

  if (state->Intervals->count) {
    after = LST_HEAD(new_state->Intervals);
    for (next = lst_first(state->Intervals); next; next = lst_next(next)) {
      new_interval = lst_newnode(sizeof(INTERVAL));
      *new_interval = *next;
      lst_insertafter(new_state->Intervals, new_interval, after);
      after = new_interval;
    }
  }

  new_state->structure = strdup(state->structure);
  if (!new_state->structure)
    vrna_message_error("out of memory");

  return new_state;
}


/* debugging aid: dump a state's intervals and partial structure to stdout */
/*@unused @*/
PRIVATE void
print_state(STATE *state)
{
  INTERVAL *next;

  if (state->Intervals->count) {
    printf("%d intervals:\n", state->Intervals->count);
    for (next = lst_first(state->Intervals); next; next = lst_next(next))
      printf("[%d,%d],%d ", next->i, next->j, next->array_flag);
    printf("\n");
  }

  printf("partial structure: %s\n", state->structure);
  printf("\n");
  printf(" partial_energy: %d\n", state->partial_energy);
  /* printf(" best_energy: %d\n", state->best_energy); */
  (void)fflush(stdout);
}


/* debugging aid: dump every state on the stack */
/*@unused @*/
PRIVATE void
print_stack(LIST *list)
{
  void *rec;

  printf("================\n");
  printf("%d states\n", list->count);
  for (rec = lst_first(list); rec; rec = lst_next(rec)) {
    printf("state-----------\n");
    print_state(rec);
  }
  printf("================\n");
}


PRIVATE LIST *
make_list(void)
{
  return lst_init();
}


/* push onto the front of the list (LIFO discipline) */
PRIVATE void
push(LIST *list,
     void *data)
{
  lst_insertafter(list, data, LST_HEAD(list));
}


/*
 * PRIVATE void
 * push_stack(STATE *state) { */
/* keep the stack sorted by energy
 * STATE *after, *next;
 * nopush = false;
 * next = after = LST_HEAD(Stack);
 * while ( next = lst_next(next)) {
 * if ( next->best_energy >= state->best_energy ) break;
 * after = next;
 * }
 * lst_insertafter(Stack, state, after);
 * }
 */

/* pop the front element; caller takes ownership of the returned node */
PRIVATE void *
pop(LIST *list)
{
  void *data;

  data = lst_deletenext(list, LST_HEAD(list));
  return data;
}


/*
 * ---------------------------------------------------------------------------
 * auxiliary routines---------------------------------------------------------
 *---------------------------------------------------------------------------
 */
PRIVATE int
best_attainable_energy(vrna_fold_compound_t *fc,
                       STATE *state)
{
  /* evaluation of best possible energy attainable within remaining
intervals */
  register int sum;
  INTERVAL *next;
  vrna_md_t *md;
  vrna_mx_mfe_t *matrices;
  int *indx;

  md = &(fc->params->model_details);
  matrices = fc->matrices;
  indx = fc->jindx;

  sum = state->partial_energy; /* energy of already found elements */

  /* add the optimal energy of every remaining interval, selected by the
   * DP array its array_flag refers to (see scan_interval()) */
  for (next = lst_first(state->Intervals); next; next = lst_next(next)) {
    if (next->array_flag == 0)
      sum += (md->circ) ? matrices->Fc : matrices->f5[next->j];
    else if (next->array_flag == 1)
      sum += matrices->fML[indx[next->j] + next->i];
    else if (next->array_flag == 2)
      sum += matrices->c[indx[next->j] + next->i];
    else if (next->array_flag == 3)
      sum += matrices->fM1[indx[next->j] + next->i];
    else if (next->array_flag == 4)
      sum += matrices->fms5[next->j][next->i];
    else if (next->array_flag == 5)
      sum += matrices->fms3[next->j][next->i];
    else if (next->array_flag == 6)
      sum += matrices->ggg[indx[next->j] + next->i];
  }

  return sum;
}


/* push a deep copy of the state, leaving the original untouched */
PRIVATE void
push_back(LIST *Stack,
          STATE *state)
{
  push(Stack, copy_state(state));
  return;
}


/* caller owns the returned duplicate of the state's structure string */
PRIVATE char *
get_structure(STATE *state)
{
  char *structure;

  structure = strdup(state->structure);
  return structure;
}


/* qsort comparator: ascending energy, ties broken lexicographically */
PRIVATE int
compare(const void *a,
        const void *b)
{
  if (((vrna_subopt_solution_t *)a)->energy > ((vrna_subopt_solution_t *)b)->energy)
    return 1;

  if (((vrna_subopt_solution_t *)a)->energy < ((vrna_subopt_solution_t *)b)->energy)
    return -1;

  return strcmp(((vrna_subopt_solution_t *)a)->structure,
                ((vrna_subopt_solution_t *)b)->structure);
}


/* qsort comparator: ascending energy only */
PRIVATE int
compare_en(const void *a,
           const void *b)
{
  if (((vrna_subopt_solution_t *)a)->energy > ((vrna_subopt_solution_t *)b)->energy)
    return 1;

  if (((vrna_subopt_solution_t *)a)->energy < ((vrna_subopt_solution_t *)b)->energy)
    return -1;

  return 0;
}


PRIVATE void
make_output(vrna_subopt_solution_t *SL,
            int cp,
            FILE *fp) /* prints stuff */
{
  vrna_subopt_solution_t *sol;

  /* SolutionList is NULL-structure terminated */
  for (sol = SL; sol->structure != NULL; sol++) {
    char *e_string = vrna_strdup_printf(" %6.2f", sol->energy);
    /* structures are stored packed; unpack and re-insert the cut point */
    char *ss = vrna_db_unpack(sol->structure);
    char *s = vrna_cut_point_insert(ss, cp);
    print_structure(fp, s, e_string);
    free(s);
    free(ss);
    free(e_string);
  }
}


/* copy of s with interval [i,j] (flag) pushed and energy e added */
PRIVATE STATE *
derive_new_state(int i,
                 int j,
                 STATE *s,
                 int e,
                 int flag)
{
  STATE *s_new = copy_state(s);
  INTERVAL *ival = make_interval(i, j, flag);

  push(s_new->Intervals, ival);
  s_new->partial_energy += e;
  return s_new;
}


/* derive a new state and push it onto the work stack */
PRIVATE void
fork_state(int i,
           int j,
           STATE *s,
           int e,
           int flag,
           subopt_env *env)
{
  STATE *s_new = derive_new_state(i, j, s, e, flag);

  push(env->Stack, s_new);
  env->nopush = false;
}


/* interior loop: pairs (i,j) and (p,q), remaining interval [p,q] in C */
PRIVATE void
fork_int_state(int i,
               int j,
               int p,
               int q,
               STATE *s,
               int e,
               subopt_env *env)
{
  STATE *s_new = derive_new_state(p, q, s, e, 2);

  make_pair(i, j, s_new);
  make_pair(p, q, s_new);
  push(env->Stack, s_new);
  env->nopush = false;
}


/* push a copy of s with only the pair (i,j) added (e.g. hairpin closing) */
PRIVATE void
fork_state_pair(int i,
                int j,
                STATE *s,
                int e,
                subopt_env *env)
{
  STATE *new_state;

  new_state = copy_state(s);
  make_pair(i, j, new_state);
  new_state->partial_energy += e;
  push(env->Stack, new_state);
  env->nopush = false;
}


/* multiloop decomposition: pair (i,j), split interior at k into two intervals */
PRIVATE void
fork_two_states_pair(int i,
                     int j,
                     int k,
                     STATE *s,
                     int e,
                     int flag1,
                     int flag2,
                     subopt_env *env)
{
  INTERVAL *interval1, *interval2;
  STATE *new_state;

  new_state = copy_state(s);
  interval1 = make_interval(i + 1, k - 1, flag1);
  interval2 = make_interval(k, j - 1, flag2);
  if (k - i < j - k) {
    /* push larger interval first */
    push(new_state->Intervals, interval1);
    push(new_state->Intervals, interval2);
  } else {
    push(new_state->Intervals, interval2);
    push(new_state->Intervals, interval1);
  }

  make_pair(i, j, new_state);
  new_state->partial_energy += e;
  push(env->Stack, new_state);
  env->nopush = false;
}


/* pair (i,j) plus a single remaining interval [k,l] */
PRIVATE void
fork_state_pair_interval(int i,
                         int j,
                         int k,
                         int l,
                         STATE *s,
                         int e,
                         int flag,
                         subopt_env *env)
{
  INTERVAL *interval;
  STATE *new_state;

  new_state = copy_state(s);
  interval = make_interval(k, l, flag);
  push(new_state->Intervals, interval);
  make_pair(i, j, new_state);
  new_state->partial_energy += e;
  push(env->Stack, new_state);
  env->nopush = false;
}


/* multi-strand case: pair (i,j) plus fms5/fms3 intervals (flags 4 and 5) */
PRIVATE void
fork_two_states_pair_ms(int i,
                        int j,
                        int sn1,
                        int sn2,
                        STATE *s,
                        int e,
                        subopt_env *env)
{
  INTERVAL *interval1, *interval2;
  STATE *new_state;

  new_state = copy_state(s);
  interval1 = make_interval(i + 1, sn1, 4);
  interval2 = make_interval(j - 1, sn2, 5);
  push(new_state->Intervals, interval1);
  push(new_state->Intervals, interval2);
  make_pair(i, j, new_state);
  new_state->partial_energy += e;
  push(env->Stack, new_state);
  env->nopush = false;
}


/* two remaining intervals, no new pair; larger interval pushed first */
PRIVATE void
fork_two_states(int i,
                int j,
                int p,
                int q,
                STATE *s,
                int e,
                int flag1,
                int flag2,
                subopt_env *env)
{
  INTERVAL *interval1, *interval2;
  STATE *new_state;

  new_state = copy_state(s);
  interval1 = make_interval(i, j, flag1);
  interval2 = make_interval(p, q, flag2);

  if ((j - i) < (q - p)) {
    push(new_state->Intervals, interval1);
    push(new_state->Intervals, interval2);
  } else {
    push(new_state->Intervals, interval2);
    push(new_state->Intervals, interval1);
  }

  new_state->partial_energy += e;
  push(env->Stack, new_state);
  env->nopush = false;
}


PRIVATE void
scan_interval(vrna_fold_compound_t *fc,
              int i,
              int j,
              int array_flag,
              int threshold,
              STATE *state,
              subopt_env *env,
              constraint_helpers *constraints_dat)
{
  /* real backtrack routine */

  /*
   * array_flag = 0: trace back in f5-array
   * array_flag = 1: trace back in fML-array
   * array_flag = 2: trace back in repeat()
   * array_flag = 3: trace back in fM1-array
   */
  env->nopush = true;

  /* dispatch on the DP array the interval refers to */
  switch (array_flag) {
    case 0:
      scan_ext(fc, i, j, threshold, state, env, constraints_dat);
      break;

    case 1:
      scan_mb(fc, i, j, threshold, state, env, constraints_dat);
    /* fall through */

    case 3:
      scan_m1(fc, i, j, array_flag, threshold, state, env, constraints_dat);
      break;

    case 2:
      scan_pair(fc, i, j, threshold, state, env, constraints_dat);
      return;

    case 4:
      scan_fms5(fc, i, j, threshold, state, env, constraints_dat);
      break;

    case 5:
      scan_fms3(fc, i, j, threshold, state, env, constraints_dat);
      break;

    case 6:
      scan_gquad(fc, i, j, threshold, state, env, constraints_dat);
      return;
  }

  /* nothing was pushed: re-push a copy of the current state */
  if (env->nopush) {
    push_back(env->Stack, state);
    env->nopush = false;
  }
}


PRIVATE INLINE
void scan_mb(vrna_fold_compound_t *fc, int i, int j, int threshold, STATE *state, subopt_env *env, constraint_helpers *constraints_dat) { char *ptype; short *S1, s5, s3; unsigned int *sn, *so; int k, type, dangle_model, element_energy, best_energy, *c, *fML, *ggg, *indx, with_gquad, turn, stopp, k1j; vrna_param_t *P; vrna_md_t *md; struct hc_mb_def_dat *hc_dat; vrna_callback_hc_evaluate *evaluate; struct sc_mb_dat *sc_dat; sc_mb_red_cb *sc_red_stem; sc_mb_red_cb *sc_decomp_ml; STATE *temp_state; sn = fc->strand_number; so = fc->strand_order; indx = fc->jindx; ptype = fc->ptype; S1 = fc->sequence_encoding; P = fc->params; md = &(P->model_details); dangle_model = md->dangles; with_gquad = md->gquad; turn = md->min_loop_size; c = fc->matrices->c; fML = fc->matrices->fML; ggg = fc->matrices->ggg; hc_dat = &(constraints_dat->hc_dat_mb); evaluate = constraints_dat->hc_eval_mb; sc_dat = &(constraints_dat->sc_dat_mb); sc_red_stem = constraints_dat->sc_dat_mb.red_stem; sc_decomp_ml = constraints_dat->sc_dat_mb.decomp_ml; best_energy = best_attainable_energy(fc, state); /* .. 
on remaining intervals */ if ((j < i + turn + 1) && (sn[i] == so[j])) { if (env->nopush) { push_back(env->Stack, state); env->nopush = false; } return; } if ((sn[i - 1] == sn[i]) && (sn[j] == sn[j + 1])) { /*backtrack in FML only if multiloop is possible*/ for (k = i + turn + 1; k <= j - 1 - turn; k++) { /* Multiloop decomposition if i,j contains more than 1 stack */ if ((with_gquad) && (sn[k] == sn[k + 1]) && (fML[indx[k] + i] != INF) && (ggg[indx[j] + k + 1] != INF)) { element_energy = E_MLstem(0, -1, -1, P); if (fML[indx[k] + i] + ggg[indx[j] + k + 1] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(i, k, state, 0, 1); env->nopush = false; repeat_gquad(fc, k + 1, j, temp_state, element_energy, fML[indx[k] + i], best_energy, threshold, env, constraints_dat); free_state_node(temp_state); } } k1j = indx[j] + k + 1; if ((evaluate(i, j, k, k + 1, VRNA_DECOMP_ML_ML_STEM, hc_dat)) && (fML[indx[k] + i] != INF) && (c[k1j] != INF)) { type = vrna_get_ptype(k1j, ptype); switch (dangle_model) { case 0: s5 = s3 = -1; break; default: s5 = (sn[i - 1] == sn[i]) ? S1[k] : -1; s3 = (sn[j] == sn[j + 1]) ? 
S1[j + 1] : -1; break; } element_energy = E_MLstem(type, s5, s3, P); if (sc_decomp_ml) element_energy += sc_decomp_ml(i, j, k, k + 1, sc_dat); if (sc_red_stem) element_energy += sc_red_stem(k + 1, j, k + 1, j, sc_dat); if (fML[indx[k] + i] + c[k1j] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(i, k, state, 0, 1); env->nopush = false; repeat(fc, k + 1, j, temp_state, element_energy, fML[indx[k] + i], best_energy, threshold, env, constraints_dat); free_state_node(temp_state); } } } } stopp = j - 1 - turn; int up = 1; for (k = i; k <= stopp; k++, up++) { k1j = indx[j] + k + 1; /* Multiloop decomposition if i,j contains only 1 stack */ if ((with_gquad) && (ggg[k1j] != INF) && (sn[i] == sn[j])) { element_energy = E_MLstem(0, -1, -1, P) + P->MLbase * up; if (sc_red_stem) element_energy += sc_red_stem(i, j, k + 1, j, sc_dat); if (ggg[k1j] + element_energy + best_energy <= threshold) { repeat_gquad(fc, k + 1, j, state, element_energy, 0, best_energy, threshold, env, constraints_dat); } } if (evaluate(i, j, k + 1, j, VRNA_DECOMP_ML_STEM, hc_dat)) { if (c[k1j] != INF) { type = vrna_get_ptype(k1j, ptype); switch (dangle_model) { case 0: s5 = s3 = -1; break; default: s5 = (sn[k - 1] == sn[k]) ? S1[k] : -1; s3 = (sn[j] == sn[j + 1]) ? 
S1[j + 1] : -1; break; } element_energy = E_MLstem(type, s5, s3, P); element_energy += P->MLbase * up; if (sc_red_stem) element_energy += sc_red_stem(i, j, k + 1, j, sc_dat); if (c[k1j] + element_energy + best_energy <= threshold) { repeat(fc, k + 1, j, state, element_energy, 0, best_energy, threshold, env, constraints_dat); } } } } } PRIVATE INLINE void scan_m1(vrna_fold_compound_t *fc, int i, int j, int array_flag, int threshold, STATE *state, subopt_env *env, constraint_helpers *constraints_dat) { char *ptype; short *S1; unsigned int *sn, *so; int fi, cij, ij, type, dangle_model, element_energy, best_energy, *c, *fML, *fM1, *ggg, length, *indx, circular, with_gquad, turn; vrna_param_t *P; vrna_md_t *md; struct hc_mb_def_dat *hc_dat; vrna_callback_hc_evaluate *evaluate; struct sc_mb_dat *sc_dat; sc_mb_red_cb *sc_red_stem; sc_mb_red_cb *sc_red_ml; length = fc->length; sn = fc->strand_number; so = fc->strand_order; indx = fc->jindx; ptype = fc->ptype; S1 = fc->sequence_encoding; P = fc->params; md = &(P->model_details); dangle_model = md->dangles; circular = md->circ; with_gquad = md->gquad; turn = md->min_loop_size; c = fc->matrices->c; fML = fc->matrices->fML; fM1 = fc->matrices->fM1; ggg = fc->matrices->ggg; hc_dat = &(constraints_dat->hc_dat_mb); evaluate = constraints_dat->hc_eval_mb; sc_dat = &(constraints_dat->sc_dat_mb); sc_red_stem = constraints_dat->sc_dat_mb.red_stem; sc_red_ml = constraints_dat->sc_dat_mb.red_ml; best_energy = best_attainable_energy(fc, state); /* .. 
on remaining intervals */ if ((j < i + turn + 1) && (sn[i] == so[j])) { if (env->nopush) { push_back(env->Stack, state); env->nopush = false; } return; } ij = indx[j] + i; if ((evaluate(i, j, i, j - 1, VRNA_DECOMP_ML_ML, hc_dat)) && (((array_flag == 3) && (fM1[indx[j - 1] + i] != INF)) || (fML[indx[j - 1] + i] != INF))) { element_energy = P->MLbase; if (sc_red_ml) element_energy += sc_red_ml(i, j, i, j - 1, sc_dat); if (array_flag == 3) fi = element_energy + fM1[indx[j - 1] + i]; else fi = element_energy + fML[indx[j - 1] + i]; if (fi + best_energy <= threshold) fork_state(i, j - 1, state, element_energy, array_flag, env); } if (evaluate(i, j, i, j, VRNA_DECOMP_ML_STEM, hc_dat)) { /* i,j may pair */ cij = c[ij]; if (cij != INF) { type = vrna_get_ptype(ij, ptype); switch (dangle_model) { case 0: element_energy = E_MLstem(type, -1, -1, P); break; default: element_energy = E_MLstem(type, (((i > 1) && (sn[i - 1] == sn[i])) || circular) ? S1[i - 1] : -1, (((j < length) && (sn[j] == sn[j + 1])) || circular) ? S1[j + 1] : -1, P); break; } if (sc_red_stem) element_energy += sc_red_stem(i, j, i, j, sc_dat); cij += element_energy; if (cij + best_energy <= threshold) { repeat(fc, i, j, state, element_energy, 0, best_energy, threshold, env, constraints_dat); } } } else if ((with_gquad) && (ggg[ij] != INF)) { element_energy = E_MLstem(0, -1, -1, P); if (sc_red_stem) element_energy += sc_red_stem(i, j, i, j, sc_dat); if (ggg[ij] + element_energy + best_energy <= threshold) { repeat_gquad(fc, i, j, state, element_energy, 0, best_energy, threshold, env, constraints_dat); } } return; } PRIVATE INLINE void scan_pair(vrna_fold_compound_t *fc, int i, int j, int threshold, STATE *state, subopt_env *env, constraint_helpers *constraints_dat) { unsigned int *sn, turn, noLP; int best_energy; sn = fc->strand_number; turn = fc->params->model_details.min_loop_size; noLP = fc->params->model_details.noLP; best_energy = best_attainable_energy(fc, state); /* .. 
on remaining intervals */ if ((j < i + turn + 1) && (sn[i] == sn[j])) { if (env->nopush) { push_back(env->Stack, state); env->nopush = false; } return; } repeat(fc, i, j, state, 0, 0, best_energy, threshold, env, constraints_dat); if (env->nopush) if (!noLP) vrna_message_warning("%d,%d\nOops, no solution in repeat!", i, j); } PRIVATE INLINE void scan_ext(vrna_fold_compound_t *fc, int i, int j, int threshold, STATE *state, subopt_env *env, constraint_helpers *constraints_dat) { char *ptype; short *S1, s5, s3; unsigned int *sn, *so; int k, type, dangle_model, element_energy, best_energy, *f5, *c, *ggg, length, *indx, circular, with_gquad, turn, kj, tmp_en; vrna_param_t *P; vrna_md_t *md; struct hc_ext_def_dat *hc_dat; vrna_callback_hc_evaluate *evaluate; struct sc_f5_dat *sc_dat; sc_f5_cb *sc_red_ext; sc_f5_cb *sc_red_stem; sc_f5_cb *sc_decomp_stem; STATE *temp_state; length = fc->length; sn = fc->strand_number; so = fc->strand_order; indx = fc->jindx; ptype = fc->ptype; S1 = fc->sequence_encoding; P = fc->params; md = &(P->model_details); dangle_model = md->dangles; circular = md->circ; with_gquad = md->gquad; turn = md->min_loop_size; f5 = fc->matrices->f5; c = fc->matrices->c; ggg = fc->matrices->ggg; if (circular) { scan_circular(fc, i, j, threshold, state, env, constraints_dat); return; } hc_dat = &(constraints_dat->hc_dat_ext); evaluate = constraints_dat->hc_eval_ext; sc_dat = &(constraints_dat->sc_dat_ext); sc_red_ext = sc_dat->red_ext5; sc_red_stem = sc_dat->red_stem5; sc_decomp_stem = sc_dat->decomp_stem5; best_energy = best_attainable_energy(fc, state); /* .. 
on remaining intervals */ if (i > 1) vrna_message_error("Error while backtracking!"); if ((j < i + turn + 1) && (sn[i] == so[j])) { /* * minimal structure element * do not forget to add f5[j], since it may contain pseudo energies from soft constraining */ state->partial_energy += f5[j]; if (env->nopush) { push_back(env->Stack, state); env->nopush = false; } return; } if ((evaluate(1, j, 1, j - 1, VRNA_DECOMP_EXT_EXT, hc_dat)) && (f5[j - 1] != INF)) { tmp_en = 0; if (sc_red_ext) tmp_en += sc_red_ext(j, 1, j - 1, sc_dat); if (f5[j - 1] + tmp_en + best_energy <= threshold) /* no basepair, nibbling of 3'-end */ fork_state(i, j - 1, state, tmp_en, 0, env); } for (k = j - turn - 1; k > 1; k--) { kj = indx[j] + k; if ((with_gquad) && (sn[k - 1] == sn[j]) && (f5[k - 1] != INF) && (ggg[kj] != INF)) { element_energy = 0; if (sc_decomp_stem) element_energy += sc_decomp_stem(j, k - 1, k, sc_dat); if (f5[k - 1] + ggg[kj] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(1, k - 1, state, 0, 0); env->nopush = false; /* backtrace the quadruplex */ repeat_gquad(fc, k, j, temp_state, element_energy, f5[k - 1], best_energy, threshold, env, constraints_dat); free_state_node(temp_state); } } if ((evaluate(1, j, k - 1, k, VRNA_DECOMP_EXT_EXT_STEM, hc_dat)) && (f5[k - 1] != INF) && (c[kj] != INF)) { type = vrna_get_ptype(kj, ptype); /* k and j pair */ switch (dangle_model) { case 0: s5 = s3 = -1; break; default: s5 = (sn[k - 1] == sn[k]) ? S1[k - 1] : -1; s3 = ((j < length) && (sn[j] == sn[j + 1])) ? 
S1[j + 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc_decomp_stem) element_energy += sc_decomp_stem(j, k - 1, k, sc_dat); if (f5[k - 1] + c[kj] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(1, k - 1, state, 0, 0); env->nopush = false; repeat(fc, k, j, temp_state, element_energy, f5[k - 1], best_energy, threshold, env, constraints_dat); free_state_node(temp_state); } } } kj = indx[j] + 1; if ((with_gquad) && (sn[1] == sn[j]) && (ggg[kj] != INF)) { element_energy = 0; if (sc_red_stem) element_energy += sc_red_stem(j, 1, j, sc_dat); if (ggg[kj] + element_energy + best_energy <= threshold) { /* backtrace the quadruplex */ repeat_gquad(fc, 1, j, state, element_energy, 0, best_energy, threshold, env, constraints_dat); } } if ((evaluate(1, j, 1, j, VRNA_DECOMP_EXT_STEM, hc_dat)) && (c[kj] != INF)) { type = vrna_get_ptype(kj, ptype); s5 = -1; switch (dangle_model) { case 0: s3 = -1; break; default: s3 = (j < length) && (sn[j] == sn[j + 1]) ? 
S1[j + 1] : -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc_red_stem) element_energy += sc_red_stem(j, 1, j, sc_dat); if (c[kj] + element_energy + best_energy <= threshold) { repeat(fc, 1, j, state, element_energy, 0, best_energy, threshold, env, constraints_dat); } } } PRIVATE INLINE void scan_circular(vrna_fold_compound_t *fc, int i, int j, int threshold, STATE *state, subopt_env *env, constraint_helpers *constraints_dat) { unsigned char *hard_constraints; char *ptype; short *S1; int k, l, p, q, tmp_en, best_energy, *c, *fML, *fM1, Fc, FcH, FcI, FcM, *fM2, length, *indx, *rtype, turn, kl, type, tmpE, u1, qmin, u2, type_2, tmpE2; vrna_param_t *P; vrna_md_t *md; vrna_hc_t *hc; vrna_sc_t *sc; struct hc_ext_def_dat *hc_dat_ext; struct hc_int_def_dat *hc_dat_int; struct hc_mb_def_dat *hc_dat_mb; vrna_callback_hc_evaluate *evaluate_ext; eval_hc *evaluate_int; vrna_callback_hc_evaluate *evaluate_mb; struct sc_int_dat *sc_dat_int; struct sc_mb_dat *sc_dat_mb; sc_int_cb *sc_int_pair_ext; sc_mb_red_cb *sc_mb_decomp_ml; STATE *new_state; INTERVAL *new_interval; length = fc->length; indx = fc->jindx; ptype = fc->ptype; S1 = fc->sequence_encoding; P = fc->params; md = &(P->model_details); rtype = &(md->rtype[0]); turn = md->min_loop_size; c = fc->matrices->c; fML = fc->matrices->fML; fM1 = fc->matrices->fM1; Fc = fc->matrices->Fc; FcH = fc->matrices->FcH; FcI = fc->matrices->FcI; FcM = fc->matrices->FcM; fM2 = fc->matrices->fM2; hc = fc->hc; hard_constraints = hc->mx; sc = fc->sc; hc_dat_ext = &(constraints_dat->hc_dat_ext); hc_dat_int = &(constraints_dat->hc_dat_int); hc_dat_mb = &(constraints_dat->hc_dat_mb); evaluate_ext = constraints_dat->hc_eval_ext; evaluate_int = constraints_dat->hc_eval_int; evaluate_mb = constraints_dat->hc_eval_mb; sc_dat_int = &(constraints_dat->sc_dat_int); sc_dat_mb = &(constraints_dat->sc_dat_mb); sc_int_pair_ext = constraints_dat->sc_dat_int.pair_ext; sc_mb_decomp_ml = constraints_dat->sc_dat_mb.decomp_ml; best_energy = 
best_attainable_energy(fc, state); /* .. on remaining intervals */ if (i > 1) vrna_message_error("Error while backtracking!"); if (j < i + turn + 1) { /* minimal structure element */ state->partial_energy += Fc; if (env->nopush) { push_back(env->Stack, state); env->nopush = false; } return; } /* * if we've done everything right, we will never reach this point more than once * right after the initilization of the stack with ([1,n], empty, 0) * lets check, if we can have an open chain without breaking the threshold * this is an ugly work-arround cause in case of an open chain we do not have to * backtrack anything further... */ if (evaluate_ext(1, length, 1, length, VRNA_DECOMP_EXT_UP, hc_dat_ext)) { tmp_en = 0; if (sc) { if (sc->energy_up) tmp_en += sc->energy_up[1][length]; if (sc->f) tmp_en += sc->f(1, j, 1, j, VRNA_DECOMP_EXT_UP, sc->data); } if (tmp_en <= threshold) { new_state = derive_new_state(1, 2, state, 0, 0); new_state->partial_energy = 0; push(env->Stack, new_state); env->nopush = false; } } /* * ok, lets check if we can do an exterior hairpin without breaking the threshold * best energy should be 0 if we are here */ if (FcH + best_energy <= threshold) { /* * lets search for all exterior hairpin cases, that fit into our threshold barrier * we use index k,l to avoid confusion with i,j index of our state... * if we reach here, i should be 1 and j should be n respectively */ for (k = i; k < j; k++) { if (hc->up_hp[1] < k) break; for (l = j; l >= k + turn + 1; l--) { kl = indx[l] + k; if (c[kl] != INF) { tmpE = vrna_E_hp_loop(fc, l, k); if (c[kl] + tmpE + best_energy <= threshold) /* * what we really have to do is something like this, isn't it? * we have to create a new state, with interval [k,l], then we * add our loop energy as initial energy of this state and put * the state onto the stack R... for further refinement... 
* we also denote this new interval to be scanned in C */ fork_state(k, l, state, tmpE, 2, env); } } } } /* now lets see, if we can do an exterior interior loop without breaking the threshold */ if (FcI + best_energy <= threshold) { /* now we search for our exterior interior loop possibilities */ for (k = i; k < j; k++) { for (l = j; l >= k + turn + 1; l--) { kl = indx[l] + k; /* just confusing these indices ;-) */ if ((hard_constraints[length * k + l] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) && (c[kl] != INF)) { type = rtype[vrna_get_ptype(kl, ptype)]; for (p = l + 1; p < j; p++) { u1 = p - l - 1; if (u1 + k - 1 > MAXLOOP) break; if (hc->up_int[l + 1] < u1) break; qmin = u1 + k - 1 + j - MAXLOOP; if (qmin < p + turn + 1) qmin = p + turn + 1; for (q = j; q >= qmin; q--) { if (hc->up_int[q + 1] < (j - q + k - 1)) break; if ((evaluate_int(k, l, p, q, hc_dat_int)) && (c[indx[q] + p] != INF)) { type_2 = rtype[vrna_get_ptype(indx[q] + p, ptype)]; u2 = k - 1 + j - q; if (u1 + u2 > MAXLOOP) continue; tmpE = E_IntLoop(u1, u2, type, type_2, S1[l + 1], S1[k - 1], S1[p - 1], S1[q + 1], P); if (sc_int_pair_ext) tmpE += sc_int_pair_ext(k, l, p, q, sc_dat_int); if (c[kl] + c[indx[q] + p] + tmpE + best_energy <= threshold) /* * ok, similar to the hairpin stuff, we add new states onto the stack R * but in contrast to the hairpin decomposition, we have to add two new * intervals, enclosed by k,l and p,q respectively and we also have to * add the partial energy, that comes from the exterior interior loop */ fork_two_states(k, l, p, q, state, tmpE, 2, 2, env); } } } } } } } /* and last but not least, we have a look, if we can do an exterior multiloop within the energy threshold */ if (FcM <= threshold) { /* * this decomposition will be somehow more complicated...so lets see what we do here... 
* first we want to find out which split inidices we can use without exceeding the threshold */ for (k = turn + 1; k < j - 2 * turn; k++) { if ((evaluate_mb(1, j, k, k + 1, VRNA_DECOMP_ML_ML_ML, hc_dat_mb)) && (fML[indx[k] + 1] != INF) && (fM2[k + 1] != INF)) { tmpE2 = fML[indx[k] + 1] + fM2[k + 1] + P->MLclosing; if (sc_mb_decomp_ml) { tmpE2 += sc_mb_decomp_ml(1, j, k, k + 1, sc_dat_mb); } if (tmpE2 + best_energy <= threshold) { /* * grmpfh, we have found a possible split index k so we have to split fM2 and fML now * lets do it first in fM2 anyway */ for (l = k + turn + 2; l < j - turn - 1; l++) { if ((evaluate_mb(k + 1, j, l, l + 1, VRNA_DECOMP_ML_ML_ML, hc_dat_mb)) && (fM1[indx[l] + k + 1] != INF) && (fM1[indx[j] + l + 1] != INF)) { tmpE2 = fM1[indx[l] + k + 1] + fM1[indx[j] + l + 1]; if (sc_mb_decomp_ml) tmpE2 += sc_mb_decomp_ml(k + 1, j, l, l + 1, sc_dat_mb); if (tmpE2 + fML[indx[k] + 1] + P->MLclosing <= threshold) { /* * we've (hopefully) found a valid decomposition of fM2 and therefor we have all * three intervals for our new state to be pushed on stack R */ new_state = copy_state(state); /* first interval leads for search in fML array */ new_interval = make_interval(1, k, 1); push(new_state->Intervals, new_interval); env->nopush = false; /* next, we have the first interval that has to be traced in fM1 */ new_interval = make_interval(k + 1, l, 3); push(new_state->Intervals, new_interval); env->nopush = false; /* and the last of our three intervals is also one to be traced within fM1 array... */ new_interval = make_interval(l + 1, j, 3); push(new_state->Intervals, new_interval); env->nopush = false; /* mmh, we add the energy for closing the multiloop now... */ new_state->partial_energy += P->MLclosing; /* next we push our state onto the R stack */ push(env->Stack, new_state); env->nopush = false; } } /* else we search further... */ } /* ok, we have to decompose fML now... 
*/ } } } } } PRIVATE INLINE void scan_fms5(vrna_fold_compound_t *fc, unsigned int i, unsigned int strand, int threshold, STATE *state, subopt_env *env, constraint_helpers *constraints_dat) { char *ptype; short *S1, s5, s3; unsigned int k, type, *sn, *se, end; int dangle_model, element_energy, best_energy, *c, *ggg, **fms5, *indx, with_gquad, turn; vrna_param_t *P; vrna_md_t *md; struct hc_ext_def_dat *hc_dat; vrna_callback_hc_evaluate *evaluate; struct sc_f5_dat *sc_dat; sc_ext_red_cb *sc_red_ext; sc_ext_red_cb *sc_red_stem; sc_ext_red_cb *sc_decomp; STATE *temp_state; sn = fc->strand_number; se = fc->strand_end; indx = fc->jindx; ptype = fc->ptype; S1 = fc->sequence_encoding; P = fc->params; md = &(P->model_details); dangle_model = md->dangles; with_gquad = md->gquad; turn = md->min_loop_size; c = fc->matrices->c; ggg = fc->matrices->ggg; fms5 = fc->matrices->fms5; hc_dat = &(constraints_dat->hc_dat_ext); evaluate = constraints_dat->hc_eval_ext; sc_dat = &(constraints_dat->sc_dat_ext); sc_red_ext = sc_dat->red_ext; sc_red_stem = sc_dat->red_stem; sc_decomp = sc_dat->decomp; best_energy = best_attainable_energy(fc, state); /* .. on remaining intervals */ end = se[strand]; /* no more pairs if too close to strand boundary ? */ if (i + turn + 1 > se[strand]) { state->partial_energy += fms5[strand][i]; if (env->nopush) { push_back(env->Stack, state); env->nopush = false; } return; } /* find split in fms5 */ if ((evaluate(i, end, i + 1, end, VRNA_DECOMP_EXT_EXT, hc_dat)) && (fms5[strand][i] != INF)) { element_energy = 0; if (sc_red_ext) element_energy += sc_red_ext(i, end, i + 1, end, sc_dat); if (fms5[strand][i + 1] + element_energy + best_energy <= threshold) /* no basepair, nibbling of 5'-end */ fork_state(i + 1, strand, state, element_energy, 4, env); } if (evaluate(i, end, i, end, VRNA_DECOMP_EXT_STEM, hc_dat)) { type = vrna_get_ptype(indx[end] + i, ptype); switch (dangle_model) { case 2: s5 = ((i > 1) && (sn[i - 1] == sn[i])) ? 
S1[i - 1] : -1; s3 = -1; break; default: s5 = -1; s3 = -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc_red_stem) element_energy += sc_red_stem(i, end, i, end, sc_dat); if (c[indx[end] + i] + element_energy + best_energy <= threshold) { repeat(fc, i, end, state, element_energy, 0, best_energy, threshold, env, constraints_dat); } } if ((with_gquad) && (ggg[indx[end] + i] != INF)) { element_energy = 0; if (sc_red_stem) element_energy += sc_red_stem(i, end, i, end, sc_dat); if (ggg[indx[end] + i] + element_energy + best_energy <= threshold) { repeat_gquad(fc, i, end, state, element_energy, 0, best_energy, threshold, env, constraints_dat); } } for (k = i + turn + 1; k < end; k++) { if ((with_gquad) && (fms5[strand][k + 1] != INF) && (ggg[indx[k] + i] != INF)) { element_energy = 0; if (sc_decomp) element_energy += sc_decomp(i, end, k, k + 1, sc_dat); if (sc_red_stem) element_energy += sc_red_stem(i, k, i, k, sc_dat); if (fms5[strand][k + 1] + ggg[indx[k] + i] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(k + 1, strand, state, 0, 4); env->nopush = false; repeat_gquad(fc, i, k, temp_state, element_energy, fms5[strand][k + 1], best_energy, threshold, env, constraints_dat); free_state_node(temp_state); } } if (evaluate(i, end, k, k + 1, VRNA_DECOMP_EXT_STEM_EXT, hc_dat)) { type = vrna_get_ptype(indx[k] + i, ptype); switch (dangle_model) { case 2: s5 = ((i > 1) && (sn[i - 1] == sn[i])) ? S1[i - 1] : -1; s3 = (sn[k] == sn[k + 1]) ? 
S1[k + 1] : -1; break; default: s5 = -1; s3 = -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc_decomp) element_energy += sc_decomp(i, end, k, k + 1, sc_dat); if (sc_red_stem) element_energy += sc_red_stem(i, k, i, k, sc_dat); if (fms5[strand][k + 1] + c[indx[k] + i] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(k + 1, strand, state, 0, 4); env->nopush = false; repeat(fc, i, k, temp_state, element_energy, fms5[strand][k + 1], best_energy, threshold, env, constraints_dat); free_state_node(temp_state); } } } } PRIVATE INLINE void scan_fms3(vrna_fold_compound_t *fc, unsigned int i, unsigned int strand, int threshold, STATE *state, subopt_env *env, constraint_helpers *constraints_dat) { char *ptype; short *S1, s5, s3; unsigned int *sn, *ss, start, k, type; int dangle_model, element_energy, best_energy, *c, *ggg, **fms3, length, *indx, with_gquad, turn; vrna_param_t *P; vrna_md_t *md; struct hc_ext_def_dat *hc_dat; vrna_callback_hc_evaluate *evaluate; struct sc_f5_dat *sc_dat; sc_ext_red_cb *sc_red_ext; sc_ext_red_cb *sc_red_stem; sc_ext_red_cb *sc_decomp; STATE *temp_state; length = fc->length; sn = fc->strand_number; ss = fc->strand_start; indx = fc->jindx; ptype = fc->ptype; S1 = fc->sequence_encoding; P = fc->params; md = &(P->model_details); dangle_model = md->dangles; with_gquad = md->gquad; turn = md->min_loop_size; c = fc->matrices->c; ggg = fc->matrices->ggg; fms3 = fc->matrices->fms3; start = ss[strand]; hc_dat = &(constraints_dat->hc_dat_ext); evaluate = constraints_dat->hc_eval_ext; sc_dat = &(constraints_dat->sc_dat_ext); sc_red_ext = sc_dat->red_ext; sc_red_stem = sc_dat->red_stem; sc_decomp = sc_dat->decomp; best_energy = best_attainable_energy(fc, state); /* .. on remaining intervals */ /* no more pairs if too close to strand boundary ? 
*/ if (i < ss[strand] + turn + 1) { state->partial_energy += fms3[strand][i]; if (env->nopush) { push_back(env->Stack, state); env->nopush = false; } return; } if ((evaluate(start, i, start, i - 1, VRNA_DECOMP_EXT_EXT, hc_dat)) && (fms3[strand][i - 1] != INF)) { element_energy = 0; if (sc_red_ext) element_energy += sc_red_ext(start, i, start, i - 1, sc_dat); if (fms3[strand][i - 1] + element_energy + best_energy <= threshold) /* no basepair, nibbling of 5'-end */ fork_state(i - 1, strand, state, element_energy, 5, env); } if (evaluate(start, i, start, i, VRNA_DECOMP_EXT_STEM, hc_dat)) { type = vrna_get_ptype(indx[i] + start, ptype); switch (dangle_model) { case 2: s5 = -1; s3 = ((i < length) && (sn[i] == sn[i + 1])) ? S1[i + 1] : -1; break; default: s5 = s3 = -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc_red_stem) element_energy += sc_red_stem(start, i, start, i, sc_dat); if (c[indx[i] + start] + element_energy + best_energy <= threshold) { repeat(fc, start, i, state, element_energy, 0, best_energy, threshold, env, constraints_dat); } } if ((with_gquad) && (ggg[indx[i] + start] != INF)) { element_energy = 0; if (sc_red_stem) element_energy += sc_red_stem(start, i, start, i, sc_dat); if (ggg[indx[i] + start] + element_energy + best_energy <= threshold) { repeat_gquad(fc, start, i, state, element_energy, 0, best_energy, threshold, env, constraints_dat); } } for (k = start; k < i - turn; k++) { if ((with_gquad) && (fms3[strand][k] != INF) && (ggg[indx[i] + k + 1] != INF)) { element_energy = 0; if (sc_decomp) element_energy += sc_decomp(start, i, k, k + 1, sc_dat); if (sc_red_stem) element_energy += sc_red_stem(k + 1, i, k + 1, i, sc_dat); if (fms3[strand][k] + ggg[indx[i] + k + 1] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(k, strand, state, 0, 5); env->nopush = false; repeat_gquad(fc, k + 1, i, temp_state, element_energy, fms3[strand][k], best_energy, threshold, env, constraints_dat); 
free_state_node(temp_state); } } if (evaluate(start, i, k, k + 1, VRNA_DECOMP_EXT_EXT_STEM, hc_dat)) { type = vrna_get_ptype(indx[i] + k + 1, ptype); switch (dangle_model) { case 2: s5 = (sn[k] == sn[k + 1]) ? S1[k] : -1; s3 = ((i < length) && (sn[i] == sn[i + 1])) ? S1[i + 1] : -1; break; default: s5 = s3 = -1; break; } element_energy = vrna_E_ext_stem(type, s5, s3, P); if (sc_decomp) element_energy += sc_decomp(start, i, k, k + 1, sc_dat); if (sc_red_stem) element_energy += sc_red_stem(k + 1, i, k + 1, i, sc_dat); if (fms3[strand][k] + c[indx[i] + k + 1] + element_energy + best_energy <= threshold) { temp_state = derive_new_state(k, strand, state, 0, 5); env->nopush = false; repeat(fc, k + 1, i, temp_state, element_energy, fms3[strand][k], best_energy, threshold, env, constraints_dat); free_state_node(temp_state); } } } } PRIVATE INLINE void scan_gquad(vrna_fold_compound_t *fc, int i, int j, int threshold, STATE *state, subopt_env *env, constraint_helpers *constraints_dat) { int best_energy; best_energy = best_attainable_energy(fc, state); /* .. 
on remaining intervals */ /* we have a gquad */ repeat_gquad(fc, i, j, state, 0, 0, best_energy, threshold, env, constraints_dat); if (env->nopush) vrna_message_warning("%d,%d\nOops, no solution in gquad-repeat!", i, j); } PRIVATE void repeat_gquad(vrna_fold_compound_t *fc, int i, int j, STATE *state, int part_energy, int temp_energy, int best_energy, int threshold, subopt_env *env, constraint_helpers *constraints_dat) { short *S1; unsigned int *sn; int *ggg, *indx, element_energy, cnt, *L, *l, num_gquads; vrna_param_t *P; indx = fc->jindx; sn = fc->strand_number; ggg = fc->matrices->ggg; S1 = fc->sequence_encoding; P = fc->params; /* find all gquads that fit into the energy range and the interval [i,j] */ STATE *new_state; best_energy += part_energy; /* energy of current structural element */ best_energy += temp_energy; /* energy from unpushed interval */ if (sn[i] == sn[j]) { element_energy = ggg[indx[j] + i]; if ((element_energy != INF) && (element_energy + best_energy <= threshold)) { /* find out how many gquads we might expect in the interval [i,j] */ num_gquads = get_gquad_count(S1, i, j); num_gquads++; L = (int *)vrna_alloc(sizeof(int) * num_gquads); l = (int *)vrna_alloc(sizeof(int) * num_gquads * 3); L[0] = -1; get_gquad_pattern_exhaustive(S1, i, j, P, L, l, threshold - best_energy); for (cnt = 0; L[cnt] != -1; cnt++) { new_state = copy_state(state); make_gquad(i, L[cnt], &(l[3 * cnt]), new_state); new_state->partial_energy += part_energy; new_state->partial_energy += element_energy; /* new_state->best_energy = * hairpin[unpaired] + element_energy + best_energy; */ push(env->Stack, new_state); env->nopush = false; } free(L); free(l); } } best_energy -= part_energy; best_energy -= temp_energy; return; } PRIVATE void repeat(vrna_fold_compound_t *fc, int i, int j, STATE *state, int part_energy, int temp_energy, int best_energy, int threshold, subopt_env *env, constraint_helpers *constraints_dat) { /* * routine to find stacks, bulges, internal loops and 
multiloops * within interval closed by basepair i,j */ char *ptype; short *S1; unsigned int n, *sn, *se, nick; int ij, k, p, q, energy, new, mm, no_close, type, type_2, element_energy, *c, *fML, *fM1, *ggg, **fms5, **fms3, rt, *indx, *rtype, noGUclosure, noLP, with_gquad, dangle_model, turn, minq, eee, aux_eee, cnt, *ps, *qs, *en, tmp_en; vrna_param_t *P; vrna_md_t *md; vrna_hc_t *hc; struct hc_int_def_dat *hc_dat_int; struct hc_ext_def_dat *hc_dat_ext; struct hc_mb_def_dat *hc_dat_mb; eval_hc *evaluate_int; vrna_callback_hc_evaluate *evaluate_ext; vrna_callback_hc_evaluate *evaluate_mb; struct sc_int_dat *sc_dat_int; struct sc_mb_dat *sc_dat_mb; sc_int_cb *sc_int_pair; sc_mb_pair_cb *sc_mb_pair; sc_mb_red_cb *sc_mb_decomp_ml; STATE *new_state; n = fc->length; S1 = fc->sequence_encoding; ptype = fc->ptype; indx = fc->jindx; sn = fc->strand_number; se = fc->strand_end; P = fc->params; md = &(P->model_details); rtype = &(md->rtype[0]); noGUclosure = md->noGUclosure; noLP = md->noLP; with_gquad = md->gquad; dangle_model = md->dangles; turn = md->min_loop_size; c = fc->matrices->c; fML = fc->matrices->fML; fM1 = fc->matrices->fM1; ggg = fc->matrices->ggg; fms5 = fc->matrices->fms5; fms3 = fc->matrices->fms3; hc = fc->hc; hc_dat_ext = &(constraints_dat->hc_dat_ext); hc_dat_int = &(constraints_dat->hc_dat_int); hc_dat_mb = &(constraints_dat->hc_dat_mb); evaluate_ext = constraints_dat->hc_eval_ext; evaluate_int = constraints_dat->hc_eval_int; evaluate_mb = constraints_dat->hc_eval_mb; sc_dat_int = &(constraints_dat->sc_dat_int); sc_dat_mb = &(constraints_dat->sc_dat_mb); sc_int_pair = constraints_dat->sc_dat_int.pair; sc_mb_pair = constraints_dat->sc_dat_mb.pair; sc_mb_decomp_ml = constraints_dat->sc_dat_mb.decomp_ml; ij = indx[j] + i; type = vrna_get_ptype(ij, ptype); /* * if (type==0) fprintf(stderr, "repeat: Warning: %d %d can't pair\n", i,j); */ no_close = (((type == 3) || (type == 4)) && noGUclosure); if ((noLP) && (i + turn + 2 < j)) { /* always consider the 
structure with additional stack */ if (evaluate_int(i, j, i + 1, j - 1, hc_dat_int)) { type_2 = rtype[vrna_get_ptype(indx[j - 1] + i + 1, ptype)]; energy = 0; energy = E_IntLoop(0, 0, type, type_2, S1[i + 1], S1[j - 1], S1[i + 1], S1[j - 1], P); if (sc_int_pair) { energy += sc_int_pair(i, j, i + 1, j - 1, sc_dat_int); } new_state = derive_new_state(i + 1, j - 1, state, part_energy + energy, 2); make_pair(i, j, new_state); make_pair(i + 1, j - 1, new_state); /* new_state->best_energy = new + best_energy; */ push(env->Stack, new_state); env->nopush = false; if (i == 1 || state->structure[i - 2] != '(' || state->structure[j] != ')') /* adding a stack is the only possible structure */ return; } } best_energy += part_energy; /* energy of current structural element */ best_energy += temp_energy; /* energy from unpushed interval */ if (hc->mx[n * i + j] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP) { for (p = i + 1; p <= MIN2(j - 2 - turn, i + MAXLOOP + 1); p++) { minq = j - i + p - MAXLOOP - 2; if (minq < p + 1 + turn) minq = p + 1 + turn; if (hc->up_int[i + 1] < (p - i - 1)) break; for (q = j - 1; q >= minq; q--) { if (hc->up_int[q + 1] < (j - q - 1)) break; /* skip stack if noLP, since we've already processed it above */ if ((noLP) && (p == i + 1) && (q == j - 1)) continue; if (!(hc->mx[n * p + q] & VRNA_CONSTRAINT_CONTEXT_INT_LOOP_ENC)) continue; if (c[indx[q] + p] == INF) continue; type_2 = vrna_get_ptype(indx[q] + p, ptype); if (noGUclosure) if (no_close || (type_2 == 3) || (type_2 == 4)) if ((p > i + 1) || (q < j - 1)) continue; if (evaluate_int(i, j, p, q, hc_dat_int)) { energy = E_IntLoop(p - i - 1, j - q - 1, type, rtype[type_2], S1[i + 1], S1[j - 1], S1[p - 1], S1[q + 1], P); new = energy + c[indx[q] + p]; if (sc_int_pair) energy += sc_int_pair(i, j, p, q, sc_dat_int); new = energy + c[indx[q] + p]; if (new + best_energy <= threshold) /* stack, bulge, or interior loop */ fork_int_state(i, j, p, q, state, part_energy + energy, env); } /*end of if block */ } /* end of 
q-loop */ } /* end of p-loop */ } /* base pair (i,j) encloses a loop with strand nick? */ if ((sn[i] != sn[j]) && (evaluate_ext(i, j, i, j, VRNA_DECOMP_EXT_STEM, hc_dat_ext))) { rt = rtype[type]; element_energy = P->DuplexInit; switch (dangle_model) { case 0: element_energy += vrna_E_ext_stem(rt, -1, -1, P); break; default: element_energy += vrna_E_ext_stem(rt, (sn[j - 1] == sn[j]) ? S1[j - 1] : -1, (sn[i] == sn[i + 1]) ? S1[i + 1] : -1, P); break; } if (sn[i] != sn[i + 1]) { if ((sn[j - 1] != sn[j]) && (i + 1 == j)) { if (element_energy + best_energy <= threshold) { fork_state_pair(i, j, state, part_energy + element_energy, env); } } else if (sn[j - 1] == sn[j]) { if (fms3[sn[i + 1]][j - 1] + element_energy + best_energy <= threshold) { /* continue backtracking in fms3[sn[i + 1]][j - 1] */ fork_state_pair_interval(i, j, j - 1, sn[i + 1], state, part_energy + element_energy, 5, env); } } } else if (sn[j - 1] != sn[j]) { if (fms5[sn[j - 1]][i + 1] + element_energy + best_energy <= threshold) { /* continue backtracking in fms5[sn[j - 1]][i + 1] */ fork_state_pair_interval(i, j, i + 1, sn[j - 1], state, part_energy + element_energy, 4, env); } } else { energy = 0; if (se[sn[i]] > i) energy += fms5[sn[i]][i + 1]; if (j - 1 > se[sn[i]]) energy += fms3[sn[se[sn[i]] + 1]][j - 1]; if (energy + element_energy + best_energy <= threshold) { fork_two_states_pair_ms(i, j, sn[i], sn[se[sn[i]] + 1], state, part_energy + element_energy, env); } nick = se[sn[i]] + 1; while (sn[nick] != sn[j]) { energy = 0; if (i + 1 <= se[sn[nick]]) energy += fms5[sn[nick]][i + 1]; if (se[sn[nick]] + 1 <= j - 1) energy += fms3[sn[se[sn[nick]] + 1]][j - 1]; if (energy + element_energy + best_energy <= threshold) { fork_two_states_pair_ms(i, j, sn[nick], sn[se[sn[nick]] + 1], state, part_energy + element_energy, env); } nick = se[sn[nick]] + 1; } } } mm = P->MLclosing; rt = rtype[type]; if (evaluate_mb(i, j, i + 1, j - 1, VRNA_DECOMP_PAIR_ML, hc_dat_mb)) { element_energy = mm; switch (dangle_model) { 
case 0: element_energy = E_MLstem(rt, -1, -1, P) + mm; break; default: element_energy = E_MLstem(rt, S1[j - 1], S1[i + 1], P) + mm; break; } if (sc_mb_pair) element_energy += sc_mb_pair(i, j, sc_dat_mb); /* multiloop decomposition */ for (k = i + turn + 2; k <= j - turn - 2; k++) { if (evaluate_mb(i + 1, j - 1, k - 1, k, VRNA_DECOMP_ML_ML_ML, hc_dat_mb)) { eee = fML[indx[k - 1] + i + 1]; if ((eee != INF) && (fM1[indx[j - 1] + k] != INF)) { eee += fM1[indx[j - 1] + k] + best_energy; aux_eee = element_energy; if (sc_mb_decomp_ml) aux_eee += sc_mb_decomp_ml(i + 1, j - 1, k - 1, k, sc_dat_mb); if ((eee + aux_eee) <= threshold) fork_two_states_pair(i, j, k, state, part_energy + aux_eee, 1, 3, env); } } } } if (sn[i] == sn[j]) { if (!no_close) { element_energy = vrna_E_hp_loop(fc, i, j); if (element_energy != INF) { if (element_energy + best_energy <= threshold) /* hairpin structure */ fork_state_pair(i, j, state, part_energy + element_energy, env); } } if (with_gquad) { /* now we have to find all loops where (i,j) encloses a gquad in an interior loops style */ ps = qs = en = NULL; en = E_GQuad_IntLoop_exhaustive(i, j, &ps, &qs, type, S1, ggg, threshold - best_energy, indx, P); for (cnt = 0; ps[cnt] != -1; cnt++) { if ((hc->up_int[i + 1] >= ps[cnt] - i - 1) && (hc->up_int[qs[cnt] + 1] >= j - qs[cnt] - 1)) { tmp_en = en[cnt]; if (sc_int_pair) tmp_en += sc_int_pair(i, j, ps[cnt], qs[cnt], sc_dat_int); new_state = derive_new_state(ps[cnt], qs[cnt], state, tmp_en + part_energy, 6); make_pair(i, j, new_state); /* new_state->best_energy = new + best_energy; */ push(env->Stack, new_state); env->nopush = false; } } free(en); free(ps); free(qs); } } best_energy -= part_energy; best_energy -= temp_energy; return; } PRIVATE void old_subopt_print(const char *structure, float energy, void *data) { struct old_subopt_dat *d = (struct old_subopt_dat *)data; if (structure && d->fp) { char *e_string = vrna_strdup_printf(" %6.2f", energy); print_structure(d->fp, structure, e_string); 
free(e_string); } } PRIVATE void old_subopt_store(const char *structure, float energy, void *data) { struct old_subopt_dat *d = (struct old_subopt_dat *)data; /* store solution */ if (d->n_sol + 1 == d->max_sol) { d->max_sol *= 2; d->SolutionList = (vrna_subopt_solution_t *)vrna_realloc(d->SolutionList, d->max_sol * sizeof(vrna_subopt_solution_t)); } if (structure) { d->SolutionList[d->n_sol].energy = energy; d->SolutionList[d->n_sol++].structure = strdup(structure); } else { d->SolutionList[d->n_sol].energy = 0; d->SolutionList[d->n_sol++].structure = NULL; } } PRIVATE void old_subopt_store_compressed(const char *structure, float energy, void *data) { struct old_subopt_dat *d = (struct old_subopt_dat *)data; /* store solution */ if (d->n_sol + 1 == d->max_sol) { d->max_sol *= 2; d->SolutionList = (vrna_subopt_solution_t *)vrna_realloc(d->SolutionList, d->max_sol * sizeof(vrna_subopt_solution_t)); } if (structure) { d->SolutionList[d->n_sol].energy = energy; if (d->cp > 0) { int cp = d->cp; char *s = vrna_cut_point_remove(structure, &cp); d->SolutionList[d->n_sol++].structure = vrna_db_pack(s); free(s); } else { d->SolutionList[d->n_sol++].structure = vrna_db_pack(structure); } } else { d->SolutionList[d->n_sol].energy = 0; d->SolutionList[d->n_sol++].structure = NULL; } } /* * ########################################### * # deprecated functions below # *########################################### */ #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY PUBLIC SOLUTION * subopt(char *seq, char *structure, int delta, FILE *fp) { return wrap_subopt(seq, structure, NULL, delta, fold_constrained, 0, fp); } PUBLIC SOLUTION * subopt_circ(char *seq, char *structure, int delta, FILE *fp) { return wrap_subopt(seq, structure, NULL, delta, fold_constrained, 1, fp); } PUBLIC SOLUTION * subopt_par(char *seq, char *structure, vrna_param_t *parameters, int delta, int is_constrained, int is_circular, FILE *fp) { return wrap_subopt(seq, structure, parameters, delta, is_constrained, 
is_circular, fp); } PRIVATE SOLUTION * wrap_subopt(char *string, char *structure, vrna_param_t *parameters, int delta, int is_constrained, int is_circular, FILE *fp) { vrna_fold_compound_t *fc; vrna_param_t *P; char *seq; #ifdef _OPENMP /* Explicitly turn off dynamic threads */ omp_set_dynamic(0); #endif /* we need the parameter structure for hard constraints */ if (parameters) { P = vrna_params_copy(parameters); } else { vrna_md_t md; set_model_details(&md); md.temperature = temperature; P = vrna_params(&md); } P->model_details.circ = is_circular; P->model_details.uniq_ML = uniq_ML = 1; /* * what about cofold sequences here? Is it safe to call the below cut_point_insert() ? * dirty hack to reinsert the '&' according to the global variable 'cut_point' */ seq = vrna_cut_point_insert(string, cut_point); fc = vrna_fold_compound(seq, &(P->model_details), ((is_circular == 0) ? VRNA_OPTION_HYBRID : VRNA_OPTION_DEFAULT)); if (parameters) { /* replace params if necessary */ free(fc->params); fc->params = P; } else { free(P); } /* handle hard constraints in pseudo dot-bracket format if passed via simple interface */ if (is_constrained && structure) { unsigned int constraint_options = 0; constraint_options |= VRNA_CONSTRAINT_DB | VRNA_CONSTRAINT_DB_PIPE | VRNA_CONSTRAINT_DB_DOT | VRNA_CONSTRAINT_DB_X | VRNA_CONSTRAINT_DB_ANG_BRACK | VRNA_CONSTRAINT_DB_RND_BRACK | VRNA_CONSTRAINT_DB_INTRAMOL | VRNA_CONSTRAINT_DB_INTERMOL; vrna_constraints_add(fc, (const char *)structure, constraint_options); } if (backward_compat_compound && backward_compat) vrna_fold_compound_free(backward_compat_compound); backward_compat_compound = fc; backward_compat = 1; /* cleanup */ free(seq); return vrna_subopt(fc, delta, subopt_sorted, fp); } #endif /* * --------------------------------------------------------------------------- * Well, that is the end!---------------------------------------------------- *--------------------------------------------------------------------------- */
/* piCalc.c */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <math.h>

/* Maximum allowed |sequential - parallel| deviation for a PASS verdict. */
#define MARGIN 1e-12

void Usage(char* prog_name);
int sequential(long long points);
int tasks(long long points);

/*
 * Results of the most recent sequential / task-parallel run.
 * They must be doubles: the original `int` versions were never assigned,
 * so abs(0 - 0) <= 1e-12 made every comparison pass vacuously.
 */
double glob_sum_seq;
double glob_sum_par;

/* Run both versions for one problem size and compare the two pi estimates. */
static void run_case(long long points)
{
    printf("\n\n %lld points", points);
    printf("\nSEQUENTIAL\n");
    sequential(points);
    printf("\nPARALLEL\n");
    tasks(points);
    /* fabs, not abs: the operands are floating point */
    if (fabs(glob_sum_seq - glob_sum_par) <= MARGIN)
        printf("\nTest PASSED \n");
    else
        printf("\nTest FAILED \n");
}

int main(void)
{
    /* One shared driver replaces five copy-pasted comparison blocks. */
    run_case(1000000LL);
    run_case(10000000LL);
    run_case(100000000LL);
    run_case(1000000000LL);
    run_case(10000000000LL);
    return 0;
}

/*
 * Leibniz series estimate of pi: 4 * sum_{i=0..n-1} (-1)^i / (2i+1).
 * Sequential reference implementation; publishes its result in glob_sum_seq.
 */
int sequential(long long points)
{
    long long n = points;
    double factor = 1.0;  /* initialized: the original printed it uninitialized (UB) */
    double sum = 0.0;

    double timeStart = omp_get_wtime();
    printf("Before for loop, factor = %f.\n", factor);
    for (long long i = 0; i < n; i++) {
        factor = (i % 2 == 0) ? 1.0 : -1.0;
        sum += factor / (2 * i + 1);
    }
    printf("After for loop, factor = %f.\n", factor);
    sum = 4.0 * sum;
    glob_sum_seq = sum;  /* publish for the PASS/FAIL comparison */
    printf("With n = %lld terms\n", n);
    printf(" Our estimate of pi = %.14f\n", sum);
    printf(" Ref estimate of pi = %.14f\n", 4.0 * atan(1.0));
    double timeStop = omp_get_wtime();
    printf("Elapsed time: %f", timeStop - timeStart);
    return 0;
}

/*
 * Task-parallel version: each task sums one chunk of ~n/num_threads terms
 * and folds its partial sum into `sum` under an atomic update.
 * Publishes its result in glob_sum_par.
 */
int tasks(long long points)
{
    long long n = points;
    double sum = 0.0;

    double timeStart = omp_get_wtime();
    #pragma omp parallel shared(sum, n)
    {
        #pragma omp single
        {
            long long gran = n / omp_get_num_threads();
            if (gran <= 0)
                gran = 1;  /* guard: more threads than points */
            long long ntasks = (n + gran - 1) / gran;
            for (long long j = 0; j < ntasks; j++) {
                /*
                 * firstprivate(j, gran): the loop counter must be captured
                 * by value at task creation. In the original, j was shared
                 * with the generating loop, so running tasks raced with
                 * the increment of j and could process wrong chunks.
                 */
                #pragma omp task firstprivate(j, gran) shared(sum, n)
                {
                    double tempSum = 0.0;
                    long long start = j * gran;
                    long long end = start + gran;
                    if (end > n)
                        end = n;  /* last chunk: do not run past n terms */
                    for (long long i = start; i < end; i++) {
                        double factor = (i % 2 == 0) ? 1.0 : -1.0;
                        tempSum += factor / (2 * i + 1);
                    }
                    #pragma omp atomic
                    sum += tempSum;
                }
            }
        }  /* implicit barrier of the parallel region waits for all tasks */
    }
    sum = 4.0 * sum;
    glob_sum_par = sum;  /* publish for the PASS/FAIL comparison */
    printf("With n = %lld terms\n", n);
    printf(" Our estimate of pi = %.14f\n", sum);
    printf(" Ref estimate of pi = %.14f\n", 4.0 * atan(1.0));
    double timeStop = omp_get_wtime();
    printf("Elapsed time: %f", timeStop - timeStart);
    return 0;
}

/* Print command-line usage and exit. */
void Usage(char* prog_name)
{
    fprintf(stderr, "usage: %s <thread_count> <n>\n", prog_name);
    fprintf(stderr, " n is the number of terms and should be >= 1\n");
    exit(0);
}
cfae4d_so8_gcc.c
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "xmmintrin.h" #include "pmmintrin.h" #include <stdio.h> #include "omp.h" #define min(a, b) (((a) < (b)) ? (a) : (b)) #define max(a, b) (((a) > (b)) ? (a) : (b)) struct dataobj { void *restrict data; int *size; int *npsize; int *dsize; int *hsize; int *hofs; int *oofs; }; struct profiler { double section0; double section1; }; void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw); int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads, const int nthreads_nonaffine, struct profiler *timers) { int(*restrict block_sizes) __attribute__((aligned(64))) = (int(*))block_sizes_vec->data; int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float(*restrict 
save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data; int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data; /* Flush denormal numbers to zero in hardware */ _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON); _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON); int xb_size = block_sizes[0]; int y0_blk0_size = block_sizes[3]; int x0_blk0_size = block_sizes[2]; int yb_size = block_sizes[1]; printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size); //for (int time = time_m, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3); time <= time_M; time += 1, t0 = (time + 2)%(3), t1 = (time)%(3), t2 = (time + 1)%(3)) //{ int sf = 4; int t_blk_size = 2 * sf * (time_M - time_m); struct timeval start_section0, end_section0; gettimeofday(&start_section0, NULL); for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size) // for each t block { for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1) { for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1) { for (int time = t_blk, t0 = (time) % (3), t1 = (time + 2) % (3), t2 = (time + 1) % (3); time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m)); time += sf, t0 = (((time 
/ sf) % (time_M - time_m + 1))) % (3), t1 = (((time / sf) % (time_M - time_m + 1)) + 2 ) % (3), t2 = (((time / sf) % (time_M - time_m + 1)) + 1) % (3)) { int tw = ((time / sf) % (time_M - time_m + 1)); bf0(damp_vec, dt, u_vec, vp_vec, nnz_sp_source_mask_vec, sp_source_mask_vec, save_src_u_vec, source_id_vec, source_mask_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, sp_zi_m, nthreads, xb, yb, xb_size, yb_size, time, tw); //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, x0_blk0_size, x_M - (x_M - x_m + 1) % (x0_blk0_size), x_m, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads); //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, y0_blk0_size, y_M - (y_M - y_m + 1) % (y0_blk0_size), y_m, z_M, z_m, nthreads); //bf0(damp_vec, dt, u_vec, vp_vec, t0, t1, t2, (x_M - x_m + 1) % (x0_blk0_size), x_M, x_M - (x_M - x_m + 1) % (x0_blk0_size) + 1, (y_M - y_m + 1) % (y0_blk0_size), y_M, y_M - (y_M - y_m + 1) % (y0_blk0_size) + 1, z_M, z_m, nthreads); } } } /* End section0 */ gettimeofday(&end_section0, NULL); timers->section0 += (double)(end_section0.tv_sec - start_section0.tv_sec) + (double)(end_section0.tv_usec - start_section0.tv_usec) / 1000000; } for (int time = time_m, t2 = (time + 1) % (3); time <= time_M; time += 1, t2 = (time + 1) % (3)) { struct timeval start_section1, end_section1; gettimeofday(&start_section1, NULL); /* Begin section1 */ /* End section1 */ gettimeofday(&end_section1, NULL); timers->section1 += (double)(end_section1.tv_sec - start_section1.tv_sec) + (double)(end_section1.tv_usec - start_section1.tv_usec) / 1000000; } return 0; } void bf0(struct dataobj *restrict damp_vec, const float dt, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict 
sp_source_mask_vec, struct dataobj *restrict save_src_u_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, const int t0, const int t1, const int t2, const int x0_blk0_size, const int x_M, const int x_m, const int y0_blk0_size, const int y_M, const int y_m, const int z_M, const int z_m, const int sp_zi_m, const int nthreads, const int xb, const int yb, const int xb_size, const int yb_size, const int time, const int tw) { float(*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[damp_vec->size[1]][damp_vec->size[2]])damp_vec->data; float(*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__((aligned(64))) = (float(*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]])u_vec->data; float(*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__((aligned(64))) = (float(*)[vp_vec->size[1]][vp_vec->size[2]])vp_vec->data; int(*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__((aligned(64))) = (int(*)[nnz_sp_source_mask_vec->size[1]])nnz_sp_source_mask_vec->data; float(*restrict save_src_u)[save_src_u_vec->size[1]] __attribute__((aligned(64))) = (float(*)[save_src_u_vec->size[1]])save_src_u_vec->data; int(*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_id_vec->size[1]][source_id_vec->size[2]])source_id_vec->data; int(*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[source_mask_vec->size[1]][source_mask_vec->size[2]])source_mask_vec->data; int(*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__((aligned(64))) = (int(*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]])sp_source_mask_vec->data; if (x0_blk0_size == 0 || y0_blk0_size == 0) { return; } #pragma omp parallel num_threads(nthreads) { #pragma omp for collapse(2) schedule(dynamic, 1) for (int x0_blk0 = max((x_m 
+ time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size) { for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size) { for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++) { for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++) { #pragma omp simd aligned(damp, u, vp : 32) for (int z = z_m; z <= z_M; z += 1) { float r8 = 1.0/dt; float r7 = 1.0/(dt*dt); float r6 = 1.0/(vp[x - time + 8][y - time + 8][z + 8]*vp[x - time + 8][y - time + 8][z + 8]); u[t2][x - time + 8][y - time + 8][z + 8] = (r6*(-r7*(-2.0F*u[t0][x - time + 8][y - time + 8][z + 8] + u[t1][x - time + 8][y - time + 8][z + 8])) + r8*(damp[x - time + 1][y - time + 1][z + 1]*u[t0][x - time + 8][y - time + 8][z + 8]) - 7.93650813e-6F*(u[t0][x - time + 4][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 4][z + 8] + u[t0][x - time + 8][y - time + 8][z + 4] + u[t0][x - time + 8][y - time + 8][z + 12] + u[t0][x - time + 8][y - time + 12][z + 8] + u[t0][x - time + 12][y - time + 8][z + 8]) + 1.12874782e-4F*(u[t0][x - time + 5][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 5][z + 8] + u[t0][x - time + 8][y - time + 8][z + 5] + u[t0][x - time + 8][y - time + 8][z + 11] + u[t0][x - time + 8][y - time + 11][z + 8] + u[t0][x - time + 11][y - time + 8][z + 8]) - 8.8888891e-4F*(u[t0][x - time + 6][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 6][z + 8] + u[t0][x - time + 8][y - time + 8][z + 6] + u[t0][x - time + 8][y - time + 8][z + 10] + u[t0][x - time + 8][y - time + 10][z + 8] + u[t0][x - time + 10][y - time + 8][z + 8]) + 7.11111128e-3F*(u[t0][x - time + 7][y - time + 8][z + 8] + u[t0][x - time + 8][y - time + 7][z + 8] + u[t0][x - time + 8][y - time + 8][z + 7] + u[t0][x - time + 8][y - time + 8][z + 9] + u[t0][x - time + 8][y - time + 9][z + 8] + u[t0][x - time + 9][y - time + 8][z + 8]) - 3.79629639e-2F*u[t0][x - 
time + 8][y - time + 8][z + 8])/(r6*r7 + r8*damp[x - time + 1][y - time + 1][z + 1]); } #pragma omp simd aligned(damp, u, vp : 32) for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1) { int zind = sp_source_mask[x - time][y - time][sp_zi]; float r0 = save_src_u[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind]; u[t2][x - time + 8][y - time + 8][zind + 8] += r0; } } } } } } }
smul.c
/*
    This file is part of ParTI!.

    ParTI! is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as
    published by the Free Software Foundation, either version 3 of
    the License, or (at your option) any later version.

    ParTI! is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with ParTI!.
    If not, see <http://www.gnu.org/licenses/>.
*/

#include <ParTI.h>

/**
 * Multiply every stored value of a sparse tensor by a scalar, in place.
 * Always returns 0.
 *
 * Scaling by zero annihilates every entry, so the tensor is emptied by
 * resetting the nonzero count and the value-array length instead of
 * touching each element.
 * NOTE(review): the index arrays are left untouched in the zero case --
 * presumably callers treat nnz as authoritative; confirm against ParTI
 * conventions.
 */
int sptSparseTensorMulScalar(sptSparseTensor *X, sptValue const a) {
    if (a == 0) {
        X->nnz = 0;
        X->values.len = 0;
        return 0;
    }

    /* Each nonzero is independent: scale them in parallel. */
    #pragma omp parallel for schedule(static)
    for (sptNnzIndex idx = 0; idx < X->nnz; ++idx) {
        X->values.data[idx] *= a;
    }

    return 0;
}
asp.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define N 2000   /* number of vertices */
#define MAXD 42  /* maximum random edge weight */

/* Distance matrix, updated in place by each Floyd-Warshall variant. */
int tab[N][N]__attribute__((aligned (32)));

/* Baseline sequential Floyd-Warshall all-pairs shortest paths. */
void asp() {
  for (int k = 0; k < N; ++k) {
    for (int i = 0; i < N; ++i) {
      if (i != k) {
        for (int j = 0; j < N; ++j) {
          int tmp = tab[i][k] + tab[k][j];
          if (tmp < tab[i][j]) {
            tab[i][j] = tmp;
          }
        }
      }
    }
  }
}

/*
 * Benchmark variant: parallelizes the k loop. WARNING: iteration k of
 * Floyd-Warshall depends on the results of iteration k-1, so this variant
 * races on tab and is NOT guaranteed to compute correct shortest paths.
 * It is kept deliberately for timing comparison.
 */
void asp_parallel_k() {
  #pragma omp parallel for
  for (int k = 0; k < N; ++k) {
    for (int i = 0; i < N; ++i) {
      if (i != k) {
        for (int j = 0; j < N; ++j) {
          int tmp = tab[i][k] + tab[k][j];
          if (tmp < tab[i][j]) {
            tab[i][j] = tmp;
          }
        }
      }
    }
  }
}

/*
 * Correct parallelization: for a fixed k, the i iterations only combine
 * row k and column k with independent rows i, so they can run in parallel.
 */
void asp_parallel_i() {
  for (int k = 0; k < N; ++k) {
    #pragma omp parallel for
    for (int i = 0; i < N; ++i) {
      if (i != k) {
        for (int j = 0; j < N; ++j) {
          int tmp = tab[i][k] + tab[k][j];
          if (tmp < tab[i][j]) {
            tab[i][j] = tmp;
          }
        }
      }
    }
  }
}

/*
 * Benchmark variant: interchanges the i and k loops. WARNING: k must be
 * the outermost loop for Floyd-Warshall to be correct; this variant is
 * kept for timing comparison only.
 */
void asp_swap_parallel_i() {
  #pragma omp parallel for
  for (int i = 0; i < N; ++i) {
    for (int k = 0; k < N; ++k) {
      if (i != k) {
        for (int j = 0; j < N; ++j) {
          int tmp = tab[i][k] + tab[k][j];
          if (tmp < tab[i][j]) {
            tab[i][j] = tmp;
          }
        }
      }
    }
  }
}

/*
 * Fill tab with random edge weights (0 on the diagonal), then time one
 * run of f. Fixed: the original loops stopped at N-1 and left the last
 * row and column of tab uninitialized (i.e. zero for this static array),
 * so every path through vertex N-1 appeared to have length 0.
 */
void test(const char* func_name, void f()) {
  // populate matrix with random numbers
  for (size_t i = 0; i < N; i++) {
    for (size_t j = 0; j < N; j++) {
      if (j == i)
        tab[i][j] = 0;
      else
        tab[i][j] = 1 + rand() % MAXD;
    }
  }

  double time = omp_get_wtime();
  f();
  printf("%s: %fs\n", func_name, omp_get_wtime() - time);
}

int main(void) {
  time_t t;
  srand((unsigned) time(&t));
  test("asp", asp);
  test("asp_parallel_k", asp_parallel_k);
  test("asp_parallel_i", asp_parallel_i);
  test("asp_swap_parallel_i", asp_swap_parallel_i);
  return 0;
}
openmp_utils.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ \.
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Riccardo Rossi
//

#ifndef KRATOS_OPENMP_UTILS_H
#define KRATOS_OPENMP_UTILS_H

#include <stdio.h>
#include <vector>
#include <iostream>

#ifdef _OPENMP
#include <omp.h>
#else
#include <ctime>
#endif

namespace Kratos
{
///@addtogroup KratosCore
///@{

///@name Kratos Classes
///@{

/// Implements basic tasks for OpenMP parallelism and suitable scalar alternatives
/**
 This class defines utility functions that implement some basic OpenMP
 capabilities and an equivalent scalar alternative to use in compilations
 where OpenMP is not enabled. The idea is to allow Kratos developers to
 design their code in parallel, knowing that it will work in scalar runs
 as well.
 */
class OpenMPUtils
{
public:
    ///@name Type definitions
    ///@{

    /// Vector type for the output of DivideInPartitions method
    /**
     * @see OpenMPUtils::DivideInPartitions
     */
    typedef std::vector<int> PartitionVector;

    ///@}
    ///@name Operations
    ///@{

    /// Wrapper for omp_get_max_threads().
    /** @return Maximum number of OpenMP threads that will be used in parallel regions. */
    static inline int GetNumThreads()
    {
#ifdef _OPENMP
        return omp_get_max_threads();
#else
        return 1;
#endif
    }

    /// Wrapper for omp_get_num_threads().
    /** @return Number of OpenMP threads in the current team. */
    static int GetCurrentNumberOfThreads()
    {
#ifdef _OPENMP
        return omp_get_num_threads();
#else
        return 1;
#endif
    }

    /// Wrapper for omp_get_num_procs().
    /** @return Number of processors available to the device. */
    static int GetNumberOfProcessors()
    {
#ifdef _OPENMP
        return omp_get_num_procs();
#else
        return 1;
#endif
    }

    /// Wrapper for omp_get_dynamic().
    /** @return Nonzero if dynamic adjustment of the number of threads is enabled. */
    static int IsDynamic()
    {
#ifdef _OPENMP
        return omp_get_dynamic();
#else
        return 0;
#endif
    }

    /// Wrapper for omp_get_thread_num().
    /** @return The thread number for this thread, 0 if scalar run. */
    static inline int ThisThread()
    {
#ifdef _OPENMP
        return omp_get_thread_num();
#else
        return 0;
#endif
    }

    /// Wrapper for omp_in_parallel().
    /** @return Nonzero when called from inside an active parallel region, 0 otherwise. */
    static inline int IsInParallel()
    {
#ifdef _OPENMP
        return omp_in_parallel();
#else
        return 0;
#endif
    }

    /// Timing routine.
    /** Determine the current time by calling an appropriate (scalar or parallel) timer class.
     @return Current time
     */
    static double GetCurrentTime()
    {
#ifndef _OPENMP
        return std::clock()/static_cast<double>(CLOCKS_PER_SEC);
#else
        return omp_get_wtime();
#endif
    }

    /// Divide an array of length NumTerms between NumThreads threads.
    /** Creates a std::vector containing NumThreads + 1 terms, where term k
     is the first position of the array that corresponds to thread k.
     The k+1 term is the end of the array, so that the vector can be used
     to iterate the array between 'k' and 'k+1' in each thread. The last
     partition absorbs any remainder of the integer division.
     @param NumTerms Number of objects to be divided between the threads.
     @param NumThreads The number of parallel threads that will be used.
     @param Partitions This object will contain the begin and end positions for each thread.
     */
    static inline void DivideInPartitions(
        const int NumTerms,
        const int NumThreads,
        PartitionVector& Partitions)
    {
        Partitions.resize(NumThreads + 1);
        int PartitionSize = NumTerms / NumThreads;
        Partitions[0] = 0;
        Partitions[NumThreads] = NumTerms;
        for (int i = 1; i < NumThreads; i++)
            Partitions[i] = Partitions[i-1] + PartitionSize;
    }

    /// Generate a partition for an std::vector-like array, providing iterators to the begin and end positions for each thread.
    /** This function assumes that the vector class will have an iterator
     type and implement begin(), end() and size() methods. Must be called
     from inside a parallel region to be meaningful.
     * @param rVector An array containing the elements to be distributed between the threads.
     * @param rBegin Iterator pointing to the first element in rVector to be used in the current thread.
     * @param rEnd Iterator pointing to the end position for the current thread in rVector.
     */
    template< class TVector >
    static void PartitionedIterators(TVector& rVector,
                                     typename TVector::iterator& rBegin,
                                     typename TVector::iterator& rEnd)
    {
#ifdef _OPENMP
        int NumTerms = rVector.size();
        int ThreadNum = omp_get_thread_num();
        int NumThreads = omp_get_max_threads();
        int PartitionSize = NumTerms / NumThreads;
        // Set Partition start
        rBegin = rVector.begin() + ThreadNum * PartitionSize;
        // Partition ends after 'PartitionSize' terms, except if this is the last partition
        if ( (ThreadNum + 1) != NumThreads )
            rEnd = rBegin + PartitionSize;
        else
            rEnd = rVector.end();
#else
        rBegin = rVector.begin();
        rEnd = rVector.end();
#endif
    }

    /// A function to set the number of threads from Python.
    /** This is an auxiliary mainly intended for test purposes, to help
     with the detection of race conditions.
     @param NumThreads Number of threads to use in parallel regions. Note
     that values greater than the environment variable OMP_NUM_THREADS
     will be ignored.
     */
    static inline void SetNumThreads(int NumThreads = 1)
    {
#ifdef _OPENMP
        int procs = omp_get_num_procs();
        if( procs < NumThreads ) {
            // Requested more threads than processors: clamp to the processor count.
            std::cout << " WARNING: Maximum number of threads is EXCEEDED " << std::endl;
            /* Set thread number */
            omp_set_num_threads(procs);
            std::cout << " Number of Threads Set To : " << procs << std::endl;
        }
        else {
            /* Set thread number */
            omp_set_num_threads(NumThreads);
        }
#endif
    }

    /** A method to print the OMP information */
    static inline void PrintOMPInfo()
    {
#ifdef _OPENMP
        int nthreads, tid, procs, maxt, inpar, dynamic, nested;

        /* Start parallel region */
        #pragma omp parallel private(nthreads, tid)
        {
            /* Obtain thread number */
            tid = omp_get_thread_num();

            /* Only master thread does this */
            if (tid == 0)
            {
                printf("  Thread %d getting environment info...\n", tid);

                /* Get environment information */
                procs    = omp_get_num_procs();
                nthreads = omp_get_num_threads();
                maxt     = omp_get_max_threads();
                inpar    = omp_in_parallel();
                //omp_set_dynamic(true);
                dynamic  = omp_get_dynamic();
                //omp_set_nested(true);
                nested   = omp_get_nested();

                /* Print environment information */
                printf( "  | ------------ OMP IN USE --------- |\n");
                printf( "  | Machine number of processors  = %d |\n", procs);
                printf( "  | Number of threads set         = %d |\n", nthreads);
                printf( "  | Max threads in use            = %d |\n", maxt);
                printf( "  | In parallel?                  = %d |\n", inpar);
                printf( "  | Dynamic threads enabled?      = %d |\n", dynamic);
                printf( "  | Nested parallelism supported? = %d |\n", nested);
                printf( "  | --------------------------------- |\n");

                if( procs < nthreads )
                    std::cout << " ( WARNING: Maximum number of threads is EXCEEDED )" << std::endl;
            }
        }
#endif
    }

    /// Divide number_of_rows between number_of_threads, writing the boundaries into partitions.
    /** Same partitioning scheme as DivideInPartitions, for any resizable
     integer container; the last partition absorbs the division remainder. */
    template<class T>
    static inline void CreatePartition(unsigned int number_of_threads,
                                       const int number_of_rows,
                                       T& partitions)
    {
        partitions.resize(number_of_threads + 1);
        int partition_size = number_of_rows / number_of_threads;
        partitions[0] = 0;
        partitions[number_of_threads] = number_of_rows;
        for (unsigned int i = 1; i < number_of_threads; i++)
            partitions[i] = partitions[i-1] + partition_size;
    }

    ///@} //Operations
};

///@} //Kratos classes

///@} addtogroup block

}

#endif  /* KRATOS_OPENMP_UTILS_H */
DRB085-threadprivate-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* A file-scope variable used within a function called by a parallel region.
Use threadprivate to avoid data races. */
#include <stdio.h>
#include <assert.h>

/* sum0: per-thread accumulator (threadprivate below); sum1: sequential
   reference result computed on the master thread only. */
int sum0=0, sum1=0;
/* Each thread gets its own persistent copy of sum0, so the updates inside
   foo() are race-free by construction. */
#pragma omp threadprivate(sum0)

/* Adds i into the calling thread's private copy of sum0. */
void foo (int i)
{
  sum0=sum0+i;
}

int main()
{
  int len=1000;
  int i, sum=0;
/* copyin(sum0) initializes every thread's private sum0 from the master
   copy (0) on entry to the parallel region */
#pragma omp parallel copyin(sum0)
  {
#pragma omp for
    for (i=0;i<len;i++)
    {
      foo (i);
    }
/* combine the per-thread partial sums; critical serializes the updates
   to the shared variable sum */
#pragma omp critical
    {
      sum= sum+sum0;
    }
  }

  /* reference calculation */
  for (i=0;i<len;i++)
  {
    sum1=sum1+i;
  }

  printf("sum=%d; sum1=%d\n",sum,sum1);
  assert(sum==sum1);
  return 0;
}
middle6r.c
/* * Date: 11 December 2015 * Contact: Thomas Peyrin - thomas.peyrin@gmail.com */ /* * Simulation of boomerang analysis for Skinny * Date: March 21, 2020 * Author: Hosein Hadipour * Contact: hsn.hadipour@gmail.com */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <math.h> #include <omp.h> #include <stdbool.h> // #define DEBUG 1 #define Nthreads 2 // Number of parallel threads utilized in this program #define NumOfExperiments 128 // Number of independent experiments // Table that encodes the parameters of the various Skinny versions: // (block size, key size, number of rounds) //Skinny-64-64: 32 rounds //Skinny-64-128: 36 rounds //Skinny-64-192: 40 rounds //Skinny-128-128: 40 rounds //Skinny-128-256: 48 rounds //Skinny-128-384: 56 rounds int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40}, {128, 128, 40}, {128, 256, 48}, {128, 384, 56}}; // Packing of data is done as follows (state[i][j] stands for row i and column j): // 0 1 2 3 // 4 5 6 7 // 8 9 10 11 //12 13 14 15 // 4-bit Sbox const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15}; const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15}; // 8-bit Sbox const unsigned char sbox_8[256] = {0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b, 0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b, 0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9, 0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9, 0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d, 0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d, 0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 0x0d, 0xbd, 0xe1, 0xc8, 0xec, 0xc5, 0xcd, 0xe4, 0xc4, 0xed, 
0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd, 0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 0x93, 0x20, 0x9b, 0x29, 0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79, 0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb, 0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb, 0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f, 0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f, 0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf, 0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff}; const unsigned char sbox_8_inv[256] = {0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee, 0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4, 0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf, 0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7, 0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4, 0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde, 0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7, 0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf, 0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4, 0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce, 0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7, 0x3d, 0x69, 0xe9, 0xad, 0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 0xaf, 0x6b, 0x6f, 
0xeb, 0xef, 0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4, 0x2c, 0x78, 0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe, 0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7, 0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff}; // ShiftAndSwitchRows permutation const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12}; const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14}; // Tweakey permutation const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7}; const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1}; // round constants const unsigned char RC[62] = { 0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F, 0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, 0x1D, 0x3A, 0x35, 0x2B, 0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E, 0x1C, 0x38, 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A, 0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04, 0x09, 0x13, 0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28, 0x10, 0x20}; FILE *fic; void init_prng(int offset) { // unsigned int initial_seed = 0x5ED90662; // unsigned int initial_seed = 0x30051991; My birthday! unsigned int initial_seed = 10*time(NULL) + 11*offset; srand(initial_seed); // Initialization, should only be called once. 
int r = rand(); printf("[+] PRNG initialized to 0x%08X\n", initial_seed); } void display_matrix(unsigned char state[4][4], int ver) { int i; unsigned char input[16]; if (versions[ver][0] == 64) { for (i = 0; i < 8; i++) input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF); for (i = 0; i < 8; i++) fprintf(fic, "%02x", input[i]); } else if (versions[ver][0] == 128) { for (i = 0; i < 16; i++) input[i] = state[i >> 2][i & 0x3] & 0xFF; for (i = 0; i < 16; i++) fprintf(fic, "%02x", input[i]); } } void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver) { int k; fprintf(fic, "S = "); display_matrix(state, ver); for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { fprintf(fic, " - TK%i = ", k + 1); display_matrix(keyCells[k], ver); } } // Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver) { int i, j, k; unsigned char pos; unsigned char keyCells_tmp[3][4][4]; // apply the subtweakey to the internal state for (i = 0; i <= 1; i++) { for (j = 0; j < 4; j++) { state[i][j] ^= keyCells[0][i][j]; if (2 * versions[ver][0] == versions[ver][1]) state[i][j] ^= keyCells[1][i][j]; else if (3 * versions[ver][0] == versions[ver][1]) state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j]; } } // update the subtweakey states with the permutation for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { //application of the TWEAKEY permutation pos = TWEAKEY_P[j + 4 * i]; keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3]; } } } // update the subtweakey states with the LFSRs for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i <= 1; i++) { for (j = 0; j < 4; j++) { //application of LFSRs for TK updates if (k == 1) { if 
(versions[ver][0] == 64) keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1); else keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01); } else if (k == 2) { if (versions[ver][0] == 64) keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j]) & 0x8) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8); else keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80); } } } } for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { keyCells[k][i][j] = keyCells_tmp[k][i][j]; } } } } // Extract and apply the subtweakey to the internal state (must be the two top rows XORed together), then update the tweakey state (inverse function} void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver) { int i, j, k; unsigned char pos; unsigned char keyCells_tmp[3][4][4]; // update the subtweakey states with the permutation for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { //application of the inverse TWEAKEY permutation pos = TWEAKEY_P_inv[j + 4 * i]; keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3]; } } } // update the subtweakey states with the LFSRs for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 2; i <= 3; i++) { for (j = 0; j < 4; j++) { //application of inverse LFSRs for TK updates if (k == 1) { if (versions[ver][0] == 64) keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8) ^ ((keyCells_tmp[k][i][j]) & 0x8); else keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80); } else if (k == 
2) { if (versions[ver][0] == 64) keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1); else keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01); } } } } for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++) { for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { keyCells[k][i][j] = keyCells_tmp[k][i][j]; } } } // apply the subtweakey to the internal state for (i = 0; i <= 1; i++) { for (j = 0; j < 4; j++) { state[i][j] ^= keyCells[0][i][j]; if (2 * versions[ver][0] == versions[ver][1]) state[i][j] ^= keyCells[1][i][j]; else if (3 * versions[ver][0] == versions[ver][1]) state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j]; } } } // Apply the constants: using a LFSR counter on 6 bits, we XOR the 6 bits to the first 6 bits of the internal state void AddConstants(unsigned char state[4][4], int r) { state[0][0] ^= (RC[r] & 0xf); state[1][0] ^= ((RC[r] >> 4) & 0x3); state[2][0] ^= 0x2; } // apply the 4-bit Sbox void SubCell4(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_4[state[i][j]]; } // apply the 4-bit inverse Sbox void SubCell4_inv(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_4_inv[state[i][j]]; } // apply the 8-bit Sbox void SubCell8(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_8[state[i][j]]; } // apply the 8-bit inverse Sbox void SubCell8_inv(unsigned char state[4][4]) { int i, j; for (i = 0; i < 4; i++) for (j = 0; j < 4; j++) state[i][j] = sbox_8_inv[state[i][j]]; } // Apply the ShiftRows function void ShiftRows(unsigned char state[4][4]) { int i, j, pos; unsigned char state_tmp[4][4]; for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { //application of the ShiftRows permutation pos = P[j + 4 * i]; 
state_tmp[i][j] = state[pos >> 2][pos & 0x3]; } } for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { state[i][j] = state_tmp[i][j]; } } } // Apply the inverse ShiftRows function void ShiftRows_inv(unsigned char state[4][4]) { int i, j, pos; unsigned char state_tmp[4][4]; for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { //application of the inverse ShiftRows permutation pos = P_inv[j + 4 * i]; state_tmp[i][j] = state[pos >> 2][pos & 0x3]; } } for (i = 0; i < 4; i++) { for (j = 0; j < 4; j++) { state[i][j] = state_tmp[i][j]; } } } // Apply the linear diffusion matrix //M = //1 0 1 1 //1 0 0 0 //0 1 1 0 //1 0 1 0 void MixColumn(unsigned char state[4][4]) { int j; unsigned char temp; for (j = 0; j < 4; j++) { state[1][j] ^= state[2][j]; state[2][j] ^= state[0][j]; state[3][j] ^= state[2][j]; temp = state[3][j]; state[3][j] = state[2][j]; state[2][j] = state[1][j]; state[1][j] = state[0][j]; state[0][j] = temp; } } // Apply the inverse linear diffusion matrix void MixColumn_inv(unsigned char state[4][4]) { int j; unsigned char temp; for (j = 0; j < 4; j++) { temp = state[3][j]; state[3][j] = state[0][j]; state[0][j] = state[1][j]; state[1][j] = state[2][j]; state[2][j] = temp; state[3][j] ^= state[2][j]; state[2][j] ^= state[0][j]; state[1][j] ^= state[2][j]; } } // decryption function of Skinny void dec(unsigned char *input, const unsigned char *userkey, int ver, int r) { unsigned char state[4][4]; unsigned char dummy[4][4] = {{0}}; unsigned char keyCells[3][4][4]; int i; memset(keyCells, 0, 48); for (i = 0; i < 16; i++) { if (versions[ver][0] == 64) { if (i & 1) { state[i >> 2][i & 0x3] = input[i >> 1] & 0xF; keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF; if (versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF; } else { state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF; keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF; if 
(versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF; } } else if (versions[ver][0] == 128) { state[i >> 2][i & 0x3] = input[i] & 0xFF; keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF; if (versions[ver][1] >= 256) keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF; if (versions[ver][1] >= 384) keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF; } } for (i = r - 1; i >= 0; i--) { AddKey(dummy, keyCells, ver); } #ifdef DEBUG fprintf(fic, "DEC - initial state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif for (i = r - 1; i >= 0; i--) { MixColumn_inv(state); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif ShiftRows_inv(state); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddKey_inv(state, keyCells, ver); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddConstants(state, i); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif if (versions[ver][0] == 64) SubCell4_inv(state); else SubCell8_inv(state); #ifdef DEBUG fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif } #ifdef DEBUG fprintf(fic, "DEC - final state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif if (versions[ver][0] == 64) { for (i = 0; i < 8; i++) input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF); } else if (versions[ver][0] == 128) { for (i = 0; i < 16; i++) input[i] = state[i >> 2][i & 
0x3] & 0xFF; } } // encryption function of Skinny void enc(unsigned char *input, const unsigned char *userkey, int ver, int r) { unsigned char state[4][4]; unsigned char keyCells[3][4][4]; int i; memset(keyCells, 0, 48); for (i = 0; i < 16; i++) { if (versions[ver][0] == 64) { if (i & 1) { state[i >> 2][i & 0x3] = input[i >> 1] & 0xF; keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF; if (versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF; } else { state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF; keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF; if (versions[ver][1] >= 128) keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF; if (versions[ver][1] >= 192) keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF; } } else if (versions[ver][0] == 128) { state[i >> 2][i & 0x3] = input[i] & 0xFF; keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF; if (versions[ver][1] >= 256) keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF; if (versions[ver][1] >= 384) keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF; } } #ifdef DEBUG fprintf(fic, "ENC - initial state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif for (i = 0; i < r; i++) { if (versions[ver][0] == 64) SubCell4(state); else SubCell8(state); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after SubCell: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddConstants(state, i); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after AddConstants: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif AddKey(state, keyCells, ver); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after AddKey: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif ShiftRows(state); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i); display_cipher_state(state, 
keyCells, ver); fprintf(fic, "\n"); #endif MixColumn(state); #ifdef DEBUG fprintf(fic, "ENC - round %.2i - after MixColumn: ", i); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif } //The last subtweakey should not be added #ifdef DEBUG fprintf(fic, "ENC - final state: "); display_cipher_state(state, keyCells, ver); fprintf(fic, "\n"); #endif if (versions[ver][0] == 64) { for (i = 0; i < 8; i++) input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF); } else if (versions[ver][0] == 128) { for (i = 0; i < 16; i++) input[i] = state[i >> 2][i & 0x3] & 0xFF; } } // generate test vectors for all the versions of Skinny void TestVectors(int ver) { unsigned char p[16]; unsigned char c[16]; unsigned char k[48]; int n; for (n = 1; n < 10; n++) { int i; for (i = 0; i < (versions[ver][0] >> 3); i++) c[i] = p[i] = rand() & 0xff; for (i = 0; i < (versions[ver][0] >> 3); i++) printf("%02x", p[i]); printf("\n"); for (i = 0; i < (versions[ver][1] >> 3); i++) k[i] = rand() & 0xff; fprintf(fic, "TK = "); for (i = 0; i < (versions[ver][1] >> 3); i++) fprintf(fic, "%02x", k[i]); fprintf(fic, "\n"); fprintf(fic, "P = "); for (i = 0; i < (versions[ver][0] >> 3); i++) fprintf(fic, "%02x", p[i]); fprintf(fic, "\n"); enc(c, k, ver, 10); fprintf(fic, "C = "); for (i = 0; i < (versions[ver][0] >> 3); i++) fprintf(fic, "%02x", c[i]); fprintf(fic, "\n"); dec(c, k, ver, 10); fprintf(fic, "P' = "); for (i = 0; i < (versions[ver][0] >> 3); i++) fprintf(fic, "%02x", c[i]); fprintf(fic, "\n\n"); } } int boomerang(int r, int ver, int N3, unsigned char *dp, unsigned char *dc, unsigned char *k1, unsigned char *k2, unsigned char *k3, unsigned char *k4) { int i; unsigned char p1[16], p2[16]; unsigned char c3[16], c4[16]; int num = 0; for (int t = 0; t < N3; t++) { // randomly choose p1 for (i = 0; i < (versions[ver][0] >> 3); i++) p1[i] = rand() & 0xff; // derive p2 for (i = 0; i < (versions[ver][0] >> 3); i++) p2[i] = p1[i] 
^ dp[i]; enc(p1, k1, ver, r); enc(p2, k2, ver, r); // derive c3 for (i = 0; i < (versions[ver][0] >> 3); i++) c3[i] = p1[i] ^ dc[i]; // derive c4 for (i = 0; i < (versions[ver][0] >> 3); i++) c4[i] = p2[i] ^ dc[i]; dec(c3, k3, ver, r); dec(c4, k4, ver, r); bool flag = 1; for (i = 0; i < (versions[ver][0] >> 3); i++) if ((c3[i] ^ c4[i]) != dp[i]) flag = 0; if (flag) { num++; } } return num; } double send_boomerangs(int R, int ver, int N1, int N2, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2) { // Parallel execution int NUM[N1]; int counter; printf("#Rounds: %d rounds\n", R); printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %d * %d = 2^(%f)\n", N1, N2, N3, log(N1 * N2 * N3) / log(2)); // randomly choose k1 srand((unsigned)time(NULL)); unsigned char k1[48], k2[48], k3[48], k4[48]; // randomly choose k1 for (int i = 0; i < (versions[ver][1] >> 3); i++) k1[i] = rand() & 0xff; // derive k2 for (int i = 0; i < (versions[ver][1] >> 3); i++) k2[i] = k1[i] ^ dk1[i]; // derive k3 for (int i = 0; i < (versions[ver][1] >> 3); i++) k3[i] = k1[i] ^ dk2[i]; // derive k4 for (int i = 0; i < (versions[ver][1] >> 3); i++) k4[i] = k2[i] ^ dk2[i]; clock_t clock_timer; double wall_timer; clock_timer = clock(); wall_timer = omp_get_wtime(); omp_set_num_threads(N1); #pragma omp parallel for for (counter = 0; counter < N1; counter++) { int num = 0; int ID = omp_get_thread_num(); init_prng(ID); for (int j = 0; j < N2; j++) { num += boomerang(R, ver, N3, dp, dc, k1, k2, k3, k4); } NUM[ID] = num; } printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC); printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer); double sum = 0; double sum_temp = 1; for (int i = 0; i < N1; i++) sum += NUM[i]; printf("sum = %f\n", sum); sum_temp = (double)(N1 * N2 * N3) / sum; printf("2^(-%f)\n\n", log(sum_temp) / log(2)); printf("##########################\n"); return sum; } void 
convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16]) { for (int i = 0; i < (versions[ver][0] >> 3); i++) { char hex[2]; hex[0] = hex_str[2 * i]; hex[1] = hex_str[2 * i + 1]; dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff); } } void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48]) { for (int i = 0; i < (versions[ver][1] >> 3); i++) { char hex[2]; hex[0] = hex_str[2 * i]; hex[1] = hex_str[2 * i + 1]; dt[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff); } } int main() { // srand((unsigned)time(NULL)); // Initialization, should only be called once. int r = rand(); // init_prng(1); // //test all versions of Skinny // for (i = 0; i < (sizeof(versions) / sizeof(*versions)); i++) // { // sprintf(name, "test_vectors_%i_%i.txt", versions[i][0], versions[i][1]); // fic = fopen(name, "w"); // fprintf(fic, "\n\nSkinny-%i/%i: \n", versions[i][0], versions[i][1]); // TestVectors(i); // fclose(fic); // printf("Generating test vectors for Skinny-%i/%i - saved in file test_vectors_%i_%i.txt \n", versions[i][0], versions[i][1], versions[i][0], versions[i][1]); // } unsigned char dp[16]; unsigned char dc[16]; unsigned char dk1[48]; unsigned char dk2[48]; // ####################################################################################################### // ####################################################################################################### // ############################## User must change only the following lines ############################## int R = 6; // Number of rounds int ver = 5; // Determine the version: // [0 = Skinny-64-64] // [1 = Skinny-64-128] // [2 = Skinny-64-192] // [3 = Skinny-128-128] // [4 = Skinny-128-256] // [5 = Skinny-128-384] char dp_str[] = "00000000000000000000004000000000"; char dc_str[] = "00000000000000000000000000000000"; char dk1_str[] = "00000000000000000000000000002a00000000000000000000000000000099000000000000000000000000000000f300"; char dk2_str[] = 
"000000000000000000000054000000000000000000000000000000f30000000000000000000000000000007f00000000"; // ####################################################################################################### // ####################################################################################################### convert_hexstr_to_statearray(ver, dp_str, dp); convert_hexstr_to_statearray(ver, dc_str, dc); convert_hexstr_to_tweakarray(ver, dk1_str, dk1); convert_hexstr_to_tweakarray(ver, dk2_str, dk2); //########################## Number of queries ######################### int N1 = Nthreads; // Number of parallel threads : N1 int deg1 = 12; int deg2 = 12; int N2 = 1 << deg1; // Number of bunches per thread : N2 = 2^(deg) int N3 = 1 << deg2; // Number of queries per bunch : N3 //################### Number of total queries : N1*N2*N3 ############### char all_results[NumOfExperiments][20]; double sum = 0; double sum_temp = 0; for (int i = 0; i < NumOfExperiments; i++) { printf("Experiment Number %d:\n", i); sum_temp = send_boomerangs(R, ver, N1, N2, N3, dp, dc, dk1, dk2); sum += sum_temp; sum_temp = (double)(N1 * N2 * N3) / sum_temp; sprintf(all_results[i], "2^(-%0.2f), ", log(sum_temp) / log(2)); } printf("A summary of all results:\n"); for (int i = 0; i < NumOfExperiments; i++) { printf("%s", all_results[i]); } printf("\n##########################\nAverage = 2^(-%0.4f)\n", (log(NumOfExperiments) + log(N1) + log(N2) + log(N3) - log(sum))/log(2)); return 0; }
/* ======================= file: omp_overhead.c ======================= */
// // omp_overhead.c // // Measures OMP overhead and speedup, also reports thread affinity status. // 1. Measure the time to execute a serial function // 2. Measure the time to execute the same serial function on individual // threads. The same amount of work is performed as in the single // thread case. (weak scaling) // 3. Reports task and thread affinity information. // * MPI Rank ID // * Physical PU ID, same as hardware thread ID, same as PU# from // lstopo -p output // * Physical core ID // * NUMA ID, same as socket ID // * Hostname // // Options: // --results Whether to output timing results (off by default) // --csv Output in simple csv format (off by default) // --mpi MPI ranks will print out their affinity info // --logical Display core and PU IDs using the logical indexing // --nth <int> Have the synthetic work function find the nth prime number #include "hwloc.h" #include <mpi.h> #include <omp.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <sys/syscall.h> #include <getopt.h> #include <string.h> #include <math.h> #include "omp_common.h" // Given a physical PU id, return the logical PU id int lpu_from_pu(hwloc_topology_t, int); // Get a physical core ID from a processing unit (PU) ID int core_from_pu(hwloc_topology_t, int); // Get a logical core index from a logial PU index int lcore_from_pu(hwloc_topology_t , int ); // This is how we get the physical PU and node IDs. // rdtscp: Read Time-Stamp Counter and Processor ID IA assembly instruction // Introduced in Core i7 and newer. // Read 64-bit time-stamp counter and 32-bit IA32_TSC_AUX value into EDX:EAX and ECX. // The OS should write the core id into IA32_TSC_AUX. Linux does this, as well as // storing the NUMA region id // if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP)) // write_rdtscp_aux((node << 12) | cpu); // // /* // * Store cpu number in limit so that it can be loaded quickly // * in user space in vgetcpu. 
(12 bits for the CPU and 8 bits for the node) // */ // // This makes it the easist way to determine which numa region (socket) and which // core (actually hardware thread ID in the case of hyperthreading) a process is running on. // // This is a generic TSC reader if the compiler does not provide an intrinsic for it. // (The Intel intrisic is simply __rdtscp.) // #ifdef __INTEL64_TSC__ unsigned long generic_rdtscp(int *pu_id, int *numa_id) { unsigned int a, d, c; __asm__ volatile("rdtscp" : "=a" (a), "=d" (d), "=c" (c)); *numa_id = (c & 0xFFF000)>>12; // Mask off lower bits and then shift to get numa_id *pu_id = c & 0xFFF; // Mask off higher bits and then read pu_id from lower 8 bits return ( (unsigned long)a ) | ( ((unsigned long)d ) << 32 );; } #endif // __INTEL64_TSC__ // A time-consuming function long findPrimeNumber(int); int main(int argc, char** argv) { int i, err, rank, total_ranks; int opt = 0; int longIndex = 0; int nth; long nthPrime; int tid, nThreads; #ifdef __INTEL64_TSC__ unsigned long int tscValue; #endif int pu_id, core_id, numa_id; // Physical index values int lpu_id, lcore_id; // Logically indexed pu and core id values. 
hwloc_topology_t topology; double startTime; double sTime; // Serial loop time double pTime; // Parallel loop time // MPI timing variables double mpi_stime, mpi_etime; char hnbuf[64]; // holds the hostname (node ID) MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &total_ranks); mpi_stime = MPI_Wtime(); // Create the topology hwloc_topology_init(&topology); hwloc_topology_load(topology); // Parse options // --nth <integer>> - Find the nth prime number if (rank == 0 ) { int long_index =0; nth = NTH; while ((opt = getopt_long(argc, argv,"n:", longOpts, &long_index )) != -1) { switch (opt) { case 0: // If a flag was set, do nothing else if (longOpts[long_index].flag != 0) break; case 'n' : nth = atoi(optarg); break; } } } // end-if // Broadcast nth MPI_Bcast(&result_flag, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&csv_flag, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&logical_flag,1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&mpi_flag, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(&nth, 1, MPI_INT, 0, MPI_COMM_WORLD); memset(hnbuf, 0, sizeof(hnbuf)); err = gethostname(hnbuf, sizeof(hnbuf)); // Serial loop (every MPI rank does this) startTime = omp_get_wtime(); nthPrime = findPrimeNumber(nth); sTime = (omp_get_wtime() - startTime); if (mpi_flag) { #ifdef __INTEL64_TSC__ tscValue = generic_rdtscp(&pu_id, &numa_id); // sets the value of pu_id and numa_id #else syscall(SYS_getcpu, &pu_id, &numa_id, NULL); #endif if (logical_flag) { lpu_id = lpu_from_pu(topology, pu_id); lcore_id = lcore_from_pu(topology, pu_id); printf("MPI-only: Rank %d, lPU %d, lcore %d, NUMA id %d (%s).\n", rank, lpu_id, lcore_id, numa_id, hnbuf); } else { core_id = core_from_pu(topology, pu_id); printf("MPI-only: Rank %d, PU %d, core %d, NUMA id %d (%s).\n", rank, pu_id, core_id, numa_id, hnbuf); } } #pragma omp master { if (csv_flag) printf("Rank, Thread, PU ID, Core ID, NUMA ID, Host\n"); } omp_set_dynamic(0); #pragma omp parallel private(tid, pu_id, core_id, 
lpu_id, lcore_id, numa_id, nthPrime) shared(topology) { tid = omp_get_thread_num(); nThreads = omp_get_num_threads(); startTime = omp_get_wtime(); #pragma omp for for(int i = 0; i < nThreads; i++) { nthPrime = findPrimeNumber( nth ); // All threads do the same amount of work } pTime = (omp_get_wtime() - startTime); #pragma omp barrier #ifdef __INTEL64_TSC__ tscValue = generic_rdtscp(&pu_id, &numa_id); // sets the value of pu_id and numa_id #else syscall(SYS_getcpu, &pu_id, &numa_id, NULL); #endif if (logical_flag) { lpu_id = lpu_from_pu(topology, pu_id); lcore_id = lcore_from_pu(topology, pu_id); } else { core_id = core_from_pu(topology, pu_id); } if (csv_flag) { if (logical_flag) { printf("%d, %d, %d, %d, %d, %s.\n", rank, tid, lpu_id, lcore_id, numa_id, hnbuf); } else { printf("%d, %d, %d, %d, %d, %s.\n", rank, tid, pu_id, core_id, numa_id, hnbuf); } // logical flag } else { // normal output if (logical_flag) { printf("OMP: Rank %d, thread %d of %d on lPU %d, lcore %d, NUMA id %d (%s).\n", rank, tid, nThreads, lpu_id, lcore_id, numa_id, hnbuf); } else { printf("OMP: Rank %d, thread %d of %d on PU %d, core %d, NUMA id %d (%s).\n", rank, tid, nThreads, pu_id, core_id, numa_id, hnbuf); } // logical flag } } // output results if ( rank == 0 && result_flag ) { printf("Results (time in seconds)\n"); printf("Rank\tSerial\tParallel\tOverhead\tnThreads\n"); printf("%d:\t%8.4f\t%8.4f\t%8.5f\t%8d\n", rank, sTime, pTime, (pTime-sTime), nThreads); } MPI_Barrier(MPI_COMM_WORLD); for (i=1;i<total_ranks;i++) { if ( i == rank && result_flag ) { printf("%d:\t%8.4f\t%8.4f\t%8.5f\t%8d\n", rank, sTime, pTime, (pTime-sTime), nThreads); } MPI_Barrier(MPI_COMM_WORLD); } mpi_etime = MPI_Wtime(); if (rank == 0) printf("Elapsed time: %f\n", mpi_etime - mpi_stime); MPI_Finalize(); return 0; } // Find the nth prime number long findPrimeNumber(int n) { int count=0; long a = 2; while ( count < n ) { long b = 2; int prime = 1; // to check if found a prime while ( b * b <= a) { if ( a % b == 0 ) 
{ prime = 0; break; } b++; } if( prime > 0 ) { count++; } a++; } return (--a); } // Given a physical PU index, return the logical PU index // May have to revisit this, see if it's necessary. // On Broadwell, for example, these are always the same int lpu_from_pu(hwloc_topology_t topology, int puid) { int lpuid = -1; hwloc_obj_t pu_obj; // Find the pu obj with physical index puid // Find the first pu object pu_obj = hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 0); while( pu_obj != NULL ) { if ( puid == pu_obj->os_index) { lpuid = pu_obj->logical_index; } pu_obj = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_PU, pu_obj); } return lpuid; } // Given a physical PU (CPU) ID, return the physical core that owns it // Physical PU indexes are guaranteed unique across a node. int core_from_pu(hwloc_topology_t topology, int puid) { int coreid = -1; hwloc_obj_t parent = NULL; hwloc_obj_t pu_obj; // Find the pu obj with ID puid // Find the first pu object pu_obj = hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 0); while( pu_obj != NULL ) { if ( puid == pu_obj->os_index) { parent = pu_obj->parent; } pu_obj = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_PU, pu_obj); } if (parent != NULL) coreid = parent->os_index; return coreid; } // Given a physical PU index, return the logical core index that owns it int lcore_from_pu(hwloc_topology_t topology, int puid) { int lcoreid = -1; hwloc_obj_t parent = NULL; hwloc_obj_t pu_obj; // Find the pu obj with logical index puid // Find the first pu object pu_obj = hwloc_get_obj_by_type(topology, HWLOC_OBJ_PU, 0); while( pu_obj != NULL ) { if ( puid == pu_obj->os_index) { parent = pu_obj->parent; } pu_obj = hwloc_get_next_obj_by_type(topology, HWLOC_OBJ_PU, pu_obj); } if (parent != NULL) lcoreid = parent->logical_index; return lcoreid; }
/* ======================= file: matrix.c ======================= */
/******************************************************************************
 * INCLUDES
 *****************************************************************************/
#include "base.h"
#include "matrix.h"
#include "util.h"
#include "timer.h"
#include "splatt_lapack.h"

#include <math.h>

#ifdef SPLATT_USE_MPI
#include <mpi.h>
#else
/* define MPI_Comm to make life easier without MPI */
typedef int MPI_Comm;
#endif


/******************************************************************************
 * PRIVATE FUNCTIONS
 *****************************************************************************/

/**
* @brief Normalize each column of A and store the column l_2 norms in 'lambda'.
*        If SPLATT_USE_MPI is defined, it will aggregate the norms over MPI
*        communicator 'comm'. 'comm' is not touched if SPLATT_USE_MPI is not
*        defined.
*
* @param[out] A The matrix whose columns we normalize (row-major).
* @param[out] lambda The column norms.
* @param comm The MPI communicator.
*/
static void p_mat_2norm(
  matrix_t * const A,
  val_t * const restrict lambda,
  MPI_Comm comm)
{
  idx_t const I = A->I;
  idx_t const J = A->J;
  val_t * const restrict vals = A->vals;

  #pragma omp parallel
  {
    int const tid = splatt_omp_get_thread_num();
    /* per-thread partial column sums of squares */
    val_t * restrict mylambda = splatt_malloc(J * sizeof(*mylambda));
    for(idx_t j=0; j < J; ++j) {
      mylambda[j] = 0;
    }

    #pragma omp for schedule(static)
    for(idx_t i=0; i < I; ++i) {
      for(idx_t j=0; j < J; ++j) {
        mylambda[j] += vals[j + (i*J)] * vals[j + (i*J)];
      }
    }

    /* do reduction on partial sums */
    thread_allreduce(mylambda, J, SPLATT_REDUCE_SUM);

    #pragma omp master
    {
#ifdef SPLATT_USE_MPI
      /* now do an MPI reduction to get the global lambda */
      timer_start(&timers[TIMER_MPI_NORM]);
      timer_start(&timers[TIMER_MPI_COMM]);
      MPI_Allreduce(mylambda, lambda, J, SPLATT_MPI_VAL, MPI_SUM, comm);
      timer_stop(&timers[TIMER_MPI_COMM]);
      timer_stop(&timers[TIMER_MPI_NORM]);
#else
      for(idx_t j=0; j < J; ++j) {
        lambda[j] = mylambda[j];
      }
#endif

      /* compute the final norms */
      for(idx_t j=0; j < J; ++j) {
        lambda[j] = sqrt(lambda[j]);
      }
    }
    /* all threads must see the final lambda before normalizing */
    #pragma omp barrier

    /* do the normalization */
    #pragma omp for schedule(static)
    for(idx_t i=0; i < I; ++i) {
      for(idx_t j=0; j < J; ++j) {
        vals[j+(i*J)] /= lambda[j];
      }
    }

    splatt_free(mylambda);
  } /* end omp parallel */
}


/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/

void mat_cholesky(
  matrix_t const * const A)
{
  timer_start(&timers[TIMER_CHOLESKY]);
  /* check dimensions */
  assert(A->I == A->J);

  /* Cholesky factorization (lower triangular, in place) */
  splatt_blas_int N = A->I;
  val_t * const restrict neqs = A->vals;
  char uplo = 'L';
  splatt_blas_int order = N;
  splatt_blas_int lda = N;
  splatt_blas_int info;
  LAPACK_DPOTRF(&uplo, &order, neqs, &lda, &info);
  if(info) {
    /* cast: splatt_blas_int may be wider than int */
    fprintf(stderr, "SPLATT: DPOTRF returned %d\n", (int) info);
  }
  timer_stop(&timers[TIMER_CHOLESKY]);
}


void mat_solve_cholesky(
  matrix_t * const cholesky,
  matrix_t * const rhs)
{
  /* Chunked AO-ADMM will call this from a parallel region. */
  if(!splatt_omp_in_parallel()) {
    timer_start(&timers[TIMER_BACKSOLVE]);
  }

  splatt_blas_int N = cholesky->I;

  /* Solve against rhs */
  char tri = 'L';
  splatt_blas_int lda = N;
  splatt_blas_int info;
  splatt_blas_int nrhs = rhs->I;
  splatt_blas_int ldb = N;
  LAPACK_DPOTRS(&tri, &N, &nrhs, cholesky->vals, &lda, rhs->vals, &ldb, &info);
  if(info) {
    fprintf(stderr, "SPLATT: DPOTRS returned %d\n", (int) info);
  }

  if(!splatt_omp_in_parallel()) {
    timer_stop(&timers[TIMER_BACKSOLVE]);
  }
}


val_t mat_trace(
  matrix_t const * const A)
{
  assert(A->I == A->J);
  idx_t const N = A->I;
  val_t const * const restrict vals = A->vals;
  val_t trace = 0.;
  for(idx_t i=0; i < N; ++i) {
    trace += vals[i + (i*N)];
  }
  return trace;
}


void mat_aTa(
  matrix_t const * const A,
  matrix_t * const ret)
{
  timer_start(&timers[TIMER_ATA]);
  /* check matrix dimensions */
  assert(ret->I == ret->J);
  assert(ret->I == A->J);
  assert(ret->vals != NULL);
  assert(A->rowmajor);
  assert(ret->rowmajor);

  idx_t const I = A->I;
  idx_t const F = A->J;

  char uplo = 'L';
  char trans = 'N';
  /* actually do A * A' due to row-major ordering */
  splatt_blas_int N = (splatt_blas_int) F;
  splatt_blas_int K = (splatt_blas_int) I;
  splatt_blas_int lda = N;
  splatt_blas_int ldc = N;
  val_t alpha = 1.;
  val_t beta = 0.;
  SPLATT_BLAS(syrk)(
      &uplo, &trans,
      &N, &K,
      &alpha, A->vals, &lda,
      &beta, ret->vals, &ldc);

  timer_stop(&timers[TIMER_ATA]);
}


#ifdef SPLATT_USE_MPI
void mat_aTa_mpi(
  matrix_t const * const A,
  matrix_t * const ret,
  MPI_Comm comm)
{
  /* local matrix multiplication */
  mat_aTa(A, ret);

  /* aggregate results */
  idx_t const F = A->J;
  timer_start(&timers[TIMER_ATA]);
  timer_start(&timers[TIMER_MPI_ATA]);
  timer_start(&timers[TIMER_MPI_COMM]);
  MPI_Allreduce(MPI_IN_PLACE, ret->vals, F * F, SPLATT_MPI_VAL, MPI_SUM, comm);
  timer_stop(&timers[TIMER_MPI_COMM]);
  timer_stop(&timers[TIMER_MPI_ATA]);
  timer_stop(&timers[TIMER_ATA]);
}
#endif


void mat_matmul(
  matrix_t const * const A,
  matrix_t const * const B,
  matrix_t * const C)
{
  timer_start(&timers[TIMER_MATMUL]);

  /* check dimensions -- C must have room for the A*B product.
   * BUGFIX: the original assigned C->I/C->J *before* these asserts (and
   * then again after), which made the capacity check vacuous. */
  assert(A->J == B->I);
  assert(C->I * C->J <= A->I * B->J);

  /* set dimensions */
  C->I = A->I;
  C->J = B->J;

  /* This calls column-major BLAS by instead computing: C^T = B^T * A^T. */
  char transA = 'N';
  char transB = 'N';
  val_t * a_vals = B->vals;
  val_t * b_vals = A->vals;
  val_t * c_vals = C->vals;
  splatt_blas_int M = B->J;
  splatt_blas_int N = A->I;
  splatt_blas_int K = A->J;
  splatt_blas_int lda = M;
  splatt_blas_int ldb = K;
  splatt_blas_int ldc = M;
  val_t alpha = 1.;
  val_t beta = 0.;
  SPLATT_BLAS(gemm)(
      &transA, &transB,
      &M, &N, &K,
      &alpha, a_vals, &lda,
      b_vals, &ldb,
      &beta, c_vals, &ldc);

  timer_stop(&timers[TIMER_MATMUL]);
}


void mat_normalize(
  matrix_t * const A,
  val_t * const restrict lambda)
{
  timer_start(&timers[TIMER_MATNORM]);
#ifdef SPLATT_USE_MPI
  /* passing comm=0 will break things in MPI mode */
  fprintf(stderr, "SPLATT: mat_normalize() is invalid in MPI mode. ");
  fprintf(stderr, "Use mat_normalize_mpi() instead.\n");
  return;
#endif
  p_mat_2norm(A, lambda, 0);
  timer_stop(&timers[TIMER_MATNORM]);
}


#ifdef SPLATT_USE_MPI
void mat_normalize_mpi(
  matrix_t * const A,
  val_t * const restrict lambda,
  MPI_Comm comm)
{
  timer_start(&timers[TIMER_MATNORM]);
  p_mat_2norm(A, lambda, comm);
  timer_stop(&timers[TIMER_MATNORM]);
}
#endif


/* Form the Hadamard product of all aTa matrices except 'mode' into out_mat
 * (upper triangle only). */
void mat_form_gram(
  matrix_t * * aTa,
  matrix_t * out_mat,
  idx_t nmodes,
  idx_t mode)
{
  idx_t const N = aTa[mode]->J;
  val_t * const restrict gram = out_mat->vals;

  #pragma omp parallel
  {
    /* first initialize */
    #pragma omp for schedule(static, 1)
    for(idx_t i=0; i < N; ++i) {
      for(idx_t j=i; j < N; ++j) {
        gram[j+(i*N)] = 1.;
      }
    }

    for(idx_t m=0; m < nmodes; ++m) {
      if(m == mode) {
        continue;
      }

      /* only work with upper triangular */
      val_t const * const restrict mat = aTa[m]->vals;

      #pragma omp for schedule(static, 1) nowait
      for(idx_t i=0; i < N; ++i) {
        for(idx_t j=i; j < N; ++j) {
          gram[j+(i*N)] *= mat[j+(i*N)];
        }
      }
    }
  } /* omp parallel */
}


void mat_add_diag(
  matrix_t * const A,
  val_t const scalar)
{
  idx_t const rank =
A->J; val_t * const restrict vals = A->vals; for(idx_t i=0; i < rank; ++i) { vals[i + (i*rank)] += scalar; } } matrix_t * mat_alloc( idx_t const nrows, idx_t const ncols) { matrix_t * mat = (matrix_t *) splatt_malloc(sizeof(matrix_t)); mat->I = nrows; mat->J = ncols; mat->vals = (val_t *) splatt_malloc(nrows * ncols * sizeof(val_t)); mat->rowmajor = 1; return mat; } matrix_t * mat_rand( idx_t const nrows, idx_t const ncols) { matrix_t * mat = mat_alloc(nrows, ncols); val_t * const vals = mat->vals; fill_rand(vals, nrows * ncols); return mat; } matrix_t * mat_zero( idx_t const nrows, idx_t const ncols) { matrix_t * mat = mat_alloc(nrows, ncols); /* Initialize in parallel in case system is NUMA. This may bring a small * improvement. */ #pragma omp parallel for schedule(static) for(idx_t i=0; i < nrows; ++i) { for(idx_t j=0; j < ncols; ++j) { mat->vals[j + (i*ncols)] = 0.; } } return mat; } matrix_t * mat_mkptr( val_t * const data, idx_t rows, idx_t cols, int rowmajor) { matrix_t * mat = splatt_malloc(sizeof(*mat)); mat_fillptr(mat, data, rows, cols, rowmajor); return mat; } void mat_fillptr( matrix_t * ptr, val_t * const data, idx_t rows, idx_t cols, int rowmajor) { ptr->I = rows; ptr->J = cols; ptr->rowmajor = rowmajor; ptr->vals = data; } void mat_free( matrix_t * mat) { if(mat == NULL) { return; } splatt_free(mat->vals); splatt_free(mat); } matrix_t * mat_mkrow( matrix_t const * const mat) { assert(mat->rowmajor == 0); idx_t const I = mat->I; idx_t const J = mat->J; matrix_t * row = mat_alloc(I, J); val_t * const restrict rowv = row->vals; val_t const * const restrict colv = mat->vals; for(idx_t i=0; i < I; ++i) { for(idx_t j=0; j < J; ++j) { rowv[j + (i*J)] = colv[i + (j*I)]; } } return row; } matrix_t * mat_mkcol( matrix_t const * const mat) { assert(mat->rowmajor == 1); idx_t const I = mat->I; idx_t const J = mat->J; matrix_t * col = mat_alloc(I, J); val_t * const restrict colv = col->vals; val_t const * const restrict rowv = mat->vals; for(idx_t i=0; i < I; 
++i) { for(idx_t j=0; j < J; ++j) { colv[i + (j*I)] = rowv[j + (i*J)]; } } col->rowmajor = 0; return col; } spmatrix_t * spmat_alloc( idx_t const nrows, idx_t const ncols, idx_t const nnz) { spmatrix_t * mat = (spmatrix_t*) splatt_malloc(sizeof(spmatrix_t)); mat->I = nrows; mat->J = ncols; mat->nnz = nnz; mat->rowptr = (idx_t*) splatt_malloc((nrows+1) * sizeof(idx_t)); mat->colind = (idx_t*) splatt_malloc(nnz * sizeof(idx_t)); mat->vals = (val_t*) splatt_malloc(nnz * sizeof(val_t)); return mat; } void spmat_free( spmatrix_t * mat) { free(mat->rowptr); free(mat->colind); free(mat->vals); free(mat); } val_t mat_norm( matrix_t const * const A) { val_t norm = 0.; val_t const * const restrict vals = A->vals; #pragma omp parallel for schedule(static) reduction(+:norm) for(idx_t x=0; x < A->I * A->J; ++x) { norm += vals[x] * vals[x]; } return sqrt(norm); } val_t mat_norm_diff( matrix_t const * const A, matrix_t const * const B) { assert(A->I == B->I); assert(A->J == B->J); val_t norm = 0.; val_t const * const restrict avals = A->vals; val_t const * const restrict bvals = B->vals; #pragma omp parallel for schedule(static) reduction(+:norm) for(idx_t x=0; x < A->I * A->J; ++x) { val_t const diff = avals[x] - bvals[x]; norm += diff * diff; } return sqrt(norm); }
trmm_x_sky_n_hi_row_conj.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #include <memory.h> alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *mat, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, const ALPHA_Number beta, ALPHA_Number *y, const ALPHA_INT ldy) { #ifdef COMPLEX ALPHA_INT num_threads = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT i = 0; i < mat->rows; i++) for(ALPHA_INT j = 0; j < columns; j++) alpha_mul(y[index2(i, j, ldy)], y[index2(i, j, ldy)], beta); #ifdef _OPENMP #pragma omp parallel for num_threads(num_threads) #endif for (ALPHA_INT cc = 0; cc < columns; ++cc) { for (ALPHA_INT cr = 0; cr < mat->rows; ++cr) { ALPHA_INT start = mat->pointers[cr]; ALPHA_INT end = mat->pointers[cr + 1]; ALPHA_INT idx = 1; ALPHA_INT eles_num = end - start; for (ALPHA_INT ai = start; ai < end; ++ai) { ALPHA_INT ac = cr - eles_num + idx; if (ac <= cr) { ALPHA_Number t; alpha_mul_3c(t, alpha, mat->values[ai]); alpha_madde(y[index2(cr, cc, ldy)], t, x[index2(ac, cc, ldx)]); } idx++; } } } return ALPHA_SPARSE_STATUS_SUCCESS; #else return ALPHA_SPARSE_STATUS_INVALID_VALUE; #endif }
omp_whereami.c
#include <stdio.h> #include <omp.h> void load_cpu_nsec(int nsec); void omp_report_mask(); int map_to_cpuid( int icore); int main(){ int nthrds, thrd, cpuid; //Thread info int nsec = 10; // Load, default time int ierr; // Error number #pragma omp parallel private(thrd,ierr) { thrd = omp_get_thread_num(); nthrds = omp_get_num_threads(); cpuid = thrd; // set cpuid to thread number (thrd) ierr = map_to_cpuid( cpuid ); // set your own affinity here omp_report_mask(); // Call mask reporter load_cpu_nsec( nsec ); // Load up rank process so user can watch top. } }
test7.c
int g1; void bar() { g1=0; #pragma omp barrier g1=1; } void foo() { 2+g1; #pragma omp barrier g1=3; bar(); 4+g1; } int main () { #pragma omp parallel { g1=5; if (6) { g1=7; foo(); 8+g1; } else { g1=9; #pragma omp barrier 10+g1; #pragma omp barrier g1=11; } 12+g1; } }
Matrix.h
#pragma once

#include <iostream>
#include <vector>
#include <algorithm>
#include <functional>
#include <exception>
#include <stdexcept>
#include <type_traits>

#include <omp.h>

namespace cppmath
{

/// Dense row-major matrix of a floating-point element type, supporting
/// elementwise +/-, scalar */ /, and matrix multiplication (serial or
/// OpenMP-parallel depending on size).
template <typename T>
class Matrix
{
    static_assert(std::is_floating_point<T>::value,
                  "An specialization of the matrix class has to be of a floating point type!");

public:
    using MatrixDataType = std::vector<std::vector<T>>;

    Matrix() = delete;
    Matrix(std::size_t rows, std::size_t cols);
    Matrix(std::size_t rows, std::size_t cols, const T &value);
    ~Matrix() noexcept = default;

    Matrix(const Matrix &other) = default;
    Matrix &operator=(const Matrix &other) = default;
    Matrix(Matrix &&other) noexcept = default;
    Matrix &operator=(Matrix &&other) noexcept = default;

    Matrix operator+(const Matrix &rhs);
    Matrix &operator+=(const Matrix &rhs);
    Matrix operator-(const Matrix &rhs);
    Matrix &operator-=(const Matrix &rhs);
    Matrix operator*(const T &scalar);
    Matrix &operator*=(const T &scalar);
    Matrix operator/(const T &scalar);
    Matrix &operator/=(const T &scalar);
    Matrix operator*(const Matrix &rhs);
    Matrix &operator*=(const Matrix &rhs);

    /// result = matrixA * matrixB, serial triple loop.
    void dot(const Matrix &matrixA, const Matrix &matrixB, Matrix &result);
    /// result = matrixA * matrixB, outer loop parallelized with OpenMP.
    void parallel_dot(const Matrix &matrixA, const Matrix &matrixB, Matrix &result);

    void print_matrix() const;

    std::size_t num_rows() const;
    std::size_t num_cols() const;

private:
    /// Below this row/col count operator*(Matrix) uses the serial kernel.
    static constexpr std::size_t kParallelThreshold = 250;

    std::size_t m_rows;
    std::size_t m_cols;
    MatrixDataType m_data;
};

/// Construct a rows x cols matrix, zero-initialized.
template <typename T>
Matrix<T>::Matrix(std::size_t rows, std::size_t cols)
    : m_rows(rows), m_cols(cols), m_data(m_rows, std::vector<T>(m_cols, 0))
{
}

/// Construct a rows x cols matrix with every element set to `value`.
template <typename T>
Matrix<T>::Matrix(std::size_t rows, std::size_t cols, const T &value)
    : m_rows(rows), m_cols(cols), m_data(m_rows, std::vector<T>(m_cols, value))
{
}

template <typename T>
Matrix<T> Matrix<T>::operator+(const Matrix<T> &rhs)
{
    if (m_rows != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of rows are not equal!"));
    }
    if (m_cols != rhs.m_cols)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }

    Matrix<T> result(m_rows, m_cols);

    for (std::size_t i = 0; i != m_rows; ++i)
    {
        std::transform(
            m_data[i].begin(), m_data[i].end(),
            rhs.m_data[i].begin(),
            result.m_data[i].begin(),
            std::plus<T>()
        );
    }

    return result;
}

template <typename T>
Matrix<T> &Matrix<T>::operator+=(const Matrix<T> &rhs)
{
    if (m_rows != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of rows are not equal!"));
    }
    if (m_cols != rhs.m_cols)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }

    for (std::size_t i = 0; i != m_rows; ++i)
    {
        std::transform(
            m_data[i].begin(), m_data[i].end(),
            rhs.m_data[i].begin(),
            m_data[i].begin(),
            std::plus<T>()
        );
    }

    return *this;
}

template <typename T>
Matrix<T> Matrix<T>::operator-(const Matrix<T> &rhs)
{
    if (m_rows != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of rows are not equal!"));
    }
    if (m_cols != rhs.m_cols)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }

    Matrix<T> result(m_rows, m_cols);

    for (std::size_t i = 0; i != m_rows; ++i)
    {
        std::transform(
            m_data[i].begin(), m_data[i].end(),
            rhs.m_data[i].begin(),
            result.m_data[i].begin(),
            std::minus<T>()
        );
    }

    return result;
}

template <typename T>
Matrix<T> &Matrix<T>::operator-=(const Matrix<T> &rhs)
{
    if (m_rows != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of rows are not equal!"));
    }
    if (m_cols != rhs.m_cols)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }

    for (std::size_t i = 0; i != m_rows; ++i)
    {
        std::transform(
            m_data[i].begin(), m_data[i].end(),
            rhs.m_data[i].begin(),
            m_data[i].begin(),
            std::minus<T>()
        );
    }

    return *this;
}

template <typename T>
Matrix<T> Matrix<T>::operator*(const T &scalar)
{
    Matrix<T> result(m_rows, m_cols);

    for (std::size_t i = 0; i != m_rows; ++i)
    {
        std::transform(
            m_data[i].begin(), m_data[i].end(),
            result.m_data[i].begin(),
            [scalar](const T val) -> T { return val * scalar; }
        );
    }

    return result;
}

template <typename T>
Matrix<T> &Matrix<T>::operator*=(const T &scalar)
{
    for (std::size_t i = 0; i != m_rows; ++i)
    {
        std::transform(
            m_data[i].begin(), m_data[i].end(),
            m_data[i].begin(),
            [scalar](const T val) -> T { return val * scalar; }
        );
    }

    return *this;
}

template <typename T>
Matrix<T> Matrix<T>::operator/(const T &scalar)
{
    if (scalar == 0)
    {
        throw(std::overflow_error("You cannot divide by a scalar value of zero!"));
    }

    Matrix<T> result(m_rows, m_cols);

    for (std::size_t i = 0; i != m_rows; ++i)
    {
        std::transform(
            m_data[i].begin(), m_data[i].end(),
            result.m_data[i].begin(),
            [scalar](const T val) -> T { return val / scalar; }
        );
    }

    return result;
}

template <typename T>
Matrix<T> &Matrix<T>::operator/=(const T &scalar)
{
    /* FIX: operator/ rejects a zero divisor but operator/= silently
     * accepted it; make the two consistent. */
    if (scalar == 0)
    {
        throw(std::overflow_error("You cannot divide by a scalar value of zero!"));
    }

    for (std::size_t i = 0; i != m_rows; ++i)
    {
        std::transform(
            m_data[i].begin(), m_data[i].end(),
            m_data[i].begin(),
            [scalar](const T val) -> T { return val / scalar; }
        );
    }

    return *this;
}

template <typename T>
Matrix<T> Matrix<T>::operator*(const Matrix<T> &rhs)
{
    if (m_cols != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }

    Matrix<T> result(m_rows, rhs.m_cols);

    /* small products are faster serially; large ones go parallel */
    if (m_rows < kParallelThreshold && m_cols < kParallelThreshold)
    {
        dot(*this, rhs, result);
    }
    else
    {
        parallel_dot(*this, rhs, result);
    }

    return result;
}

template <typename T>
Matrix<T> &Matrix<T>::operator*=(const Matrix<T> &rhs)
{
    if (m_cols != rhs.m_rows)
    {
        throw(std::invalid_argument("Number of cols are not equal!"));
    }

    *this = (*this) * rhs;

    return *this;
}

template <typename T>
void Matrix<T>::dot(const Matrix<T> &matrixA, const Matrix<T> &matrixB, Matrix<T> &result)
{
    for (std::size_t i = 0; i != matrixA.m_rows; ++i)
    {
        for (std::size_t j = 0; j != matrixB.m_cols; ++j)
        {
            for (std::size_t k = 0; k != matrixB.m_rows; ++k)
            {
                result.m_data[i][j] =
                    result.m_data[i][j] + matrixA.m_data[i][k] * matrixB.m_data[k][j];
            }
        }
    }
}

template <typename T>
void Matrix<T>::parallel_dot(const Matrix<T> &matrixA, const Matrix<T> &matrixB, Matrix<T> &result)
{
    /* FIX: the loops used `!=` as the condition with indices declared
     * outside the loop; `#pragma omp parallel for` requires the canonical
     * loop form (relational operator) prior to OpenMP 5.0. Declaring the
     * indices in the for-init also makes them implicitly private. */
    #pragma omp parallel for shared(result) num_threads(12)
    for (std::size_t i = 0; i < matrixA.m_rows; ++i)
    {
        for (std::size_t j = 0; j < matrixB.m_cols; ++j)
        {
            for (std::size_t k = 0; k < matrixB.m_rows; ++k)
            {
                result.m_data[i][j] =
                    result.m_data[i][j] + matrixA.m_data[i][k] * matrixB.m_data[k][j];
            }
        }
    }
}

template <typename T>
void Matrix<T>::print_matrix() const
{
    for (std::size_t i = 0; i < m_rows; ++i)
    {
        for (std::size_t j = 0; j < m_cols; ++j)
        {
            std::cout << m_data[i][j] << " ";
        }
        std::cout << std::endl;
    }
    std::cout << std::endl;
}

template <typename T>
std::size_t Matrix<T>::num_rows() const
{
    return m_rows;
}

template <typename T>
std::size_t Matrix<T>::num_cols() const
{
    return m_cols;
}

} // namespace cppmath
resource_manager.h
// -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------

#ifndef RESOURCE_MANAGER_H_
#define RESOURCE_MANAGER_H_

#include <Rtypes.h>
#include <limits>
#include <memory>
#include <ostream>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#ifdef USE_OPENCL
#define __CL_ENABLE_EXCEPTIONS
#ifdef __APPLE__
#include <OpenCL/cl.hpp>
#else
#include <CL/cl.hpp>
#endif
#endif

#include "backend.h"
#include "diffusion_grid.h"
#include "simulation.h"
#include "tuple_util.h"
#include "variadic_template_parameter_util.h"

namespace bdm {

/// Unique identifier of a simulation object. Acts as a type erased pointer.
/// Has the same type for every simulation object.
/// The id is split into two parts: Type index and element index.
/// The first one is used to obtain the container in the ResourceManager, the
/// second specifies the element within this vector.
class SoHandle {
 public:
  /// Default-constructed handle is the "null" handle: both indices at max.
  constexpr SoHandle() noexcept
      : type_idx_(std::numeric_limits<decltype(type_idx_)>::max()),
        element_idx_(std::numeric_limits<decltype(element_idx_)>::max()) {}

  SoHandle(uint16_t type_idx, uint32_t element_idx)
      : type_idx_(type_idx), element_idx_(element_idx) {}

  uint16_t GetTypeIdx() const { return type_idx_; }
  uint32_t GetElementIdx() const { return element_idx_; }
  void SetElementIdx(uint32_t element_idx) { element_idx_ = element_idx; }

  bool operator==(const SoHandle& other) const {
    return type_idx_ == other.type_idx_ && element_idx_ == other.element_idx_;
  }

  bool operator!=(const SoHandle& other) const { return !(*this == other); }

  /// Orders first by type index, then by element index.
  bool operator<(const SoHandle& other) const {
    if (type_idx_ == other.type_idx_) {
      return element_idx_ < other.element_idx_;
    } else {
      return type_idx_ < other.type_idx_;
    }
  }

  friend std::ostream& operator<<(std::ostream& stream,
                                  const SoHandle& handle) {
    stream << "Type idx: " << handle.type_idx_
           << " element idx: " << handle.element_idx_;
    return stream;
  }

 private:
  // TODO(lukas) add using TypeIdx_t = uint16_t and
  // using ElementIdx_t = uint32_t
  uint16_t type_idx_;
  /// changed element index to uint32_t after issues with std::atomic with
  /// size 16 -> max element_idx: 4.294.967.296
  uint32_t element_idx_;
  ClassDefNV(SoHandle, 1);
};

/// Sentinel handle (both indices at their max value).
constexpr SoHandle kNullSoHandle;

namespace detail {

/// \see bdm::ConvertToContainerTuple, VariadicTypedef
template <typename Backend, typename... Types>
struct ConvertToContainerTuple {};

/// \see bdm::ConvertToContainerTuple, VariadicTypedef
template <typename Backend, typename... Types>
struct ConvertToContainerTuple<Backend, VariadicTypedef<Types...>> {
  // Helper alias to get the container type associated with Backend
  template <typename T>
  using Container = typename Backend::template Container<T>;
  // Helper type alias to get a type with certain Backend
  template <typename T>
  using ToBackend = typename T::template Self<Backend>;

  using type = std::tuple<Container<ToBackend<Types>>...>;  // NOLINT
};

/// Type trait to obtain the index of a type within a tuple.
/// Required to extract variadic types from withi a `VariadicTypedef`
template <typename TSo, typename... Types>
struct ToIndex;

template <typename TSo, typename... Types>
struct ToIndex<TSo, VariadicTypedef<Types...>> {
  static constexpr uint16_t value = GetIndex<TSo, Types...>();  // NOLINT
};

}  // namespace detail

/// Create a tuple of types in the parameter pack and wrap each type with
/// container.
/// @tparam Backend in which the variadic types should be stored in
/// @tparam TVariadicTypedefWrapper type that wraps a VariadicTypedef
///         which in turn contains the variadic template parameters
/// \see VariadicTypedefWrapper
template <typename Backend, typename TVariadicTypedef>
struct ConvertToContainerTuple {
  typedef typename detail::ConvertToContainerTuple<Backend, TVariadicTypedef>::type type;  // NOLINT
};

/// ResourceManager holds a container for each atomic type in the simulation.
/// It provides methods to get a certain container, execute a function on a
/// a certain element, all elements of a certain type or all elements inside
/// the ResourceManager. Elements are uniquely identified with its SoHandle.
/// Furthermore, the types specified in AtomicTypes are backend invariant
/// Hence it doesn't matter which version of the Backend is specified.
/// ResourceManager internally uses the TBackendWrapper parameter to convert
/// all atomic types to the desired backend.
/// This makes user code easier since atomic types can be specified as scalars.
/// @tparam TCompileTimeParam type that containes the compile time parameter for
/// a specific simulation. ResourceManager extracts Backend and AtomicTypes.
template <typename TCompileTimeParam = CompileTimeParam<>>
class ResourceManager {
 public:
  using Backend = typename TCompileTimeParam::SimulationBackend;
  using Types = typename TCompileTimeParam::AtomicTypes;
  /// Determine Container based on the Backend
  template <typename T>
  using TypeContainer = typename Backend::template Container<T>;
  /// Helper type alias to get a type with certain Backend
  template <typename T>
  using ToBackend = typename T::template Self<Backend>;

  /// Returns the number of simulation object types
  static constexpr size_t NumberOfTypes() {
    return std::tuple_size<decltype(data_)>::value;
  }

  /// Compile-time lookup of a type's position in `Types` (= tuple index).
  template <typename TSo>
  static constexpr uint16_t GetTypeIndex() {
    return detail::ToIndex<TSo, Types>::value;
  }

  /// ROOT I/O constructor -- intentionally does not initialize anything.
  explicit ResourceManager(TRootIOCtor* r) {}

  /// Default constructor. Unfortunately needs to be public although it is
  /// a singleton to be able to use ROOT I/O
  ResourceManager() {
    // Soa container contain one element upon construction
    Clear();
  }

  /// Free the memory that was reserved for the diffusion grids
  virtual ~ResourceManager() {
    for (auto* grid : diffusion_grids_) {
      delete grid;
    }
  }

  ResourceManager& operator=(ResourceManager&& other) {
    data_ = std::move(other.data_);
    diffusion_grids_ = std::move(other.diffusion_grids_);
    return *this;
  }

  /// Return the container of this Type
  /// @tparam Type atomic type whose container should be returned
  ///         invariant to the Backend. This means that even if ResourceManager
  ///         stores e.g. `SoaCell`, Type can be `Cell` and still returns the
  ///         correct container.
  template <typename Type>
  TypeContainer<ToBackend<Type>>* Get() {
    return &std::get<TypeContainer<ToBackend<Type>>>(data_);
  }

  /// Return the container of diffusion grids
  std::vector<DiffusionGrid*>& GetDiffusionGrids() { return diffusion_grids_; }

  /// Return the diffusion grid which holds the substance of specified id
  DiffusionGrid* GetDiffusionGrid(size_t substance_id) {
    assert(substance_id < diffusion_grids_.size() &&
           "You tried to access a diffusion grid that does not exist!");
    return diffusion_grids_[substance_id];
  }

  /// Return the diffusion grid which holds the substance of specified name
  /// Caution: using this function in a tight loop will result in a slow
  /// simulation. Use `GetDiffusionGrid(size_t)` in those cases.
  DiffusionGrid* GetDiffusionGrid(std::string substance_name) {
    for (auto dg : diffusion_grids_) {
      if (dg->GetSubstanceName() == substance_name) {
        return dg;
      }
    }
    assert(false &&
           "You tried to access a diffusion grid that does not exist! "
           "Did you specify the correct substance name?");
    return nullptr;
  }

  /// Returns the total number of simulation objects
  size_t GetNumSimObjects() {
    size_t num_so = 0;
    for (uint16_t i = 0; i < std::tuple_size<decltype(data_)>::value; i++) {
      ::bdm::Apply(&data_, i,
                   [&](auto* container) { num_so += container->size(); });
    }
    return num_so;
  }

  /// Apply a function on a certain element
  /// @param handle - simulation object id; specifies the tuple index and
  /// element index \see SoHandle
  /// @param function that will be called with the element as a parameter
  ///
  ///     rm->ApplyOnElement(handle, [](auto& element) {
  ///                          std::cout << element << std::endl;
  ///                        });
  template <typename TFunction>
  auto ApplyOnElement(SoHandle handle, TFunction&& function) {
    auto type_idx = handle.GetTypeIdx();
    auto element_idx = handle.GetElementIdx();
    return ::bdm::Apply(&data_, type_idx, [&](auto* container) -> decltype(
                                              function((*container)[0])) {
      return function((*container)[element_idx]);
    });
  }

  /// Apply a function on all container types
  /// @param function that will be called with each container as a parameter
  ///
  ///     rm->ApplyOnAllTypes([](auto* container, uint16_t type_idx) {
  ///                           std::cout << container->size() << std::endl;
  ///                         });
  template <typename TFunction>
  void ApplyOnAllTypes(TFunction&& function) {
    // runtime dispatch - TODO(lukas) replace with c++17 std::apply
    for (uint16_t i = 0; i < std::tuple_size<decltype(data_)>::value; i++) {
      ::bdm::Apply(&data_, i, [&](auto* container) { function(container, i); });
    }
  }

  /// Apply a function on all container types. Function invocations are
  /// parallelized
  /// @param function that will be called with each container as a parameter
  ///
  ///     rm->ApplyOnAllTypes([](auto* container, uint16_t type_idx) {
  ///                           std::cout << container->size() << std::endl;
  ///                         });
  template <typename TFunction>
  void ApplyOnAllTypesParallel(TFunction&& function) {
    // NOTE(review): this body is identical to ApplyOnAllTypes -- invocations
    // are NOT actually parallelized despite the name and doc. TODO confirm
    // whether parallelization was intended here.
    // runtime dispatch - TODO(lukas) replace with c++17 std::apply
    for (uint16_t i = 0; i < std::tuple_size<decltype(data_)>::value; i++) {
      ::bdm::Apply(&data_, i, [&](auto* container) { function(container, i); });
    }
  }

  /// Apply a function on all elements in every container
  /// @param function that will be called with each container as a parameter
  ///
  ///     rm->ApplyOnAllElements([](auto& element, SoHandle handle) {
  ///                              std::cout << element << std::endl;
  ///                            });
  template <typename TFunction>
  void ApplyOnAllElements(TFunction&& function) {
    // runtime dispatch - TODO(lukas) replace with c++17 std::apply
    for (uint16_t i = 0; i < std::tuple_size<decltype(data_)>::value; i++) {
      ::bdm::Apply(&data_, i, [&](auto* container) {
        for (size_t e = 0; e < container->size(); e++) {
          function((*container)[e], SoHandle(i, e));
        }
      });
    }
  }

  /// Apply a function on all elements in every container
  /// Function invocations are parallelized
  /// \see ApplyOnAllElements
  template <typename TFunction>
  void ApplyOnAllElementsParallel(TFunction&& function) {
    // runtime dispatch - TODO(lukas) replace with c++17 std::apply
    for (uint16_t i = 0; i < std::tuple_size<decltype(data_)>::value; i++) {
      ::bdm::Apply(&data_, i, [&](auto* container) {
        // parallel over the elements of one container at a time
#pragma omp parallel for
        for (size_t e = 0; e < container->size(); e++) {
          function((*container)[e], SoHandle(i, e));
        }
      });
    }
  }

  /// Remove elements from each type
  void Clear() {
    ApplyOnAllTypes(
        [](auto* container, uint16_t type_idx) { container->clear(); });
  }

  /// Append `so` to the container holding objects of type TSo.
  template <typename TSo>
  void push_back(const TSo& so) {  // NOLINT
    Get<TSo>()->push_back(so);
  }

#ifdef USE_OPENCL
  cl::Context* GetOpenCLContext() { return &opencl_context_; }
  cl::CommandQueue* GetOpenCLCommandQueue() { return &opencl_command_queue_; }
  std::vector<cl::Device>* GetOpenCLDeviceList() { return &opencl_devices_; }
  std::vector<cl::Program>* GetOpenCLProgramList() { return &opencl_programs_; }
#endif

  /// Create a new simulation object and return a reference to it.
  /// @tparam TScalarSo simulation object type with scalar backend
  /// @param args arguments which will be forwarded to the TScalarSo constructor
  /// @remarks Note that this function is not thread safe.
  template <typename TScalarSo, typename... Args, typename TBackend = Backend>
  typename std::enable_if<std::is_same<TBackend, Soa>::value,
                          typename TScalarSo::template Self<SoaRef>>::type
  New(Args... args) {
    auto container = Get<TScalarSo>();
    auto idx =
        container->DelayedPushBack(TScalarSo(std::forward<Args>(args)...));
    return (*container)[idx];
  }

  /// Scalar-backend overload of New() -- returns a plain reference.
  template <typename TScalarSo, typename... Args, typename TBackend = Backend>
  typename std::enable_if<std::is_same<TBackend, Scalar>::value,
                          TScalarSo&>::type
  New(Args... args) {
    auto container = Get<TScalarSo>();
    auto idx =
        container->DelayedPushBack(TScalarSo(std::forward<Args>(args)...));
    return (*container)[idx];
  }

 private:
  /// creates one container for each type in Types.
  /// Container type is determined based on the specified Backend
  typename ConvertToContainerTuple<Backend, Types>::type data_;
  std::vector<DiffusionGrid*> diffusion_grids_;

#ifdef USE_OPENCL
  // "//!" marks members as transient for ROOT I/O (not serialized)
  cl::Context opencl_context_;             //!
  cl::CommandQueue opencl_command_queue_;  //!
  // Currently only support for one GPU device
  std::vector<cl::Device> opencl_devices_;    //!
  std::vector<cl::Program> opencl_programs_;  //!
#endif

  friend class SimulationBackup;
  ClassDefNV(ResourceManager, 1);
};

}  // namespace bdm

#endif  // RESOURCE_MANAGER_H_
pr79940.c
/* PR c/79940 */ int main () { int i, j, l, m; int a[10000], b[10000], c[10000]; for (i = 0; i < 10000; i++) { a[i] = i; b[i] = i & 31; } #pragma omp parallel shared(a, b, c) #pragma omp single #pragma omp taskloop shared(a, b, c) for (i = 0; i < 10000; i++) c[i] = a[i] + b[i]; #pragma omp parallel #pragma omp single { #pragma omp taskloop shared(a, b, c) lastprivate (i) for (i = 0; i < 10000; i++) c[i] += a[i] + b[i]; l = i; } #pragma omp parallel #pragma omp single #pragma omp taskloop shared(a, b, c) collapse(2) for (i = 0; i < 100; i++) for (j = 0; j < 100; j++) c[i * 100 + j] += a[i * 100 + j] + b[i * 100 + j]; #pragma omp parallel #pragma omp single { #pragma omp taskloop shared(a, b, c) lastprivate (i, j) for (i = 0; i < 100; i++) for (j = 0; j < 100; j++) c[i * 100 + j] += a[i * 100 + j] + b[i * 100 + j]; m = i * 100 + j; } for (i = 0; i < 10000; i++) if (a[i] != i || b[i] != (i & 31) || c[i] != 4 * i + 4 * (i & 31)) __builtin_abort (); if (l != 10000 || m != 10100) __builtin_abort (); return 0; }
GB_binop__times_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): auto-generated kernel specialization for TIMES over uint8;
// only comments are added here, no code is changed.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__times_uint8
// A.*B function (eWiseMult):       GB_AemultB__times_uint8
// A*D function (colscale):         GB_AxD__times_uint8
// D*A function (rowscale):         GB_DxB__times_uint8
// C+=B function (dense accum):     GB_Cdense_accumB__times_uint8
// C+=b function (dense accum):     GB_Cdense_accumb__times_uint8
// C+=A+B function (dense ewise3):  GB_Cdense_ewise3_accum__times_uint8
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__times_uint8
// C=scalar+B                       GB_bind1st__times_uint8
// C=scalar+B'                      GB_bind1st_tran__times_uint8
// C=A+scalar                       GB_bind2nd__times_uint8
// C=A'+scalar                      GB_bind2nd_tran__times_uint8

// C type:   uint8_t
// A type:   uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij * bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint8_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = (x * y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TIMES || GxB_NO_UINT8 || GxB_NO_TIMES_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB_Cdense_ewise3_accum__times_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__times_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__times_uint8
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__times_uint8
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable (inner block always returns); kept from the code generator
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__times_uint8
(
    GrB_Matrix
C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__times_uint8 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__times_uint8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, 
*kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__times_uint8 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__times_uint8 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = Bx [p] ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a 
matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__times_uint8 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = Ax [p] ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (x * aij) ; \ } GrB_Info GB_bind1st_tran__times_uint8 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = Ax [pA] ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB_bind2nd_tran__times_uint8 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
/* ===== concatenated-file separator: opencl_agilekeychain_fmt_plug.c ===== */
/* 1Password Agile Keychain cracker patch for JtR. Hacked together during * July of 2012 by Dhiru Kholia <dhiru.kholia at gmail.com>. * * This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net> and * Copyright (c) 2012 Dhiru Kholia <dhiru.kholia at gmail.com>, and it is * hereby released to the general public under the following terms: * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * This software is based on "agilekeychain" project but no actual code is * borrowed from it. * * "agilekeychain" project is at https://bitbucket.org/gwik/agilekeychain */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_agilekeychain; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_agilekeychain); #else #include <string.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "formats.h" #include "common.h" #include "stdint.h" #include "misc.h" #include "aes.h" #include "common-opencl.h" #include "options.h" #include "jumbo.h" #define FORMAT_LABEL "agilekeychain-opencl" #define FORMAT_NAME "1Password Agile Keychain" #define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL AES" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define BINARY_SIZE 0 #define PLAINTEXT_LENGTH 64 #define SALT_SIZE sizeof(struct custom_salt) #define BINARY_ALIGN MEM_ALIGN_WORD #define SALT_ALIGN 4 #define SALTLEN 8 #define CTLEN 1040 typedef struct { uint32_t length; uint8_t v[PLAINTEXT_LENGTH]; } keychain_password; typedef struct { uint32_t v[16/4]; } keychain_hash; typedef struct { uint32_t iterations; uint32_t outlen; uint32_t skip_bytes; uint8_t length; uint8_t salt[64]; } keychain_salt; static int *cracked; static int any_cracked; static struct fmt_tests keychain_tests[] = { 
{"$agilekeychain$2*1000*8*7146eaa1cca395e5*1040*e7eb81496717d35f12b83024bb055dec00ea82843886cbb8d0d77302a85d89b1d2c0b5b8275dca44c168cba310344be6eea3a79d559d0846a9501f4a012d32b655047673ef66215fc2eb4e944a9856130ee7cd44523017bbbe2957e6a81d1fd128434e7b83b49b8a014a3e413a1d76b109746468070f03f19d361a21c712ef88e05b04f8359f6dd96c1c4487ea2c9df22ea9029e9bc8406d37850a5ead03062283a42218c134d05ba40cddfe46799c931291ec238ee4c11dc71d2b7e018617d4a2bf95a0c3c1f98ea14f886d94ee2a65871418c7c237f1fe52d3e176f8ddab6dfd4bc039b6af36ab1bc9981689c391e71703e31979f732110b84d5fccccf59c918dfcf848fcd80c6da62ced6e231497b9cbef22d5edca439888556bae5e7b05571ac34ea54fafc03fb93e4bc17264e50a1d04b688fcc8bc715dd237086c2537c32de34bbb8a29de0208800af2a9b561551ae6561099beb61045f22dbe871fab5350e40577dd58b4c8fb1232f3f85b8d2e028e5535fd131988a5df4c0408929b8eac6d751dcc698aa1d79603251d90a216ae5e28bffc0610f61fefe0a23148dcc65ab88b117dd3b8d311157424867eb0261b8b8c5b11def85d434dd4c6dc7036822a279a77ec640b28da164bea7abf8b634ba0e4a13d9a31fdcfebbdbe53adcdf2564d656e64923f76bc2619428abdb0056ce20f47f3ece7d4d11dc55d2969684ca336725561cb27ce0504d57c88a2782daccefb7862b385d494ce70fef93d68e673b12a68ba5b8c93702be832d588ac935dbf0a7b332e42d1b6da5f87aed03498a37bb41fc78fcdbe8fe1f999fe756edf3a375beb54dd508ec45af07985f1430a105e552d9817106ae12d09906c4c28af575d270308a950d05c07da348f59571184088d46bbef3e7a2ad03713e90b435547b23f340f0f5d00149838d9919d40dac9b337920c7e577647fe4e2811f05b8e888e3211d9987cf922883aa6e53a756e579f7dff91c297fcc5cda7d10344545f64099cfd2f8fd59ee5c580ca97cf8b17e0222b764df25a2a52b81ee9db41b3c296fcea1203b367e55d321c3504aeda8913b0cae106ccf736991030088d581468264b8486968e868a44172ad904d97e3e52e8370aaf52732e6ee6cc46eb33a901afc6b7c687b8f6ce0b2b4cdfe19c7139615195a052051becf39383ab83699a383a26f8a36c78887fe27ea7588c0ea21a27357ff9923a3d23ca2fb04ad671b63f8a8ec9b7fc969d3bece0f5ff19a40bc327b9905a6de2193ffe3aa1997e9266205d083776e3b94869164abcdb88d64b8ee5465f7165b75e1632abd364a24bb1426889955b8f0354f75c6fb40e254f7de53d8ef7fee9644bf2ebccd934a72bb1c
c9c19d354d66996acbddd60d1241657359d9074a4b313b21af2ee4f10cf20f4122a5fad4ee4f37a682ffb7234bea61985d1ad130bfb9f4714461fb574dbf851c*1000*8*c05f3bc3e7f3cad7*1040*f3e3d091b64da1529b04b2795898b717faad59f7dae4bda25e6e267c28a56a7702e51991b2a3fb034cdda2d9bfd531dfd2c3af00f39fdfe8bcbdde02ab790415bcf071d133b15f647f55ff512730ae4914ce20b72184c827f6350ac768b00c9eab0e3322e084bb3e9e9439a10030950f5504dcc4f7ba614b27fde99bd0d743a58341e90ec313395486eb8068df205b7bdf25134ed97dd2e2883d7eb3e63b659602ada765084a69d7ed8fc55b60aa67718cc9e5bf31ab8f3029b32a4b001071848d2b76b5f4b921d2169ca287e9e78ecd904d040c817c7c7cde4ba8510b462e139c16519962ca0adb7d5f89d431cd4541a9a7aaec8d799697f4d3947d87884bed32ada13db725c72ab6450ac8fe989a94917cca784bcf6ffbe756f19d4e8897e0f80d8c318e13e5b30fc356646aaf038a952b0781f12dfef1f4bd6922ae05a573eeff4dbb064cfbb0fd62962a6a53a8de308da2b8e83baebfe261cb127f874a5eff3f05cda123ab2ba559cf444ce33b6845f4c902733b8982044151a8aa1859769082ade5928f2d4f616ce972ae8dde1f2be37d496ad16057008dfe678c75cbdc53db25ed311edbcf8b2a73bcd2809f6bd1d389aaeed82a75fa15676d08aa5390efdc189c180be6a52ec5a7371304d26e477039197671377d1ea3d6ee41e68a42348a4fe9a1d2400eaeba8ed0a7419b9694d780456d96378c00318a5be0f41afa887476b3bebb7cf30d61ca8fc77de35671a3053a517aa39444e01e1752da3146dc97eec5849d6f025c3d4bc6e0499b901f629d8a081ad35ed33602cbef5e9a68f090170fcc1f285eb094e3dc619740a067fd2aeeb20abbb17926c3ad097f3f0bad4de540d1829a985cd7e700100622ec47da046071c11a1597e5f093268b4ed79ffcf2450b9ba2b649b932fbce912bdb4da010581bd9c731be792c8f75177f6c8c4e1756d63a1491a8aae4bb11beeca118e7d08073b500dd82b81e4bdbeb15625afca8f1c8e06b2360da972587516ef62e91d1d9aad90e62226d53363bff318f5af21f69c234731ac22b09506a1b807d2366e88905668d960c7963daa93046e9a56db1d7a437e9a37aa7a2945197265478b264ec14d383030ef73504fd26d4be9e72ebddb14a00bf6bd66a3adaa1d17cada378a2b0bc852f961af52333f7966f8a60738dfd47e79ce537082f187117ffd31f54f53356b671154dfa245671c4cd054c1a8d303a202fccfae6d3f9e3646838cef38703b5e660b5ce7679f5898d801908f90092dbec335c98e4002041287fe9bfa7d7828a29ab24
0ec2cedc9fa12cfd7c3ef7b61dad4fbf2ef9c0a904dbde1b3792fb5178607608dc9fc2fbc85addf89fa3df94317e729810b508356b5bb176cdb022afb0ec5eeff4d5081b66733d1be1b54cc4f080bfc33187663b5ab185472b35dc8812e201472e6af376c43ee23aa2db6cd04bddd79b99b0c28c48a5ae", "openwall"}, {"$agilekeychain$1*1000*8*54434b3047723444*1040*316539685a36617546544a61466e35743970356559624464304467394a4a41615459594a6b66454c5462417a7a694b5751474e4748595036344f3945374b414b676b6b7278673658794e63734a316c48656b496a3156346a544c6861797537347032466b4d6b416d31704a6b5063547a44703152544f72696e6e38347732597672774f6476414c70346462595a7678656b6e5958716b7a61746d5874514e575965564735627a437578584e4a573050567939413073306c377a4d726e6d576a6655424455394f4934696c48454f4d536e635567393950686d4171364f76747749446130454c6d74783069704d30456d45374f56736e486a5534667877327a526e52596e55454452393544437042646e6739355938714836584968664c4d7a726a4f63544c6858385141464c71565463664270493761664d633055447879613169456a72664479346438305641417054754775477a475266766c4774543668673848624d31636c37624e73743549634457655375507138535139396c4c39364c4f6f757a43305535586161364b47676a61713971394459526a78744e547459797a6a57715a3575534364487a4430306d4e4e39483277674c733238726463616d4f5146467957374234727252774b6d6161664b6d67414d5854496444665848684c376c6c776d47477a4b57566d5a3646346e775441446f3659745038646d336b6370494d50676742797a41325630716e794833793237494152496477556e4d6c4751497367346672635364486e6e71504f6e6264575953584462586c6e573947347a567163535333366e3253504d65656b45483841544f6952384d6170724471706c4a307863713653707265624f544a4d5139377562454a334b776e4879746a37704e37694557484d69696d436f484973613443754d484b4f51484833545a364654694a6d31783061665536796c444f7257666964397243444f684d305a324c6b75693953716664354b435963703559354978757a64354a755158394136663744435a674e4c73484a7935737a707739724c783077316631637349757a6d696252576244396a537730593143633348385a775734534b646569684f634f4c35323364734b7179625750364b76344a4a56626c4f727069366f575a386432745375684c464e42643173445a6a50745743696e66
6a4458325058644d57654c596d326f5763516a7951524a566372354d4d58435877765172596b734c59354476455156746d75504830444a4e47624e31524f4d544b4a6b4d675835305a7a56736758794c475057714e78496452725269484c75424f4d6d793550677277727453597045566e304c5642764c5a6732504c7a4e71584c4c67634979637369554a3446497655795a78583547306b365a4e337477786c7961796b4d787463796971596f516fcb3584235d7ecde5f8b7bc2b8f1e9e2e*46c3b75f6e4cf139e92f683f32107271", "123"}, {"$agilekeychain$1*1000*8*7a697868444e7458*1040*773954704874444d4d523043546b44375135544f74675a754532624a45794848305949436e4e724d336c524c39316247426a7843317131614152736d50724c6474586a4d4d445954786c31376d363155437130777a414d36586c7045555457424a5a436a657541456742417961654472745a73576e4b7a7a344d547043567846526655524b4339573631756f3850465a3878306b7176644c4253787071764c58376e716a50674f526d4a4e4b546e3359575175614b304a3964756f756935675a77544f4e6770654855776f79553465786e41364d6376496b7651624762424d62756746796a6753514c37793069783869683773454c533559365946584f545246616d48495730464e634d42466e51367856797a4368517335674a755972434b545944633270764e54775879563542776675386b6e4462506b743138694a756d63447134745361526a32373167366e787375514e346a73574e77796b4b49376d3677653448754c364b5a41514633626e71786130634458544e484a436551386e7679304b786d73346f774a383268665167596b466e39317a307269714434546d4d6173416e344b6a74455a584846526a6659746742504262495958386336755241386c496633417666696d7a5036425745757461736b684574794a5230436d50466d4b536375764674674562315679766a43453077356e614b476d345849395a726b7037626153496b6a66634f355261795157645941487731516f564c6764516d4e3074394b3839526341626f6b6b38324465497068624553646f4177786e6f68347779523338394f4e6561315271635236374d424d695978304b336b4a6966776e74614f4b43483237434b596a6630774e79394a4b7153714a48616b4b364455596a454b31433767786a72303450706d44666373574c5a61324f335852474b756c456b76483349754e3156654f417342324d6f75346d4b78774e43424863566e344c4c6c6c6d4e446b617550415a6f3337764f55484b4156344d4769336267344f4737794c354c5567636a565a6b7369616730383377744d695134
31333032305a4a3747794944714d67396a5651444132424e79507a34726d346c333552757a764b6c543073437562534376714f346a5939784a546f683358517348623378716677313231383261685357743236455a6a6b6674365870554642386436574c374430635177347278736a744a6e463530756365684c7779497557366550356936514e704e4863353863437165397163496146794a726555714c623438543235396371416154326c66375276746e3550727453306b7042335961364239586c3359384b464865564e677636537234414e4d6c55583867456376686e43646e6e776a6f656d7152613453725148503462744b4a334565714f6e624a774a65623258552fff2bf0505a0bc88b9cbc9073a74586*a6f6556c971bd3ad40b52751ba025713", ""}, {"$agilekeychain$1*1000*8*7a65613743636950*1040*524a397449393859696b4a576e437763716a574947544a6d306e32474442343355764a7a6948517a45686d7569636631514745347448424e4e6b32564239656a55596f724671547638736d4e66783949504b6f38746b6f49426d4d6b794c7a6d3077327639365a4b515934357774664a477247366b5539486135495863766845714146317458356b725a6a50376f726e55734b3136533756706a4b42516165656a50336e4558616450794f59506f4771347268454730784555485a4f5a4772526a76354f45417470616258375a386436474b366f7653583257335939516d4f5364446a414b674e467a31374f716d73516b3362795776305a414a314f63324d616a6c6472413939443879414c523733794c47467654734d7a6a4734733461674353357a4456527841486233646d446e797448696837377364784344704831784f6a5975666168626b5534796678576c59584d4b3448704a784a4f675a6d7672636b5a4b567071445a345a376648624b55414b7262694972384531336c7a6875725a6f44627571775361774b66417743336230614e4166564954334a6c3477666b4254374f747565394b32667266566d3263416a656c79416c45724b3035504a4e42307a33303632483466664272705765415a4f3552416a36544e5a54415a5976666a4b53675a68493071394a6563426964544a4f564d304a773976394944444339516e564a78587539366974586c4f6132717937354c554b65384b7638585132596832417a5271314e4b5653766d4d50506d3554463762763961554e45695a51436e79504f6e7146617a755231373574455365305446624c636450424a43526a49384b32365967496a734c324e525574526e36714c533065694f536c6c37795a456945476d4a6e327262646942416c485046616e384e4d7869427571777355714e76383052675
37752726245696c734d68664b53793836684b39445a716b47546d4b59747176474c6b6a6d52513368796b367a356449706c64385541614236546e426a6b4f64766d33493972763941765a71776345686b734c594a7254446c796f46444b6d557441305a636b414e437245587a63487a30304c50564e4e73694d634d5a6f4f74414534424f53685879374e62545734487a555054774a7056686f6a7453666a664e696d354548345631374c61396862586659666332304e465a5678656a304b4d59586d586547634d67474c6d31794a4b546473474c755a697579625779503259726d6d5248544f6f704b575046556e3438415a48474168396d787136327230367248774e73493439693049794b3765314b4f74547265556c564b6e6d594a5959355a7476334b546f75375a6a676c755a557a39744b54747745583948314a37366e6c6d5a53345079555856696438336876596141617a394438711ee66b990b013609582733309b01df00*444f4656a5ec58e8a75204fb25fd5ae5", "PASSWORD"}, {NULL} }; static struct custom_salt { unsigned int nkeys; unsigned int iterations[2]; unsigned int saltlen[2]; unsigned char salt[2][SALTLEN]; unsigned int ctlen[2]; unsigned char ct[2][CTLEN]; } *cur_salt; static cl_int cl_error; static keychain_password *inbuffer; static keychain_hash *outbuffer; static keychain_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; size_t insize, outsize, settingsize, cracked_size; static struct fmt_main *self; #define STEP 0 #define SEED 256 // This file contains auto-tuning routine(s). Has to be included after formats definitions. 
// This must come after the format definitions above: the autotune library
// references the keychain_* types and the format's create/release callbacks.
#include "opencl-autotune.h"
#include "memdbg.h"

// labels shown for each profiled phase during autotune
static const char * warn[] = {
	"xfer: ", ", crypt: ", ", xfer: "
};

/* ------- Helper functions ------- */

// Upper bound on the work-group size the crypt kernel supports on this device.
static size_t get_task_max_work_group_size()
{
	return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}

// Allocate host and device buffers sized for 'gws' keys and bind the
// kernel arguments.  Called by the autotuner for each candidate GWS.
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(keychain_password) * gws;
	outsize = sizeof(keychain_hash) * gws;
	settingsize = sizeof(keychain_salt);
	cracked_size = sizeof(*cracked) * gws;

	inbuffer = mem_calloc(1, insize);
	outbuffer = mem_alloc(outsize);
	cracked = mem_calloc(1, cracked_size);

	/// Allocate memory
	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument");
}

// Release everything create_clobj() allocated.  'cracked' doubles as the
// "buffers are live" flag, so this is safe to call more than once.
static void release_clobj(void)
{
	if (cracked) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(outbuffer);
		MEM_FREE(cracked);
	}
}

// Format teardown: undo reset()'s kernel/program creation (once per autotune).
static void done(void)
{
	if (autotuned) {
		release_clobj();
		HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
		autotuned--;
	}
}

// Lightweight init: only remember 'self' and prepare the device.
// The expensive kernel build is deferred to reset().
static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}

// Build the PBKDF2-HMAC-SHA1 kernel (sized via -D options to match our
// structs) and run the autotuner.  Only done on the first call.
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		snprintf(build_opts, sizeof(build_opts),
		         "-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
		         PLAINTEXT_LENGTH,
		         (int)sizeof(currentsalt.salt),
		         (int)sizeof(outbuffer->v));
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
		            gpu_id, build_opts);

		crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
		                       create_clobj, release_clobj,
		                       sizeof(keychain_password), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 1000);
	}
}

// Syntactic validation of one "$agilekeychain$..." hash line.
// Checks field count, decimal-ness, and length bounds; does no crypto.
// NOTE(review): only the first key's fields are checked here and only the
// first key is used below in get_salt()/crypt_all(), even though nkeys
// may be 2 — verify against the CPU format if both keys should be tried.
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr;
	int ctlen;
	int saltlen;
	char *p;

	if (strncmp(ciphertext, "$agilekeychain$", 15) != 0)
		return 0;
	/* handle 'chopped' .pot lines */
	if (ldr_isa_pot_source(ciphertext))
		return 1;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += 15;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* nkeys */
		goto err;
	if (!isdec(p))
		goto err;
	if (atoi(p) > 2)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* iterations */
		goto err;
	if (!isdec(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt length */
		goto err;
	if (!isdec(p))
		goto err;
	saltlen = atoi(p);
	if(saltlen > SALTLEN)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
		goto err;
	if(hexlenl(p) != saltlen * 2)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* ct length */
		goto err;
	if (!isdec(p))
		goto err;
	ctlen = atoi(p);
	if (ctlen > CTLEN)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* ciphertext */
		goto err;
	if(hexlenl(p) != ctlen * 2)
		goto err;

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

// Parse the first key's iteration count, salt, and ciphertext out of the
// hash line.  Returns a pointer to a function-static struct (standard JtR
// pattern; the core copies SALT_SIZE bytes from it).
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += 15;	/* skip over "$agilekeychain$" */
	p = strtokm(ctcopy, "*");
	cs.nkeys = atoi(p);
	p = strtokm(NULL, "*");
	cs.iterations[0] = atoi(p);
	p = strtokm(NULL, "*");
	cs.saltlen[0] = atoi(p);
	p = strtokm(NULL, "*");
	// decode hex salt
	for (i = 0; i < cs.saltlen[0]; i++)
		cs.salt[0][i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.ctlen[0] = atoi(p);
	p = strtokm(NULL, "*");
	// decode hex ciphertext
	for (i = 0; i < cs.ctlen[0]; i++)
		cs.ct[0][i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
			+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}

// Install the current salt: fill the GPU-side keychain_salt struct and
// enqueue the (non-blocking) transfer.  outlen=16: we only need the AES key.
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->saltlen[0]);
	currentsalt.length = cur_salt->saltlen[0];
	currentsalt.iterations = cur_salt->iterations[0];
	currentsalt.outlen = 16;
	currentsalt.skip_bytes = 0;

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
	    "Copy salt to gpu");
}

#undef set_key
// Store one candidate password (truncated to PLAINTEXT_LENGTH) in the
// host-side input buffer; uploaded in bulk by crypt_all().
static void set_key(char *key, int index)
{
	uint8_t length = strlen(key);

	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, key, length);
}

// Return the stored (possibly truncated) candidate as a C string.
static char *get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	uint8_t length = inbuffer[index].length;

	memcpy(ret, inbuffer[index].v, length);
	ret[length] = '\0';
	return ret;
}

// Decrypt only the final AES-CBC block of the keychain entry (IV is the
// previous ciphertext block) and check the PKCS padding.  Returns 0 on a
// plausible decrypt, -1 otherwise.
// NOTE(review): key_size = n / 8 is then compared against 128/192/256 —
// looks like bytes compared to bit sizes; matches the value check_pkcs_pad
// is expected to yield here, but confirm against the CPU agilekeychain format.
static int akcdecrypt(unsigned char *derived_key, unsigned char *data)
{
	unsigned char out[CTLEN];
	int n, key_size;
	AES_KEY akey;
	unsigned char iv[16];

	memcpy(iv, data + CTLEN - 32, 16);

	if (AES_set_decrypt_key(derived_key, 128, &akey) < 0)
		fprintf(stderr, "AES_set_decrypt_key failed in crypt!\n");
	AES_cbc_encrypt(data + CTLEN - 16, out + CTLEN - 16, 16, &akey, iv, AES_DECRYPT);

	n = check_pkcs_pad(out, CTLEN, 16);
	if (n < 0)
		return -1;

	key_size = n / 8;
	if (key_size != 128 && key_size != 192 && key_size != 256)
		// "invalid key size"
		return -1;

	return 0;
}

// Derive PBKDF2-SHA1 keys for all candidates on the GPU, then check each
// derived key on the CPU (OpenMP) with akcdecrypt().
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
	    "Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL,
		multi_profilingEvent[1]), "Run kernel");

	/// Read the result back (blocking read also flushes the queue)
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]),
	    "Copy result back");

	if (ocl_autotune_running)
		return count;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
		if (!akcdecrypt((unsigned char*)outbuffer[index].v, cur_salt->ct[0])) {
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	return count;
}

// cmp_*: cracking verdicts come from the 'cracked' array filled above;
// no binary is stored (BINARY_SIZE is 0), hence FMT_NOT_EXACT.
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

// Tunable-cost reporting hook: first key's PBKDF2 iteration count.
static unsigned int iteration_count(void *salt)
{
	struct custom_salt *my_salt;

	my_salt = salt;
	return (unsigned int) my_salt->iterations[0];
}

struct fmt_main fmt_opencl_agilekeychain = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_NOT_EXACT,
		{
			"iteration count",
		},
		keychain_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		fmt_default_binary,
		get_salt,
		{
			iteration_count,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */

#endif /* HAVE_OPENCL */
/* ===== concatenated-file separator: ParFriends.h ===== */
/****************************************************************/ /* Parallel Combinatorial BLAS Library (for Graph Computations) */ /* version 1.6 -------------------------------------------------*/ /* date: 6/15/2017 ---------------------------------------------*/ /* authors: Ariful Azad, Aydin Buluc --------------------------*/ /****************************************************************/ /* Copyright (c) 2010-2017, The Regents of the University of California Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #ifndef _PAR_FRIENDS_H_ #define _PAR_FRIENDS_H_ #include "mpi.h" #include <iostream> #include <cstdarg> #include "SpParMat.h" #include "SpParHelper.h" #include "MPIType.h" #include "Friends.h" #include "OptBuf.h" #include "mtSpGEMM.h" #include "MultiwayMerge.h" namespace combblas { template <class IT, class NT, class DER> class SpParMat; /*************************************************************************************************/ /**************************** FRIEND FUNCTIONS FOR PARALLEL CLASSES ******************************/ /*************************************************************************************************/ /** ** Concatenate all the FullyDistVec<IT,NT> objects into a single one **/ template <typename IT, typename NT> FullyDistVec<IT,NT> Concatenate ( std::vector< FullyDistVec<IT,NT> > & vecs) { if(vecs.size() < 1) { SpParHelper::Print("Warning: Nothing to concatenate, returning empty "); return FullyDistVec<IT,NT>(); } else if (vecs.size() < 2) { return vecs[1]; } else { typename std::vector< FullyDistVec<IT,NT> >::iterator it = vecs.begin(); std::shared_ptr<CommGrid> commGridPtr = it->getcommgrid(); MPI_Comm World = commGridPtr->GetWorld(); IT nglen = it->TotalLength(); // new global length IT cumloclen = it->MyLocLength(); // existing cumulative local lengths ++it; for(; it != vecs.end(); ++it) { if(*(commGridPtr) != *(it->getcommgrid())) { SpParHelper::Print("Grids are not comparable for FullyDistVec<IT,NT>::EWiseApply\n"); MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); } nglen += it->TotalLength(); cumloclen += it->MyLocLength(); } FullyDistVec<IT,NT> ConCat (commGridPtr, nglen, NT()); int nprocs = commGridPtr->GetSize(); std::vector< std::vector< NT > > data(nprocs); std::vector< std::vector< IT > > inds(nprocs); IT gloffset = 0; for(it = vecs.begin(); it != vecs.end(); ++it) { IT loclen = it->LocArrSize(); for(IT i=0; i < loclen; ++i) { IT locind; IT loffset = it->LengthUntil(); int owner = ConCat.Owner(gloffset+loffset+i, locind); 
data[owner].push_back(it->arr[i]); inds[owner].push_back(locind); } gloffset += it->TotalLength(); } int * sendcnt = new int[nprocs]; int * sdispls = new int[nprocs]; for(int i=0; i<nprocs; ++i) sendcnt[i] = (int) data[i].size(); int * rdispls = new int[nprocs]; int * recvcnt = new int[nprocs]; MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, World); // share the request counts sdispls[0] = 0; rdispls[0] = 0; for(int i=0; i<nprocs-1; ++i) { sdispls[i+1] = sdispls[i] + sendcnt[i]; rdispls[i+1] = rdispls[i] + recvcnt[i]; } IT totrecv = std::accumulate(recvcnt,recvcnt+nprocs,static_cast<IT>(0)); NT * senddatabuf = new NT[cumloclen]; for(int i=0; i<nprocs; ++i) { std::copy(data[i].begin(), data[i].end(), senddatabuf+sdispls[i]); std::vector<NT>().swap(data[i]); // delete data vectors } NT * recvdatabuf = new NT[totrecv]; MPI_Alltoallv(senddatabuf, sendcnt, sdispls, MPIType<NT>(), recvdatabuf, recvcnt, rdispls, MPIType<NT>(), World); // send data delete [] senddatabuf; IT * sendindsbuf = new IT[cumloclen]; for(int i=0; i<nprocs; ++i) { std::copy(inds[i].begin(), inds[i].end(), sendindsbuf+sdispls[i]); std::vector<IT>().swap(inds[i]); // delete inds vectors } IT * recvindsbuf = new IT[totrecv]; MPI_Alltoallv(sendindsbuf, sendcnt, sdispls, MPIType<IT>(), recvindsbuf, recvcnt, rdispls, MPIType<IT>(), World); // send new inds DeleteAll(sendindsbuf, sendcnt, sdispls); for(int i=0; i<nprocs; ++i) { for(int j = rdispls[i]; j < rdispls[i] + recvcnt[i]; ++j) { ConCat.arr[recvindsbuf[j]] = recvdatabuf[j]; } } DeleteAll(recvindsbuf, recvcnt, rdispls); return ConCat; } } template <typename MATRIXA, typename MATRIXB> bool CheckSpGEMMCompliance(const MATRIXA & A, const MATRIXB & B) { if(A.getncol() != B.getnrow()) { std::ostringstream outs; outs << "Can not multiply, dimensions does not match"<< std::endl; outs << A.getncol() << " != " << B.getnrow() << std::endl; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); return false; } if((void*) &A == (void*) 
&B) { std::ostringstream outs; outs << "Can not multiply, inputs alias (make a temporary copy of one of them first)"<< std::endl; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, MATRIXALIAS); return false; } return true; } // Combined logic for prune, recovery, and select template <typename IT, typename NT, typename DER> void MCLPruneRecoverySelect(SpParMat<IT,NT,DER> & A, NT hardThreshold, IT selectNum, IT recoverNum, NT recoverPct, int kselectVersion) { #ifdef TIMING double t0, t1; #endif // Prune and create a new pruned matrix SpParMat<IT,NT,DER> PrunedA = A.Prune(std::bind2nd(std::less_equal<NT>(), hardThreshold), false); // column-wise statistics of the pruned matrix FullyDistVec<IT,NT> colSums = PrunedA.Reduce(Column, std::plus<NT>(), 0.0); FullyDistVec<IT,NT> nnzPerColumn = PrunedA.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;}); FullyDistVec<IT,NT> pruneCols(A.getcommgrid(), A.getncol(), hardThreshold); PrunedA.FreeMemory(); // Check if we need recovery // columns with nnz < recoverNum (r) FullyDistSpVec<IT,NT> recoverCols(nnzPerColumn, std::bind2nd(std::less<NT>(), recoverNum)); recoverCols = recoverPct; // columns with nnz < r AND sum < recoverPct (pct) recoverCols = EWiseApply<NT>(recoverCols, colSums, [](NT spval, NT dval){return spval;}, [](NT spval, NT dval){return dval < spval;}, false, NT()); IT nrecover = recoverCols.getnnz(); if(nrecover > 0) { #ifdef TIMING t0=MPI_Wtime(); #endif A.Kselect(recoverCols, recoverNum, kselectVersion); #ifdef TIMING t1=MPI_Wtime(); mcl_kselecttime += (t1-t0); #endif pruneCols.Set(recoverCols); #ifdef COMBBLAS_DEBUG std::ostringstream outs; outs << "Number of columns needing recovery: " << nrecover << std::endl; SpParHelper::Print(outs.str()); #endif } if(selectNum>0) { // remaining columns will be up for selection FullyDistSpVec<IT,NT> selectCols = EWiseApply<NT>(recoverCols, colSums, [](NT spval, NT dval){return spval;}, [](NT spval, NT dval){return spval==-1;}, true, static_cast<NT>(-1)); 
selectCols = selectNum; selectCols = EWiseApply<NT>(selectCols, nnzPerColumn, [](NT spval, NT dval){return spval;}, [](NT spval, NT dval){return dval > spval;}, false, NT()); IT nselect = selectCols.getnnz(); if(nselect > 0 ) { #ifdef TIMING t0=MPI_Wtime(); #endif A.Kselect(selectCols, selectNum, kselectVersion); // PrunedA would also work #ifdef TIMING t1=MPI_Wtime(); mcl_kselecttime += (t1-t0); #endif pruneCols.Set(selectCols); #ifdef COMBBLAS_DEBUG std::ostringstream outs; outs << "Number of columns needing selection: " << nselect << std::endl; SpParHelper::Print(outs.str()); #endif #ifdef TIMING t0=MPI_Wtime(); #endif SpParMat<IT,NT,DER> selectedA = A.PruneColumn(pruneCols, std::less<NT>(), false); #ifdef TIMING t1=MPI_Wtime(); mcl_prunecolumntime += (t1-t0); #endif if(recoverNum>0 ) // recovery can be attempted after selection { FullyDistVec<IT,NT> nnzPerColumn1 = selectedA.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;}); FullyDistVec<IT,NT> colSums1 = selectedA.Reduce(Column, std::plus<NT>(), 0.0); selectedA.FreeMemory(); // slected columns with nnz < recoverNum (r) selectCols = recoverNum; selectCols = EWiseApply<NT>(selectCols, nnzPerColumn1, [](NT spval, NT dval){return spval;}, [](NT spval, NT dval){return dval < spval;}, false, NT()); // selected columns with sum < recoverPct (pct) selectCols = recoverPct; selectCols = EWiseApply<NT>(selectCols, colSums1, [](NT spval, NT dval){return spval;}, [](NT spval, NT dval){return dval < spval;}, false, NT()); IT n_recovery_after_select = selectCols.getnnz(); if(n_recovery_after_select>0) { // mclExpandVector2 does it on the original vector // mclExpandVector1 does it one pruned vector #ifdef TIMING t0=MPI_Wtime(); #endif A.Kselect(selectCols, recoverNum, kselectVersion); // Kselect on PrunedA might give different result #ifdef TIMING t1=MPI_Wtime(); mcl_kselecttime += (t1-t0); #endif pruneCols.Set(selectCols); #ifdef COMBBLAS_DEBUG std::ostringstream outs1; outs1 << "Number of columns needing 
recovery after selection: " << nselect << std::endl; SpParHelper::Print(outs1.str()); #endif } } } } // final prune #ifdef TIMING t0=MPI_Wtime(); #endif A.PruneColumn(pruneCols, std::less<NT>(), true); #ifdef TIMING t1=MPI_Wtime(); mcl_prunecolumntime += (t1-t0); #endif // Add loops for empty columns if(recoverNum<=0 ) // if recoverNum>0, recovery would have added nonzeros in empty columns { FullyDistVec<IT,NT> nnzPerColumnA = A.Reduce(Column, std::plus<NT>(), 0.0, [](NT val){return 1.0;}); FullyDistSpVec<IT,NT> emptyColumns(nnzPerColumnA, std::bind2nd(std::equal_to<NT>(), 0.0)); emptyColumns = 1.00; //Ariful: We need a selective AddLoops function with a sparse vector //A.AddLoops(emptyColumns); } } /** * Broadcasts A multiple times (#phases) in order to save storage in the output * Only uses 1/phases of C memory if the threshold/max limits are proper */ template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU,NUO,UDERO> MemEfficientSpGEMM (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, int phases, NUO hardThreshold, IU selectNum, IU recoverNum, NUO recoverPct, int kselectVersion, int64_t perProcessMemory) { int myrank; MPI_Comm_rank(MPI_COMM_WORLD,&myrank); if(A.getncol() != B.getnrow()) { std::ostringstream outs; outs << "Can not multiply, dimensions does not match"<< std::endl; outs << A.getncol() << " != " << B.getnrow() << std::endl; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); return SpParMat< IU,NUO,UDERO >(); } if(phases <1 || phases >= A.getncol()) { SpParHelper::Print("MemEfficientSpGEMM: The value of phases is too small or large. 
Resetting to 1.\n"); phases = 1; } int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy); if(perProcessMemory>0) // estimate the number of phases permitted by memory { int p; MPI_Comm World = GridC->GetWorld(); MPI_Comm_size(World,&p); // max nnz(A) in a porcess int64_t lannz = A.getlocalnnz(); int64_t gannz; MPI_Allreduce(&lannz, &gannz, 1, MPIType<int64_t>(), MPI_MAX, World); int64_t inputMem = gannz * 20 * 4; // for four copies (two for SUMMA) // max nnz(A^2) stored by summa in a porcess int64_t asquareNNZ = EstPerProcessNnzSUMMA(A,B); int64_t asquareMem = asquareNNZ * 24 * 2; // an extra copy in multiway merge and in selection/recovery step // estimate kselect memory int64_t d = ceil( (asquareNNZ * sqrt(p))/ B.getlocalcols() ); // average nnz per column in A^2 (it is an overestimate because asquareNNZ is estimated based on unmerged matrices) // this is equivalent to (asquareNNZ * p) / B.getcol() int64_t k = std::min(std::max(selectNum, recoverNum), d ); int64_t kselectmem = B.getlocalcols() * k * 8 * 3; // estimate output memory int64_t outputNNZ = (B.getlocalcols() * k)/sqrt(p); int64_t outputMem = outputNNZ * 20 * 2; //inputMem + outputMem + asquareMem/phases + kselectmem/phases < memory int64_t remainingMem = perProcessMemory*1000000000 - inputMem - outputMem; if(remainingMem > 0) { phases = 1 + (asquareMem+kselectmem) / remainingMem; } if(myrank==0) { if(remainingMem < 0) { std::cout << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n Warning: input and output memory requirement is greater than per-process avaiable memory. Keeping phase to the value supplied at the command line. The program may go out of memory and crash! \n !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" 
<< std::endl; } #ifdef SHOW_MEMORY_USAGE int64_t maxMemory = kselectmem/phases + inputMem + outputMem + asquareMem / phases; if(maxMemory>1000000000) std::cout << "phases: " << phases << ": per process memory: " << perProcessMemory << " GB asquareMem: " << asquareMem/1000000000.00 << " GB" << " inputMem: " << inputMem/1000000000.00 << " GB" << " outputMem: " << outputMem/1000000000.00 << " GB" << " kselectmem: " << kselectmem/1000000000.00 << " GB" << std::endl; else std::cout << "phases: " << phases << ": per process memory: " << perProcessMemory << " GB asquareMem: " << asquareMem/1000000.00 << " MB" << " inputMem: " << inputMem/1000000.00 << " MB" << " outputMem: " << outputMem/1000000.00 << " MB" << " kselectmem: " << kselectmem/1000000.00 << " MB" << std::endl; #endif } } IU C_m = A.spSeq->getnrow(); IU C_n = B.spSeq->getncol(); std::vector< UDERB > PiecesOfB; UDERB CopyB = *(B.spSeq); // we allow alias matrices as input because of this local copy CopyB.ColSplit(phases, PiecesOfB); // CopyB's memory is destroyed at this point MPI_Barrier(GridC->GetWorld()); IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages); IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages); SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld()); // Remotely fetched matrices are stored as pointers UDERA * ARecv; UDERB * BRecv; std::vector< UDERO > toconcatenate; int Aself = (A.commGrid)->GetRankInProcRow(); int Bself = (B.commGrid)->GetRankInProcCol(); for(int p = 0; p< phases; ++p) { SpParHelper::GetSetSizes( PiecesOfB[p], BRecvSizes, (B.commGrid)->GetColWorld()); std::vector< SpTuples<IU,NUO> *> tomerge; for(int i = 0; i < stages; ++i) { std::vector<IU> ess; if(i == Aself) ARecv = A.spSeq; // shallow-copy else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row ARecv = new UDERA(); // first, create the object } #ifdef TIMING double 
t0=MPI_Wtime(); #endif SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements #ifdef TIMING double t1=MPI_Wtime(); mcl_Abcasttime += (t1-t0); #endif ess.clear(); if(i == Bself) BRecv = &(PiecesOfB[p]); // shallow-copy else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) ess[j] = BRecvSizes[j][i]; BRecv = new UDERB(); } #ifdef TIMING double t2=MPI_Wtime(); #endif SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements #ifdef TIMING double t3=MPI_Wtime(); mcl_Bbcasttime += (t3-t2); #endif #ifdef TIMING double t4=MPI_Wtime(); #endif SpTuples<IU,NUO> * C_cont = LocalSpGEMM<SR, NUO>(*ARecv, *BRecv,i != Aself, i != Bself); #ifdef TIMING double t5=MPI_Wtime(); mcl_localspgemmtime += (t5-t4); #endif if(!C_cont->isZero()) tomerge.push_back(C_cont); else delete C_cont; } // all stages executed #ifdef SHOW_MEMORY_USAGE int64_t gcnnz_unmerged, lcnnz_unmerged = 0; for(size_t i = 0; i < tomerge.size(); ++i) { lcnnz_unmerged += tomerge[i]->getnnz(); } MPI_Allreduce(&lcnnz_unmerged, &gcnnz_unmerged, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD); int64_t summa_memory = gcnnz_unmerged*20;//(gannz*2 + phase_nnz + gcnnz_unmerged + gannz + gannz/phases) * 20; // last two for broadcasts if(myrank==0) { if(summa_memory>1000000000) std::cout << p+1 << ". unmerged: " << summa_memory/1000000000.00 << "GB " ; else std::cout << p+1 << ". 
unmerged: " << summa_memory/1000000.00 << " MB " ; } #endif #ifdef TIMING double t6=MPI_Wtime(); #endif //UDERO OnePieceOfC(MergeAll<SR>(tomerge, C_m, PiecesOfB[p].getncol(),true), false); // TODO: MultiwayMerge can directly return UDERO inorder to avoid the extra copy SpTuples<IU,NUO> * OnePieceOfC_tuples = MultiwayMerge<SR>(tomerge, C_m, PiecesOfB[p].getncol(),true); #ifdef SHOW_MEMORY_USAGE int64_t gcnnz_merged, lcnnz_merged ; lcnnz_merged = OnePieceOfC_tuples->getnnz(); MPI_Allreduce(&lcnnz_merged, &gcnnz_merged, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD); // TODO: we can remove gcnnz_merged memory here because we don't need to concatenate anymore int64_t merge_memory = gcnnz_merged*2*20;//(gannz*2 + phase_nnz + gcnnz_unmerged + gcnnz_merged*2) * 20; if(myrank==0) { if(merge_memory>1000000000) std::cout << " merged: " << merge_memory/1000000000.00 << "GB " ; else std::cout << " merged: " << merge_memory/1000000.00 << " MB " ; } #endif #ifdef TIMING double t7=MPI_Wtime(); mcl_multiwaymergetime += (t7-t6); #endif UDERO * OnePieceOfC = new UDERO(* OnePieceOfC_tuples, false); delete OnePieceOfC_tuples; SpParMat<IU,NUO,UDERO> OnePieceOfC_mat(OnePieceOfC, GridC); MCLPruneRecoverySelect(OnePieceOfC_mat, hardThreshold, selectNum, recoverNum, recoverPct, kselectVersion); #ifdef SHOW_MEMORY_USAGE int64_t gcnnz_pruned, lcnnz_pruned ; lcnnz_pruned = OnePieceOfC_mat.getlocalnnz(); MPI_Allreduce(&lcnnz_pruned, &gcnnz_pruned, 1, MPIType<int64_t>(), MPI_MAX, MPI_COMM_WORLD); // TODO: we can remove gcnnz_merged memory here because we don't need to concatenate anymore int64_t prune_memory = gcnnz_pruned*2*20;//(gannz*2 + phase_nnz + gcnnz_pruned*2) * 20 + kselectmem; // 3 extra copies of OnePieceOfC_mat, we can make it one extra copy! 
//phase_nnz += gcnnz_pruned; if(myrank==0) { if(prune_memory>1000000000) std::cout << "Prune: " << prune_memory/1000000000.00 << "GB " << std::endl ; else std::cout << "Prune: " << prune_memory/1000000.00 << " MB " << std::endl ; } #endif // ABAB: Change this to accept pointers to objects toconcatenate.push_back(OnePieceOfC_mat.seq()); } UDERO * C = new UDERO(0,C_m, C_n,0); C->ColConcatenate(toconcatenate); // ABAB: Change this to accept a vector of pointers to pointers to DER objects SpHelper::deallocate2D(ARecvSizes, UDERA::esscount); SpHelper::deallocate2D(BRecvSizes, UDERA::esscount); return SpParMat<IU,NUO,UDERO> (C, GridC); } /** * Parallel C = A*B routine that uses a double buffered broadcasting scheme * @pre { Input matrices, A and B, should not alias } * Most memory efficient version available. Total stages: 2*sqrt(p) * Memory requirement during first sqrt(p) stages: <= (3/2)*(nnz(A)+nnz(B))+(1/2)*nnz(C) * Memory requirement during second sqrt(p) stages: <= nnz(A)+nnz(B)+nnz(C) * Final memory requirement: nnz(C) if clearA and clearB are true **/ template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU,NUO,UDERO> Mult_AnXBn_DoubleBuff (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false ) { if(!CheckSpGEMMCompliance(A,B) ) { return SpParMat< IU,NUO,UDERO >(); } int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy); IU C_m = A.spSeq->getnrow(); IU C_n = B.spSeq->getncol(); UDERA * A1seq = new UDERA(); UDERA * A2seq = new UDERA(); UDERB * B1seq = new UDERB(); UDERB * B2seq = new UDERB(); (A.spSeq)->Split( *A1seq, *A2seq); const_cast< UDERB* >(B.spSeq)->Transpose(); (B.spSeq)->Split( *B1seq, *B2seq); MPI_Barrier(GridC->GetWorld()); IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, 
stages); IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages); SpParHelper::GetSetSizes( *A1seq, ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( *B1seq, BRecvSizes, (B.commGrid)->GetColWorld()); // Remotely fetched matrices are stored as pointers UDERA * ARecv; UDERB * BRecv; std::vector< SpTuples<IU,NUO> *> tomerge; int Aself = (A.commGrid)->GetRankInProcRow(); int Bself = (B.commGrid)->GetRankInProcCol(); for(int i = 0; i < stages; ++i) { std::vector<IU> ess; if(i == Aself) { ARecv = A1seq; // shallow-copy } else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDERA(); // first, create the object } SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements ess.clear(); if(i == Bself) { BRecv = B1seq; // shallow-copy } else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) { ess[j] = BRecvSizes[j][i]; } BRecv = new UDERB(); } SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements SpTuples<IU,NUO> * C_cont = MultiplyReturnTuples<SR, NUO> (*ARecv, *BRecv, // parameters themselves false, true, // transpose information (B is transposed) i != Aself, // 'delete A' condition i != Bself); // 'delete B' condition if(!C_cont->isZero()) tomerge.push_back(C_cont); else delete C_cont; } if(clearA) delete A1seq; if(clearB) delete B1seq; // Set the new dimensions SpParHelper::GetSetSizes( *A2seq, ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( *B2seq, BRecvSizes, (B.commGrid)->GetColWorld()); // Start the second round for(int i = 0; i < stages; ++i) { std::vector<IU> ess; if(i == Aself) { ARecv = A2seq; // shallow-copy } else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDERA(); // first, create the object } 
SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements ess.clear(); if(i == Bself) { BRecv = B2seq; // shallow-copy } else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) { ess[j] = BRecvSizes[j][i]; } BRecv = new UDERB(); } SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements SpTuples<IU,NUO> * C_cont = MultiplyReturnTuples<SR, NUO> (*ARecv, *BRecv, // parameters themselves false, true, // transpose information (B is transposed) i != Aself, // 'delete A' condition i != Bself); // 'delete B' condition if(!C_cont->isZero()) tomerge.push_back(C_cont); else delete C_cont; } SpHelper::deallocate2D(ARecvSizes, UDERA::esscount); SpHelper::deallocate2D(BRecvSizes, UDERB::esscount); if(clearA) { delete A2seq; delete A.spSeq; A.spSeq = NULL; } else { (A.spSeq)->Merge(*A1seq, *A2seq); delete A1seq; delete A2seq; } if(clearB) { delete B2seq; delete B.spSeq; B.spSeq = NULL; } else { (B.spSeq)->Merge(*B1seq, *B2seq); delete B1seq; delete B2seq; const_cast< UDERB* >(B.spSeq)->Transpose(); // transpose back to original } UDERO * C = new UDERO(MergeAll<SR>(tomerge, C_m, C_n,true), false); return SpParMat<IU,NUO,UDERO> (C, GridC); // return the result object } /** * Parallel A = B*C routine that uses only MPI-1 features * Relies on simple blocking broadcast * @pre { Input matrices, A and B, should not alias } **/ template <typename SR, typename NUO, typename UDERO, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU, NUO, UDERO> Mult_AnXBn_Synch (SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B, bool clearA = false, bool clearB = false ) { if(!CheckSpGEMMCompliance(A,B) ) { return SpParMat< IU,NUO,UDERO >(); } int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy); IU C_m = A.spSeq->getnrow(); IU C_n 
= B.spSeq->getncol(); //const_cast< UDERB* >(B.spSeq)->Transpose(); // do not transpose for colum-by-column multiplication MPI_Barrier(GridC->GetWorld()); IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages); IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages); SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld()); // Remotely fetched matrices are stored as pointers UDERA * ARecv; UDERB * BRecv; std::vector< SpTuples<IU,NUO> *> tomerge; int Aself = (A.commGrid)->GetRankInProcRow(); int Bself = (B.commGrid)->GetRankInProcCol(); for(int i = 0; i < stages; ++i) { std::vector<IU> ess; if(i == Aself) { ARecv = A.spSeq; // shallow-copy } else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDERA(); // first, create the object } SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements ess.clear(); if(i == Bself) { BRecv = B.spSeq; // shallow-copy } else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) { ess[j] = BRecvSizes[j][i]; } BRecv = new UDERB(); } SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements /* // before activating this transpose B first SpTuples<IU,NUO> * C_cont = MultiplyReturnTuples<SR, NUO> (*ARecv, *BRecv, // parameters themselves false, true, // transpose information (B is transposed) i != Aself, // 'delete A' condition i != Bself); // 'delete B' condition */ SpTuples<IU,NUO> * C_cont = LocalSpGEMM<SR, NUO> (*ARecv, *BRecv, // parameters themselves i != Aself, // 'delete A' condition i != Bself); // 'delete B' condition if(!C_cont->isZero()) tomerge.push_back(C_cont); #ifdef COMBBLAS_DEBUG std::ostringstream outs; outs << i << "th SUMMA iteration"<< std::endl; SpParHelper::Print(outs.str()); #endif } if(clearA && 
A.spSeq != NULL) { delete A.spSeq; A.spSeq = NULL; } if(clearB && B.spSeq != NULL) { delete B.spSeq; B.spSeq = NULL; } SpHelper::deallocate2D(ARecvSizes, UDERA::esscount); SpHelper::deallocate2D(BRecvSizes, UDERB::esscount); //UDERO * C = new UDERO(MergeAll<SR>(tomerge, C_m, C_n,true), false); // First get the result in SpTuples, then convert to UDER // the last parameter to MergeAll deletes tomerge arrays SpTuples<IU,NUO> * C_tuples = MultiwayMerge<SR>(tomerge, C_m, C_n,true); UDERO * C = new UDERO(*C_tuples, false); //if(!clearB) // const_cast< UDERB* >(B.spSeq)->Transpose(); // transpose back to original return SpParMat<IU,NUO,UDERO> (C, GridC); // return the result object } /** * Estimate the maximum nnz needed to store in a process from all stages of SUMMA before reduction * @pre { Input matrices, A and B, should not alias } **/ template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> IU EstPerProcessNnzSUMMA(SpParMat<IU,NU1,UDERA> & A, SpParMat<IU,NU2,UDERB> & B) { IU nnzC_SUMMA = 0; if(A.getncol() != B.getnrow()) { std::ostringstream outs; outs << "Can not multiply, dimensions does not match"<< std::endl; outs << A.getncol() << " != " << B.getnrow() << std::endl; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); return nnzC_SUMMA; } int stages, dummy; // last two parameters of ProductGrid are ignored for Synch multiplication std::shared_ptr<CommGrid> GridC = ProductGrid((A.commGrid).get(), (B.commGrid).get(), stages, dummy, dummy); MPI_Barrier(GridC->GetWorld()); IU ** ARecvSizes = SpHelper::allocate2D<IU>(UDERA::esscount, stages); IU ** BRecvSizes = SpHelper::allocate2D<IU>(UDERB::esscount, stages); SpParHelper::GetSetSizes( *(A.spSeq), ARecvSizes, (A.commGrid)->GetRowWorld()); SpParHelper::GetSetSizes( *(B.spSeq), BRecvSizes, (B.commGrid)->GetColWorld()); // Remotely fetched matrices are stored as pointers UDERA * ARecv; UDERB * BRecv; int Aself = (A.commGrid)->GetRankInProcRow(); int Bself = 
(B.commGrid)->GetRankInProcCol(); for(int i = 0; i < stages; ++i) { std::vector<IU> ess; if(i == Aself) { ARecv = A.spSeq; // shallow-copy } else { ess.resize(UDERA::esscount); for(int j=0; j< UDERA::esscount; ++j) { ess[j] = ARecvSizes[j][i]; // essentials of the ith matrix in this row } ARecv = new UDERA(); // first, create the object } SpParHelper::BCastMatrix(GridC->GetRowWorld(), *ARecv, ess, i); // then, receive its elements ess.clear(); if(i == Bself) { BRecv = B.spSeq; // shallow-copy } else { ess.resize(UDERB::esscount); for(int j=0; j< UDERB::esscount; ++j) { ess[j] = BRecvSizes[j][i]; } BRecv = new UDERB(); } SpParHelper::BCastMatrix(GridC->GetColWorld(), *BRecv, ess, i); // then, receive its elements IU* colnnzC = estimateNNZ(*ARecv, *BRecv); IU nzc = BRecv->GetDCSC()->nzc; IU nnzC_stage = 0; #ifdef THREADED #pragma omp parallel for reduction (+:nnzC_stage) #endif for (IU k=0; k<nzc; k++) { nnzC_stage = nnzC_stage + colnnzC[k]; } nnzC_SUMMA += nnzC_stage; } SpHelper::deallocate2D(ARecvSizes, UDERA::esscount); SpHelper::deallocate2D(BRecvSizes, UDERB::esscount); IU nnzC_SUMMA_max = 0; MPI_Allreduce(&nnzC_SUMMA, &nnzC_SUMMA_max, 1, MPIType<IU>(), MPI_MAX, GridC->GetWorld()); return nnzC_SUMMA_max; } template <typename MATRIX, typename VECTOR> void CheckSpMVCompliance(const MATRIX & A, const VECTOR & x) { if(A.getncol() != x.TotalLength()) { std::ostringstream outs; outs << "Can not multiply, dimensions does not match"<< std::endl; outs << A.getncol() << " != " << x.TotalLength() << std::endl; SpParHelper::Print(outs.str()); MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH); } if(! 
( *(A.getcommgrid()) == *(x.getcommgrid())) ) { std::cout << "Grids are not comparable for SpMV" << std::endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); } } template <typename SR, typename IU, typename NUM, typename UDER> FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<int32_t, typename promote_trait<NUM,IU>::T_promote > & optbuf); template <typename SR, typename IU, typename NUM, typename UDER> FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue) { typedef typename promote_trait<NUM,IU>::T_promote T_promote; OptBuf<int32_t, T_promote > optbuf = OptBuf<int32_t, T_promote >(); return SpMV<SR>(A, x, indexisvalue, optbuf); } /** * Step 1 of the sparse SpMV algorithm * @param[in,out] trxlocnz, lenuntil,trxinds,trxnums { set or allocated } * @param[in] indexisvalue **/ template<typename IU, typename NV> void TransposeVector(MPI_Comm & World, const FullyDistSpVec<IU,NV> & x, int32_t & trxlocnz, IU & lenuntil, int32_t * & trxinds, NV * & trxnums, bool indexisvalue) { int32_t xlocnz = (int32_t) x.getlocnnz(); int32_t roffst = (int32_t) x.RowLenUntil(); // since trxinds is int32_t int32_t roffset; IU luntil = x.LengthUntil(); int diagneigh = x.commGrid->GetComplementRank(); MPI_Status status; MPI_Sendrecv(&roffst, 1, MPIType<int32_t>(), diagneigh, TROST, &roffset, 1, MPIType<int32_t>(), diagneigh, TROST, World, &status); MPI_Sendrecv(&xlocnz, 1, MPIType<int32_t>(), diagneigh, TRNNZ, &trxlocnz, 1, MPIType<int32_t>(), diagneigh, TRNNZ, World, &status); MPI_Sendrecv(&luntil, 1, MPIType<IU>(), diagneigh, TRLUT, &lenuntil, 1, MPIType<IU>(), diagneigh, TRLUT, World, &status); // ABAB: Important observation is that local indices (given by x.ind) is 32-bit addressible // Copy them to 32 bit integers and transfer that to save 50% of off-node bandwidth trxinds = new 
int32_t[trxlocnz]; int32_t * temp_xind = new int32_t[xlocnz]; #ifdef THREADED #pragma omp parallel for #endif for(int i=0; i< xlocnz; ++i) temp_xind[i] = (int32_t) x.ind[i]; MPI_Sendrecv(temp_xind, xlocnz, MPIType<int32_t>(), diagneigh, TRI, trxinds, trxlocnz, MPIType<int32_t>(), diagneigh, TRI, World, &status); delete [] temp_xind; if(!indexisvalue) { trxnums = new NV[trxlocnz]; MPI_Sendrecv(const_cast<NV*>(SpHelper::p2a(x.num)), xlocnz, MPIType<NV>(), diagneigh, TRX, trxnums, trxlocnz, MPIType<NV>(), diagneigh, TRX, World, &status); } std::transform(trxinds, trxinds+trxlocnz, trxinds, std::bind2nd(std::plus<int32_t>(), roffset)); // fullydist indexing (p pieces) -> matrix indexing (sqrt(p) pieces) } /** * Step 2 of the sparse SpMV algorithm * @param[in,out] trxinds, trxnums { deallocated } * @param[in,out] indacc, numacc { allocated } * @param[in,out] accnz { set } * @param[in] trxlocnz, lenuntil, indexisvalue **/ template<typename IU, typename NV> void AllGatherVector(MPI_Comm & ColWorld, int trxlocnz, IU lenuntil, int32_t * & trxinds, NV * & trxnums, int32_t * & indacc, NV * & numacc, int & accnz, bool indexisvalue) { int colneighs, colrank; MPI_Comm_size(ColWorld, &colneighs); MPI_Comm_rank(ColWorld, &colrank); int * colnz = new int[colneighs]; colnz[colrank] = trxlocnz; MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colnz, 1, MPI_INT, ColWorld); int * dpls = new int[colneighs](); // displacements (zero initialized pid) std::partial_sum(colnz, colnz+colneighs-1, dpls+1); accnz = std::accumulate(colnz, colnz+colneighs, 0); indacc = new int32_t[accnz]; numacc = new NV[accnz]; // ABAB: Future issues here, colnz is of type int (MPI limitation) // What if the aggregate vector size along the processor row/column is not 32-bit addressible? 
// This will happen when n/sqrt(p) > 2^31 // Currently we can solve a small problem (scale 32) with 4096 processor // For a medium problem (scale 35), we'll need 32K processors which gives sqrt(p) ~ 180 // 2^35 / 180 ~ 2^29 / 3 which is not an issue ! #ifdef TIMING double t0=MPI_Wtime(); #endif MPI_Allgatherv(trxinds, trxlocnz, MPIType<int32_t>(), indacc, colnz, dpls, MPIType<int32_t>(), ColWorld); delete [] trxinds; if(indexisvalue) { IU lenuntilcol; if(colrank == 0) lenuntilcol = lenuntil; MPI_Bcast(&lenuntilcol, 1, MPIType<IU>(), 0, ColWorld); for(int i=0; i< accnz; ++i) // fill numerical values from indices { numacc[i] = indacc[i] + lenuntilcol; } } else { MPI_Allgatherv(trxnums, trxlocnz, MPIType<NV>(), numacc, colnz, dpls, MPIType<NV>(), ColWorld); delete [] trxnums; } #ifdef TIMING double t1=MPI_Wtime(); cblas_allgathertime += (t1-t0); #endif DeleteAll(colnz,dpls); } /** * Step 3 of the sparse SpMV algorithm, with the semiring * @param[in,out] optbuf {scratch space for all-to-all (fold) communication} * @param[in,out] indacc, numacc {index and values of the input vector, deleted upon exit} * @param[in,out] sendindbuf, sendnumbuf {index and values of the output vector, created} **/ template<typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER> void LocalSpMV(const SpParMat<IU,NUM,UDER> & A, int rowneighs, OptBuf<int32_t, OVT > & optbuf, int32_t * & indacc, IVT * & numacc, int32_t * & sendindbuf, OVT * & sendnumbuf, int * & sdispls, int * sendcnt, int accnz, bool indexisvalue, PreAllocatedSPA<OVT> & SPA) { if(optbuf.totmax > 0) // graph500 optimization enabled { if(A.spSeq->getnsplit() > 0) { // optbuf.{inds/nums/dspls} and sendcnt are all pre-allocated and only filled by dcsc_gespmv_threaded generic_gespmv_threaded_setbuffers<SR> (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, optbuf.dspls, rowneighs); } else { generic_gespmv<SR> (*(A.spSeq), indacc, numacc, accnz, optbuf.inds, optbuf.nums, sendcnt, 
optbuf.dspls, rowneighs, indexisvalue);
		}
		DeleteAll(indacc,numacc);
	}
	else
	{
		if(A.spSeq->getnsplit() > 0)
		{
			// sendindbuf/sendnumbuf/sdispls are all allocated and filled by dcsc_gespmv_threaded
			int totalsent = generic_gespmv_threaded<SR> (*(A.spSeq), indacc, numacc, accnz, sendindbuf, sendnumbuf, sdispls, rowneighs, SPA);
			DeleteAll(indacc, numacc);
			// Derive per-destination counts from consecutive displacements.
			for(int i=0; i<rowneighs-1; ++i)
				sendcnt[i] = sdispls[i+1] - sdispls[i];
			sendcnt[rowneighs-1] = totalsent - sdispls[rowneighs-1];
		}
		else
		{
			// default SpMSpV
			std::vector< int32_t > indy;
			std::vector< OVT > numy;
			generic_gespmv<SR>(*(A.spSeq), indacc, numacc, accnz, indy, numy, SPA);
			DeleteAll(indacc, numacc);
			int32_t bufsize = indy.size();	// as compact as possible
			sendindbuf = new int32_t[bufsize];
			sendnumbuf = new OVT[bufsize];
			int32_t perproc = A.getlocalrows() / rowneighs;
			int k = 0;	// index to buffer
			// Bucket the (sorted-by-index) results by destination row neighbor,
			// rebasing each index to that neighbor's local coordinate space.
			for(int i=0; i<rowneighs; ++i)
			{
				int32_t end_this = (i==rowneighs-1) ? A.getlocalrows(): (i+1)*perproc;
				while(k < bufsize && indy[k] < end_this)
				{
					sendindbuf[k] = indy[k] - i*perproc;
					sendnumbuf[k] = numy[k];
					++sendcnt[i];
					++k;
				}
			}
			sdispls = new int[rowneighs]();
			std::partial_sum(sendcnt, sendcnt+rowneighs-1, sdispls+1);
			//#endif
		}
	}
}

// non threaded
// K-way merge of the per-neighbor (index,value) lists received after the fold
// step, combining duplicate indices with the semiring's SR::add.
// Assumes each input list is sorted by index (lists come from sorted local results).
template <typename SR, typename IU, typename OVT>
void MergeContributions(int* listSizes, std::vector<int32_t *> & indsvec, std::vector<OVT *> & numsvec, std::vector<IU>& mergedind, std::vector<OVT>& mergednum)
{
	int nlists = indsvec.size();
	// this condition is checked in the caller SpMV function.
	// I am still putting it here for completeness
	if(nlists == 1)
	{
		// simply copy data
		int veclen = listSizes[0];
		mergedind.resize(veclen);
		mergednum.resize(veclen);
		for(int i=0; i<veclen; i++)
		{
			mergedind[i] = indsvec[0][i];
			mergednum[i] = numsvec[0][i];
		}
		return;
	}
	// Tournament-heap based k-way merge; heap entries are (index, list_id).
	int32_t hsize = 0;
	int32_t inf = std::numeric_limits<int32_t>::min();
	int32_t sup = std::numeric_limits<int32_t>::max();
	KNHeap< int32_t, int32_t > sHeap(sup, inf);
	int * processed = new int[nlists]();	// per-list cursor of consumed elements
	for(int i=0; i<nlists; ++i)
	{
		if(listSizes[i] > 0)
		{
			// key, list_id
			sHeap.insert(indsvec[i][0], i);
			++hsize;
		}
	}
	int32_t key, locv;
	// Pop the first element outside the loop so the loop body can compare
	// against mergedind.back() unconditionally.
	if(hsize > 0)
	{
		sHeap.deleteMin(&key, &locv);
		mergedind.push_back( static_cast<IU>(key));
		mergednum.push_back(numsvec[locv][0]);	// nothing is processed yet
		if( (++(processed[locv])) < listSizes[locv] )
			sHeap.insert(indsvec[locv][processed[locv]], locv);
		else
			--hsize;
	}
	while(hsize > 0)
	{
		sHeap.deleteMin(&key, &locv);
		if(mergedind.back() == static_cast<IU>(key))
		{
			// Duplicate index: fold into the last output entry via the semiring.
			mergednum.back() = SR::add(mergednum.back(), numsvec[locv][processed[locv]]);
			// ABAB: Benchmark actually allows us to be non-deterministic in terms of parent selection
			// We can just skip this addition operator (if it's a max/min select)
		}
		else
		{
			mergedind.push_back(static_cast<IU>(key));
			mergednum.push_back(numsvec[locv][processed[locv]]);
		}
		if( (++(processed[locv])) < listSizes[locv] )
			sHeap.insert(indsvec[locv][processed[locv]], locv);
		else
			--hsize;
	}
	DeleteAll(processed);
}

// Threaded wrapper around MergeContributions: range-partitions the index space
// into oversplit chunks, merges each chunk independently, then concatenates.
template <typename SR, typename IU, typename OVT>
void MergeContributions_threaded(int * & listSizes, std::vector<int32_t *> & indsvec, std::vector<OVT *> & numsvec, std::vector<IU> & mergedind, std::vector<OVT> & mergednum, IU maxindex)
{
	int nlists = indsvec.size();
	// this condition is checked in the caller SpMV function.
	// I am still putting it here for completeness
	if(nlists == 1)
	{
		// simply copy data
		int veclen = listSizes[0];
		mergedind.resize(veclen);
		mergednum.resize(veclen);
#ifdef THREADED
#pragma omp parallel for
#endif
		for(int i=0; i<veclen; i++)
		{
			mergedind[i] = indsvec[0][i];
			mergednum[i] = numsvec[0][i];
		}
		return;
	}
	int nthreads=1;
#ifdef THREADED
#pragma omp parallel
	{
		nthreads = omp_get_num_threads();
	}
#endif
	int nsplits = 4*nthreads; // oversplit for load balance
	nsplits = std::min(nsplits, (int)maxindex);
	// splitters[k][i] = position in list k where split i begins (binary search
	// on the sorted index arrays so every list is cut at the same index value).
	std::vector< std::vector<int32_t> > splitters(nlists);
	for(int k=0; k< nlists; k++)
	{
		splitters[k].resize(nsplits+1);
		splitters[k][0] = static_cast<int32_t>(0);
#pragma omp parallel for
		for(int i=1; i< nsplits; i++)
		{
			IU cur_idx = i * (maxindex/nsplits);
			auto it = std::lower_bound (indsvec[k], indsvec[k] + listSizes[k], cur_idx);
			splitters[k][i] = (int32_t) (it - indsvec[k]);
		}
		splitters[k][nsplits] = listSizes[k];
	}
	// ------ perform merge in parallel ------
	std::vector<std::vector<IU>> indsBuf(nsplits);
	std::vector<std::vector<OVT>> numsBuf(nsplits);
	//TODO: allocate these vectors here before calling MergeContributions
#pragma omp parallel for schedule(dynamic)
	for(int i=0; i< nsplits; i++)
	{
		std::vector<int32_t *> tIndsVec(nlists);
		std::vector<OVT *> tNumsVec(nlists);
		std::vector<int> tLengths(nlists);
		for(int j=0; j< nlists; ++j)
		{
			tIndsVec[j] = indsvec[j] + splitters[j][i];
			tNumsVec[j] = numsvec[j] + splitters[j][i];
			tLengths[j]= splitters[j][i+1] - splitters[j][i];
		}
		MergeContributions<SR>(tLengths.data(), tIndsVec, tNumsVec, indsBuf[i], numsBuf[i]);
	}
	// ------ concatenate merged tuples processed by threads ------
	std::vector<IU> tdisp(nsplits+1);	// prefix-sum of per-split output sizes
	tdisp[0] = 0;
	for(int i=0; i<nsplits; ++i)
	{
		tdisp[i+1] = tdisp[i] + indsBuf[i].size();
	}
	mergedind.resize(tdisp[nsplits]);
	mergednum.resize(tdisp[nsplits]);
#pragma omp parallel for schedule(dynamic)
	for(int i=0; i< nsplits; i++)
	{
		std::copy(indsBuf[i].data() , indsBuf[i].data() + indsBuf[i].size(),
mergedind.data() + tdisp[i]);
		std::copy(numsBuf[i].data() , numsBuf[i].data() + numsBuf[i].size(), mergednum.data() + tdisp[i]);
	}
}

/**
 * This version is the most flexible sparse matrix X sparse vector [Used in KDT]
 * It accepts different types for the matrix (NUM), the input vector (IVT) and the output vector (OVT)
 * without relying on automatic type promotion
 * Input (x) and output (y) vectors can be ALIASED because y is not written until the algorithm is done with x.
 */
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, OptBuf<int32_t, OVT > & optbuf, PreAllocatedSPA<OVT> & SPA)
{
	CheckSpMVCompliance(A,x);
	optbuf.MarkEmpty();
	y.glen = A.getnrow();	// in case it is not set already

	MPI_Comm World = x.commGrid->GetWorld();
	MPI_Comm ColWorld = x.commGrid->GetColWorld();
	MPI_Comm RowWorld = x.commGrid->GetRowWorld();

	int accnz;
	int32_t trxlocnz;
	IU lenuntil;
	int32_t *trxinds, *indacc;
	IVT *trxnums, *numacc;
#ifdef TIMING
	double t0=MPI_Wtime();
#endif
	// Step 1: move x's local piece to the transpose position on the grid.
	TransposeVector(World, x, trxlocnz, lenuntil, trxinds, trxnums, indexisvalue);
#ifdef TIMING
	double t1=MPI_Wtime();
	cblas_transvectime += (t1-t0);
#endif

	if(x.commGrid->GetGridRows() > 1)
	{
		// Step 2: replicate the transposed piece along the processor column.
		AllGatherVector(ColWorld, trxlocnz, lenuntil, trxinds, trxnums, indacc, numacc, accnz, indexisvalue);	// trxindS/trxnums deallocated, indacc/numacc allocated, accnz set
	}
	else
	{
		// Single-row grid: no column replication needed.
		accnz = trxlocnz;
		indacc = trxinds;	// aliasing ptr
		numacc = trxnums;	// aliasing ptr
	}

	int rowneighs;
	MPI_Comm_size(RowWorld, &rowneighs);
	int * sendcnt = new int[rowneighs]();
	int32_t * sendindbuf;
	OVT * sendnumbuf;
	int * sdispls;
#ifdef TIMING
	double t2=MPI_Wtime();
#endif
	// Step 3: local multiply with the semiring; results bucketed per row neighbor.
	LocalSpMV<SR>(A, rowneighs, optbuf, indacc, numacc, sendindbuf, sendnumbuf, sdispls, sendcnt, accnz, indexisvalue, SPA);	// indacc/numacc deallocated, sendindbuf/sendnumbuf/sdispls allocated
#ifdef TIMING
	double t3=MPI_Wtime();
	cblas_localspmvtime += (t3-t2);
#endif

	if(x.commGrid->GetGridCols() == 1)
	{
		// Single-column grid: no fold (all-to-all) step; copy local results straight into y.
		y.ind.resize(sendcnt[0]);
		y.num.resize(sendcnt[0]);

		if(optbuf.totmax > 0 )	// graph500 optimization enabled
		{
#ifdef THREADED
#pragma omp parallel for
#endif
			for(int i=0; i<sendcnt[0]; i++)
			{
				y.ind[i] = optbuf.inds[i];
				y.num[i] = optbuf.nums[i];
			}
		}
		else
		{
#ifdef THREADED
#pragma omp parallel for
#endif
			for(int i=0; i<sendcnt[0]; i++)
			{
				y.ind[i] = sendindbuf[i];
				y.num[i] = sendnumbuf[i];
			}
			DeleteAll(sendindbuf, sendnumbuf,sdispls);
		}
		delete [] sendcnt;
		return;
	}

	int * rdispls = new int[rowneighs];
	int * recvcnt = new int[rowneighs];
	MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld);	// share the request counts

	// receive displacements are exact whereas send displacements have slack
	rdispls[0] = 0;
	for(int i=0; i<rowneighs-1; ++i)
	{
		rdispls[i+1] = rdispls[i] + recvcnt[i];
	}
	int totrecv = std::accumulate(recvcnt,recvcnt+rowneighs,0);
	int32_t * recvindbuf = new int32_t[totrecv];
	OVT * recvnumbuf = new OVT[totrecv];

#ifdef TIMING
	double t4=MPI_Wtime();
#endif
	// Step 4: fold — exchange the bucketed results along the processor row.
	if(optbuf.totmax > 0 )	// graph500 optimization enabled
	{
		MPI_Alltoallv(optbuf.inds, sendcnt, optbuf.dspls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld);
		MPI_Alltoallv(optbuf.nums, sendcnt, optbuf.dspls, MPIType<OVT>(), recvnumbuf, recvcnt, rdispls, MPIType<OVT>(), RowWorld);
		delete [] sendcnt;
	}
	else
	{
		MPI_Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<int32_t>(), recvindbuf, recvcnt, rdispls, MPIType<int32_t>(), RowWorld);
		MPI_Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<OVT>(), recvnumbuf, recvcnt, rdispls, MPIType<OVT>(), RowWorld);
		DeleteAll(sendindbuf, sendnumbuf, sendcnt, sdispls);
	}
#ifdef TIMING
	double t5=MPI_Wtime();
	cblas_alltoalltime += (t5-t4);
#endif

#ifdef TIMING
	double t6=MPI_Wtime();
#endif
	//MergeContributions<SR>(y,recvcnt, rdispls, recvindbuf, recvnumbuf, rowneighs);
	// free memory of y, in case it was aliased
	std::vector<IU>().swap(y.ind);
	std::vector<OVT>().swap(y.num);

	// Step 5: merge the per-neighbor received lists into the output vector.
	std::vector<int32_t *> indsvec(rowneighs);
	std::vector<OVT *> numsvec(rowneighs);
#ifdef THREADED
#pragma omp parallel for
#endif
	for(int i=0; i<rowneighs; i++)
	{
		indsvec[i] = recvindbuf+rdispls[i];
		numsvec[i] = recvnumbuf+rdispls[i];
	}
#ifdef THREADED
	MergeContributions_threaded<SR>(recvcnt, indsvec, numsvec, y.ind, y.num, y.MyLocLength());
#else
	MergeContributions<SR>(recvcnt, indsvec, numsvec, y.ind, y.num);
#endif
	DeleteAll(recvcnt, rdispls,recvindbuf, recvnumbuf);
#ifdef TIMING
	double t7=MPI_Wtime();
	cblas_mergeconttime += (t7-t6);
#endif
}

// Convenience overload: default (empty) OptBuf, caller supplies SPA.
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, PreAllocatedSPA<OVT> & SPA)
{
	OptBuf< int32_t, OVT > optbuf = OptBuf< int32_t,OVT >();
	SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA);
}

// Convenience overload: default OptBuf and default SPA.
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue)
{
	OptBuf< int32_t, OVT > optbuf = OptBuf< int32_t,OVT >();
	PreAllocatedSPA<OVT> SPA;
	SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA);
}

// Convenience overload: caller supplies OptBuf, default SPA.
template <typename SR, typename IVT, typename OVT, typename IU, typename NUM, typename UDER>
void SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IVT> & x, FullyDistSpVec<IU,OVT> & y, bool indexisvalue, OptBuf<int32_t, OVT > & optbuf)
{
	PreAllocatedSPA<OVT> SPA;
	SpMV<SR>(A, x, y, indexisvalue, optbuf, SPA);
}

/**
 * Automatic type promotion is ONLY done here, all the callee functions (in Friends.h and below) are initialized with the promoted type
 * If indexisvalues = true, then we do not need to transfer values for x (happens for BFS iterations with boolean matrices and integer rhs vectors)
 **/
template <typename SR, typename IU, typename NUM, typename UDER>
FullyDistSpVec<IU,typename promote_trait<NUM,IU>::T_promote> SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,IU> & x, bool indexisvalue, OptBuf<int32_t, typename promote_trait<NUM,IU>::T_promote > & optbuf)
{
	typedef typename promote_trait<NUM,IU>::T_promote T_promote;
	FullyDistSpVec<IU, T_promote> y ( x.getcommgrid(), A.getnrow());	// identity doesn't matter for sparse vectors
	SpMV<SR>(A, x, y, indexisvalue, optbuf);
	return y;
}

/**
 * Parallel dense SpMV
 **/
template <typename SR, typename IU, typename NUM, typename NUV, typename UDER>
FullyDistVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV
(const SpParMat<IU,NUM,UDER> & A, const FullyDistVec<IU,NUV> & x )
{
	typedef typename promote_trait<NUM,NUV>::T_promote T_promote;
	CheckSpMVCompliance(A, x);

	MPI_Comm World = x.commGrid->GetWorld();
	MPI_Comm ColWorld = x.commGrid->GetColWorld();
	MPI_Comm RowWorld = x.commGrid->GetRowWorld();

	// Transpose placement: exchange the local dense piece with the complement rank.
	int xsize = (int) x.LocArrSize();
	int trxsize = 0;
	int diagneigh = x.commGrid->GetComplementRank();
	MPI_Status status;
	MPI_Sendrecv(&xsize, 1, MPI_INT, diagneigh, TRX, &trxsize, 1, MPI_INT, diagneigh, TRX, World, &status);

	NUV * trxnums = new NUV[trxsize];
	MPI_Sendrecv(const_cast<NUV*>(SpHelper::p2a(x.arr)), xsize, MPIType<NUV>(), diagneigh, TRX, trxnums, trxsize, MPIType<NUV>(), diagneigh, TRX, World, &status);

	// Replicate along the processor column so every rank holds the full slice.
	int colneighs, colrank;
	MPI_Comm_size(ColWorld, &colneighs);
	MPI_Comm_rank(ColWorld, &colrank);
	int * colsize = new int[colneighs];
	colsize[colrank] = trxsize;
	MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colsize, 1, MPI_INT, ColWorld);
	int * dpls = new int[colneighs]();	// displacements (zero initialized pid)
	std::partial_sum(colsize, colsize+colneighs-1, dpls+1);
	int accsize = std::accumulate(colsize, colsize+colneighs, 0);
	NUV * numacc = new NUV[accsize];

	MPI_Allgatherv(trxnums, trxsize, MPIType<NUV>(), numacc, colsize, dpls, MPIType<NUV>(), ColWorld);
	delete [] trxnums;

	// serial SpMV with dense vector
	T_promote id = SR::id();
	IU ysize = A.getlocalrows();
	T_promote * localy = new
T_promote[ysize]; std::fill_n(localy, ysize, id); #ifdef THREADED dcsc_gespmv_threaded<SR>(*(A.spSeq), numacc, localy); #else dcsc_gespmv<SR>(*(A.spSeq), numacc, localy); #endif DeleteAll(numacc,colsize, dpls); // FullyDistVec<IT,NT>(shared_ptr<CommGrid> grid, IT globallen, NT initval, NT id) FullyDistVec<IU, T_promote> y ( x.commGrid, A.getnrow(), id); int rowneighs; MPI_Comm_size(RowWorld, &rowneighs); IU begptr, endptr; for(int i=0; i< rowneighs; ++i) { begptr = y.RowLenUntil(i); if(i == rowneighs-1) { endptr = ysize; } else { endptr = y.RowLenUntil(i+1); } MPI_Reduce(localy+begptr, SpHelper::p2a(y.arr), endptr-begptr, MPIType<T_promote>(), SR::mpi_op(), i, RowWorld); } delete [] localy; return y; } /** * \TODO: Old version that is no longer considered optimal * Kept for legacy purposes * To be removed when other functionals are fully tested. **/ template <typename SR, typename IU, typename NUM, typename NUV, typename UDER> FullyDistSpVec<IU,typename promote_trait<NUM,NUV>::T_promote> SpMV (const SpParMat<IU,NUM,UDER> & A, const FullyDistSpVec<IU,NUV> & x) { typedef typename promote_trait<NUM,NUV>::T_promote T_promote; CheckSpMVCompliance(A, x); MPI_Comm World = x.commGrid->GetWorld(); MPI_Comm ColWorld = x.commGrid->GetColWorld(); MPI_Comm RowWorld = x.commGrid->GetRowWorld(); int xlocnz = (int) x.getlocnnz(); int trxlocnz = 0; int roffst = x.RowLenUntil(); int offset; int diagneigh = x.commGrid->GetComplementRank(); MPI_Status status; MPI_Sendrecv(&xlocnz, 1, MPI_INT, diagneigh, TRX, &trxlocnz, 1, MPI_INT, diagneigh, TRX, World, &status); MPI_Sendrecv(&roffst, 1, MPI_INT, diagneigh, TROST, &offset, 1, MPI_INT, diagneigh, TROST, World, &status); IU * trxinds = new IU[trxlocnz]; NUV * trxnums = new NUV[trxlocnz]; MPI_Sendrecv(const_cast<IU*>(SpHelper::p2a(x.ind)), xlocnz, MPIType<IU>(), diagneigh, TRX, trxinds, trxlocnz, MPIType<IU>(), diagneigh, TRX, World, &status); MPI_Sendrecv(const_cast<NUV*>(SpHelper::p2a(x.num)), xlocnz, MPIType<NUV>(), diagneigh, TRX, 
trxnums, trxlocnz, MPIType<NUV>(), diagneigh, TRX, World, &status); std::transform(trxinds, trxinds+trxlocnz, trxinds, std::bind2nd(std::plus<IU>(), offset)); // fullydist indexing (n pieces) -> matrix indexing (sqrt(p) pieces) int colneighs, colrank; MPI_Comm_size(ColWorld, &colneighs); MPI_Comm_rank(ColWorld, &colrank); int * colnz = new int[colneighs]; colnz[colrank] = trxlocnz; MPI_Allgather(MPI_IN_PLACE, 1, MPI_INT, colnz, 1, MPI_INT, ColWorld); int * dpls = new int[colneighs](); // displacements (zero initialized pid) std::partial_sum(colnz, colnz+colneighs-1, dpls+1); int accnz = std::accumulate(colnz, colnz+colneighs, 0); IU * indacc = new IU[accnz]; NUV * numacc = new NUV[accnz]; // ABAB: Future issues here, colnz is of type int (MPI limitation) // What if the aggregate vector size along the processor row/column is not 32-bit addressible? MPI_Allgatherv(trxinds, trxlocnz, MPIType<IU>(), indacc, colnz, dpls, MPIType<IU>(), ColWorld); MPI_Allgatherv(trxnums, trxlocnz, MPIType<NUV>(), numacc, colnz, dpls, MPIType<NUV>(), ColWorld); DeleteAll(trxinds, trxnums); // serial SpMV with sparse vector std::vector< int32_t > indy; std::vector< T_promote > numy; int32_t * tmpindacc = new int32_t[accnz]; for(int i=0; i< accnz; ++i) tmpindacc[i] = indacc[i]; delete [] indacc; dcsc_gespmv<SR>(*(A.spSeq), tmpindacc, numacc, accnz, indy, numy); // actual multiplication DeleteAll(tmpindacc, numacc); DeleteAll(colnz, dpls); FullyDistSpVec<IU, T_promote> y ( x.commGrid, A.getnrow()); // identity doesn't matter for sparse vectors IU yintlen = y.MyRowLength(); int rowneighs; MPI_Comm_size(RowWorld,&rowneighs); std::vector< std::vector<IU> > sendind(rowneighs); std::vector< std::vector<T_promote> > sendnum(rowneighs); typename std::vector<int32_t>::size_type outnz = indy.size(); for(typename std::vector<IU>::size_type i=0; i< outnz; ++i) { IU locind; int rown = y.OwnerWithinRow(yintlen, static_cast<IU>(indy[i]), locind); sendind[rown].push_back(locind); 
sendnum[rown].push_back(numy[i]); } IU * sendindbuf = new IU[outnz]; T_promote * sendnumbuf = new T_promote[outnz]; int * sendcnt = new int[rowneighs]; int * sdispls = new int[rowneighs]; for(int i=0; i<rowneighs; ++i) sendcnt[i] = sendind[i].size(); int * rdispls = new int[rowneighs]; int * recvcnt = new int[rowneighs]; MPI_Alltoall(sendcnt, 1, MPI_INT, recvcnt, 1, MPI_INT, RowWorld); // share the request counts sdispls[0] = 0; rdispls[0] = 0; for(int i=0; i<rowneighs-1; ++i) { sdispls[i+1] = sdispls[i] + sendcnt[i]; rdispls[i+1] = rdispls[i] + recvcnt[i]; } int totrecv = std::accumulate(recvcnt,recvcnt+rowneighs,0); IU * recvindbuf = new IU[totrecv]; T_promote * recvnumbuf = new T_promote[totrecv]; for(int i=0; i<rowneighs; ++i) { std::copy(sendind[i].begin(), sendind[i].end(), sendindbuf+sdispls[i]); std::vector<IU>().swap(sendind[i]); } for(int i=0; i<rowneighs; ++i) { std::copy(sendnum[i].begin(), sendnum[i].end(), sendnumbuf+sdispls[i]); std::vector<T_promote>().swap(sendnum[i]); } MPI_Alltoallv(sendindbuf, sendcnt, sdispls, MPIType<IU>(), recvindbuf, recvcnt, rdispls, MPIType<IU>(), RowWorld); MPI_Alltoallv(sendnumbuf, sendcnt, sdispls, MPIType<T_promote>(), recvnumbuf, recvcnt, rdispls, MPIType<T_promote>(), RowWorld); DeleteAll(sendindbuf, sendnumbuf); DeleteAll(sendcnt, recvcnt, sdispls, rdispls); // define a SPA-like data structure IU ysize = y.MyLocLength(); T_promote * localy = new T_promote[ysize]; bool * isthere = new bool[ysize]; std::vector<IU> nzinds; // nonzero indices std::fill_n(isthere, ysize, false); for(int i=0; i< totrecv; ++i) { if(!isthere[recvindbuf[i]]) { localy[recvindbuf[i]] = recvnumbuf[i]; // initial assignment nzinds.push_back(recvindbuf[i]); isthere[recvindbuf[i]] = true; } else { localy[recvindbuf[i]] = SR::add(localy[recvindbuf[i]], recvnumbuf[i]); } } DeleteAll(isthere, recvindbuf, recvnumbuf); sort(nzinds.begin(), nzinds.end()); int nnzy = nzinds.size(); y.ind.resize(nnzy); y.num.resize(nnzy); for(int i=0; i< nnzy; ++i) { 
y.ind[i] = nzinds[i]; y.num[i] = localy[nzinds[i]]; } delete [] localy; return y; } template <typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB> SpParMat<IU,typename promote_trait<NU1,NU2>::T_promote,typename promote_trait<UDERA,UDERB>::T_promote> EWiseMult (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B , bool exclude) { typedef typename promote_trait<NU1,NU2>::T_promote N_promote; typedef typename promote_trait<UDERA,UDERB>::T_promote DER_promote; if(*(A.commGrid) == *(B.commGrid)) { DER_promote * result = new DER_promote( EWiseMult(*(A.spSeq),*(B.spSeq),exclude) ); return SpParMat<IU, N_promote, DER_promote> (result, A.commGrid); } else { std::cout << "Grids are not comparable elementwise multiplication" << std::endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return SpParMat< IU,N_promote,DER_promote >(); } } template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation> SpParMat<IU,RETT,RETDER> EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, bool notB, const NU2& defaultBVal) { if(*(A.commGrid) == *(B.commGrid)) { RETDER * result = new RETDER( EWiseApply<RETT>(*(A.spSeq),*(B.spSeq), __binary_op, notB, defaultBVal) ); return SpParMat<IU, RETT, RETDER> (result, A.commGrid); } else { std::cout << "Grids are not comparable elementwise apply" << std::endl; MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH); return SpParMat< IU,RETT,RETDER >(); } } template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation, typename _BinaryPredicate> SpParMat<IU,RETT,RETDER> EWiseApply (const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect, const bool 
useExtendedBinOp)
{
	if(*(A.commGrid) == *(B.commGrid))
	{
		RETDER * result = new RETDER( EWiseApply<RETT>(*(A.spSeq),*(B.spSeq), __binary_op, do_op, allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect) );
		return SpParMat<IU, RETT, RETDER> (result, A.commGrid);
	}
	else
	{
		std::cout << "Grids are not comparable elementwise apply" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return SpParMat< IU,RETT,RETDER >();
	}
}

// plain adapter
// Wraps plain two-argument callables into the extended (four-argument) form
// expected by the full EWiseApply above.
template <typename RETT, typename RETDER, typename IU, typename NU1, typename NU2, typename UDERA, typename UDERB, typename _BinaryOperation, typename _BinaryPredicate>
SpParMat<IU,RETT,RETDER> EWiseApply
	(const SpParMat<IU,NU1,UDERA> & A, const SpParMat<IU,NU2,UDERB> & B, _BinaryOperation __binary_op, _BinaryPredicate do_op, bool allowANulls, bool allowBNulls, const NU1& ANullVal, const NU2& BNullVal, const bool allowIntersect = true)
{
	return EWiseApply<RETT, RETDER>(A, B, EWiseExtToPlainAdapter<RETT, NU1, NU2, _BinaryOperation>(__binary_op), EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(do_op), allowANulls, allowBNulls, ANullVal, BNullVal, allowIntersect, true);
}
// end adapter

/**
 * if exclude is true, then we prune all entries W[i] != zero from V
 * if exclude is false, then we perform a proper elementwise multiplication
 **/
template <typename IU, typename NU1, typename NU2>
FullyDistSpVec<IU,typename promote_trait<NU1,NU2>::T_promote> EWiseMult
	(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , bool exclude, NU2 zero)
{
	typedef typename promote_trait<NU1,NU2>::T_promote T_promote;

	if(*(V.commGrid) == *(W.commGrid))
	{
		FullyDistSpVec< IU, T_promote> Product(V.commGrid);
		if(V.glen != W.glen)
		{
			std::cerr << "Vector dimensions don't match for EWiseMult\n";
			MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
		}
		else
		{
			Product.glen = V.glen;
			IU size= V.getlocnnz();
			if(exclude)
			{
#if defined(_OPENMP) && defined(CBLAS_EXPERIMENTAL)	// not faster than serial
				int actual_splits = cblas_splits * 1;	// 1 is the parallel slackness
				// Each split collects its survivors into thread-local buffers,
				// then a prefix-sum places them contiguously in Product.
				std::vector <IU> tlosizes (actual_splits, 0);
				std::vector < std::vector<IU> > tlinds(actual_splits);
				std::vector < std::vector<T_promote> > tlnums(actual_splits);
				IU tlsize = size / actual_splits;
#pragma omp parallel for //schedule(dynamic, 1)
				for(IU t = 0; t < actual_splits; ++t)
				{
					IU tlbegin = t*tlsize;
					IU tlend = (t==actual_splits-1)? size : (t+1)*tlsize;
					for(IU i=tlbegin; i<tlend; ++i)
					{
						if(W.arr[V.ind[i]] == zero)	// keep only those
						{
							tlinds[t].push_back(V.ind[i]);
							tlnums[t].push_back(V.num[i]);
							tlosizes[t]++;
						}
					}
				}
				std::vector<IU> prefix_sum(actual_splits+1,0);
				std::partial_sum(tlosizes.begin(), tlosizes.end(), prefix_sum.begin()+1);
				Product.ind.resize(prefix_sum[actual_splits]);
				Product.num.resize(prefix_sum[actual_splits]);

#pragma omp parallel for //schedule(dynamic, 1)
				for(IU t=0; t< actual_splits; ++t)
				{
					std::copy(tlinds[t].begin(), tlinds[t].end(), Product.ind.begin()+prefix_sum[t]);
					std::copy(tlnums[t].begin(), tlnums[t].end(), Product.num.begin()+prefix_sum[t]);
				}
#else
				// Serial exclusion: keep V entries whose W counterpart equals zero.
				for(IU i=0; i<size; ++i)
				{
					if(W.arr[V.ind[i]] == zero)	// keep only those
					{
						Product.ind.push_back(V.ind[i]);
						Product.num.push_back(V.num[i]);
					}
				}
#endif
			}
			else
			{
				// Proper elementwise multiplication over V's nonzeros.
				for(IU i=0; i<size; ++i)
				{
					if(W.arr[V.ind[i]] != zero)	// keep only those
					{
						Product.ind.push_back(V.ind[i]);
						Product.num.push_back(V.num[i] * W.arr[V.ind[i]]);
					}
				}
			}
		}
		return Product;
	}
	else
	{
		std::cout << "Grids are not comparable elementwise multiplication" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return FullyDistSpVec< IU,T_promote>();
	}
}

/** Threaded EWiseApply. Only called internally from EWiseApply.
 **/
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply_threaded
	(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp)
{
	typedef RET T_promote; //typedef typename promote_trait<NU1,NU2>::T_promote T_promote;
	if(*(V.commGrid) == *(W.commGrid))
	{
		FullyDistSpVec< IU, T_promote> Product(V.commGrid);
		if(V.TotalLength() != W.TotalLength())
		{
			std::ostringstream outs;
			outs << "Vector dimensions don't match (" << V.TotalLength() << " vs " << W.TotalLength() << ") for EWiseApply (short version)\n";
			SpParHelper::Print(outs.str());
			MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
		}
		else
		{
			int nthreads=1;
#ifdef _OPENMP
#pragma omp parallel
			{
				nthreads = omp_get_num_threads();
			}
#endif
			Product.glen = V.glen;
			IU size= W.LocArrSize();
			IU spsize = V.getlocnnz();

			// temporary result vectors per thread
			std::vector<std::vector<IU>> tProductInd(nthreads);
			std::vector<std::vector<T_promote>> tProductVal(nthreads);
			IU perthread; //chunk of tProductInd or tProductVal allocated to each thread
			if (allowVNulls)
				perthread = size/nthreads;	// partition the dense index space
			else
				perthread = spsize/nthreads;	// partition V's nonzeros
#ifdef _OPENMP
#pragma omp parallel
#endif
			{
				int curthread = 0;
#ifdef _OPENMP
				curthread = omp_get_thread_num();
#endif
				IU tStartIdx = perthread * curthread;
				IU tNextIdx = perthread * (curthread+1);
				if (allowVNulls)
				{
					if(curthread == nthreads-1) tNextIdx = size;	// last thread absorbs the remainder
					// get sparse part for the current thread
					auto it = std::lower_bound (V.ind.begin(), V.ind.end(), tStartIdx);
					IU tSpIdx = (IU) std::distance(V.ind.begin(), it);
					// iterate over the dense vector
					for(IU tIdx=tStartIdx; tIdx < tNextIdx; ++tIdx)
					{
						if(tSpIdx < spsize && V.ind[tSpIdx] < tNextIdx && V.ind[tSpIdx] == tIdx)
						{
							// V has an entry at this index: pass (vval, wval, false, false).
							if (_doOp(V.num[tSpIdx], W.arr[tIdx], false, false))
							{
								tProductInd[curthread].push_back(tIdx);
								tProductVal[curthread].push_back (_binary_op(V.num[tSpIdx], W.arr[tIdx], false, false));
							}
							tSpIdx++;
						}
						else
						{
							// V is null at this index: substitute Vzero, flag vnull=true.
							if (_doOp(Vzero, W.arr[tIdx], true, false))
							{
								tProductInd[curthread].push_back(tIdx);
								tProductVal[curthread].push_back (_binary_op(Vzero, W.arr[tIdx], true, false));
							}
						}
					}
				}
				else // iterate over the sparse vector
				{
					if(curthread == nthreads-1) tNextIdx = spsize;
					for(IU tSpIdx=tStartIdx; tSpIdx < tNextIdx; ++tSpIdx)
					{
						if (_doOp(V.num[tSpIdx], W.arr[V.ind[tSpIdx]], false, false))
						{
							tProductInd[curthread].push_back( V.ind[tSpIdx]);
							tProductVal[curthread].push_back (_binary_op(V.num[tSpIdx], W.arr[V.ind[tSpIdx]], false, false));
						}
					}
				}
			}
			// Prefix-sum of per-thread result sizes to place each thread's
			// output contiguously (indices remain globally sorted).
			std::vector<IU> tdisp(nthreads+1);
			tdisp[0] = 0;
			for(int i=0; i<nthreads; ++i)
			{
				tdisp[i+1] = tdisp[i] + tProductInd[i].size();
			}
			// copy results from temporary vectors
			Product.ind.resize(tdisp[nthreads]);
			Product.num.resize(tdisp[nthreads]);
#ifdef _OPENMP
#pragma omp parallel
#endif
			{
				int curthread = 0;
#ifdef _OPENMP
				curthread = omp_get_thread_num();
#endif
				std::copy(tProductInd[curthread].begin(), tProductInd[curthread].end(), Product.ind.data() + tdisp[curthread]);
				std::copy(tProductVal[curthread].begin() , tProductVal[curthread].end(), Product.num.data() + tdisp[curthread]);
			}
		}
		return Product;
	}
	else
	{
		std::cout << "Grids are not comparable for EWiseApply" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return FullyDistSpVec< IU,T_promote>();
	}
}

/**
 * Performs an arbitrary binary operation _binary_op on the corresponding elements of two vectors with the result stored in a return vector ret.
 * The binary operation is only performed if the binary predicate _doOp returns true for those elements. Otherwise the binary operation is not
 * performed and ret does not contain an element at that position.
 * More formally the operation is defined as:
 * if (_doOp(V[i], W[i]))
 *	ret[i] = _binary_op(V[i], W[i])
 * else
 *	// ret[i] is not set
 * Hence _doOp can be used to implement a filter on either of the vectors.
 *
 * The above is only defined if both V[i] and W[i] exist (i.e. an intersection). To allow a union operation (ex. when V[i] doesn't exist but W[i] does)
 * the allowVNulls flag is set to true and the Vzero argument is used as the missing V[i] value.
 *
 * The type of each element of ret must not necessarily be related to the types of V or W, so the return type must be explicitly specified as a template parameter:
 * FullyDistSpVec<int, double> r = EWiseApply<double>(V, W, plus, retTrue, false, 0)
 **/
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
	(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero, const bool useExtendedBinOp)
{
#ifdef _OPENMP
	// Delegate to the threaded implementation when OpenMP is available.
	return EWiseApply_threaded<RET>(V, W, _binary_op, _doOp, allowVNulls, Vzero, useExtendedBinOp);
#else
	typedef RET T_promote; //typedef typename promote_trait<NU1,NU2>::T_promote T_promote;
	if(*(V.commGrid) == *(W.commGrid))
	{
		FullyDistSpVec< IU, T_promote> Product(V.commGrid);
		//FullyDistVec< IU, NU1> DV (V); // Ariful: I am not sure why it was there??
		if(V.TotalLength() != W.TotalLength())
		{
			std::ostringstream outs;
			outs << "Vector dimensions don't match (" << V.TotalLength() << " vs " << W.TotalLength() << ") for EWiseApply (short version)\n";
			SpParHelper::Print(outs.str());
			MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
		}
		else
		{
			Product.glen = V.glen;
			IU size= W.LocArrSize();
			IU spsize = V.getlocnnz();
			IU sp_iter = 0;	// cursor into V's sorted nonzeros
			if (allowVNulls)
			{
				// iterate over the dense vector
				for(IU i=0; i<size; ++i)
				{
					if(sp_iter < spsize && V.ind[sp_iter] == i)
					{
						if (_doOp(V.num[sp_iter], W.arr[i], false, false))
						{
							Product.ind.push_back(i);
							Product.num.push_back(_binary_op(V.num[sp_iter], W.arr[i], false, false));
						}
						sp_iter++;
					}
					else
					{
						// V is null at i: substitute Vzero and flag vnull=true.
						if (_doOp(Vzero, W.arr[i], true, false))
						{
							Product.ind.push_back(i);
							Product.num.push_back(_binary_op(Vzero, W.arr[i], true, false));
						}
					}
				}
			}
			else
			{
				// iterate over the sparse vector
				for(sp_iter = 0; sp_iter < spsize; ++sp_iter)
				{
					if (_doOp(V.num[sp_iter], W.arr[V.ind[sp_iter]], false, false))
					{
						Product.ind.push_back(V.ind[sp_iter]);
						Product.num.push_back(_binary_op(V.num[sp_iter], W.arr[V.ind[sp_iter]], false, false));
					}
				}
			}
		}
		return Product;
	}
	else
	{
		std::cout << "Grids are not comparable for EWiseApply" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return FullyDistSpVec< IU,T_promote>();
	}
#endif
}

/**
 * Performs an arbitrary binary operation _binary_op on the corresponding elements of two vectors with the result stored in a return vector ret.
 * The binary operation is only performed if the binary predicate _doOp returns true for those elements. Otherwise the binary operation is not
 * performed and ret does not contain an element at that position.
 * More formally the operation is defined as:
 * if (_doOp(V[i], W[i]))
 *	ret[i] = _binary_op(V[i], W[i])
 * else
 *	// ret[i] is not set
 * Hence _doOp can be used to implement a filter on either of the vectors.
 *
 * The above is only defined if both V[i] and W[i] exist (i.e. an intersection). To allow a union operation (ex. when V[i] doesn't exist but W[i] does)
 * the allowVNulls flag is set to true and the Vzero argument is used as the missing V[i] value.
 * !allowVNulls && !allowWNulls => intersection
 * !allowVNulls &&  allowWNulls => operate on all elements of V
 *  allowVNulls && !allowWNulls => operate on all elements of W
 *  allowVNulls &&  allowWNulls => union
 *
 * The type of each element of ret must not necessarily be related to the types of V or W, so the return type must be explicitly specified as a template parameter:
 * FullyDistSpVec<int, double> r = EWiseApply<double>(V, W, plus, ...)
 * For intersection, Vzero and Wzero are irrelevant
 * ABAB: \todo: Should allowIntersect be "false" for all SetDifference uses?
 **/
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
	(const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2 Wzero, const bool allowIntersect, const bool useExtendedBinOp)
{
	typedef RET T_promote; // typename promote_trait<NU1,NU2>::T_promote T_promote;
	if(*(V.commGrid) == *(W.commGrid))
	{
		FullyDistSpVec< IU, T_promote> Product(V.commGrid);
		if(V.glen != W.glen)
		{
			std::ostringstream outs;
			outs << "Vector dimensions don't match (" << V.glen << " vs " << W.glen << ") for EWiseApply (full version)\n";
			SpParHelper::Print(outs.str());
			MPI_Abort(MPI_COMM_WORLD, DIMMISMATCH);
		}
		else
		{
			Product.glen = V.glen;
			// Classic sorted-list merge over the two sparse index sequences.
			typename std::vector< IU >::const_iterator indV = V.ind.begin();
			typename std::vector< NU1 >::const_iterator numV = V.num.begin();
			typename std::vector< IU >::const_iterator indW = W.ind.begin();
			typename std::vector< NU2 >::const_iterator numW = W.num.begin();

			while (indV < V.ind.end() && indW < W.ind.end())
			{
				if (*indV == *indW)
				{
					// overlap
					if (allowIntersect)
					{
						if (_doOp(*numV, *numW, false, false))
						{
							Product.ind.push_back(*indV);
							Product.num.push_back(_binary_op(*numV, *numW, false, false));
						}
					}
					indV++; numV++;
					indW++; numW++;
				}
				else if (*indV < *indW)
				{
					// V has value but W does not
					if (allowWNulls)
					{
						if (_doOp(*numV, Wzero, false, true))
						{
							Product.ind.push_back(*indV);
							Product.num.push_back(_binary_op(*numV, Wzero, false, true));
						}
					}
					indV++; numV++;
				}
				else //(*indV > *indW)
				{
					// W has value but V does not
					if (allowVNulls)
					{
						if (_doOp(Vzero, *numW, true, false))
						{
							Product.ind.push_back(*indW);
							Product.num.push_back(_binary_op(Vzero, *numW, true, false));
						}
					}
					indW++; numW++;
				}
			}
			// clean up
			// Drain whichever list still has entries, honoring the null flags.
			while (allowWNulls && indV < V.ind.end())
			{
				if (_doOp(*numV, Wzero, false, true))
				{
					Product.ind.push_back(*indV);
					Product.num.push_back(_binary_op(*numV, Wzero, false, true));
				}
				indV++; numV++;
			}
			while (allowVNulls && indW < W.ind.end())
			{
				if (_doOp(Vzero, *numW, true, false))
				{
					Product.ind.push_back(*indW);
					Product.num.push_back(_binary_op(Vzero, *numW, true, false));
				}
				indW++; numW++;
			}
		}
		return Product;
	}
	else
	{
		std::cout << "Grids are not comparable for EWiseApply" << std::endl;
		MPI_Abort(MPI_COMM_WORLD, GRIDMISMATCH);
		return FullyDistSpVec< IU,T_promote>();
	}
}

// plain callback versions
// Adapt plain two-argument callables to the extended four-argument interface.
template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
	(const FullyDistSpVec<IU,NU1> & V, const FullyDistVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, NU1 Vzero)
{
	return EWiseApply<RET>(V, W, EWiseExtToPlainAdapter<RET, NU1, NU2, _BinaryOperation>(_binary_op), EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(_doOp), allowVNulls, Vzero, true);
}

template <typename RET, typename IU, typename NU1, typename NU2, typename _BinaryOperation, typename _BinaryPredicate>
FullyDistSpVec<IU,RET> EWiseApply
	(const FullyDistSpVec<IU,NU1> & V, const FullyDistSpVec<IU,NU2> & W , _BinaryOperation _binary_op, _BinaryPredicate _doOp, bool allowVNulls, bool allowWNulls, NU1 Vzero, NU2
Wzero, const bool allowIntersect = true) { return EWiseApply<RET>(V, W, EWiseExtToPlainAdapter<RET, NU1, NU2, _BinaryOperation>(_binary_op), EWiseExtToPlainAdapter<bool, NU1, NU2, _BinaryPredicate>(_doOp), allowVNulls, allowWNulls, Vzero, Wzero, allowIntersect, true); } } #endif
TaskDispatcher.h
// Task dispatcher implementations for the available concurrency runtimes.
// The best detected backend is selected as ConcurrentTaskDispatcher below.

#include "nvtt.h"

// OpenMP
// http://en.wikipedia.org/wiki/OpenMP
#if defined(HAVE_OPENMP)
#include <omp.h>
#endif

// Grand Central Dispatch (GCD/libdispatch)
// http://developer.apple.com/mac/library/documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
#if NV_OS_DARWIN && defined(HAVE_DISPATCH_H)
#define HAVE_GCD 1
#include <dispatch/dispatch.h>
#endif

// Parallel Patterns Library (PPL) is part of Microsoft's concurrency runtime:
// http://msdn.microsoft.com/en-us/library/dd504870.aspx
#if NV_OS_WIN32 && _MSC_VER >= 1600
#define HAVE_PPL 1
#include <ppl.h>
#endif

// Intel Thread Building Blocks (TBB).
// http://www.threadingbuildingblocks.org/
#if defined(HAVE_TBB)
#include <tbb/parallel_for.h>
#endif

#include "nvthread/ParallelFor.h"

namespace nvtt {

    // Runs tasks one after another on the calling thread.
    struct SequentialTaskDispatcher : public TaskDispatcher {
        virtual void dispatch(Task * task, void * context, int count) {
            for (int i = 0; i < count; i++) {
                task(context, i);
            }
        }
    };

    // Dispatcher built on nvthread's own ParallelFor helper.
    struct ParallelTaskDispatcher : public TaskDispatcher {
        virtual void dispatch(Task * task, void * context, int count) {
            nv::ParallelFor parallelFor(task, context);
            parallelFor.run(count); // @@ Add support for custom grain.
        }
    };

#if defined(HAVE_OPENMP)
    // Task dispatcher using an OpenMP parallel-for worksharing loop.
    struct OpenMPTaskDispatcher : public TaskDispatcher {
        virtual void dispatch(Task * task, void * context, int count) {
            #pragma omp parallel for
            for (int i = 0; i < count; i++) {
                task(context, i);
            }
        }
    };
#endif

#if NV_OS_DARWIN && defined(HAVE_DISPATCH_H)
    // Task dispatcher using Apple's Grand Central Dispatch.
    struct AppleTaskDispatcher : public TaskDispatcher {
        // @@ This is really lame, but I refuse to use size_t in the public API.
        struct BlockContext {
            Task * task;
            void * context;
        };

        // Trampoline matching dispatch_apply_f's (void*, size_t) signature.
        static void block(void * context, size_t id) {
            BlockContext * ctx = (BlockContext *)context;
            ctx->task(ctx->context, int(id));
        }

        virtual void dispatch(Task * task, void * context, int count) {
            dispatch_queue_t q = dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
            BlockContext blockCtx = { task, context };
            dispatch_apply_f(count, q, &blockCtx, block);
        }
    };
#endif

#if defined(HAVE_PPL)
    // Index-based body for Concurrency::parallel_for.
    // NOTE: renamed from TaskFunctor — the PPL and TBB sections previously both
    // defined `struct TaskFunctor`, a redefinition error when both runtimes are
    // enabled at once.
    struct PPLTaskFunctor {
        PPLTaskFunctor(Task * task, void * context) : task(task), context(context) {}
        void operator()(int n) const {
            task(context, n);
        }
        Task * task;
        void * context;
    };

    // Task dispatcher using Microsoft's concurrency runtime.
    struct MicrosoftTaskDispatcher : public TaskDispatcher {
        virtual void dispatch(Task * task, void * context, int count) {
            PPLTaskFunctor func(task, context);
            Concurrency::parallel_for(0, count, func);
        }
    };
#endif

#if defined(HAVE_TBB)
    // Range-based body for tbb::parallel_for. The blocked_range overload of
    // parallel_for invokes operator()(const Range &) on the body, not
    // operator()(int &), so each call iterates its assigned sub-range.
    struct TBBTaskFunctor {
        TBBTaskFunctor(Task * task, void * context) : task(task), context(context) {}
        void operator()(const tbb::blocked_range<int> & range) const {
            for (int n = range.begin(); n != range.end(); n++) {
                task(context, n);
            }
        }
        Task * task;
        void * context;
    };

    // Task dispatcher using Intel's Thread Building Blocks.
    struct IntelTaskDispatcher : public TaskDispatcher {
        virtual void dispatch(Task * task, void * context, int count) {
            tbb::parallel_for(tbb::blocked_range<int>(0, count, 1), TBBTaskFunctor(task, context));
        }
    };
#endif

#if defined(HAVE_OPENMP)
    typedef OpenMPTaskDispatcher ConcurrentTaskDispatcher;
#elif defined(HAVE_TBB)
    typedef IntelTaskDispatcher ConcurrentTaskDispatcher;
#elif defined(HAVE_PPL)
    typedef MicrosoftTaskDispatcher ConcurrentTaskDispatcher;
#elif defined(HAVE_GCD)
    typedef AppleTaskDispatcher ConcurrentTaskDispatcher;
#else
    //typedef SequentialTaskDispatcher ConcurrentTaskDispatcher;
    typedef ParallelTaskDispatcher ConcurrentTaskDispatcher;
#endif

} // namespace nvtt
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 4; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; 
t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
blas_server_omp.c
/*********************************************************************/ /* Copyright 2009, 2010 The University of Texas at Austin. */ /* All rights reserved. */ /* */ /* Redistribution and use in source and binary forms, with or */ /* without modification, are permitted provided that the following */ /* conditions are met: */ /* */ /* 1. Redistributions of source code must retain the above */ /* copyright notice, this list of conditions and the following */ /* disclaimer. */ /* */ /* 2. Redistributions in binary form must reproduce the above */ /* copyright notice, this list of conditions and the following */ /* disclaimer in the documentation and/or other materials */ /* provided with the distribution. */ /* */ /* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ /* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ /* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ /* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ /* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ /* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ /* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ /* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ /* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ /* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ /* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ /* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. */ /* */ /* The views and conclusions contained in the software and */ /* documentation are those of the authors and should not be */ /* interpreted as representing official policies, either expressed */ /* or implied, of The University of Texas at Austin. 
*/
/*********************************************************************/

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
//#include <sys/mman.h>
#include "common.h"

#ifndef USE_OPENMP

/* Without OpenMP, fall back to the pthread-based server implementation. */
#include "blas_server.c"

#else

#ifndef likely
#ifdef __GNUC__
#define likely(x) __builtin_expect(!!(x), 1)
#else
#define likely(x) (x)
#endif
#endif
#ifndef unlikely
#ifdef __GNUC__
#define unlikely(x) __builtin_expect(!!(x), 0)
#else
#define unlikely(x) (x)
#endif
#endif

#ifndef OMP_SCHED
#define OMP_SCHED static
#endif

int blas_server_avail = 0;

/* Per-(parallel-region, thread) scratch buffers, lazily allocated. */
static void * blas_thread_buffer[MAX_PARALLEL_NUMBER][MAX_CPU_NUMBER];
#ifdef HAVE_C11
static atomic_bool blas_buffer_inuse[MAX_PARALLEL_NUMBER];
#else
static _Bool blas_buffer_inuse[MAX_PARALLEL_NUMBER];
#endif

/* Grow/shrink the per-thread buffer table to match blas_cpu_number:
   allocate missing buffers for active threads, free the rest. */
static void adjust_thread_buffers() {

  int i=0, j=0;

  //adjust buffer for each thread
  for(i=0; i < MAX_PARALLEL_NUMBER; i++) {
    for(j=0; j < blas_cpu_number; j++){
      if(blas_thread_buffer[i][j] == NULL){
        blas_thread_buffer[i][j] = blas_memory_alloc(2);
      }
    }
    for(; j < MAX_CPU_NUMBER; j++){
      if(blas_thread_buffer[i][j] != NULL){
        blas_memory_free(blas_thread_buffer[i][j]);
        blas_thread_buffer[i][j] = NULL;
      }
    }
  }
}

/* Set the number of BLAS worker threads (clamped to [1, MAX_CPU_NUMBER])
   and propagate it to the OpenMP runtime and the buffer table. */
void goto_set_num_threads(int num_threads) {

  if (num_threads < 1) num_threads = blas_num_threads;

  if (num_threads > MAX_CPU_NUMBER) num_threads = MAX_CPU_NUMBER;

  if (num_threads > blas_num_threads) {
    blas_num_threads = num_threads;
  }

  blas_cpu_number = num_threads;

  omp_set_num_threads(blas_cpu_number);

  adjust_thread_buffers();
#if defined(ARCH_MIPS64)
  //set parameters for different number of threads.
  blas_set_parameter();
#endif

}

/* Public alias for goto_set_num_threads. */
void openblas_set_num_threads(int num_threads) {
	goto_set_num_threads(num_threads);
}

/* One-time (or post-fork) server initialization. */
int blas_thread_init(void){

  blas_get_cpu_number();

  adjust_thread_buffers();
  blas_server_avail = 1;

  return 0;
}

/* Release all scratch buffers and mark the server unavailable. */
int BLASFUNC(blas_thread_shutdown)(void){

  int i=0, j=0;
  blas_server_avail = 0;

  for(i=0; i<MAX_PARALLEL_NUMBER; i++) {
    for(j=0; j<MAX_CPU_NUMBER; j++){
      if(blas_thread_buffer[i][j]!=NULL){
        blas_memory_free(blas_thread_buffer[i][j]);
        blas_thread_buffer[i][j]=NULL;
      }
    }
  }

  return 0;
}

/* Invoke a legacy-interface kernel: decode the precision/complex bits of
   MODE and cast FUNC to the matching function-pointer signature before
   calling it with the unpacked blas_arg_t fields. */
static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){

      if (!(mode & BLAS_COMPLEX)){
#ifdef EXPRECISION
	if ((mode & BLAS_PREC) == BLAS_XDOUBLE){
	  /* REAL / Extended Double */
	  void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble *, BLASLONG, xdouble *, BLASLONG, xdouble *, BLASLONG, void *) = func;

	  afunc(args -> m, args -> n, args -> k, ((xdouble *)args -> alpha)[0], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb);
	} else
#endif
	  if ((mode & BLAS_PREC) == BLAS_DOUBLE){
	    /* REAL / Double */
	    void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, double *, BLASLONG, void *) = func;

	    afunc(args -> m, args -> n, args -> k, ((double *)args -> alpha)[0], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb);
	  } else if ((mode & BLAS_PREC) == BLAS_SINGLE){
	    /* REAL / Single */
	    void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG, void *) = func;

	    afunc(args -> m, args -> n, args -> k, ((float *)args -> alpha)[0], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb);
#ifdef BUILD_BFLOAT16
	  } else if ((mode & BLAS_PREC) == BLAS_BFLOAT16){
	    /* REAL / BFLOAT16 */
	    void (*afunc)(BLASLONG, BLASLONG, BLASLONG, bfloat16, bfloat16 *, BLASLONG, bfloat16 *, BLASLONG, bfloat16 *, BLASLONG, void *) = func;

	    afunc(args -> m, args -> n, args -> k, ((bfloat16 *)args -> alpha)[0], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb);
	  } else if ((mode & BLAS_PREC) == BLAS_STOBF16){
	    /* REAL / BLAS_STOBF16 */
	    void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, bfloat16 *, BLASLONG, float *, BLASLONG, void *) = func;

	    afunc(args -> m, args -> n, args -> k, ((float *)args -> alpha)[0], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb);
	  } else if ((mode & BLAS_PREC) == BLAS_DTOBF16){
	    /* REAL / BLAS_DTOBF16 */
	    void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, bfloat16 *, BLASLONG, double *, BLASLONG, void *) = func;

	    afunc(args -> m, args -> n, args -> k, ((double *)args -> alpha)[0], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb);
#endif
	  } else {
	    /* REAL / Other types in future */
	  }
      } else {
#ifdef EXPRECISION
	if ((mode & BLAS_PREC) == BLAS_XDOUBLE){
	  /* COMPLEX / Extended Double */
	  void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble, xdouble *, BLASLONG, xdouble *, BLASLONG, xdouble *, BLASLONG, void *) = func;

	  afunc(args -> m, args -> n, args -> k, ((xdouble *)args -> alpha)[0], ((xdouble *)args -> alpha)[1], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb);
	} else
#endif
	  if ((mode & BLAS_PREC) == BLAS_DOUBLE){
	    /* COMPLEX / Double */
	    void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double, double *, BLASLONG, double *, BLASLONG, double *, BLASLONG, void *) = func;

	    afunc(args -> m, args -> n, args -> k, ((double *)args -> alpha)[0], ((double *)args -> alpha)[1], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb);
	  } else if ((mode & BLAS_PREC) == BLAS_SINGLE){
	    /* COMPLEX / Single */
	    void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG, void *) = func;

	    afunc(args -> m, args -> n, args -> k, ((float *)args -> alpha)[0], ((float *)args -> alpha)[1], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb);
	  } else {
	    /* COMPLEX / Other types in future */
	  }
      }
}

/* Run one queue entry on the current OpenMP thread.  Picks the per-thread
   scratch buffer (or allocates a fallback), derives the sa/sb work areas
   from the queue's mode bits, then dispatches to the legacy, pthread-compat,
   or standard routine entry point. */
static void exec_threads(blas_queue_t *queue, int buf_index){

  void *buffer, *sa, *sb;
  int pos=0, release_flag=0;

  buffer = NULL;
  sa = queue -> sa;
  sb = queue -> sb;

#ifdef CONSISTENT_FPCSR
  /* Replicate the caller's x87/SSE FP control state on this worker thread. */
  __asm__ __volatile__ ("ldmxcsr %0" : : "m" (queue -> sse_mode));
  __asm__ __volatile__ ("fldcw %0"   : : "m" (queue -> x87_mode));
#endif

  if ((sa == NULL) && (sb == NULL) && ((queue -> mode & BLAS_PTHREAD) == 0)) {

    pos = omp_get_thread_num();
    buffer = blas_thread_buffer[buf_index][pos];

    //fallback
    if(buffer==NULL) {
      buffer = blas_memory_alloc(2);
      release_flag=1;
    }

    if (sa == NULL) {
      sa = (void *)((BLASLONG)buffer + GEMM_OFFSET_A);
      queue->sa=sa;
    }

    if (sb == NULL) {
      /* sb is carved out of the same buffer, after the (aligned) sa panel
         whose size depends on the precision/complex mode. */
      if (!(queue -> mode & BLAS_COMPLEX)){
#ifdef EXPRECISION
	if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE){
	  sb = (void *)(((BLASLONG)sa + ((QGEMM_P * QGEMM_Q * sizeof(xdouble)
					  + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
	} else
#endif
	  if ((queue -> mode & BLAS_PREC) == BLAS_DOUBLE){
#if defined ( BUILD_DOUBLE) || defined (BUILD_COMPLEX16)
	    sb = (void *)(((BLASLONG)sa + ((DGEMM_P * DGEMM_Q * sizeof(double)
					    + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
#endif
	  } else if ((queue -> mode & BLAS_PREC) == BLAS_SINGLE){
#if defined (BUILD_SINGLE) || defined (BUILD_COMPLEX)
	    sb = (void *)(((BLASLONG)sa + ((SGEMM_P * SGEMM_Q * sizeof(float)
					    + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
#endif
	  } else {
	    /* Other types in future */
	  }
      } else {
#ifdef EXPRECISION
	if ((queue -> mode & BLAS_PREC) == BLAS_XDOUBLE){
	  sb = (void *)(((BLASLONG)sa + ((XGEMM_P * XGEMM_Q * 2 * sizeof(xdouble)
					  + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
	} else
#endif
	  if ((queue -> mode & BLAS_PREC) == BLAS_DOUBLE){
#ifdef BUILD_COMPLEX16
	    sb = (void *)(((BLASLONG)sa + ((ZGEMM_P * ZGEMM_Q * 2 * sizeof(double)
					    + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
#else
	    fprintf(stderr,"UNHANDLED COMPLEX16\n");
#endif
	  } else if ((queue -> mode & BLAS_PREC) == BLAS_SINGLE) {
#ifdef BUILD_COMPLEX
	    sb = (void *)(((BLASLONG)sa + ((CGEMM_P * CGEMM_Q * 2 * sizeof(float)
					    + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B);
#else
	    fprintf(stderr,"UNHANDLED COMPLEX\n");
#endif
	  } else {
	    /* Other types in future */
	  }
      }
      queue->sb=sb;
    }
  }

  if (queue -> mode & BLAS_LEGACY) {
    legacy_exec(queue -> routine, queue -> mode, queue -> args, sb);
  } else if (queue -> mode & BLAS_PTHREAD) {
    void (*pthreadcompat)(void *) = queue -> routine;
    (pthreadcompat)(queue -> args);
  } else {
    int (*routine)(blas_arg_t *, void *, void *, void *, void *, BLASLONG) = queue -> routine;

    (routine)(queue -> args, queue -> range_m, queue -> range_n, sa, sb, queue -> position);
  }

  if (release_flag) blas_memory_free(buffer);
}

/* Execute NUM queue entries in parallel.  Reserves one of the
   MAX_PARALLEL_NUMBER buffer sets (spin-waiting if all are busy; the
   reservation uses a CAS when C11 atomics are available, otherwise a plain
   test-and-set), runs the entries under an OpenMP parallel-for, then
   releases the buffer set. */
int exec_blas(BLASLONG num, blas_queue_t *queue){

  // Handle lazy re-init of the thread-pool after a POSIX fork
  if (unlikely(blas_server_avail == 0)) blas_thread_init();

  BLASLONG i, buf_index;

  if ((num <= 0) || (queue == NULL)) return 0;

#ifdef CONSISTENT_FPCSR
  /* Snapshot the caller's FP control state into every queue entry so the
     workers can restore it (see exec_threads). */
  for (i = 0; i < num; i ++) {
    __asm__ __volatile__ ("fnstcw %0"  : "=m" (queue[i].x87_mode));
    __asm__ __volatile__ ("stmxcsr %0" : "=m" (queue[i].sse_mode));
  }
#endif

  while(true) {
    for(i=0; i < MAX_PARALLEL_NUMBER; i++) {
#ifdef HAVE_C11
      _Bool inuse = false;
      if(atomic_compare_exchange_weak(&blas_buffer_inuse[i], &inuse, true)) {
#else
      if(blas_buffer_inuse[i] == false) {
        blas_buffer_inuse[i] = true;
#endif
        buf_index = i;
        break;
      }
    }
    if(i != MAX_PARALLEL_NUMBER)
      break;
  }

#pragma omp parallel for schedule(OMP_SCHED)
  for (i = 0; i < num; i ++) {

#ifndef USE_SIMPLE_THREADED_LEVEL3
    queue[i].position = i;
#endif

    exec_threads(&queue[i], buf_index);
  }

#ifdef HAVE_C11
  atomic_store(&blas_buffer_inuse[buf_index], false);
#else
  blas_buffer_inuse[buf_index] = false;
#endif

  return 0;
}

#endif
iw_core.c
/* // Copyright 2016-2018 Intel Corporation All Rights Reserved. // // The source code, information and material ("Material") contained herein is // owned by Intel Corporation or its suppliers or licensors, and title // to such Material remains with Intel Corporation or its suppliers or // licensors. The Material contains proprietary information of Intel // or its suppliers and licensors. The Material is protected by worldwide // copyright laws and treaty provisions. No part of the Material may be used, // copied, reproduced, modified, published, uploaded, posted, transmitted, // distributed or disclosed in any way without Intel's prior express written // permission. No license under any patent, copyright or other intellectual // property rights in the Material is granted to or conferred upon you, // either expressly, by implication, inducement, estoppel or otherwise. // Any license under such intellectual property rights must be express and // approved by Intel in writing. // // Unless otherwise agreed by Intel in writing, // you may not remove or alter this notice or any other notice embedded in // Materials by Intel or Intel's suppliers or licensors in any way. 
// */

#include "iw_own.h"
#include "iw/iw_image.h"

/* Platform/compiler feature detection for the atomic-add fallback chain:
   Win32 intrinsics > OpenMP atomics (GCC >= 4.7 or non-GCC) > C11/GCC
   __atomic builtins > plain non-atomic add (with a compile-time warning). */
#if defined _WIN32
#include <malloc.h>
#include <intrin.h>
#else
#ifdef _OPENMP
#if (defined __GNUC__) && !(defined __clang__)
#define GCC_VERSION (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__)
#if (GCC_VERSION >= 40700)
#define OWN_ALLOW_OMP_ATOMICS
#endif
#undef GCC_VERSION
#else
#define OWN_ALLOW_OMP_ATOMICS
#endif
#endif

#ifdef OWN_ALLOW_OMP_ATOMICS
#include <omp.h> // Use OMP atomics
#else
#if (defined __clang__ && defined __has_include)
#if !__has_include(<stdatomic.h>)
#ifndef __STDC_NO_ATOMICS__
#define __STDC_NO_ATOMICS__
#endif
#endif
#elif (defined __GNUC__)
#define GCC_VERSION (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__)
#if (GCC_VERSION < 40900)
#ifndef __STDC_NO_ATOMICS__
#define __STDC_NO_ATOMICS__
#endif
#endif
#undef GCC_VERSION
#endif
#if !defined __STDC_NO_ATOMICS__
#include <stdatomic.h>
#ifndef __ATOMIC_ACQ_REL
#define __ATOMIC_ACQ_REL 4
#endif
#else
/* NOTE(review): "my not be" below is a typo for "may not be", but the text is
   a string literal inside #pragma message, so it is left byte-identical here. */
#pragma message("Atomic operations are not supported by this compiler. Some features my not be thread-safe.")
#endif
#endif
#ifndef __APPLE__
#include <malloc.h>
#endif
#endif

/* /////////////////////////////////////////////////////////////////////////////
//                   IW DLL entry points
///////////////////////////////////////////////////////////////////////////// */
#ifdef IW_BUILD_DLL
#if defined _WIN32
#include <Windows.h>
/* Standard Win32 DLL entry point; no per-event work is required. */
int WINAPI DllMain( HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved )
{
    switch( fdwReason )
    {
    case DLL_PROCESS_ATTACH: break;
    case DLL_THREAD_ATTACH:  break;
    case DLL_THREAD_DETACH:  break;
    case DLL_PROCESS_DETACH: break;
    default: break;
    }
    return 1;
    UNREFERENCED_PARAMETER(hinstDLL);
    UNREFERENCED_PARAMETER(lpvReserved);
}
#elif defined __unix__
/* Shared-object load/unload hooks (no-ops). */
int _init(void)
{
    return 1;
}
void _fini(void)
{
}
#elif defined __APPLE__
__attribute__((constructor)) void initializer( void )
{
    static int initialized = 0;
    if(!initialized)
    {
        initialized = 1;
    }
    return;
}
__attribute__((destructor)) void destructor()
{
}
#endif
#endif

/* /////////////////////////////////////////////////////////////////////////////
//                   Base IW definitions
///////////////////////////////////////////////////////////////////////////// */

/* Size in bytes of one element of the given IPP data type (0 if unknown). */
IW_DECL(int) iwTypeToSize(IppDataType dataType)
{
    switch(dataType)
    {
    case ipp8u:  case ipp8s:
        return 1;
    case ipp8uc: case ipp8sc: case ipp16u: case ipp16s:
        return 2;
    case ipp16uc: case ipp16sc: case ipp32u: case ipp32s: case ipp32f:
        return 4;
    case ipp32uc: case ipp32sc: case ipp32fc: case ipp64u: case ipp64s: case ipp64f:
        return 8;
    case ipp64uc: case ipp64sc: case ipp64fc:
        return 16;
    default:
        return 0;
    }
}

/* Minimum representable value of the given type (0 for unhandled types). */
IW_DECL(double) iwTypeGetMin(IppDataType type)
{
    switch(type)
    {
    case ipp8u:  return IPP_MIN_8U;
    case ipp8s:  return IPP_MIN_8S;
    case ipp16u: return IPP_MIN_16U;
    case ipp16s: return IPP_MIN_16S;
    case ipp32u: return IPP_MIN_32U;
    case ipp32s: return IPP_MIN_32S;
    case ipp32f: return -IPP_MAXABS_32F;
    case ipp64f: return -IPP_MAXABS_64F;
    default:     return 0;
    }
}

/* Maximum representable value of the given type (0 for unhandled types). */
IW_DECL(double) iwTypeGetMax(IppDataType type)
{
    switch(type)
    {
    case ipp8u:  return IPP_MAX_8U;
    case ipp8s:  return IPP_MAX_8S;
    case ipp16u: return IPP_MAX_16U;
    case ipp16s: return IPP_MAX_16S;
    case ipp32u: return IPP_MAX_32U;
    case ipp32s: return IPP_MAX_32S;
    case ipp32f: return IPP_MAXABS_32F;
    case ipp64f: return IPP_MAXABS_64F;
    default:     return 0;
    }
}

/* Width of the value range (max - min) for integer types; 0 otherwise. */
IW_DECL(double) iwTypeGetRange(IppDataType type)
{
    switch(type)
    {
    case ipp8u:  return ((double)IPP_MAX_8U  - IPP_MIN_8U);
    case ipp8s:  return ((double)IPP_MAX_8S  - IPP_MIN_8S);
    case ipp16u: return ((double)IPP_MAX_16U - IPP_MIN_16U);
    case ipp16s: return ((double)IPP_MAX_16S - IPP_MIN_16S);
    case ipp32u: return ((double)IPP_MAX_32U - IPP_MIN_32U);
    case ipp32s: return ((double)IPP_MAX_32S - IPP_MIN_32S);
    default:     return 0;
    }
}

/* 1 if the type is a floating-point (real or complex) type, 0 otherwise. */
IW_DECL(int) iwTypeIsFloat(IppDataType type)
{
    return (type == ipp64f || type == ipp64fc || type == ipp32f || type == ipp32fc)?1:0;
}

/* 1 if the type is signed (includes all float types), 0 otherwise. */
IW_DECL(int) iwTypeIsSigned(IppDataType type)
{
    return (type == ipp64f || type == ipp64fc || type == ipp64s || type == ipp64sc ||
        type == ipp32f || type == ipp32fc || type == ipp32s || type == ipp32sc ||
        type == ipp16s || type == ipp16sc || type == ipp8s || type == ipp8sc)?1:0;
}

/* Clamp VAL to the representable range of DSTTYPE (pass-through for
   unhandled/float destination types). */
IW_DECL(double) iwValueSaturate(double val, IppDataType dstType)
{
    switch(dstType)
    {
    case ipp8u:  return (double)ownCast_64f8u(val);
    case ipp8s:  return (double)ownCast_64f8s(val);
    case ipp16u: return (double)ownCast_64f16u(val);
    case ipp16s: return (double)ownCast_64f16s(val);
    case ipp32u: return (double)ownCast_64f32u(val);
    case ipp32s: return (double)ownCast_64f32s(val);
    default:     return val;
    }
}

/* Map a relative value in [0,1] to the absolute range of TYPE
   (identity for float types). */
IW_DECL(double) iwValueRelToAbs(double val, IppDataType type)
{
    if(iwTypeIsFloat(type))
        return val;
    else
    {
        double min = iwTypeGetMin(type);
        double max = iwTypeGetMax(type);
        return (max - min)*val + min;
    }
}

/* Inverse of iwValueRelToAbs: map an absolute value back to [0,1]. */
IW_DECL(double) iwValueAbsToRel(double val, IppDataType type)
{
    if(iwTypeIsFloat(type))
        return val;
    else
    {
        double min = iwTypeGetMin(type);
        double max = iwTypeGetMax(type);
        return (val - min)/(max - min);
    }
}

/* Correction term for signed integer types whose range is asymmetric
   around zero (e.g. |min| = max+1); 0 for unsigned and float types. */
IW_DECL(double) iwRangeWeightCorrector(IppDataType type)
{
    if(iwTypeIsSigned(type) && !iwTypeIsFloat(type))
    {
        double min   = iwTypeGetMin(type);
        double max   = iwTypeGetMax(type);
        double range = iwTypeGetRange(type);
        if(range)
            return (-min-max)/range;
        else
            return 0;
    }
    return 0;
}

/* /////////////////////////////////////////////////////////////////////////////
//                   IwAtomic - Atomic operations layer
///////////////////////////////////////////////////////////////////////////// */

/* Atomically add DELTA to *PINT and return the PREVIOUS value.
   Falls back to a non-atomic add when no atomic facility was detected
   above (see the #pragma message warning). */
IW_DECL(int) iwAtomic_AddInt(int *pInt, int delta)
{
#if defined _WIN32
    return _InterlockedExchangeAdd((long volatile*)pInt, delta);
#else
#ifdef OWN_ALLOW_OMP_ATOMICS
    int ret;
#pragma omp atomic capture
    {
        ret = *pInt;
        *pInt += delta;
    }
    return ret;
#else
#if defined __APPLE__ && !defined __STDC_NO_ATOMICS__
    return __atomic_fetch_add(pInt, delta, __ATOMIC_ACQ_REL);
#elif defined __GNUC__ && !defined __STDC_NO_ATOMICS__
    return __atomic_fetch_add(pInt, delta, __ATOMIC_ACQ_REL);
#else
    /* Last-resort non-atomic fallback: NOT thread-safe. */
    int ret = *pInt;
    *pInt += delta;
    return ret;
#endif
#endif
#endif
}

/* /////////////////////////////////////////////////////////////////////////////
//                   IW version info
///////////////////////////////////////////////////////////////////////////// */

/* Fill PVERSION with the IW library version; no-op on NULL. */
IW_DECL(void) iwGetLibVersion(IwVersion *pVersion)
{
    if(!pVersion)
        return;

    pVersion->m_major       = IW_VERSION_MAJOR;
    pVersion->m_minor       = IW_VERSION_MINOR;
    pVersion->m_update      = IW_VERSION_UPDATE;
    pVersion->m_versionStr  = IW_VERSION_STR;
    pVersion->m_pIppVersion = ippiGetLibVersion();
#ifdef IW_PREBUILT
    pVersion->m_bUserBuild  = 0;
#else
    pVersion->m_bUserBuild  = 1;
#endif
}

/* /////////////////////////////////////////////////////////////////////////////
//                   IW status
///////////////////////////////////////////////////////////////////////////// */

/* Human-readable string for STATUS.  NOTE(review): the three non-ICV_BASE
   branches all delegate to ippGetStatusString; the split presumably exists
   to allow distinct handling of IW-specific error/warning ranges later. */
IW_DECL(const char*) iwGetStatusString(IppStatus status)
{
#ifdef ICV_BASE
    (void)status;
    return "Status messages are not supported";
#else
    if(status <= iwStsErr)
        return ippGetStatusString(status);
    else if(status >= iwStsWrn)
        return ippGetStatusString(status);
    else
        return ippGetStatusString(status);
#endif
}
/* tree-pretty-print.c (file boundary marker from source concatenation) */
/* Pretty formatting of GENERIC trees in C syntax. Copyright (C) 2001-2018 Free Software Foundation, Inc. Adapted from c-pretty-print.c by Diego Novillo <dnovillo@redhat.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "backend.h" #include "rtl.h" #include "tree.h" #include "predict.h" #include "cgraph.h" #include "tree-pretty-print.h" #include "stor-layout.h" #include "langhooks.h" #include "tree-iterator.h" #include "dumpfile.h" #include "internal-fn.h" #include "gomp-constants.h" #include "gimple.h" /* Local functions, macros and variables. */ static const char *op_symbol (const_tree); static void pretty_print_string (pretty_printer *, const char*); static void newline_and_indent (pretty_printer *, int); static void maybe_init_pretty_print (FILE *); static void print_struct_decl (pretty_printer *, const_tree, int, dump_flags_t); static void do_niy (pretty_printer *, const_tree, dump_flags_t); #define INDENT(SPACE) do { \ int i; for (i = 0; i<SPACE; i++) pp_space (pp); } while (0) #define NIY do_niy (pp, node, flags) static pretty_printer *tree_pp; /* Try to print something for an unknown tree code. 
*/

static void
do_niy (pretty_printer *pp, const_tree node, dump_flags_t flags)
{
  int i, len;

  pp_string (pp, "<<< Unknown tree: ");
  pp_string (pp, get_tree_code_name (TREE_CODE (node)));

  /* For expressions, still dump each operand so the output is useful
     even though the code itself is unknown.  */
  if (EXPR_P (node))
    {
      len = TREE_OPERAND_LENGTH (node);
      for (i = 0; i < len; ++i)
	{
	  newline_and_indent (pp, 2);
	  dump_generic_node (pp, TREE_OPERAND (node, i), 2, flags, false);
	}
    }

  pp_string (pp, " >>>");
}

/* Debugging function to print out a generic expression T to stderr,
   followed by a newline.  Intended to be called from a debugger.  */

DEBUG_FUNCTION void
debug_generic_expr (tree t)
{
  print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS);
  fprintf (stderr, "\n");
}

/* Debugging function to print out a generic statement T to stderr,
   followed by a newline.  Intended to be called from a debugger.  */

DEBUG_FUNCTION void
debug_generic_stmt (tree t)
{
  print_generic_stmt (stderr, t, TDF_VOPS|TDF_MEMSYMS);
  fprintf (stderr, "\n");
}

/* Debugging function to print out a chain of trees.  Walks TREE_CHAIN
   links, detecting and reporting cycles via a seen-set so a corrupted
   (circular) chain cannot hang the debugger.  */

DEBUG_FUNCTION void
debug_tree_chain (tree t)
{
  hash_set<tree> seen;

  while (t)
    {
      print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS|TDF_UID);
      fprintf (stderr, " ");
      t = TREE_CHAIN (t);
      /* seen.add returns true if T was already present: a cycle.  */
      if (seen.add (t))
	{
	  fprintf (stderr, "... [cycled back to ");
	  print_generic_expr (stderr, t, TDF_VOPS|TDF_MEMSYMS|TDF_UID);
	  fprintf (stderr, "]");
	  break;
	}
    }
  fprintf (stderr, "\n");
}

/* Prints declaration DECL to the FILE with details specified by FLAGS.  */

void
print_generic_decl (FILE *file, tree decl, dump_flags_t flags)
{
  maybe_init_pretty_print (file);
  print_declaration (tree_pp, decl, 2, flags);
  pp_write_text_to_stream (tree_pp);
}

/* Print tree T, and its successors, on file FILE.  FLAGS specifies details
   to show in the dump.  See TDF_* in dumpfile.h.  Terminates the output
   with a newline and flushes.  */

void
print_generic_stmt (FILE *file, tree t, dump_flags_t flags)
{
  maybe_init_pretty_print (file);
  dump_generic_node (tree_pp, t, 0, flags, true);
  pp_newline_and_flush (tree_pp);
}

/* Print tree T, and its successors, on file FILE.  FLAGS specifies details
   to show in the dump.  See TDF_* in dumpfile.h.  The output is indented by
   INDENT spaces.
*/

void
print_generic_stmt_indented (FILE *file, tree t, dump_flags_t flags, int indent)
{
  int i;

  maybe_init_pretty_print (file);

  /* Emit the requested leading indentation before the node itself.  */
  for (i = 0; i < indent; i++)
    pp_space (tree_pp);
  dump_generic_node (tree_pp, t, indent, flags, true);
  pp_newline_and_flush (tree_pp);
}

/* Print a single expression T on file FILE.  FLAGS specifies details to show
   in the dump.  See TDF_* in dumpfile.h.  Unlike print_generic_stmt, no
   trailing newline is emitted.  */

void
print_generic_expr (FILE *file, tree t, dump_flags_t flags)
{
  maybe_init_pretty_print (file);
  dump_generic_node (tree_pp, t, 0, flags, false);
  pp_flush (tree_pp);
}

/* Dump NAME, an IDENTIFIER_POINTER, sanitized so that D<num> sequences in it
   are replaced with Dxxxx, as long as they are at the start or preceded by $
   and at the end or followed by $.  See make_fancy_name in tree-sra.c.
   Works in two passes: pass 1 counts the substitutions and computes the
   adjusted output length; pass 2 builds the sanitized copy.  */

static void
dump_fancy_name (pretty_printer *pp, tree name)
{
  int cnt = 0;
  int length = IDENTIFIER_LENGTH (name);
  const char *n = IDENTIFIER_POINTER (name);

  /* Pass 1: find each "D<digits>" bounded by start/'$' and end/'$';
     each such run of length L contributes (5 - L) to the new length
     since it is replaced by the 5-char "Dxxxx".  */
  do
    {
      n = strchr (n, 'D');
      if (n == NULL)
	break;
      if (ISDIGIT (n[1]) && (n == IDENTIFIER_POINTER (name) || n[-1] == '$'))
	{
	  int l = 2;
	  while (ISDIGIT (n[l]))
	    l++;
	  if (n[l] == '\0' || n[l] == '$')
	    {
	      cnt++;
	      length += 5 - l;
	    }
	  n += l;
	}
      else
	n++;
    }
  while (1);

  /* Nothing to sanitize: print the identifier unchanged.  */
  if (cnt == 0)
    {
      pp_tree_identifier (pp, name);
      return;
    }

  /* Pass 2: copy the name, splicing "Dxxxx" over each matched uid.
     P tracks the write position; N tracks the last unwritten source
     position; Q scans for the next candidate 'D'.  */
  char *str = XNEWVEC (char, length + 1);
  char *p = str;
  const char *q;

  q = n = IDENTIFIER_POINTER (name);
  do
    {
      q = strchr (q, 'D');
      if (q == NULL)
	break;
      if (ISDIGIT (q[1]) && (q == IDENTIFIER_POINTER (name) || q[-1] == '$'))
	{
	  int l = 2;
	  while (ISDIGIT (q[l]))
	    l++;
	  if (q[l] == '\0' || q[l] == '$')
	    {
	      memcpy (p, n, q - n);
	      memcpy (p + (q - n), "Dxxxx", 5);
	      p += (q - n) + 5;
	      n = q + l;
	    }
	  q += l;
	}
      else
	q++;
    }
  while (1);

  /* Copy the remaining tail after the last substitution.  */
  memcpy (p, n, IDENTIFIER_LENGTH (name) - (n - IDENTIFIER_POINTER (name)));
  str[length] = '\0';
  if (pp_translate_identifiers (pp))
    {
      const char *text = identifier_to_locale (str);
      pp_append_text (pp, text, text + strlen (text));
    }
  else
    pp_append_text (pp, str, str + length);
  XDELETEVEC (str);
}

/* Dump the name of a _DECL node and
its DECL_UID if TDF_UID is set in FLAGS.  */

static void
dump_decl_name (pretty_printer *pp, tree node, dump_flags_t flags)
{
  tree name = DECL_NAME (node);

  if (name)
    {
      if ((flags & TDF_ASMNAME)
	  && HAS_DECL_ASSEMBLER_NAME_P (node)
	  && DECL_ASSEMBLER_NAME_SET_P (node))
	pp_tree_identifier (pp, DECL_ASSEMBLER_NAME_RAW (node));
      /* For -fcompare-debug don't dump DECL_NAMELESS names at all,
	 -g might have created more fancy names and their indexes
	 could get out of sync.  Usually those should be DECL_IGNORED_P
	 too, SRA can create even non-DECL_IGNORED_P DECL_NAMELESS fancy
	 names, let's hope those never get out of sync after doing the
	 dump_fancy_name sanitization.  */
      else if ((flags & TDF_COMPARE_DEBUG)
	       && DECL_NAMELESS (node)
	       && DECL_IGNORED_P (node))
	name = NULL_TREE;
      /* For DECL_NAMELESS names look for embedded uids in the
	 names and sanitize them for TDF_NOUID.  */
      else if ((flags & TDF_NOUID) && DECL_NAMELESS (node))
	dump_fancy_name (pp, name);
      else
	pp_tree_identifier (pp, name);
    }

  /* GIMPLE FE dumps use '_' between the prefix and uid, plain dumps '.'.  */
  char uid_sep = (flags & TDF_GIMPLE) ? '_' : '.';
  if ((flags & TDF_UID) || name == NULL_TREE)
    {
      if (TREE_CODE (node) == LABEL_DECL && LABEL_DECL_UID (node) != -1)
	pp_printf (pp, "L%c%d", uid_sep, (int) LABEL_DECL_UID (node));
      else if (TREE_CODE (node) == DEBUG_EXPR_DECL)
	{
	  if (flags & TDF_NOUID)
	    pp_string (pp, "D#xxxx");
	  else
	    pp_printf (pp, "D#%i", DEBUG_TEMP_UID (node));
	}
      else
	{
	  /* 'C' prefix for CONST_DECLs, 'D' for everything else.  */
	  char c = TREE_CODE (node) == CONST_DECL ? 'C' : 'D';
	  if (flags & TDF_NOUID)
	    pp_printf (pp, "%c.xxxx", c);
	  else
	    pp_printf (pp, "%c%c%u", c, uid_sep, DECL_UID (node));
	}
    }

  /* Points-to uid, shown only when it diverges from the decl uid.  */
  if ((flags & TDF_ALIAS) && DECL_PT_UID (node) != DECL_UID (node))
    {
      if (flags & TDF_NOUID)
	pp_printf (pp, "ptD.xxxx");
      else
	pp_printf (pp, "ptD.%u", DECL_PT_UID (node));
    }
}

/* Like the above, but used for pretty printing function calls.
*/

static void
dump_function_name (pretty_printer *pp, tree node, dump_flags_t flags)
{
  /* Look through a conversion wrapper to reach the callee decl.  */
  if (CONVERT_EXPR_P (node))
    node = TREE_OPERAND (node, 0);
  if (DECL_NAME (node) && (flags & TDF_ASMNAME) == 0)
    pp_string (pp, lang_hooks.decl_printable_name (node, 1));
  else
    dump_decl_name (pp, node, flags);
}

/* Dump a function declaration.  NODE is the FUNCTION_TYPE.  PP, SPC and
   FLAGS are as in dump_generic_node.  Prints the parenthesized parameter
   type list, handling void, varargs and unprototyped cases.  */

static void
dump_function_declaration (pretty_printer *pp, tree node,
			   int spc, dump_flags_t flags)
{
  bool wrote_arg = false;
  tree arg;

  pp_space (pp);
  pp_left_paren (pp);

  /* Print the argument types.  The TREE_LIST is terminated either by
     void_list_node (prototyped, fixed arity) or NULL (varargs or
     unprototyped).  */
  arg = TYPE_ARG_TYPES (node);
  while (arg && arg != void_list_node && arg != error_mark_node)
    {
      if (wrote_arg)
	{
	  pp_comma (pp);
	  pp_space (pp);
	}
      wrote_arg = true;
      dump_generic_node (pp, TREE_VALUE (arg), spc, flags, false);
      arg = TREE_CHAIN (arg);
    }

  /* Drop the trailing void_type_node if we had any previous argument.  */
  if (arg == void_list_node && !wrote_arg)
    pp_string (pp, "void");
  /* Properly dump vararg function types.  */
  else if (!arg && wrote_arg)
    pp_string (pp, ", ...");
  /* Avoid printing any arg for unprototyped functions.  */

  pp_right_paren (pp);
}

/* Dump the domain associated with an array, as "[N]" for a zero-based
   constant-bound domain, "[min:max]" otherwise, or "[<unknown>]" when
   no domain is available.  */

static void
dump_array_domain (pretty_printer *pp, tree domain, int spc, dump_flags_t flags)
{
  pp_left_bracket (pp);
  if (domain)
    {
      tree min = TYPE_MIN_VALUE (domain);
      tree max = TYPE_MAX_VALUE (domain);

      /* Zero-based domain with a host-representable bound prints as the
	 element count (max + 1), matching C array syntax.  */
      if (min && max
	  && integer_zerop (min)
	  && tree_fits_shwi_p (max))
	pp_wide_integer (pp, tree_to_shwi (max) + 1);
      else
	{
	  if (min)
	    dump_generic_node (pp, min, spc, flags, false);
	  pp_colon (pp);
	  if (max)
	    dump_generic_node (pp, max, spc, flags, false);
	}
    }
  else
    pp_string (pp, "<unknown>");
  pp_right_bracket (pp);
}

/* Dump OpenMP clause CLAUSE.  PP, CLAUSE, SPC and FLAGS are as in
   dump_generic_node.
*/ static void dump_omp_clause (pretty_printer *pp, tree clause, int spc, dump_flags_t flags) { const char *name; switch (OMP_CLAUSE_CODE (clause)) { case OMP_CLAUSE_PRIVATE: name = "private"; goto print_remap; case OMP_CLAUSE_SHARED: name = "shared"; goto print_remap; case OMP_CLAUSE_FIRSTPRIVATE: name = "firstprivate"; goto print_remap; case OMP_CLAUSE_LASTPRIVATE: name = "lastprivate"; goto print_remap; case OMP_CLAUSE_COPYIN: name = "copyin"; goto print_remap; case OMP_CLAUSE_COPYPRIVATE: name = "copyprivate"; goto print_remap; case OMP_CLAUSE_UNIFORM: name = "uniform"; goto print_remap; case OMP_CLAUSE_USE_DEVICE_PTR: name = "use_device_ptr"; goto print_remap; case OMP_CLAUSE_IS_DEVICE_PTR: name = "is_device_ptr"; goto print_remap; case OMP_CLAUSE__LOOPTEMP_: name = "_looptemp_"; goto print_remap; case OMP_CLAUSE_TO_DECLARE: name = "to"; goto print_remap; case OMP_CLAUSE_LINK: name = "link"; goto print_remap; print_remap: pp_string (pp, name); pp_left_paren (pp); dump_generic_node (pp, OMP_CLAUSE_DECL (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE_REDUCTION: pp_string (pp, "reduction("); if (OMP_CLAUSE_REDUCTION_CODE (clause) != ERROR_MARK) { pp_string (pp, op_symbol_code (OMP_CLAUSE_REDUCTION_CODE (clause))); pp_colon (pp); } dump_generic_node (pp, OMP_CLAUSE_DECL (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE_IF: pp_string (pp, "if("); switch (OMP_CLAUSE_IF_MODIFIER (clause)) { case ERROR_MARK: break; case OMP_PARALLEL: pp_string (pp, "parallel:"); break; case OMP_TASK: pp_string (pp, "task:"); break; case OMP_TASKLOOP: pp_string (pp, "taskloop:"); break; case OMP_TARGET_DATA: pp_string (pp, "target data:"); break; case OMP_TARGET: pp_string (pp, "target:"); break; case OMP_TARGET_UPDATE: pp_string (pp, "target update:"); break; case OMP_TARGET_ENTER_DATA: pp_string (pp, "target enter data:"); break; case OMP_TARGET_EXIT_DATA: pp_string (pp, "target exit data:"); break; default: gcc_unreachable (); } 
dump_generic_node (pp, OMP_CLAUSE_IF_EXPR (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE_NUM_THREADS: pp_string (pp, "num_threads("); dump_generic_node (pp, OMP_CLAUSE_NUM_THREADS_EXPR (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE_NOWAIT: pp_string (pp, "nowait"); break; case OMP_CLAUSE_ORDERED: pp_string (pp, "ordered"); if (OMP_CLAUSE_ORDERED_EXPR (clause)) { pp_left_paren (pp); dump_generic_node (pp, OMP_CLAUSE_ORDERED_EXPR (clause), spc, flags, false); pp_right_paren (pp); } break; case OMP_CLAUSE_DEFAULT: pp_string (pp, "default("); switch (OMP_CLAUSE_DEFAULT_KIND (clause)) { case OMP_CLAUSE_DEFAULT_UNSPECIFIED: break; case OMP_CLAUSE_DEFAULT_SHARED: pp_string (pp, "shared"); break; case OMP_CLAUSE_DEFAULT_NONE: pp_string (pp, "none"); break; case OMP_CLAUSE_DEFAULT_PRIVATE: pp_string (pp, "private"); break; case OMP_CLAUSE_DEFAULT_FIRSTPRIVATE: pp_string (pp, "firstprivate"); break; case OMP_CLAUSE_DEFAULT_PRESENT: pp_string (pp, "present"); break; default: gcc_unreachable (); } pp_right_paren (pp); break; case OMP_CLAUSE_SCHEDULE: pp_string (pp, "schedule("); if (OMP_CLAUSE_SCHEDULE_KIND (clause) & (OMP_CLAUSE_SCHEDULE_MONOTONIC | OMP_CLAUSE_SCHEDULE_NONMONOTONIC)) { if (OMP_CLAUSE_SCHEDULE_KIND (clause) & OMP_CLAUSE_SCHEDULE_MONOTONIC) pp_string (pp, "monotonic"); else pp_string (pp, "nonmonotonic"); if (OMP_CLAUSE_SCHEDULE_SIMD (clause)) pp_comma (pp); else pp_colon (pp); } if (OMP_CLAUSE_SCHEDULE_SIMD (clause)) pp_string (pp, "simd:"); switch (OMP_CLAUSE_SCHEDULE_KIND (clause) & OMP_CLAUSE_SCHEDULE_MASK) { case OMP_CLAUSE_SCHEDULE_STATIC: pp_string (pp, "static"); break; case OMP_CLAUSE_SCHEDULE_DYNAMIC: pp_string (pp, "dynamic"); break; case OMP_CLAUSE_SCHEDULE_GUIDED: pp_string (pp, "guided"); break; case OMP_CLAUSE_SCHEDULE_RUNTIME: pp_string (pp, "runtime"); break; case OMP_CLAUSE_SCHEDULE_AUTO: pp_string (pp, "auto"); break; default: gcc_unreachable (); } if (OMP_CLAUSE_SCHEDULE_CHUNK_EXPR 
(clause)) { pp_comma (pp); dump_generic_node (pp, OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clause), spc, flags, false); } pp_right_paren (pp); break; case OMP_CLAUSE_UNTIED: pp_string (pp, "untied"); break; case OMP_CLAUSE_COLLAPSE: pp_string (pp, "collapse("); dump_generic_node (pp, OMP_CLAUSE_COLLAPSE_EXPR (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE_FINAL: pp_string (pp, "final("); dump_generic_node (pp, OMP_CLAUSE_FINAL_EXPR (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE_MERGEABLE: pp_string (pp, "mergeable"); break; case OMP_CLAUSE_LINEAR: pp_string (pp, "linear("); switch (OMP_CLAUSE_LINEAR_KIND (clause)) { case OMP_CLAUSE_LINEAR_DEFAULT: break; case OMP_CLAUSE_LINEAR_REF: pp_string (pp, "ref("); break; case OMP_CLAUSE_LINEAR_VAL: pp_string (pp, "val("); break; case OMP_CLAUSE_LINEAR_UVAL: pp_string (pp, "uval("); break; default: gcc_unreachable (); } dump_generic_node (pp, OMP_CLAUSE_DECL (clause), spc, flags, false); if (OMP_CLAUSE_LINEAR_KIND (clause) != OMP_CLAUSE_LINEAR_DEFAULT) pp_right_paren (pp); pp_colon (pp); dump_generic_node (pp, OMP_CLAUSE_LINEAR_STEP (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE_ALIGNED: pp_string (pp, "aligned("); dump_generic_node (pp, OMP_CLAUSE_DECL (clause), spc, flags, false); if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause)) { pp_colon (pp); dump_generic_node (pp, OMP_CLAUSE_ALIGNED_ALIGNMENT (clause), spc, flags, false); } pp_right_paren (pp); break; case OMP_CLAUSE_DEPEND: pp_string (pp, "depend("); switch (OMP_CLAUSE_DEPEND_KIND (clause)) { case OMP_CLAUSE_DEPEND_IN: pp_string (pp, "in"); break; case OMP_CLAUSE_DEPEND_OUT: pp_string (pp, "out"); break; case OMP_CLAUSE_DEPEND_INOUT: pp_string (pp, "inout"); break; case OMP_CLAUSE_DEPEND_SOURCE: pp_string (pp, "source)"); return; case OMP_CLAUSE_DEPEND_SINK: pp_string (pp, "sink:"); for (tree t = OMP_CLAUSE_DECL (clause); t; t = TREE_CHAIN (t)) if (TREE_CODE (t) == TREE_LIST) { dump_generic_node (pp, 
TREE_VALUE (t), spc, flags, false); if (TREE_PURPOSE (t) != integer_zero_node) { if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (t)) pp_minus (pp); else pp_plus (pp); dump_generic_node (pp, TREE_PURPOSE (t), spc, flags, false); } if (TREE_CHAIN (t)) pp_comma (pp); } else gcc_unreachable (); pp_right_paren (pp); return; default: gcc_unreachable (); } pp_colon (pp); dump_generic_node (pp, OMP_CLAUSE_DECL (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE_MAP: pp_string (pp, "map("); switch (OMP_CLAUSE_MAP_KIND (clause)) { case GOMP_MAP_ALLOC: case GOMP_MAP_POINTER: pp_string (pp, "alloc"); break; case GOMP_MAP_TO: case GOMP_MAP_TO_PSET: pp_string (pp, "to"); break; case GOMP_MAP_FROM: pp_string (pp, "from"); break; case GOMP_MAP_TOFROM: pp_string (pp, "tofrom"); break; case GOMP_MAP_FORCE_ALLOC: pp_string (pp, "force_alloc"); break; case GOMP_MAP_FORCE_TO: pp_string (pp, "force_to"); break; case GOMP_MAP_FORCE_FROM: pp_string (pp, "force_from"); break; case GOMP_MAP_FORCE_TOFROM: pp_string (pp, "force_tofrom"); break; case GOMP_MAP_FORCE_PRESENT: pp_string (pp, "force_present"); break; case GOMP_MAP_DELETE: pp_string (pp, "delete"); break; case GOMP_MAP_FORCE_DEVICEPTR: pp_string (pp, "force_deviceptr"); break; case GOMP_MAP_ALWAYS_TO: pp_string (pp, "always,to"); break; case GOMP_MAP_ALWAYS_FROM: pp_string (pp, "always,from"); break; case GOMP_MAP_ALWAYS_TOFROM: pp_string (pp, "always,tofrom"); break; case GOMP_MAP_RELEASE: pp_string (pp, "release"); break; case GOMP_MAP_FIRSTPRIVATE_POINTER: pp_string (pp, "firstprivate"); break; case GOMP_MAP_FIRSTPRIVATE_REFERENCE: pp_string (pp, "firstprivate ref"); break; case GOMP_MAP_STRUCT: pp_string (pp, "struct"); break; case GOMP_MAP_ALWAYS_POINTER: pp_string (pp, "always_pointer"); break; case GOMP_MAP_DEVICE_RESIDENT: pp_string (pp, "device_resident"); break; case GOMP_MAP_LINK: pp_string (pp, "link"); break; default: gcc_unreachable (); } pp_colon (pp); dump_generic_node (pp, OMP_CLAUSE_DECL (clause), spc, 
flags, false); print_clause_size: if (OMP_CLAUSE_SIZE (clause)) { switch (OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_MAP ? OMP_CLAUSE_MAP_KIND (clause) : GOMP_MAP_TO) { case GOMP_MAP_POINTER: case GOMP_MAP_FIRSTPRIVATE_POINTER: case GOMP_MAP_FIRSTPRIVATE_REFERENCE: case GOMP_MAP_ALWAYS_POINTER: pp_string (pp, " [pointer assign, bias: "); break; case GOMP_MAP_TO_PSET: pp_string (pp, " [pointer set, len: "); break; default: pp_string (pp, " [len: "); break; } dump_generic_node (pp, OMP_CLAUSE_SIZE (clause), spc, flags, false); pp_right_bracket (pp); } pp_right_paren (pp); break; case OMP_CLAUSE_FROM: pp_string (pp, "from("); dump_generic_node (pp, OMP_CLAUSE_DECL (clause), spc, flags, false); goto print_clause_size; case OMP_CLAUSE_TO: pp_string (pp, "to("); dump_generic_node (pp, OMP_CLAUSE_DECL (clause), spc, flags, false); goto print_clause_size; case OMP_CLAUSE__CACHE_: pp_string (pp, "("); dump_generic_node (pp, OMP_CLAUSE_DECL (clause), spc, flags, false); goto print_clause_size; case OMP_CLAUSE_NUM_TEAMS: pp_string (pp, "num_teams("); dump_generic_node (pp, OMP_CLAUSE_NUM_TEAMS_EXPR (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE_THREAD_LIMIT: pp_string (pp, "thread_limit("); dump_generic_node (pp, OMP_CLAUSE_THREAD_LIMIT_EXPR (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE_DEVICE: pp_string (pp, "device("); dump_generic_node (pp, OMP_CLAUSE_DEVICE_ID (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE_DIST_SCHEDULE: pp_string (pp, "dist_schedule(static"); if (OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (clause)) { pp_comma (pp); dump_generic_node (pp, OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (clause), spc, flags, false); } pp_right_paren (pp); break; case OMP_CLAUSE_PROC_BIND: pp_string (pp, "proc_bind("); switch (OMP_CLAUSE_PROC_BIND_KIND (clause)) { case OMP_CLAUSE_PROC_BIND_MASTER: pp_string (pp, "master"); break; case OMP_CLAUSE_PROC_BIND_CLOSE: pp_string (pp, "close"); break; case 
OMP_CLAUSE_PROC_BIND_SPREAD: pp_string (pp, "spread"); break; default: gcc_unreachable (); } pp_right_paren (pp); break; case OMP_CLAUSE_SAFELEN: pp_string (pp, "safelen("); dump_generic_node (pp, OMP_CLAUSE_SAFELEN_EXPR (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE_SIMDLEN: pp_string (pp, "simdlen("); dump_generic_node (pp, OMP_CLAUSE_SIMDLEN_EXPR (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE_PRIORITY: pp_string (pp, "priority("); dump_generic_node (pp, OMP_CLAUSE_PRIORITY_EXPR (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE_GRAINSIZE: pp_string (pp, "grainsize("); dump_generic_node (pp, OMP_CLAUSE_GRAINSIZE_EXPR (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE_NUM_TASKS: pp_string (pp, "num_tasks("); dump_generic_node (pp, OMP_CLAUSE_NUM_TASKS_EXPR (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE_HINT: pp_string (pp, "hint("); dump_generic_node (pp, OMP_CLAUSE_HINT_EXPR (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE_DEFAULTMAP: pp_string (pp, "defaultmap(tofrom:scalar)"); break; case OMP_CLAUSE__SIMDUID_: pp_string (pp, "_simduid_("); dump_generic_node (pp, OMP_CLAUSE__SIMDUID__DECL (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE__SIMT_: pp_string (pp, "_simt_"); break; case OMP_CLAUSE_GANG: pp_string (pp, "gang"); if (OMP_CLAUSE_GANG_EXPR (clause) != NULL_TREE) { pp_string (pp, "(num: "); dump_generic_node (pp, OMP_CLAUSE_GANG_EXPR (clause), spc, flags, false); } if (OMP_CLAUSE_GANG_STATIC_EXPR (clause) != NULL_TREE) { if (OMP_CLAUSE_GANG_EXPR (clause) == NULL_TREE) pp_left_paren (pp); else pp_space (pp); pp_string (pp, "static:"); if (OMP_CLAUSE_GANG_STATIC_EXPR (clause) == integer_minus_one_node) pp_character (pp, '*'); else dump_generic_node (pp, OMP_CLAUSE_GANG_STATIC_EXPR (clause), spc, flags, false); } if (OMP_CLAUSE_GANG_EXPR (clause) != NULL_TREE || 
OMP_CLAUSE_GANG_STATIC_EXPR (clause) != NULL_TREE) pp_right_paren (pp); break; case OMP_CLAUSE_ASYNC: pp_string (pp, "async"); if (OMP_CLAUSE_ASYNC_EXPR (clause)) { pp_character(pp, '('); dump_generic_node (pp, OMP_CLAUSE_ASYNC_EXPR (clause), spc, flags, false); pp_character(pp, ')'); } break; case OMP_CLAUSE_AUTO: case OMP_CLAUSE_SEQ: pp_string (pp, omp_clause_code_name[OMP_CLAUSE_CODE (clause)]); break; case OMP_CLAUSE_WAIT: pp_string (pp, "wait("); dump_generic_node (pp, OMP_CLAUSE_WAIT_EXPR (clause), spc, flags, false); pp_character(pp, ')'); break; case OMP_CLAUSE_WORKER: pp_string (pp, "worker"); if (OMP_CLAUSE_WORKER_EXPR (clause) != NULL_TREE) { pp_left_paren (pp); dump_generic_node (pp, OMP_CLAUSE_WORKER_EXPR (clause), spc, flags, false); pp_right_paren (pp); } break; case OMP_CLAUSE_VECTOR: pp_string (pp, "vector"); if (OMP_CLAUSE_VECTOR_EXPR (clause) != NULL_TREE) { pp_left_paren (pp); dump_generic_node (pp, OMP_CLAUSE_VECTOR_EXPR (clause), spc, flags, false); pp_right_paren (pp); } break; case OMP_CLAUSE_NUM_GANGS: pp_string (pp, "num_gangs("); dump_generic_node (pp, OMP_CLAUSE_NUM_GANGS_EXPR (clause), spc, flags, false); pp_character (pp, ')'); break; case OMP_CLAUSE_NUM_WORKERS: pp_string (pp, "num_workers("); dump_generic_node (pp, OMP_CLAUSE_NUM_WORKERS_EXPR (clause), spc, flags, false); pp_character (pp, ')'); break; case OMP_CLAUSE_VECTOR_LENGTH: pp_string (pp, "vector_length("); dump_generic_node (pp, OMP_CLAUSE_VECTOR_LENGTH_EXPR (clause), spc, flags, false); pp_character (pp, ')'); break; case OMP_CLAUSE_INBRANCH: pp_string (pp, "inbranch"); break; case OMP_CLAUSE_NOTINBRANCH: pp_string (pp, "notinbranch"); break; case OMP_CLAUSE_FOR: pp_string (pp, "for"); break; case OMP_CLAUSE_PARALLEL: pp_string (pp, "parallel"); break; case OMP_CLAUSE_SECTIONS: pp_string (pp, "sections"); break; case OMP_CLAUSE_TASKGROUP: pp_string (pp, "taskgroup"); break; case OMP_CLAUSE_NOGROUP: pp_string (pp, "nogroup"); break; case OMP_CLAUSE_THREADS: pp_string (pp, 
"threads"); break; case OMP_CLAUSE_SIMD: pp_string (pp, "simd"); break; case OMP_CLAUSE_INDEPENDENT: pp_string (pp, "independent"); break; case OMP_CLAUSE_TILE: pp_string (pp, "tile("); dump_generic_node (pp, OMP_CLAUSE_TILE_LIST (clause), spc, flags, false); pp_right_paren (pp); break; case OMP_CLAUSE__GRIDDIM_: pp_string (pp, "_griddim_("); pp_unsigned_wide_integer (pp, OMP_CLAUSE__GRIDDIM__DIMENSION (clause)); pp_colon (pp); dump_generic_node (pp, OMP_CLAUSE__GRIDDIM__SIZE (clause), spc, flags, false); pp_comma (pp); dump_generic_node (pp, OMP_CLAUSE__GRIDDIM__GROUP (clause), spc, flags, false); pp_right_paren (pp); break; default: /* Should never happen. */ dump_generic_node (pp, clause, spc, flags, false); break; } } /* Dump the list of OpenMP clauses. PP, SPC and FLAGS are as in dump_generic_node. */ void dump_omp_clauses (pretty_printer *pp, tree clause, int spc, dump_flags_t flags) { if (clause == NULL) return; pp_space (pp); while (1) { dump_omp_clause (pp, clause, spc, flags); clause = OMP_CLAUSE_CHAIN (clause); if (clause == NULL) return; pp_space (pp); } } /* Dump location LOC to PP. */ void dump_location (pretty_printer *pp, location_t loc) { expanded_location xloc = expand_location (loc); pp_left_bracket (pp); if (xloc.file) { pp_string (pp, xloc.file); pp_string (pp, ":"); } pp_decimal_int (pp, xloc.line); pp_colon (pp); pp_decimal_int (pp, xloc.column); pp_string (pp, "] "); } /* Dump lexical block BLOCK. PP, SPC and FLAGS are as in dump_generic_node. 
*/ static void dump_block_node (pretty_printer *pp, tree block, int spc, dump_flags_t flags) { tree t; pp_printf (pp, "BLOCK #%d ", BLOCK_NUMBER (block)); if (flags & TDF_ADDRESS) pp_printf (pp, "[%p] ", (void *) block); if (BLOCK_ABSTRACT (block)) pp_string (pp, "[abstract] "); if (TREE_ASM_WRITTEN (block)) pp_string (pp, "[written] "); if (flags & TDF_SLIM) return; if (BLOCK_SOURCE_LOCATION (block)) dump_location (pp, BLOCK_SOURCE_LOCATION (block)); newline_and_indent (pp, spc + 2); if (BLOCK_SUPERCONTEXT (block)) { pp_string (pp, "SUPERCONTEXT: "); dump_generic_node (pp, BLOCK_SUPERCONTEXT (block), 0, flags | TDF_SLIM, false); newline_and_indent (pp, spc + 2); } if (BLOCK_SUBBLOCKS (block)) { pp_string (pp, "SUBBLOCKS: "); for (t = BLOCK_SUBBLOCKS (block); t; t = BLOCK_CHAIN (t)) { dump_generic_node (pp, t, 0, flags | TDF_SLIM, false); pp_space (pp); } newline_and_indent (pp, spc + 2); } if (BLOCK_CHAIN (block)) { pp_string (pp, "SIBLINGS: "); for (t = BLOCK_CHAIN (block); t; t = BLOCK_CHAIN (t)) { dump_generic_node (pp, t, 0, flags | TDF_SLIM, false); pp_space (pp); } newline_and_indent (pp, spc + 2); } if (BLOCK_VARS (block)) { pp_string (pp, "VARS: "); for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t)) { dump_generic_node (pp, t, 0, flags, false); pp_space (pp); } newline_and_indent (pp, spc + 2); } if (vec_safe_length (BLOCK_NONLOCALIZED_VARS (block)) > 0) { unsigned i; vec<tree, va_gc> *nlv = BLOCK_NONLOCALIZED_VARS (block); pp_string (pp, "NONLOCALIZED_VARS: "); FOR_EACH_VEC_ELT (*nlv, i, t) { dump_generic_node (pp, t, 0, flags, false); pp_space (pp); } newline_and_indent (pp, spc + 2); } if (BLOCK_ABSTRACT_ORIGIN (block)) { pp_string (pp, "ABSTRACT_ORIGIN: "); dump_generic_node (pp, BLOCK_ABSTRACT_ORIGIN (block), 0, flags | TDF_SLIM, false); newline_and_indent (pp, spc + 2); } if (BLOCK_FRAGMENT_ORIGIN (block)) { pp_string (pp, "FRAGMENT_ORIGIN: "); dump_generic_node (pp, BLOCK_FRAGMENT_ORIGIN (block), 0, flags | TDF_SLIM, false); newline_and_indent (pp, 
spc + 2); } if (BLOCK_FRAGMENT_CHAIN (block)) { pp_string (pp, "FRAGMENT_CHAIN: "); for (t = BLOCK_FRAGMENT_CHAIN (block); t; t = BLOCK_FRAGMENT_CHAIN (t)) { dump_generic_node (pp, t, 0, flags | TDF_SLIM, false); pp_space (pp); } newline_and_indent (pp, spc + 2); } } /* Dump the node NODE on the pretty_printer PP, SPC spaces of indent. FLAGS specifies details to show in the dump (see TDF_* in dumpfile.h). If IS_STMT is true, the object printed is considered to be a statement and it is terminated by ';' if appropriate. */ int dump_generic_node (pretty_printer *pp, tree node, int spc, dump_flags_t flags, bool is_stmt) { tree type; tree op0, op1; const char *str; bool is_expr; enum tree_code code; if (node == NULL_TREE) return spc; is_expr = EXPR_P (node); if (is_stmt && (flags & TDF_STMTADDR)) pp_printf (pp, "<&%p> ", (void *)node); if ((flags & TDF_LINENO) && EXPR_HAS_LOCATION (node)) dump_location (pp, EXPR_LOCATION (node)); code = TREE_CODE (node); switch (code) { case ERROR_MARK: pp_string (pp, "<<< error >>>"); break; case IDENTIFIER_NODE: pp_tree_identifier (pp, node); break; case TREE_LIST: while (node && node != error_mark_node) { if (TREE_PURPOSE (node)) { dump_generic_node (pp, TREE_PURPOSE (node), spc, flags, false); pp_space (pp); } dump_generic_node (pp, TREE_VALUE (node), spc, flags, false); node = TREE_CHAIN (node); if (node && TREE_CODE (node) == TREE_LIST) { pp_comma (pp); pp_space (pp); } } break; case TREE_BINFO: dump_generic_node (pp, BINFO_TYPE (node), spc, flags, false); break; case TREE_VEC: { size_t i; if (TREE_VEC_LENGTH (node) > 0) { size_t len = TREE_VEC_LENGTH (node); for (i = 0; i < len - 1; i++) { dump_generic_node (pp, TREE_VEC_ELT (node, i), spc, flags, false); pp_comma (pp); pp_space (pp); } dump_generic_node (pp, TREE_VEC_ELT (node, len - 1), spc, flags, false); } } break; case VOID_TYPE: case POINTER_BOUNDS_TYPE: case INTEGER_TYPE: case REAL_TYPE: case FIXED_POINT_TYPE: case COMPLEX_TYPE: case VECTOR_TYPE: case ENUMERAL_TYPE: case 
BOOLEAN_TYPE: { unsigned int quals = TYPE_QUALS (node); enum tree_code_class tclass; if (quals & TYPE_QUAL_ATOMIC) pp_string (pp, "atomic "); if (quals & TYPE_QUAL_CONST) pp_string (pp, "const "); else if (quals & TYPE_QUAL_VOLATILE) pp_string (pp, "volatile "); else if (quals & TYPE_QUAL_RESTRICT) pp_string (pp, "restrict "); if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (node))) { pp_string (pp, "<address-space-"); pp_decimal_int (pp, TYPE_ADDR_SPACE (node)); pp_string (pp, "> "); } tclass = TREE_CODE_CLASS (TREE_CODE (node)); if (tclass == tcc_declaration) { if (DECL_NAME (node)) dump_decl_name (pp, node, flags); else pp_string (pp, "<unnamed type decl>"); } else if (tclass == tcc_type) { if (TYPE_NAME (node)) { if (TREE_CODE (TYPE_NAME (node)) == IDENTIFIER_NODE) pp_tree_identifier (pp, TYPE_NAME (node)); else if (TREE_CODE (TYPE_NAME (node)) == TYPE_DECL && DECL_NAME (TYPE_NAME (node))) dump_decl_name (pp, TYPE_NAME (node), flags); else pp_string (pp, "<unnamed type>"); } else if (TREE_CODE (node) == VECTOR_TYPE) { pp_string (pp, "vector"); pp_left_paren (pp); pp_wide_integer (pp, TYPE_VECTOR_SUBPARTS (node)); pp_string (pp, ") "); dump_generic_node (pp, TREE_TYPE (node), spc, flags, false); } else if (TREE_CODE (node) == INTEGER_TYPE) { if (TYPE_PRECISION (node) == CHAR_TYPE_SIZE) pp_string (pp, (TYPE_UNSIGNED (node) ? "unsigned char" : "signed char")); else if (TYPE_PRECISION (node) == SHORT_TYPE_SIZE) pp_string (pp, (TYPE_UNSIGNED (node) ? "unsigned short" : "signed short")); else if (TYPE_PRECISION (node) == INT_TYPE_SIZE) pp_string (pp, (TYPE_UNSIGNED (node) ? "unsigned int" : "signed int")); else if (TYPE_PRECISION (node) == LONG_TYPE_SIZE) pp_string (pp, (TYPE_UNSIGNED (node) ? "unsigned long" : "signed long")); else if (TYPE_PRECISION (node) == LONG_LONG_TYPE_SIZE) pp_string (pp, (TYPE_UNSIGNED (node) ? 
"unsigned long long" : "signed long long")); else if (TYPE_PRECISION (node) >= CHAR_TYPE_SIZE && pow2p_hwi (TYPE_PRECISION (node))) { pp_string (pp, (TYPE_UNSIGNED (node) ? "uint" : "int")); pp_decimal_int (pp, TYPE_PRECISION (node)); pp_string (pp, "_t"); } else { pp_string (pp, (TYPE_UNSIGNED (node) ? "<unnamed-unsigned:" : "<unnamed-signed:")); pp_decimal_int (pp, TYPE_PRECISION (node)); pp_greater (pp); } } else if (TREE_CODE (node) == COMPLEX_TYPE) { pp_string (pp, "__complex__ "); dump_generic_node (pp, TREE_TYPE (node), spc, flags, false); } else if (TREE_CODE (node) == REAL_TYPE) { pp_string (pp, "<float:"); pp_decimal_int (pp, TYPE_PRECISION (node)); pp_greater (pp); } else if (TREE_CODE (node) == FIXED_POINT_TYPE) { pp_string (pp, "<fixed-point-"); pp_string (pp, TYPE_SATURATING (node) ? "sat:" : "nonsat:"); pp_decimal_int (pp, TYPE_PRECISION (node)); pp_greater (pp); } else if (TREE_CODE (node) == VOID_TYPE) pp_string (pp, "void"); else pp_string (pp, "<unnamed type>"); } break; } case POINTER_TYPE: case REFERENCE_TYPE: str = (TREE_CODE (node) == POINTER_TYPE ? 
"*" : "&"); if (TREE_TYPE (node) == NULL) { pp_string (pp, str); pp_string (pp, "<null type>"); } else if (TREE_CODE (TREE_TYPE (node)) == FUNCTION_TYPE) { tree fnode = TREE_TYPE (node); dump_generic_node (pp, TREE_TYPE (fnode), spc, flags, false); pp_space (pp); pp_left_paren (pp); pp_string (pp, str); if (TYPE_IDENTIFIER (node)) dump_generic_node (pp, TYPE_NAME (node), spc, flags, false); else if (flags & TDF_NOUID) pp_printf (pp, "<Txxxx>"); else pp_printf (pp, "<T%x>", TYPE_UID (node)); pp_right_paren (pp); dump_function_declaration (pp, fnode, spc, flags); } else { unsigned int quals = TYPE_QUALS (node); dump_generic_node (pp, TREE_TYPE (node), spc, flags, false); pp_space (pp); pp_string (pp, str); if (quals & TYPE_QUAL_CONST) pp_string (pp, " const"); if (quals & TYPE_QUAL_VOLATILE) pp_string (pp, " volatile"); if (quals & TYPE_QUAL_RESTRICT) pp_string (pp, " restrict"); if (!ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (node))) { pp_string (pp, " <address-space-"); pp_decimal_int (pp, TYPE_ADDR_SPACE (node)); pp_greater (pp); } if (TYPE_REF_CAN_ALIAS_ALL (node)) pp_string (pp, " {ref-all}"); } break; case OFFSET_TYPE: NIY; break; case MEM_REF: { if (flags & TDF_GIMPLE) { pp_string (pp, "__MEM <"); dump_generic_node (pp, TREE_TYPE (node), spc, flags | TDF_SLIM, false); if (TYPE_ALIGN (TREE_TYPE (node)) != TYPE_ALIGN (TYPE_MAIN_VARIANT (TREE_TYPE (node)))) { pp_string (pp, ", "); pp_decimal_int (pp, TYPE_ALIGN (TREE_TYPE (node))); } pp_greater (pp); pp_string (pp, " ("); if (TREE_TYPE (TREE_OPERAND (node, 0)) != TREE_TYPE (TREE_OPERAND (node, 1))) { pp_left_paren (pp); dump_generic_node (pp, TREE_TYPE (TREE_OPERAND (node, 1)), spc, flags | TDF_SLIM, false); pp_right_paren (pp); } dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags | TDF_SLIM, false); if (! 
integer_zerop (TREE_OPERAND (node, 1))) { pp_string (pp, " + "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags | TDF_SLIM, false); } pp_right_paren (pp); } else if (integer_zerop (TREE_OPERAND (node, 1)) /* Dump the types of INTEGER_CSTs explicitly, for we can't infer them and MEM_ATTR caching will share MEM_REFs with differently-typed op0s. */ && TREE_CODE (TREE_OPERAND (node, 0)) != INTEGER_CST /* Released SSA_NAMES have no TREE_TYPE. */ && TREE_TYPE (TREE_OPERAND (node, 0)) != NULL_TREE /* Same pointer types, but ignoring POINTER_TYPE vs. REFERENCE_TYPE. */ && (TREE_TYPE (TREE_TYPE (TREE_OPERAND (node, 0))) == TREE_TYPE (TREE_TYPE (TREE_OPERAND (node, 1)))) && (TYPE_MODE (TREE_TYPE (TREE_OPERAND (node, 0))) == TYPE_MODE (TREE_TYPE (TREE_OPERAND (node, 1)))) && (TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (node, 0))) == TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (node, 1)))) /* Same value types ignoring qualifiers. */ && (TYPE_MAIN_VARIANT (TREE_TYPE (node)) == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (TREE_OPERAND (node, 1))))) && (!(flags & TDF_ALIAS) || MR_DEPENDENCE_CLIQUE (node) == 0)) { if (TREE_CODE (TREE_OPERAND (node, 0)) != ADDR_EXPR) { pp_star (pp); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); } else dump_generic_node (pp, TREE_OPERAND (TREE_OPERAND (node, 0), 0), spc, flags, false); } else { tree ptype; pp_string (pp, "MEM["); pp_left_paren (pp); ptype = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_OPERAND (node, 1))); dump_generic_node (pp, ptype, spc, flags | TDF_SLIM, false); pp_right_paren (pp); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); if (!integer_zerop (TREE_OPERAND (node, 1))) { pp_string (pp, " + "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); } if ((flags & TDF_ALIAS) && MR_DEPENDENCE_CLIQUE (node) != 0) { pp_string (pp, " clique "); pp_unsigned_wide_integer (pp, MR_DEPENDENCE_CLIQUE (node)); pp_string (pp, " base "); pp_unsigned_wide_integer (pp, 
MR_DEPENDENCE_BASE (node)); } pp_right_bracket (pp); } break; } case TARGET_MEM_REF: { const char *sep = ""; tree tmp; pp_string (pp, "MEM["); if (TREE_CODE (TMR_BASE (node)) == ADDR_EXPR) { pp_string (pp, sep); sep = ", "; pp_string (pp, "symbol: "); dump_generic_node (pp, TREE_OPERAND (TMR_BASE (node), 0), spc, flags, false); } else { pp_string (pp, sep); sep = ", "; pp_string (pp, "base: "); dump_generic_node (pp, TMR_BASE (node), spc, flags, false); } tmp = TMR_INDEX2 (node); if (tmp) { pp_string (pp, sep); sep = ", "; pp_string (pp, "base: "); dump_generic_node (pp, tmp, spc, flags, false); } tmp = TMR_INDEX (node); if (tmp) { pp_string (pp, sep); sep = ", "; pp_string (pp, "index: "); dump_generic_node (pp, tmp, spc, flags, false); } tmp = TMR_STEP (node); if (tmp) { pp_string (pp, sep); sep = ", "; pp_string (pp, "step: "); dump_generic_node (pp, tmp, spc, flags, false); } tmp = TMR_OFFSET (node); if (tmp) { pp_string (pp, sep); sep = ", "; pp_string (pp, "offset: "); dump_generic_node (pp, tmp, spc, flags, false); } pp_right_bracket (pp); } break; case ARRAY_TYPE: { tree tmp; /* Print the innermost component type. */ for (tmp = TREE_TYPE (node); TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp)) ; dump_generic_node (pp, tmp, spc, flags, false); /* Print the dimensions. */ for (tmp = node; TREE_CODE (tmp) == ARRAY_TYPE; tmp = TREE_TYPE (tmp)) dump_array_domain (pp, TYPE_DOMAIN (tmp), spc, flags); break; } case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: { unsigned int quals = TYPE_QUALS (node); if (quals & TYPE_QUAL_ATOMIC) pp_string (pp, "atomic "); if (quals & TYPE_QUAL_CONST) pp_string (pp, "const "); if (quals & TYPE_QUAL_VOLATILE) pp_string (pp, "volatile "); /* Print the name of the structure. 
*/ if (TREE_CODE (node) == RECORD_TYPE) pp_string (pp, "struct "); else if (TREE_CODE (node) == UNION_TYPE) pp_string (pp, "union "); if (TYPE_NAME (node)) dump_generic_node (pp, TYPE_NAME (node), spc, flags, false); else if (!(flags & TDF_SLIM)) /* FIXME: If we eliminate the 'else' above and attempt to show the fields for named types, we may get stuck following a cycle of pointers to structs. The alleged self-reference check in print_struct_decl will not detect cycles involving more than one pointer or struct type. */ print_struct_decl (pp, node, spc, flags); break; } case LANG_TYPE: NIY; break; case INTEGER_CST: if (flags & TDF_GIMPLE && (POINTER_TYPE_P (TREE_TYPE (node)) || (TYPE_PRECISION (TREE_TYPE (node)) < TYPE_PRECISION (integer_type_node)) || exact_log2 (TYPE_PRECISION (TREE_TYPE (node))) == -1)) { pp_string (pp, "_Literal ("); dump_generic_node (pp, TREE_TYPE (node), spc, flags, false); pp_string (pp, ") "); } if (TREE_CODE (TREE_TYPE (node)) == POINTER_TYPE && ! (flags & TDF_GIMPLE)) { /* In the case of a pointer, one may want to divide by the size of the pointed-to type. Unfortunately, this not straightforward. The C front-end maps expressions (int *) 5 int *p; (p + 5) in such a way that the two INTEGER_CST nodes for "5" have different values but identical types. In the latter case, the 5 is multiplied by sizeof (int) in c-common.c (pointer_int_sum) to convert it to a byte address, and yet the type of the node is left unchanged. Argh. What is consistent though is that the number value corresponds to bytes (UNITS) offset. 
NB: Neither of the following divisors can be trivially used to recover the original literal: TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (node))) TYPE_PRECISION (TREE_TYPE (TREE_TYPE (node))) */ pp_wide_integer (pp, TREE_INT_CST_LOW (node)); pp_string (pp, "B"); /* pseudo-unit */ } else if (tree_fits_shwi_p (node)) pp_wide_integer (pp, tree_to_shwi (node)); else if (tree_fits_uhwi_p (node)) pp_unsigned_wide_integer (pp, tree_to_uhwi (node)); else { wide_int val = wi::to_wide (node); if (wi::neg_p (val, TYPE_SIGN (TREE_TYPE (node)))) { pp_minus (pp); val = -val; } print_hex (val, pp_buffer (pp)->digit_buffer); pp_string (pp, pp_buffer (pp)->digit_buffer); } if ((flags & TDF_GIMPLE) && ! (POINTER_TYPE_P (TREE_TYPE (node)) || (TYPE_PRECISION (TREE_TYPE (node)) < TYPE_PRECISION (integer_type_node)) || exact_log2 (TYPE_PRECISION (TREE_TYPE (node))) == -1)) { if (TYPE_UNSIGNED (TREE_TYPE (node))) pp_character (pp, 'u'); if (TYPE_PRECISION (TREE_TYPE (node)) == TYPE_PRECISION (unsigned_type_node)) ; else if (TYPE_PRECISION (TREE_TYPE (node)) == TYPE_PRECISION (long_unsigned_type_node)) pp_character (pp, 'l'); else if (TYPE_PRECISION (TREE_TYPE (node)) == TYPE_PRECISION (long_long_unsigned_type_node)) pp_string (pp, "ll"); } if (TREE_OVERFLOW (node)) pp_string (pp, "(OVF)"); break; case POLY_INT_CST: pp_string (pp, "POLY_INT_CST ["); dump_generic_node (pp, POLY_INT_CST_COEFF (node, 0), spc, flags, false); for (unsigned int i = 1; i < NUM_POLY_INT_COEFFS; ++i) { pp_string (pp, ", "); dump_generic_node (pp, POLY_INT_CST_COEFF (node, i), spc, flags, false); } pp_string (pp, "]"); break; case REAL_CST: /* Code copied from print_node. */ { REAL_VALUE_TYPE d; if (TREE_OVERFLOW (node)) pp_string (pp, " overflow"); d = TREE_REAL_CST (node); if (REAL_VALUE_ISINF (d)) pp_string (pp, REAL_VALUE_NEGATIVE (d) ? 
" -Inf" : " Inf"); else if (REAL_VALUE_ISNAN (d)) pp_string (pp, " Nan"); else { char string[100]; real_to_decimal (string, &d, sizeof (string), 0, 1); pp_string (pp, string); } break; } case FIXED_CST: { char string[100]; fixed_to_decimal (string, TREE_FIXED_CST_PTR (node), sizeof (string)); pp_string (pp, string); break; } case COMPLEX_CST: pp_string (pp, "__complex__ ("); dump_generic_node (pp, TREE_REALPART (node), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_IMAGPART (node), spc, flags, false); pp_right_paren (pp); break; case STRING_CST: pp_string (pp, "\""); pretty_print_string (pp, TREE_STRING_POINTER (node)); pp_string (pp, "\""); break; case VECTOR_CST: { unsigned i; pp_string (pp, "{ "); unsigned HOST_WIDE_INT nunits; if (!VECTOR_CST_NELTS (node).is_constant (&nunits)) nunits = vector_cst_encoded_nelts (node); for (i = 0; i < nunits; ++i) { if (i != 0) pp_string (pp, ", "); dump_generic_node (pp, VECTOR_CST_ELT (node, i), spc, flags, false); } if (!VECTOR_CST_NELTS (node).is_constant ()) pp_string (pp, ", ..."); pp_string (pp, " }"); } break; case FUNCTION_TYPE: case METHOD_TYPE: dump_generic_node (pp, TREE_TYPE (node), spc, flags, false); pp_space (pp); if (TREE_CODE (node) == METHOD_TYPE) { if (TYPE_METHOD_BASETYPE (node)) dump_generic_node (pp, TYPE_NAME (TYPE_METHOD_BASETYPE (node)), spc, flags, false); else pp_string (pp, "<null method basetype>"); pp_colon_colon (pp); } if (TYPE_IDENTIFIER (node)) dump_generic_node (pp, TYPE_NAME (node), spc, flags, false); else if (TYPE_NAME (node) && DECL_NAME (TYPE_NAME (node))) dump_decl_name (pp, TYPE_NAME (node), flags); else if (flags & TDF_NOUID) pp_printf (pp, "<Txxxx>"); else pp_printf (pp, "<T%x>", TYPE_UID (node)); dump_function_declaration (pp, node, spc, flags); break; case FUNCTION_DECL: case CONST_DECL: dump_decl_name (pp, node, flags); break; case LABEL_DECL: if (DECL_NAME (node)) dump_decl_name (pp, node, flags); else if (LABEL_DECL_UID (node) != -1) { if (flags & 
TDF_GIMPLE) pp_printf (pp, "L%d", (int) LABEL_DECL_UID (node)); else pp_printf (pp, "<L%d>", (int) LABEL_DECL_UID (node)); } else { if (flags & TDF_NOUID) pp_string (pp, "<D.xxxx>"); else { if (flags & TDF_GIMPLE) pp_printf (pp, "<D%u>", DECL_UID (node)); else pp_printf (pp, "<D.%u>", DECL_UID (node)); } } break; case TYPE_DECL: if (DECL_IS_BUILTIN (node)) { /* Don't print the declaration of built-in types. */ break; } if (DECL_NAME (node)) dump_decl_name (pp, node, flags); else if (TYPE_NAME (TREE_TYPE (node)) != node) { pp_string (pp, (TREE_CODE (TREE_TYPE (node)) == UNION_TYPE ? "union" : "struct ")); dump_generic_node (pp, TREE_TYPE (node), spc, flags, false); } else pp_string (pp, "<anon>"); break; case VAR_DECL: case PARM_DECL: case FIELD_DECL: case DEBUG_EXPR_DECL: case NAMESPACE_DECL: case NAMELIST_DECL: dump_decl_name (pp, node, flags); break; case RESULT_DECL: pp_string (pp, "<retval>"); break; case COMPONENT_REF: op0 = TREE_OPERAND (node, 0); str = "."; if (op0 && (TREE_CODE (op0) == INDIRECT_REF || (TREE_CODE (op0) == MEM_REF && TREE_CODE (TREE_OPERAND (op0, 0)) != ADDR_EXPR && integer_zerop (TREE_OPERAND (op0, 1)) /* Dump the types of INTEGER_CSTs explicitly, for we can't infer them and MEM_ATTR caching will share MEM_REFs with differently-typed op0s. */ && TREE_CODE (TREE_OPERAND (op0, 0)) != INTEGER_CST /* Released SSA_NAMES have no TREE_TYPE. */ && TREE_TYPE (TREE_OPERAND (op0, 0)) != NULL_TREE /* Same pointer types, but ignoring POINTER_TYPE vs. REFERENCE_TYPE. */ && (TREE_TYPE (TREE_TYPE (TREE_OPERAND (op0, 0))) == TREE_TYPE (TREE_TYPE (TREE_OPERAND (op0, 1)))) && (TYPE_MODE (TREE_TYPE (TREE_OPERAND (op0, 0))) == TYPE_MODE (TREE_TYPE (TREE_OPERAND (op0, 1)))) && (TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (op0, 0))) == TYPE_REF_CAN_ALIAS_ALL (TREE_TYPE (TREE_OPERAND (op0, 1)))) /* Same value types ignoring qualifiers. 
*/ && (TYPE_MAIN_VARIANT (TREE_TYPE (op0)) == TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (TREE_OPERAND (op0, 1))))) && MR_DEPENDENCE_CLIQUE (op0) == 0))) { op0 = TREE_OPERAND (op0, 0); str = "->"; } if (op_prio (op0) < op_prio (node)) pp_left_paren (pp); dump_generic_node (pp, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_right_paren (pp); pp_string (pp, str); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); op0 = component_ref_field_offset (node); if (op0 && TREE_CODE (op0) != INTEGER_CST) { pp_string (pp, "{off: "); dump_generic_node (pp, op0, spc, flags, false); pp_right_brace (pp); } break; case BIT_FIELD_REF: pp_string (pp, "BIT_FIELD_REF <"); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false); pp_greater (pp); break; case BIT_INSERT_EXPR: pp_string (pp, "BIT_INSERT_EXPR <"); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false); pp_string (pp, " ("); if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (node, 1)))) pp_decimal_int (pp, TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (node, 1)))); else dump_generic_node (pp, TYPE_SIZE (TREE_TYPE (TREE_OPERAND (node, 1))), spc, flags, false); pp_string (pp, " bits)>"); break; case ARRAY_REF: case ARRAY_RANGE_REF: op0 = TREE_OPERAND (node, 0); if (op_prio (op0) < op_prio (node)) pp_left_paren (pp); dump_generic_node (pp, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_right_paren (pp); pp_left_bracket (pp); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); if (TREE_CODE (node) == ARRAY_RANGE_REF) pp_string (pp, " ..."); pp_right_bracket (pp); op0 = array_ref_low_bound (node); op1 = 
array_ref_element_size (node); if (!integer_zerop (op0) || TREE_OPERAND (node, 2) || TREE_OPERAND (node, 3)) { pp_string (pp, "{lb: "); dump_generic_node (pp, op0, spc, flags, false); pp_string (pp, " sz: "); dump_generic_node (pp, op1, spc, flags, false); pp_right_brace (pp); } break; case CONSTRUCTOR: { unsigned HOST_WIDE_INT ix; tree field, val; bool is_struct_init = false; bool is_array_init = false; widest_int curidx; pp_left_brace (pp); if (TREE_CLOBBER_P (node)) pp_string (pp, "CLOBBER"); else if (TREE_CODE (TREE_TYPE (node)) == RECORD_TYPE || TREE_CODE (TREE_TYPE (node)) == UNION_TYPE) is_struct_init = true; else if (TREE_CODE (TREE_TYPE (node)) == ARRAY_TYPE && TYPE_DOMAIN (TREE_TYPE (node)) && TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (node))) && TREE_CODE (TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (node)))) == INTEGER_CST) { tree minv = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (node))); is_array_init = true; curidx = wi::to_widest (minv); } FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (node), ix, field, val) { if (field) { if (is_struct_init) { pp_dot (pp); dump_generic_node (pp, field, spc, flags, false); pp_equal (pp); } else if (is_array_init && (TREE_CODE (field) != INTEGER_CST || curidx != wi::to_widest (field))) { pp_left_bracket (pp); if (TREE_CODE (field) == RANGE_EXPR) { dump_generic_node (pp, TREE_OPERAND (field, 0), spc, flags, false); pp_string (pp, " ... 
"); dump_generic_node (pp, TREE_OPERAND (field, 1), spc, flags, false); if (TREE_CODE (TREE_OPERAND (field, 1)) == INTEGER_CST) curidx = wi::to_widest (TREE_OPERAND (field, 1)); } else dump_generic_node (pp, field, spc, flags, false); if (TREE_CODE (field) == INTEGER_CST) curidx = wi::to_widest (field); pp_string (pp, "]="); } } if (is_array_init) curidx += 1; if (val && TREE_CODE (val) == ADDR_EXPR) if (TREE_CODE (TREE_OPERAND (val, 0)) == FUNCTION_DECL) val = TREE_OPERAND (val, 0); if (val && TREE_CODE (val) == FUNCTION_DECL) dump_decl_name (pp, val, flags); else dump_generic_node (pp, val, spc, flags, false); if (ix != CONSTRUCTOR_NELTS (node) - 1) { pp_comma (pp); pp_space (pp); } } pp_right_brace (pp); } break; case COMPOUND_EXPR: { tree *tp; if (flags & TDF_SLIM) { pp_string (pp, "<COMPOUND_EXPR>"); break; } dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, !(flags & TDF_SLIM)); if (flags & TDF_SLIM) newline_and_indent (pp, spc); else { pp_comma (pp); pp_space (pp); } for (tp = &TREE_OPERAND (node, 1); TREE_CODE (*tp) == COMPOUND_EXPR; tp = &TREE_OPERAND (*tp, 1)) { dump_generic_node (pp, TREE_OPERAND (*tp, 0), spc, flags, !(flags & TDF_SLIM)); if (flags & TDF_SLIM) newline_and_indent (pp, spc); else { pp_comma (pp); pp_space (pp); } } dump_generic_node (pp, *tp, spc, flags, !(flags & TDF_SLIM)); } break; case STATEMENT_LIST: { tree_stmt_iterator si; bool first = true; if (flags & TDF_SLIM) { pp_string (pp, "<STATEMENT_LIST>"); break; } for (si = tsi_start (node); !tsi_end_p (si); tsi_next (&si)) { if (!first) newline_and_indent (pp, spc); else first = false; dump_generic_node (pp, tsi_stmt (si), spc, flags, true); } } break; case MODIFY_EXPR: case INIT_EXPR: dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_space (pp); pp_equal (pp); pp_space (pp); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); break; case TARGET_EXPR: pp_string (pp, "TARGET_EXPR <"); dump_generic_node (pp, TARGET_EXPR_SLOT (node), spc, flags, 
false); pp_comma (pp); pp_space (pp); dump_generic_node (pp, TARGET_EXPR_INITIAL (node), spc, flags, false); pp_greater (pp); break; case DECL_EXPR: print_declaration (pp, DECL_EXPR_DECL (node), spc, flags); is_stmt = false; break; case COND_EXPR: if (TREE_TYPE (node) == NULL || TREE_TYPE (node) == void_type_node) { pp_string (pp, "if ("); dump_generic_node (pp, COND_EXPR_COND (node), spc, flags, false); pp_right_paren (pp); /* The lowered cond_exprs should always be printed in full. */ if (COND_EXPR_THEN (node) && (IS_EMPTY_STMT (COND_EXPR_THEN (node)) || TREE_CODE (COND_EXPR_THEN (node)) == GOTO_EXPR) && COND_EXPR_ELSE (node) && (IS_EMPTY_STMT (COND_EXPR_ELSE (node)) || TREE_CODE (COND_EXPR_ELSE (node)) == GOTO_EXPR)) { pp_space (pp); dump_generic_node (pp, COND_EXPR_THEN (node), 0, flags, true); if (!IS_EMPTY_STMT (COND_EXPR_ELSE (node))) { pp_string (pp, " else "); dump_generic_node (pp, COND_EXPR_ELSE (node), 0, flags, true); } } else if (!(flags & TDF_SLIM)) { /* Output COND_EXPR_THEN. */ if (COND_EXPR_THEN (node)) { newline_and_indent (pp, spc+2); pp_left_brace (pp); newline_and_indent (pp, spc+4); dump_generic_node (pp, COND_EXPR_THEN (node), spc+4, flags, true); newline_and_indent (pp, spc+2); pp_right_brace (pp); } /* Output COND_EXPR_ELSE. 
*/ if (COND_EXPR_ELSE (node) && !IS_EMPTY_STMT (COND_EXPR_ELSE (node))) { newline_and_indent (pp, spc); pp_string (pp, "else"); newline_and_indent (pp, spc+2); pp_left_brace (pp); newline_and_indent (pp, spc+4); dump_generic_node (pp, COND_EXPR_ELSE (node), spc+4, flags, true); newline_and_indent (pp, spc+2); pp_right_brace (pp); } } is_expr = false; } else { dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_space (pp); pp_question (pp); pp_space (pp); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); pp_space (pp); pp_colon (pp); pp_space (pp); dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false); } break; case BIND_EXPR: pp_left_brace (pp); if (!(flags & TDF_SLIM)) { if (BIND_EXPR_VARS (node)) { pp_newline (pp); for (op0 = BIND_EXPR_VARS (node); op0; op0 = DECL_CHAIN (op0)) { print_declaration (pp, op0, spc+2, flags); pp_newline (pp); } } newline_and_indent (pp, spc+2); dump_generic_node (pp, BIND_EXPR_BODY (node), spc+2, flags, true); newline_and_indent (pp, spc); pp_right_brace (pp); } is_expr = false; break; case CALL_EXPR: if (CALL_EXPR_FN (node) != NULL_TREE) print_call_name (pp, CALL_EXPR_FN (node), flags); else pp_string (pp, internal_fn_name (CALL_EXPR_IFN (node))); /* Print parameters. 
*/ pp_space (pp); pp_left_paren (pp); { tree arg; call_expr_arg_iterator iter; FOR_EACH_CALL_EXPR_ARG (arg, iter, node) { dump_generic_node (pp, arg, spc, flags, false); if (more_call_expr_args_p (&iter)) { pp_comma (pp); pp_space (pp); } } } if (CALL_EXPR_VA_ARG_PACK (node)) { if (call_expr_nargs (node) > 0) { pp_comma (pp); pp_space (pp); } pp_string (pp, "__builtin_va_arg_pack ()"); } pp_right_paren (pp); op1 = CALL_EXPR_STATIC_CHAIN (node); if (op1) { pp_string (pp, " [static-chain: "); dump_generic_node (pp, op1, spc, flags, false); pp_right_bracket (pp); } if (CALL_EXPR_RETURN_SLOT_OPT (node)) pp_string (pp, " [return slot optimization]"); if (CALL_EXPR_TAILCALL (node)) pp_string (pp, " [tail call]"); break; case WITH_CLEANUP_EXPR: NIY; break; case CLEANUP_POINT_EXPR: pp_string (pp, "<<cleanup_point "); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, ">>"); break; case PLACEHOLDER_EXPR: pp_string (pp, "<PLACEHOLDER_EXPR "); dump_generic_node (pp, TREE_TYPE (node), spc, flags, false); pp_greater (pp); break; /* Binary arithmetic and logic expressions. 
*/ case WIDEN_SUM_EXPR: case WIDEN_MULT_EXPR: case MULT_EXPR: case MULT_HIGHPART_EXPR: case PLUS_EXPR: case POINTER_PLUS_EXPR: case POINTER_DIFF_EXPR: case MINUS_EXPR: case TRUNC_DIV_EXPR: case CEIL_DIV_EXPR: case FLOOR_DIV_EXPR: case ROUND_DIV_EXPR: case TRUNC_MOD_EXPR: case CEIL_MOD_EXPR: case FLOOR_MOD_EXPR: case ROUND_MOD_EXPR: case RDIV_EXPR: case EXACT_DIV_EXPR: case LSHIFT_EXPR: case RSHIFT_EXPR: case LROTATE_EXPR: case RROTATE_EXPR: case WIDEN_LSHIFT_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: case BIT_AND_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: case TRUTH_AND_EXPR: case TRUTH_OR_EXPR: case TRUTH_XOR_EXPR: case LT_EXPR: case LE_EXPR: case GT_EXPR: case GE_EXPR: case EQ_EXPR: case NE_EXPR: case UNLT_EXPR: case UNLE_EXPR: case UNGT_EXPR: case UNGE_EXPR: case UNEQ_EXPR: case LTGT_EXPR: case ORDERED_EXPR: case UNORDERED_EXPR: { const char *op = op_symbol (node); op0 = TREE_OPERAND (node, 0); op1 = TREE_OPERAND (node, 1); /* When the operands are expressions with less priority, keep semantics of the tree representation. */ if (op_prio (op0) <= op_prio (node)) { pp_left_paren (pp); dump_generic_node (pp, op0, spc, flags, false); pp_right_paren (pp); } else dump_generic_node (pp, op0, spc, flags, false); pp_space (pp); pp_string (pp, op); pp_space (pp); /* When the operands are expressions with less priority, keep semantics of the tree representation. */ if (op_prio (op1) <= op_prio (node)) { pp_left_paren (pp); dump_generic_node (pp, op1, spc, flags, false); pp_right_paren (pp); } else dump_generic_node (pp, op1, spc, flags, false); } break; /* Unary arithmetic and logic expressions. */ case NEGATE_EXPR: case BIT_NOT_EXPR: case TRUTH_NOT_EXPR: case ADDR_EXPR: case PREDECREMENT_EXPR: case PREINCREMENT_EXPR: case INDIRECT_REF: if (TREE_CODE (node) == ADDR_EXPR && (TREE_CODE (TREE_OPERAND (node, 0)) == STRING_CST || TREE_CODE (TREE_OPERAND (node, 0)) == FUNCTION_DECL)) ; /* Do not output '&' for strings and function pointers. 
*/ else pp_string (pp, op_symbol (node)); if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node)) { pp_left_paren (pp); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_right_paren (pp); } else dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); break; case POSTDECREMENT_EXPR: case POSTINCREMENT_EXPR: if (op_prio (TREE_OPERAND (node, 0)) < op_prio (node)) { pp_left_paren (pp); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_right_paren (pp); } else dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, op_symbol (node)); break; case MIN_EXPR: pp_string (pp, "MIN_EXPR <"); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); pp_greater (pp); break; case MAX_EXPR: pp_string (pp, "MAX_EXPR <"); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); pp_greater (pp); break; case ABS_EXPR: pp_string (pp, "ABS_EXPR <"); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_greater (pp); break; case RANGE_EXPR: NIY; break; case ADDR_SPACE_CONVERT_EXPR: case FIXED_CONVERT_EXPR: case FIX_TRUNC_EXPR: case FLOAT_EXPR: CASE_CONVERT: type = TREE_TYPE (node); op0 = TREE_OPERAND (node, 0); if (type != TREE_TYPE (op0)) { pp_left_paren (pp); dump_generic_node (pp, type, spc, flags, false); pp_string (pp, ") "); } if (op_prio (op0) < op_prio (node)) pp_left_paren (pp); dump_generic_node (pp, op0, spc, flags, false); if (op_prio (op0) < op_prio (node)) pp_right_paren (pp); break; case VIEW_CONVERT_EXPR: pp_string (pp, "VIEW_CONVERT_EXPR<"); dump_generic_node (pp, TREE_TYPE (node), spc, flags, false); pp_string (pp, ">("); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_right_paren (pp); break; case PAREN_EXPR: pp_string (pp, "(("); dump_generic_node (pp, 
TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, "))"); break; case NON_LVALUE_EXPR: pp_string (pp, "NON_LVALUE_EXPR <"); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_greater (pp); break; case SAVE_EXPR: pp_string (pp, "SAVE_EXPR <"); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_greater (pp); break; case COMPLEX_EXPR: pp_string (pp, "COMPLEX_EXPR <"); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); pp_greater (pp); break; case CONJ_EXPR: pp_string (pp, "CONJ_EXPR <"); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_greater (pp); break; case REALPART_EXPR: if (flags & TDF_GIMPLE) { pp_string (pp, "__real "); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); } else { pp_string (pp, "REALPART_EXPR <"); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_greater (pp); } break; case IMAGPART_EXPR: if (flags & TDF_GIMPLE) { pp_string (pp, "__imag "); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); } else { pp_string (pp, "IMAGPART_EXPR <"); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_greater (pp); } break; case VA_ARG_EXPR: pp_string (pp, "VA_ARG_EXPR <"); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_greater (pp); break; case TRY_FINALLY_EXPR: case TRY_CATCH_EXPR: pp_string (pp, "try"); newline_and_indent (pp, spc+2); pp_left_brace (pp); newline_and_indent (pp, spc+4); dump_generic_node (pp, TREE_OPERAND (node, 0), spc+4, flags, true); newline_and_indent (pp, spc+2); pp_right_brace (pp); newline_and_indent (pp, spc); pp_string (pp, (TREE_CODE (node) == TRY_CATCH_EXPR) ? 
"catch" : "finally"); newline_and_indent (pp, spc+2); pp_left_brace (pp); newline_and_indent (pp, spc+4); dump_generic_node (pp, TREE_OPERAND (node, 1), spc+4, flags, true); newline_and_indent (pp, spc+2); pp_right_brace (pp); is_expr = false; break; case CATCH_EXPR: pp_string (pp, "catch ("); dump_generic_node (pp, CATCH_TYPES (node), spc+2, flags, false); pp_right_paren (pp); newline_and_indent (pp, spc+2); pp_left_brace (pp); newline_and_indent (pp, spc+4); dump_generic_node (pp, CATCH_BODY (node), spc+4, flags, true); newline_and_indent (pp, spc+2); pp_right_brace (pp); is_expr = false; break; case EH_FILTER_EXPR: pp_string (pp, "<<<eh_filter ("); dump_generic_node (pp, EH_FILTER_TYPES (node), spc+2, flags, false); pp_string (pp, ")>>>"); newline_and_indent (pp, spc+2); pp_left_brace (pp); newline_and_indent (pp, spc+4); dump_generic_node (pp, EH_FILTER_FAILURE (node), spc+4, flags, true); newline_and_indent (pp, spc+2); pp_right_brace (pp); is_expr = false; break; case LABEL_EXPR: op0 = TREE_OPERAND (node, 0); /* If this is for break or continue, don't bother printing it. 
*/ if (DECL_NAME (op0)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (op0)); if (strcmp (name, "break") == 0 || strcmp (name, "continue") == 0) break; } dump_generic_node (pp, op0, spc, flags, false); pp_colon (pp); if (DECL_NONLOCAL (op0)) pp_string (pp, " [non-local]"); break; case LOOP_EXPR: pp_string (pp, "while (1)"); if (!(flags & TDF_SLIM)) { newline_and_indent (pp, spc+2); pp_left_brace (pp); newline_and_indent (pp, spc+4); dump_generic_node (pp, LOOP_EXPR_BODY (node), spc+4, flags, true); newline_and_indent (pp, spc+2); pp_right_brace (pp); } is_expr = false; break; case PREDICT_EXPR: pp_string (pp, "// predicted "); if (PREDICT_EXPR_OUTCOME (node)) pp_string (pp, "likely by "); else pp_string (pp, "unlikely by "); pp_string (pp, predictor_name (PREDICT_EXPR_PREDICTOR (node))); pp_string (pp, " predictor."); break; case ANNOTATE_EXPR: pp_string (pp, "ANNOTATE_EXPR <"); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); switch ((enum annot_expr_kind) TREE_INT_CST_LOW (TREE_OPERAND (node, 1))) { case annot_expr_ivdep_kind: pp_string (pp, ", ivdep"); break; case annot_expr_unroll_kind: pp_printf (pp, ", unroll %d", (int) TREE_INT_CST_LOW (TREE_OPERAND (node, 2))); break; case annot_expr_no_vector_kind: pp_string (pp, ", no-vector"); break; case annot_expr_vector_kind: pp_string (pp, ", vector"); break; case annot_expr_parallel_kind: pp_string (pp, ", parallel"); break; default: gcc_unreachable (); } pp_greater (pp); break; case RETURN_EXPR: pp_string (pp, "return"); op0 = TREE_OPERAND (node, 0); if (op0) { pp_space (pp); if (TREE_CODE (op0) == MODIFY_EXPR) dump_generic_node (pp, TREE_OPERAND (op0, 1), spc, flags, false); else dump_generic_node (pp, op0, spc, flags, false); } break; case EXIT_EXPR: pp_string (pp, "if ("); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, ") break"); break; case SWITCH_EXPR: pp_string (pp, "switch ("); dump_generic_node (pp, SWITCH_COND (node), spc, flags, false); 
pp_right_paren (pp); if (!(flags & TDF_SLIM)) { newline_and_indent (pp, spc+2); pp_left_brace (pp); if (SWITCH_BODY (node)) { newline_and_indent (pp, spc+4); dump_generic_node (pp, SWITCH_BODY (node), spc+4, flags, true); } newline_and_indent (pp, spc+2); pp_right_brace (pp); } is_expr = false; break; case GOTO_EXPR: op0 = GOTO_DESTINATION (node); if (TREE_CODE (op0) != SSA_NAME && DECL_P (op0) && DECL_NAME (op0)) { const char *name = IDENTIFIER_POINTER (DECL_NAME (op0)); if (strcmp (name, "break") == 0 || strcmp (name, "continue") == 0) { pp_string (pp, name); break; } } pp_string (pp, "goto "); dump_generic_node (pp, op0, spc, flags, false); break; case ASM_EXPR: pp_string (pp, "__asm__"); if (ASM_VOLATILE_P (node)) pp_string (pp, " __volatile__"); pp_left_paren (pp); dump_generic_node (pp, ASM_STRING (node), spc, flags, false); pp_colon (pp); dump_generic_node (pp, ASM_OUTPUTS (node), spc, flags, false); pp_colon (pp); dump_generic_node (pp, ASM_INPUTS (node), spc, flags, false); if (ASM_CLOBBERS (node)) { pp_colon (pp); dump_generic_node (pp, ASM_CLOBBERS (node), spc, flags, false); } pp_right_paren (pp); break; case CASE_LABEL_EXPR: if (CASE_LOW (node) && CASE_HIGH (node)) { pp_string (pp, "case "); dump_generic_node (pp, CASE_LOW (node), spc, flags, false); pp_string (pp, " ... "); dump_generic_node (pp, CASE_HIGH (node), spc, flags, false); } else if (CASE_LOW (node)) { pp_string (pp, "case "); dump_generic_node (pp, CASE_LOW (node), spc, flags, false); } else pp_string (pp, "default"); pp_colon (pp); break; case OBJ_TYPE_REF: pp_string (pp, "OBJ_TYPE_REF("); dump_generic_node (pp, OBJ_TYPE_REF_EXPR (node), spc, flags, false); pp_semicolon (pp); /* We omit the class type for -fcompare-debug because we may drop TYPE_BINFO early depending on debug info, and then virtual_method_call_p would return false, whereas when TYPE_BINFO is preserved it may still return true and then we'd print the class type. 
Compare tree and rtl dumps for libstdc++-prettyprinters/shared_ptr.cc with and without -g, for example, at occurrences of OBJ_TYPE_REF. */ if (!(flags & (TDF_SLIM | TDF_COMPARE_DEBUG)) && virtual_method_call_p (node)) { pp_string (pp, "("); dump_generic_node (pp, obj_type_ref_class (node), spc, flags, false); pp_string (pp, ")"); } dump_generic_node (pp, OBJ_TYPE_REF_OBJECT (node), spc, flags, false); pp_arrow (pp); dump_generic_node (pp, OBJ_TYPE_REF_TOKEN (node), spc, flags, false); pp_right_paren (pp); break; case SSA_NAME: if (SSA_NAME_IDENTIFIER (node)) { if ((flags & TDF_NOUID) && SSA_NAME_VAR (node) && DECL_NAMELESS (SSA_NAME_VAR (node))) dump_fancy_name (pp, SSA_NAME_IDENTIFIER (node)); else if (! (flags & TDF_GIMPLE) || SSA_NAME_VAR (node)) dump_generic_node (pp, SSA_NAME_IDENTIFIER (node), spc, flags, false); } pp_underscore (pp); pp_decimal_int (pp, SSA_NAME_VERSION (node)); if (SSA_NAME_IS_DEFAULT_DEF (node)) pp_string (pp, "(D)"); if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (node)) pp_string (pp, "(ab)"); break; case WITH_SIZE_EXPR: pp_string (pp, "WITH_SIZE_EXPR <"); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); pp_greater (pp); break; case ASSERT_EXPR: pp_string (pp, "ASSERT_EXPR <"); dump_generic_node (pp, ASSERT_EXPR_VAR (node), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, ASSERT_EXPR_COND (node), spc, flags, false); pp_greater (pp); break; case SCEV_KNOWN: pp_string (pp, "scev_known"); break; case SCEV_NOT_KNOWN: pp_string (pp, "scev_not_known"); break; case POLYNOMIAL_CHREC: pp_left_brace (pp); dump_generic_node (pp, CHREC_LEFT (node), spc, flags, false); pp_string (pp, ", +, "); dump_generic_node (pp, CHREC_RIGHT (node), spc, flags, false); pp_printf (pp, "}_%u", CHREC_VARIABLE (node)); is_stmt = false; break; case REALIGN_LOAD_EXPR: pp_string (pp, "REALIGN_LOAD <"); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, 
false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false); pp_greater (pp); break; case VEC_COND_EXPR: pp_string (pp, " VEC_COND_EXPR < "); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, " , "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); pp_string (pp, " , "); dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false); pp_string (pp, " > "); break; case VEC_PERM_EXPR: pp_string (pp, " VEC_PERM_EXPR < "); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, " , "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); pp_string (pp, " , "); dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false); pp_string (pp, " > "); break; case DOT_PROD_EXPR: pp_string (pp, " DOT_PROD_EXPR < "); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false); pp_string (pp, " > "); break; case WIDEN_MULT_PLUS_EXPR: pp_string (pp, " WIDEN_MULT_PLUS_EXPR < "); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false); pp_string (pp, " > "); break; case WIDEN_MULT_MINUS_EXPR: pp_string (pp, " WIDEN_MULT_MINUS_EXPR < "); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false); pp_string (pp, " > "); break; case FMA_EXPR: pp_string (pp, " FMA_EXPR < "); dump_generic_node (pp, TREE_OPERAND (node, 0), 
spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 2), spc, flags, false); pp_string (pp, " > "); break; case OACC_PARALLEL: pp_string (pp, "#pragma acc parallel"); goto dump_omp_clauses_body; case OACC_KERNELS: pp_string (pp, "#pragma acc kernels"); goto dump_omp_clauses_body; case OACC_DATA: pp_string (pp, "#pragma acc data"); dump_omp_clauses (pp, OACC_DATA_CLAUSES (node), spc, flags); goto dump_omp_body; case OACC_HOST_DATA: pp_string (pp, "#pragma acc host_data"); dump_omp_clauses (pp, OACC_HOST_DATA_CLAUSES (node), spc, flags); goto dump_omp_body; case OACC_DECLARE: pp_string (pp, "#pragma acc declare"); dump_omp_clauses (pp, OACC_DECLARE_CLAUSES (node), spc, flags); break; case OACC_UPDATE: pp_string (pp, "#pragma acc update"); dump_omp_clauses (pp, OACC_UPDATE_CLAUSES (node), spc, flags); break; case OACC_ENTER_DATA: pp_string (pp, "#pragma acc enter data"); dump_omp_clauses (pp, OACC_ENTER_DATA_CLAUSES (node), spc, flags); break; case OACC_EXIT_DATA: pp_string (pp, "#pragma acc exit data"); dump_omp_clauses (pp, OACC_EXIT_DATA_CLAUSES (node), spc, flags); break; case OACC_CACHE: pp_string (pp, "#pragma acc cache"); dump_omp_clauses (pp, OACC_CACHE_CLAUSES (node), spc, flags); break; case OMP_PARALLEL: pp_string (pp, "#pragma omp parallel"); dump_omp_clauses (pp, OMP_PARALLEL_CLAUSES (node), spc, flags); goto dump_omp_body; dump_omp_clauses_body: dump_omp_clauses (pp, OMP_CLAUSES (node), spc, flags); goto dump_omp_body; dump_omp_body: if (!(flags & TDF_SLIM) && OMP_BODY (node)) { newline_and_indent (pp, spc + 2); pp_left_brace (pp); newline_and_indent (pp, spc + 4); dump_generic_node (pp, OMP_BODY (node), spc + 4, flags, false); newline_and_indent (pp, spc + 2); pp_right_brace (pp); } is_expr = false; break; case OMP_TASK: pp_string (pp, "#pragma omp task"); dump_omp_clauses (pp, OMP_TASK_CLAUSES (node), spc, flags); goto dump_omp_body; 
case OMP_FOR: pp_string (pp, "#pragma omp for"); goto dump_omp_loop; case OMP_SIMD: pp_string (pp, "#pragma omp simd"); goto dump_omp_loop; case OMP_DISTRIBUTE: pp_string (pp, "#pragma omp distribute"); goto dump_omp_loop; case OMP_TASKLOOP: pp_string (pp, "#pragma omp taskloop"); goto dump_omp_loop; case OACC_LOOP: pp_string (pp, "#pragma acc loop"); goto dump_omp_loop; case OMP_TEAMS: pp_string (pp, "#pragma omp teams"); dump_omp_clauses (pp, OMP_TEAMS_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_TARGET_DATA: pp_string (pp, "#pragma omp target data"); dump_omp_clauses (pp, OMP_TARGET_DATA_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_TARGET_ENTER_DATA: pp_string (pp, "#pragma omp target enter data"); dump_omp_clauses (pp, OMP_TARGET_ENTER_DATA_CLAUSES (node), spc, flags); is_expr = false; break; case OMP_TARGET_EXIT_DATA: pp_string (pp, "#pragma omp target exit data"); dump_omp_clauses (pp, OMP_TARGET_EXIT_DATA_CLAUSES (node), spc, flags); is_expr = false; break; case OMP_TARGET: pp_string (pp, "#pragma omp target"); dump_omp_clauses (pp, OMP_TARGET_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_TARGET_UPDATE: pp_string (pp, "#pragma omp target update"); dump_omp_clauses (pp, OMP_TARGET_UPDATE_CLAUSES (node), spc, flags); is_expr = false; break; dump_omp_loop: dump_omp_clauses (pp, OMP_FOR_CLAUSES (node), spc, flags); if (!(flags & TDF_SLIM)) { int i; if (OMP_FOR_PRE_BODY (node)) { newline_and_indent (pp, spc + 2); pp_left_brace (pp); spc += 4; newline_and_indent (pp, spc); dump_generic_node (pp, OMP_FOR_PRE_BODY (node), spc, flags, false); } if (OMP_FOR_INIT (node)) { spc -= 2; for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (node)); i++) { spc += 2; newline_and_indent (pp, spc); pp_string (pp, "for ("); dump_generic_node (pp, TREE_VEC_ELT (OMP_FOR_INIT (node), i), spc, flags, false); pp_string (pp, "; "); dump_generic_node (pp, TREE_VEC_ELT (OMP_FOR_COND (node), i), spc, flags, false); pp_string (pp, "; "); dump_generic_node 
(pp, TREE_VEC_ELT (OMP_FOR_INCR (node), i), spc, flags, false); pp_right_paren (pp); } } if (OMP_FOR_BODY (node)) { newline_and_indent (pp, spc + 2); pp_left_brace (pp); newline_and_indent (pp, spc + 4); dump_generic_node (pp, OMP_FOR_BODY (node), spc + 4, flags, false); newline_and_indent (pp, spc + 2); pp_right_brace (pp); } if (OMP_FOR_INIT (node)) spc -= 2 * TREE_VEC_LENGTH (OMP_FOR_INIT (node)) - 2; if (OMP_FOR_PRE_BODY (node)) { spc -= 4; newline_and_indent (pp, spc + 2); pp_right_brace (pp); } } is_expr = false; break; case OMP_SECTIONS: pp_string (pp, "#pragma omp sections"); dump_omp_clauses (pp, OMP_SECTIONS_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_SECTION: pp_string (pp, "#pragma omp section"); goto dump_omp_body; case OMP_MASTER: pp_string (pp, "#pragma omp master"); goto dump_omp_body; case OMP_TASKGROUP: pp_string (pp, "#pragma omp taskgroup"); goto dump_omp_body; case OMP_ORDERED: pp_string (pp, "#pragma omp ordered"); dump_omp_clauses (pp, OMP_ORDERED_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_CRITICAL: pp_string (pp, "#pragma omp critical"); if (OMP_CRITICAL_NAME (node)) { pp_space (pp); pp_left_paren (pp); dump_generic_node (pp, OMP_CRITICAL_NAME (node), spc, flags, false); pp_right_paren (pp); } dump_omp_clauses (pp, OMP_CRITICAL_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_ATOMIC: pp_string (pp, "#pragma omp atomic"); if (OMP_ATOMIC_SEQ_CST (node)) pp_string (pp, " seq_cst"); newline_and_indent (pp, spc + 2); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_space (pp); pp_equal (pp); pp_space (pp); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); break; case OMP_ATOMIC_READ: pp_string (pp, "#pragma omp atomic read"); if (OMP_ATOMIC_SEQ_CST (node)) pp_string (pp, " seq_cst"); newline_and_indent (pp, spc + 2); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_space (pp); break; case OMP_ATOMIC_CAPTURE_OLD: case OMP_ATOMIC_CAPTURE_NEW: pp_string 
(pp, "#pragma omp atomic capture"); if (OMP_ATOMIC_SEQ_CST (node)) pp_string (pp, " seq_cst"); newline_and_indent (pp, spc + 2); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_space (pp); pp_equal (pp); pp_space (pp); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); break; case OMP_SINGLE: pp_string (pp, "#pragma omp single"); dump_omp_clauses (pp, OMP_SINGLE_CLAUSES (node), spc, flags); goto dump_omp_body; case OMP_CLAUSE: dump_omp_clause (pp, node, spc, flags); is_expr = false; break; case TRANSACTION_EXPR: if (TRANSACTION_EXPR_OUTER (node)) pp_string (pp, "__transaction_atomic [[outer]]"); else if (TRANSACTION_EXPR_RELAXED (node)) pp_string (pp, "__transaction_relaxed"); else pp_string (pp, "__transaction_atomic"); if (!(flags & TDF_SLIM) && TRANSACTION_EXPR_BODY (node)) { newline_and_indent (pp, spc); pp_left_brace (pp); newline_and_indent (pp, spc + 2); dump_generic_node (pp, TRANSACTION_EXPR_BODY (node), spc + 2, flags, false); newline_and_indent (pp, spc); pp_right_brace (pp); } is_expr = false; break; case VEC_SERIES_EXPR: case VEC_WIDEN_MULT_HI_EXPR: case VEC_WIDEN_MULT_LO_EXPR: case VEC_WIDEN_MULT_EVEN_EXPR: case VEC_WIDEN_MULT_ODD_EXPR: case VEC_WIDEN_LSHIFT_HI_EXPR: case VEC_WIDEN_LSHIFT_LO_EXPR: pp_space (pp); for (str = get_tree_code_name (code); *str; str++) pp_character (pp, TOUPPER (*str)); pp_string (pp, " < "); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); pp_string (pp, " > "); break; case VEC_DUPLICATE_EXPR: pp_space (pp); for (str = get_tree_code_name (code); *str; str++) pp_character (pp, TOUPPER (*str)); pp_string (pp, " < "); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, " > "); break; case VEC_UNPACK_HI_EXPR: pp_string (pp, " VEC_UNPACK_HI_EXPR < "); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, " > "); break; case 
VEC_UNPACK_LO_EXPR: pp_string (pp, " VEC_UNPACK_LO_EXPR < "); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, " > "); break; case VEC_UNPACK_FLOAT_HI_EXPR: pp_string (pp, " VEC_UNPACK_FLOAT_HI_EXPR < "); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, " > "); break; case VEC_UNPACK_FLOAT_LO_EXPR: pp_string (pp, " VEC_UNPACK_FLOAT_LO_EXPR < "); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, " > "); break; case VEC_PACK_TRUNC_EXPR: pp_string (pp, " VEC_PACK_TRUNC_EXPR < "); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); pp_string (pp, " > "); break; case VEC_PACK_SAT_EXPR: pp_string (pp, " VEC_PACK_SAT_EXPR < "); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); pp_string (pp, " > "); break; case VEC_PACK_FIX_TRUNC_EXPR: pp_string (pp, " VEC_PACK_FIX_TRUNC_EXPR < "); dump_generic_node (pp, TREE_OPERAND (node, 0), spc, flags, false); pp_string (pp, ", "); dump_generic_node (pp, TREE_OPERAND (node, 1), spc, flags, false); pp_string (pp, " > "); break; case BLOCK: dump_block_node (pp, node, spc, flags); break; case DEBUG_BEGIN_STMT: pp_string (pp, "# DEBUG BEGIN STMT"); break; default: NIY; } if (is_stmt && is_expr) pp_semicolon (pp); return spc; } /* Print the declaration of a variable. 
   PP is the output pretty-printer, T the declaration node, SPC the current
   indentation level, and FLAGS the active dump flags.  */

void
print_declaration (pretty_printer *pp, tree t, int spc, dump_flags_t flags)
{
  INDENT (spc);

  /* Fortran namelists are printed by name only.  */
  if (TREE_CODE(t) == NAMELIST_DECL)
    {
      pp_string(pp, "namelist ");
      dump_decl_name (pp, t, flags);
      pp_semicolon (pp);
      return;
    }

  if (TREE_CODE (t) == TYPE_DECL)
    pp_string (pp, "typedef ");

  /* DECL_REGISTER is only meaningful for decls with RTL info.  */
  if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_DECL_WRTL)
      && DECL_REGISTER (t))
    pp_string (pp, "register ");

  if (TREE_PUBLIC (t) && DECL_EXTERNAL (t))
    pp_string (pp, "extern ");
  else if (TREE_STATIC (t))
    pp_string (pp, "static ");

  /* Print the type and name.  */
  if (TREE_TYPE (t) && TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
    {
      tree tmp;

      /* Print array's type.  Strip all array dimensions to reach the
	 innermost element type first.  */
      tmp = TREE_TYPE (t);
      while (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE)
	tmp = TREE_TYPE (tmp);
      dump_generic_node (pp, TREE_TYPE (tmp), spc, flags, false);

      /* Print variable's name.  */
      pp_space (pp);
      dump_generic_node (pp, t, spc, flags, false);

      /* Print the dimensions, outermost first.  */
      tmp = TREE_TYPE (t);
      while (TREE_CODE (tmp) == ARRAY_TYPE)
	{
	  dump_array_domain (pp, TYPE_DOMAIN (tmp), spc, flags);
	  tmp = TREE_TYPE (tmp);
	}
    }
  else if (TREE_CODE (t) == FUNCTION_DECL)
    {
      /* Return type, then name, then the parameter list.  */
      dump_generic_node (pp, TREE_TYPE (TREE_TYPE (t)), spc, flags, false);
      pp_space (pp);
      dump_decl_name (pp, t, flags);
      dump_function_declaration (pp, TREE_TYPE (t), spc, flags);
    }
  else
    {
      /* Print type declaration.  */
      dump_generic_node (pp, TREE_TYPE (t), spc, flags, false);

      /* Print variable's name.  */
      pp_space (pp);
      dump_generic_node (pp, t, spc, flags, false);
    }

  /* Variables pinned to a hard register get their asm register name.  */
  if (VAR_P (t) && DECL_HARD_REGISTER (t))
    {
      pp_string (pp, " __asm__ ");
      pp_left_paren (pp);
      dump_generic_node (pp, DECL_ASSEMBLER_NAME (t), spc, flags, false);
      pp_right_paren (pp);
    }

  /* The initial value of a function serves to determine whether the function
     is declared or defined.  So the following does not apply to function
     nodes.  */
  if (TREE_CODE (t) != FUNCTION_DECL)
    {
      /* Print the initial value, unless a slim dump was requested.  */
      if (DECL_INITIAL (t))
	{
	  pp_space (pp);
	  pp_equal (pp);
	  pp_space (pp);
	  if (!(flags & TDF_SLIM))
	    dump_generic_node (pp, DECL_INITIAL (t), spc, flags, false);
	  else
	    pp_string (pp, "<<< omitted >>>");
	}
    }

  if (VAR_P (t) && DECL_HAS_VALUE_EXPR_P (t))
    {
      pp_string (pp, " [value-expr: ");
      dump_generic_node (pp, DECL_VALUE_EXPR (t), spc, flags, false);
      pp_right_bracket (pp);
    }

  pp_semicolon (pp);
}


/* Prints a structure: name, fields, and methods.
   FIXME: Still incomplete.  */

static void
print_struct_decl (pretty_printer *pp, const_tree node, int spc,
		   dump_flags_t flags)
{
  /* Print the name of the structure.  */
  if (TYPE_NAME (node))
    {
      INDENT (spc);
      if (TREE_CODE (node) == RECORD_TYPE)
	pp_string (pp, "struct ");
      else if ((TREE_CODE (node) == UNION_TYPE
		|| TREE_CODE (node) == QUAL_UNION_TYPE))
	pp_string (pp, "union ");

      dump_generic_node (pp, TYPE_NAME (node), spc, 0, false);
    }

  /* Print the contents of the structure.  */
  pp_newline (pp);
  INDENT (spc);
  pp_left_brace (pp);
  pp_newline (pp);

  /* Print the fields of the structure.  */
  {
    tree tmp;
    tmp = TYPE_FIELDS (node);
    while (tmp)
      {
	/* Avoid to print recursively the structure.  */
	/* FIXME : Not implemented correctly..., what about the case when we
	   have a cycle in the contain graph? ...
	   Maybe this could be solved by looking at the scope in which the
	   structure was declared.  */
	if (TREE_TYPE (tmp) != node
	    && (TREE_CODE (TREE_TYPE (tmp)) != POINTER_TYPE
		|| TREE_TYPE (TREE_TYPE (tmp)) != node))
	  {
	    print_declaration (pp, tmp, spc+2, flags);
	    pp_newline (pp);
	  }
	tmp = DECL_CHAIN (tmp);
      }
  }
  INDENT (spc);
  pp_right_brace (pp);
}

/* Return the priority of the operator CODE.

   From lowest to highest precedence with either left-to-right (L-R)
   or right-to-left (R-L) associativity:

     1	[L-R]	,
     2	[R-L]	= += -= *= /= %= &= ^= |= <<= >>=
     3	[R-L]	?:
     4	[L-R]	||
     5	[L-R]	&&
     6	[L-R]	|
     7	[L-R]	^
     8	[L-R]	&
     9	[L-R]	== !=
    10	[L-R]	< <= > >=
    11	[L-R]	<< >>
    12	[L-R]	+ -
    13	[L-R]	* / %
    14	[R-L]	! ~ ++ -- + - * & (type) sizeof
    15	[L-R]	fn() [] -> .
   unary +, - and * have higher precedence than the corresponding binary
   operators.  */

int
op_code_prio (enum tree_code code)
{
  switch (code)
    {
    case TREE_LIST:
    case COMPOUND_EXPR:
    case BIND_EXPR:
      return 1;

    case MODIFY_EXPR:
    case INIT_EXPR:
      return 2;

    case COND_EXPR:
      return 3;

    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
      return 4;

    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
      return 5;

    case BIT_IOR_EXPR:
      return 6;

    case BIT_XOR_EXPR:
    case TRUTH_XOR_EXPR:
      return 7;

    case BIT_AND_EXPR:
      return 8;

    case EQ_EXPR:
    case NE_EXPR:
      return 9;

    /* Unordered comparisons share the priority of their ordered
       counterparts.  */
    case UNLT_EXPR:
    case UNLE_EXPR:
    case UNGT_EXPR:
    case UNGE_EXPR:
    case UNEQ_EXPR:
    case LTGT_EXPR:
    case ORDERED_EXPR:
    case UNORDERED_EXPR:
    case LT_EXPR:
    case LE_EXPR:
    case GT_EXPR:
    case GE_EXPR:
      return 10;

    case LSHIFT_EXPR:
    case RSHIFT_EXPR:
    case LROTATE_EXPR:
    case RROTATE_EXPR:
    case VEC_WIDEN_LSHIFT_HI_EXPR:
    case VEC_WIDEN_LSHIFT_LO_EXPR:
    case WIDEN_LSHIFT_EXPR:
      return 11;

    case WIDEN_SUM_EXPR:
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
    case POINTER_DIFF_EXPR:
    case MINUS_EXPR:
      return 12;

    case VEC_WIDEN_MULT_HI_EXPR:
    case VEC_WIDEN_MULT_LO_EXPR:
    case WIDEN_MULT_EXPR:
    case DOT_PROD_EXPR:
    case WIDEN_MULT_PLUS_EXPR:
    case WIDEN_MULT_MINUS_EXPR:
    case MULT_EXPR:
    case MULT_HIGHPART_EXPR:
    case TRUNC_DIV_EXPR:
    case CEIL_DIV_EXPR:
    case FLOOR_DIV_EXPR:
    case ROUND_DIV_EXPR:
    case RDIV_EXPR:
    case EXACT_DIV_EXPR:
    case TRUNC_MOD_EXPR:
    case CEIL_MOD_EXPR:
    case FLOOR_MOD_EXPR:
    case ROUND_MOD_EXPR:
    case FMA_EXPR:
      return 13;

    case TRUTH_NOT_EXPR:
    case BIT_NOT_EXPR:
    case POSTINCREMENT_EXPR:
    case POSTDECREMENT_EXPR:
    case PREINCREMENT_EXPR:
    case PREDECREMENT_EXPR:
    case NEGATE_EXPR:
    case INDIRECT_REF:
    case ADDR_EXPR:
    case FLOAT_EXPR:
    CASE_CONVERT:
    case FIX_TRUNC_EXPR:
    case TARGET_EXPR:
      return 14;

    case CALL_EXPR:
    case ARRAY_REF:
    case ARRAY_RANGE_REF:
    case COMPONENT_REF:
      return 15;

    /* Special expressions.  */
    case MIN_EXPR:
    case MAX_EXPR:
    case ABS_EXPR:
    case REALPART_EXPR:
    case IMAGPART_EXPR:
    case VEC_UNPACK_HI_EXPR:
    case VEC_UNPACK_LO_EXPR:
    case VEC_UNPACK_FLOAT_HI_EXPR:
    case VEC_UNPACK_FLOAT_LO_EXPR:
    case VEC_PACK_TRUNC_EXPR:
    case VEC_PACK_SAT_EXPR:
      return 16;

    default:
      /* Return an arbitrarily high precedence to avoid surrounding single
	 VAR_DECLs in ()s.  */
      return 9999;
    }
}

/* Return the priority of the operator OP.  Look through SAVE_EXPR and
   NON_LVALUE_EXPR wrappers.  */

int
op_prio (const_tree op)
{
  enum tree_code code;

  if (op == NULL)
    return 9999;

  code = TREE_CODE (op);
  if (code == SAVE_EXPR || code == NON_LVALUE_EXPR)
    return op_prio (TREE_OPERAND (op, 0));

  return op_code_prio (code);
}

/* Return the symbol associated with operator CODE.  */

const char *
op_symbol_code (enum tree_code code)
{
  switch (code)
    {
    case MODIFY_EXPR:
      return "=";

    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
      return "||";

    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
      return "&&";

    case BIT_IOR_EXPR:
      return "|";

    case TRUTH_XOR_EXPR:
    case BIT_XOR_EXPR:
      return "^";

    case ADDR_EXPR:
    case BIT_AND_EXPR:
      return "&";

    case ORDERED_EXPR:
      return "ord";
    case UNORDERED_EXPR:
      return "unord";

    case EQ_EXPR:
      return "==";
    case UNEQ_EXPR:
      return "u==";

    case NE_EXPR:
      return "!=";

    case LT_EXPR:
      return "<";
    case UNLT_EXPR:
      return "u<";

    case LE_EXPR:
      return "<=";
    case UNLE_EXPR:
      return "u<=";

    case GT_EXPR:
      return ">";
    case UNGT_EXPR:
      return "u>";

    case GE_EXPR:
      return ">=";
    case UNGE_EXPR:
      return "u>=";

    case LTGT_EXPR:
      return "<>";

    case LSHIFT_EXPR:
      return "<<";

    case RSHIFT_EXPR:
      return ">>";

    case LROTATE_EXPR:
      return "r<<";

    case RROTATE_EXPR:
      return "r>>";

    case WIDEN_LSHIFT_EXPR:
      return "w<<";

    case POINTER_PLUS_EXPR:
      return "+";

    case PLUS_EXPR:
      return "+";

    case WIDEN_SUM_EXPR:
      return "w+";

    case WIDEN_MULT_EXPR:
      return "w*";

    case MULT_HIGHPART_EXPR:
      return "h*";

    case NEGATE_EXPR:
    case MINUS_EXPR:
    case POINTER_DIFF_EXPR:
      return "-";

    case BIT_NOT_EXPR:
      return "~";

    case TRUTH_NOT_EXPR:
      return "!";

    case MULT_EXPR:
    case INDIRECT_REF:
      return "*";

    case TRUNC_DIV_EXPR:
    case RDIV_EXPR:
      return "/";

    case CEIL_DIV_EXPR:
      return "/[cl]";

    case FLOOR_DIV_EXPR:
      return "/[fl]";

    case ROUND_DIV_EXPR:
      return "/[rd]";

    case EXACT_DIV_EXPR:
      return "/[ex]";

    case TRUNC_MOD_EXPR:
      return "%";

    case CEIL_MOD_EXPR:
      return "%[cl]";

    case FLOOR_MOD_EXPR:
      return "%[fl]";

    case ROUND_MOD_EXPR:
      return "%[rd]";

    case PREDECREMENT_EXPR:
      return " --";

    case PREINCREMENT_EXPR:
      return " ++";

    case POSTDECREMENT_EXPR:
      return "-- ";

    case POSTINCREMENT_EXPR:
      return "++ ";

    case MAX_EXPR:
      return "max";

    case MIN_EXPR:
      return "min";

    default:
      return "<<< ??? >>>";
    }
}

/* Return the symbol associated with operator OP.  */

static const char *
op_symbol (const_tree op)
{
  return op_symbol_code (TREE_CODE (op));
}

/* Prints the name of a call.  NODE is the CALL_EXPR_FN of a CALL_EXPR or
   the gimple_call_fn of a GIMPLE_CALL.  */

void
print_call_name (pretty_printer *pp, tree node, dump_flags_t flags)
{
  tree op0 = node;

  if (TREE_CODE (op0) == NON_LVALUE_EXPR)
    op0 = TREE_OPERAND (op0, 0);

 again:
  switch (TREE_CODE (op0))
    {
    case VAR_DECL:
    case PARM_DECL:
    case FUNCTION_DECL:
      dump_function_name (pp, op0, flags);
      break;

    /* Strip address-of and conversions and retry on the operand.  */
    case ADDR_EXPR:
    case INDIRECT_REF:
    CASE_CONVERT:
      op0 = TREE_OPERAND (op0, 0);
      goto again;

    case COND_EXPR:
      pp_left_paren (pp);
      dump_generic_node (pp, TREE_OPERAND (op0, 0), 0, flags, false);
      pp_string (pp, ") ? ");
      dump_generic_node (pp, TREE_OPERAND (op0, 1), 0, flags, false);
      pp_string (pp, " : ");
      dump_generic_node (pp, TREE_OPERAND (op0, 2), 0, flags, false);
      break;

    case ARRAY_REF:
      if (TREE_CODE (TREE_OPERAND (op0, 0)) == VAR_DECL)
	dump_function_name (pp, TREE_OPERAND (op0, 0), flags);
      else
	dump_generic_node (pp, op0, 0, flags, false);
      break;

    case MEM_REF:
      /* A zero-offset MEM_REF is just a dereference of its base.  */
      if (integer_zerop (TREE_OPERAND (op0, 1)))
	{
	  op0 = TREE_OPERAND (op0, 0);
	  goto again;
	}
      /* Fallthru.  */
    case COMPONENT_REF:
    case SSA_NAME:
    case OBJ_TYPE_REF:
      dump_generic_node (pp, op0, 0, flags, false);
      break;

    default:
      NIY;
    }
}

/* Parses the string STR and replaces new-lines by '\n', tabs by '\t', ...
   */

static void
pretty_print_string (pretty_printer *pp, const char *str)
{
  if (str == NULL)
    return;

  while (*str)
    {
      switch (str[0])
	{
	case '\b':
	  pp_string (pp, "\\b");
	  break;

	case '\f':
	  pp_string (pp, "\\f");
	  break;

	case '\n':
	  pp_string (pp, "\\n");
	  break;

	case '\r':
	  pp_string (pp, "\\r");
	  break;

	case '\t':
	  pp_string (pp, "\\t");
	  break;

	case '\v':
	  pp_string (pp, "\\v");
	  break;

	case '\\':
	  pp_string (pp, "\\\\");
	  break;

	case '\"':
	  pp_string (pp, "\\\"");
	  break;

	case '\'':
	  pp_string (pp, "\\'");
	  break;

	/* No need to handle \0; the loop terminates on \0.  */

	case '\1':
	  pp_string (pp, "\\1");
	  break;

	case '\2':
	  pp_string (pp, "\\2");
	  break;

	case '\3':
	  pp_string (pp, "\\3");
	  break;

	case '\4':
	  pp_string (pp, "\\4");
	  break;

	case '\5':
	  pp_string (pp, "\\5");
	  break;

	case '\6':
	  pp_string (pp, "\\6");
	  break;

	case '\7':
	  pp_string (pp, "\\7");
	  break;

	default:
	  /* Non-printable bytes are emitted as \xNN hex escapes; buf holds
	     at most "\xff" plus the terminating NUL.  */
	  if (!ISPRINT (str[0]))
	    {
	      char buf[5];
	      sprintf (buf, "\\x%x", (unsigned char)str[0]);
	      pp_string (pp, buf);
	    }
	  else
	    pp_character (pp, str[0]);
	  break;
	}
      str++;
    }
}

/* Lazily create the shared tree pretty-printer and point it at FILE.  */

static void
maybe_init_pretty_print (FILE *file)
{
  if (!tree_pp)
    {
      tree_pp = new pretty_printer ();
      pp_needs_newline (tree_pp) = true;
      pp_translate_identifiers (tree_pp) = false;
    }

  tree_pp->buffer->stream = file;
}

/* Emit a newline to PP, then indent by SPC columns.  */

static void
newline_and_indent (pretty_printer *pp, int spc)
{
  pp_newline (pp);
  INDENT (spc);
}

/* Handle the %K format for TEXT.  Separate from default_tree_printer
   so it can also be used in front ends.
   Argument is a statement from which EXPR_LOCATION and TREE_BLOCK will
   be recorded.  */

void
percent_K_format (text_info *text, tree t)
{
  text->set_location (0, EXPR_LOCATION (t), true);
  gcc_assert (pp_ti_abstract_origin (text) != NULL);
  tree block = TREE_BLOCK (t);
  *pp_ti_abstract_origin (text) = NULL;

  if (in_lto_p)
    {
      /* ???  LTO drops all BLOCK_ABSTRACT_ORIGINs apart from those
	 representing the outermost block of an inlined function.
	 So walk the BLOCK tree until we hit such a scope.  */
      while (block
	     && TREE_CODE (block) == BLOCK)
	{
	  if (inlined_function_outer_scope_p (block))
	    {
	      *pp_ti_abstract_origin (text) = block;
	      break;
	    }
	  block = BLOCK_SUPERCONTEXT (block);
	}
      return;
    }

  while (block
	 && TREE_CODE (block) == BLOCK
	 && BLOCK_ABSTRACT_ORIGIN (block))
    {
      tree ao = BLOCK_ABSTRACT_ORIGIN (block);

      /* Follow the chain of abstract origins, guarding against
	 self-referencing blocks.  */
      while (TREE_CODE (ao) == BLOCK
	     && BLOCK_ABSTRACT_ORIGIN (ao)
	     && BLOCK_ABSTRACT_ORIGIN (ao) != ao)
	ao = BLOCK_ABSTRACT_ORIGIN (ao);

      if (TREE_CODE (ao) == FUNCTION_DECL)
	{
	  *pp_ti_abstract_origin (text) = block;
	  break;
	}
      block = BLOCK_SUPERCONTEXT (block);
    }
}

/* Print the identifier ID to PRETTY-PRINTER.  */

void
pp_tree_identifier (pretty_printer *pp, tree id)
{
  if (pp_translate_identifiers (pp))
    {
      const char *text = identifier_to_locale (IDENTIFIER_POINTER (id));
      pp_append_text (pp, text, text + strlen (text));
    }
  else
    pp_append_text (pp, IDENTIFIER_POINTER (id),
		    IDENTIFIER_POINTER (id) + IDENTIFIER_LENGTH (id));
}

/* A helper function that is used to dump function information before the
   function dump.  */

void
dump_function_header (FILE *dump_file, tree fdecl, dump_flags_t flags)
{
  const char *dname, *aname;
  struct cgraph_node *node = cgraph_node::get (fdecl);
  struct function *fun = DECL_STRUCT_FUNCTION (fdecl);

  dname = lang_hooks.decl_printable_name (fdecl, 1);

  if (DECL_ASSEMBLER_NAME_SET_P (fdecl))
    aname = (IDENTIFIER_POINTER
	     (DECL_ASSEMBLER_NAME (fdecl)));
  else
    aname = "<unset-asm-name>";

  fprintf (dump_file, "\n;; Function %s (%s, funcdef_no=%d",
	   dname, aname, fun->funcdef_no);
  if (!(flags & TDF_NOUID))
    fprintf (dump_file, ", decl_uid=%d", DECL_UID (fdecl));
  if (node)
    {
      fprintf (dump_file, ", cgraph_uid=%d", node->uid);
      fprintf (dump_file, ", symbol_order=%d)%s\n\n", node->order,
               node->frequency == NODE_FREQUENCY_HOT
               ? " (hot)"
               : node->frequency == NODE_FREQUENCY_UNLIKELY_EXECUTED
               ? " (unlikely executed)"
               : node->frequency == NODE_FREQUENCY_EXECUTED_ONCE
               ? " (executed once)"
               : "");
    }
  else
    fprintf (dump_file, ")\n\n");
}

/* Dump double_int D to pretty_printer PP.  UNS is true
   if D is unsigned and false otherwise.  */
void
pp_double_int (pretty_printer *pp, double_int d, bool uns)
{
  if (d.fits_shwi ())
    pp_wide_integer (pp, d.low);
  else if (d.fits_uhwi ())
    pp_unsigned_wide_integer (pp, d.low);
  else
    {
      unsigned HOST_WIDE_INT low = d.low;
      HOST_WIDE_INT high = d.high;
      /* Manually negate so the magnitude can be printed in hex; the
	 carry from the low word is handled by the !low term.  */
      if (!uns && d.is_negative ())
	{
	  pp_minus (pp);
	  high = ~high + !low;
	  low = -low;
	}
      /* Would "%x%0*x" or "%x%*0x" get zero-padding on all systems?  */
      sprintf (pp_buffer (pp)->digit_buffer,
	       HOST_WIDE_INT_PRINT_DOUBLE_HEX,
	       (unsigned HOST_WIDE_INT) high, low);
      pp_string (pp, pp_buffer (pp)->digit_buffer);
    }
}
State.h
//===-------- State.h - OpenMP State & ICV interface ------------- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#ifndef OMPTARGET_STATE_H
#define OMPTARGET_STATE_H

#include "Debug.h"
#include "Types.h"

#pragma omp declare target

namespace _OMP {

namespace state {

// Size of the per-team shared scratchpad, fixed at compile time.
inline constexpr uint32_t SharedScratchpadSize = SHARED_SCRATCHPAD_SIZE;

/// Initialize the state machinery. Must be called by all threads.
void init(bool IsSPMD);

/// Discriminator for the ICV/state slot accessed via lookup32/lookupPtr.
enum ValueKind {
  VK_NThreads,
  VK_Level,
  VK_ActiveLevel,
  VK_MaxActiveLevels,
  VK_RunSched,
  // ---
  VK_RunSchedChunk,
  VK_ParallelRegionFn,
  VK_ParallelTeamSize,
};

/// TODO
void enterDataEnvironment(IdentTy *Ident);

/// TODO
void exitDataEnvironment();

/// RAII helper: enters a data environment on construction and exits it on
/// destruction.  (NOTE(review): "Date" looks like a typo for "Data", but the
/// name is part of the public interface and is kept as-is.)
struct DateEnvironmentRAII {
  DateEnvironmentRAII(IdentTy *Ident) { enterDataEnvironment(Ident); }
  ~DateEnvironmentRAII() { exitDataEnvironment(); }
};

/// TODO
void resetStateForThread(uint32_t TId);

// Raw accessors for a 32-bit or pointer-sized state slot selected by VK.
uint32_t &lookup32(ValueKind VK, bool IsReadonly, IdentTy *Ident);
void *&lookupPtr(ValueKind VK, bool IsReadonly);

/// A class without actual state used to provide a nice interface to lookup and
/// update ICV values we can declare in global scope.
template <typename Ty, ValueKind Kind> struct Value {
  __attribute__((flatten, always_inline)) operator Ty() {
    return lookup(/* IsReadonly */ true, /* IdentTy */ nullptr);
  }

  __attribute__((flatten, always_inline)) Value &operator=(const Ty &Other) {
    set(Other, /* IdentTy */ nullptr);
    return *this;
  }

  __attribute__((flatten, always_inline)) Value &operator++() {
    inc(1, /* IdentTy */ nullptr);
    return *this;
  }

  __attribute__((flatten, always_inline)) Value &operator--() {
    inc(-1, /* IdentTy */ nullptr);
    return *this;
  }

private:
  // All reads/writes funnel through the 32-bit slot accessor.
  __attribute__((flatten, always_inline)) Ty &lookup(bool IsReadonly,
                                                     IdentTy *Ident) {
    Ty &t = lookup32(Kind, IsReadonly, Ident);
    return t;
  }

  __attribute__((flatten, always_inline)) Ty &inc(int UpdateVal,
                                                  IdentTy *Ident) {
    return (lookup(/* IsReadonly */ false, Ident) += UpdateVal);
  }

  __attribute__((flatten, always_inline)) Ty &set(Ty UpdateVal,
                                                  IdentTy *Ident) {
    return (lookup(/* IsReadonly */ false, Ident) = UpdateVal);
  }

  template <typename VTy, typename Ty2> friend struct ValueRAII;
};

/// A lookup class without actual state used to provide
/// a nice interface to lookup and update ICV values
/// we can declare in global scope.
template <typename Ty, ValueKind Kind> struct PtrValue {
  __attribute__((flatten, always_inline)) operator Ty() {
    return lookup(/* IsReadonly */ true, /* IdentTy */ nullptr);
  }

  __attribute__((flatten, always_inline)) PtrValue &operator=(const Ty Other) {
    set(Other);
    return *this;
  }

private:
  Ty &lookup(bool IsReadonly, IdentTy *) { return lookupPtr(Kind, IsReadonly); }

  Ty &set(Ty UpdateVal) {
    return (lookup(/* IsReadonly */ false, /* IdentTy */ nullptr) = UpdateVal);
  }

  template <typename VTy, typename Ty2> friend struct ValueRAII;
};

// RAII value swap: installs NewValue on construction (asserting the slot
// currently holds OldValue) and restores OldValue on destruction.  When
// Active is false the object is a no-op.
template <typename VTy, typename Ty> struct ValueRAII {
  ValueRAII(VTy &V, Ty NewValue, Ty OldValue, bool Active, IdentTy *Ident)
      : Ptr(Active ? &V.lookup(/* IsReadonly */ false, Ident) : nullptr),
        Val(OldValue), Active(Active) {
    if (!Active)
      return;
    ASSERT(*Ptr == OldValue && "ValueRAII initialization with wrong old value!");
    *Ptr = NewValue;
  }
  ~ValueRAII() {
    if (Active)
      *Ptr = Val;
  }

private:
  Ty *Ptr;
  Ty Val;
  bool Active;
};

/// TODO
inline state::Value<uint32_t, state::VK_RunSchedChunk> RunSchedChunk;

/// TODO
inline state::Value<uint32_t, state::VK_ParallelTeamSize> ParallelTeamSize;

/// TODO
inline state::PtrValue<ParallelRegionFnTy, state::VK_ParallelRegionFn>
    ParallelRegionFn;

void runAndCheckState(void(Func(void)));

void assumeInitialState(bool IsSPMD);

} // namespace state

namespace icv {

/// TODO
inline state::Value<uint32_t, state::VK_NThreads> NThreads;

/// TODO
inline state::Value<uint32_t, state::VK_Level> Level;

/// The `active-level` describes which of the parallel level counted with the
/// `level-var` is active. There can only be one.
///
/// active-level-var is 1, if ActiveLevelVar is not 0, otherwise it is 0.
inline state::Value<uint32_t, state::VK_ActiveLevel> ActiveLevel;

/// TODO
inline state::Value<uint32_t, state::VK_MaxActiveLevels> MaxActiveLevels;

/// TODO
inline state::Value<uint32_t, state::VK_RunSched> RunSched;

} // namespace icv

namespace memory {

/// Alloca \p Size bytes in shared memory, if possible, for \p Reason.
///
/// Note: See the restrictions on __kmpc_alloc_shared for proper usage.
void *allocShared(uint64_t Size, const char *Reason);

/// Free \p Ptr, allocated via allocShared, for \p Reason.
///
/// Note: See the restrictions on __kmpc_free_shared for proper usage.
void freeShared(void *Ptr, uint64_t Bytes, const char *Reason);

/// Alloca \p Size bytes in global memory, if possible, for \p Reason.
void *allocGlobal(uint64_t Size, const char *Reason);

/// Return a pointer to the dynamic shared memory buffer.
void *getDynamicBuffer();

/// Free \p Ptr, allocated via allocGlobal, for \p Reason.
void freeGlobal(void *Ptr, const char *Reason);

} // namespace memory

} // namespace _OMP

#pragma omp end declare target

#endif
communities.h
#include "nodeIntMap.h" node_t* comm = NULL; int maxItrs; void outputCommunities(graph *G) { // print output. node_t commList[10]; int commCount[10]; int i; for(i=0;i<10; i++) { commList[i] = NIL_NODE; commCount[i] = 0; } int found; int curIndex = 0; int t; for (t = 0; t < G->numNodes; t++) { found = -1; for(i = 0; i<10;i++) { if(comm[t] == commList[i]) { found = i; break; } } if(found != -1) { commCount[found]++; } else if(curIndex < 10) { commCount[curIndex] = 1; commList[curIndex] = comm[t]; curIndex++; } } printf("Community\t#Nodes\t\t(Showing max 10 entries)\n"); for (i=0; i<10; i++) { if(commList[i] != NIL_NODE) printf("%d\t\t%d\n", commList[i], commCount[i]); } free(comm); comm = NULL; } void initCommunities(graph *G) { comm = (node_t*) malloc (G->numNodes * sizeof(node_t)); assert(comm != NULL); } void communities(graph* G) { inittracking("communities.csv"); bool finished = false ; finished = true ; #if defined(PARFOR_GUIDED) #pragma omp parallel for schedule(guided, PAR_CHUNKSIZE) #elif defined(PARFOR_DYNAMIC) #pragma omp parallel for schedule(dynamic, PAR_CHUNKSIZE) #elif defined(TASKLOOP_DEFINED) #pragma omp parallel { #pragma omp taskloop num_tasks(NUM_TASKS) #else #pragma omp parallel for schedule(static) #endif for (node_t x = 0; x < G->numNodes; x ++) comm[x] = x ; #if defined(TASKLOOP_DEFINED) } #endif int itrs = 0; do { finished = true ; #pragma omp parallel { nodeIntMap *map; map = NULL; map = initNodeIntMap(map, 32, 0); node_t x0; #if defined(PARFOR_GUIDED) #pragma omp for schedule(guided, PAR_CHUNKSIZE) #elif defined(PARFOR_DYNAMIC) #pragma omp for schedule(dynamic, PAR_CHUNKSIZE) #elif defined(TASKLOOP_DEFINED) #pragma omp taskloop num_tasks(NUM_TASKS) #else #pragma omp for schedule(static) #endif for (x0 = 0; x0 < G->numNodes; x0 ++) { map = reinitNodeIntMap(map, G->begin[x0+1] - G->begin[x0], 0); for (edge_t y_idx = G->begin[x0];y_idx < G->begin[x0+1] ; y_idx ++) { node_t y = G->node_idx [y_idx]; node_t source; #pragma omp atomic read source 
= comm[y] ; changeValue(map, source, 1); } node_t maxVal = mapMaxValueKey(map); if ( comm[x0] != maxVal && maxVal != NIL_NODE) { #pragma omp atomic write comm[x0] = maxVal; finished = false ; } } closeNodeIntMap(map); } itrs++; } while ( !finished && maxItrs > itrs); printf( "Iterations %d \n", itrs); endtracking(); }
convolution_1x1_pack8to1_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_pack8to1_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; const int size = w * h; Mat bottom_im2col = bottom_blob; bottom_im2col.w = size; bottom_im2col.h = 1; im2col_sgemm_pack8to1_fp16sa_neon(bottom_im2col, top_blob, kernel, _bias, opt); } static void conv1x1s2_sgemm_pack8to1_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = (w - 2 * outw + w) * 8; Mat bottom_blob_shrinked; bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < channels; p++) { const __fp16* r0 = bottom_blob.channel(p); __fp16* outptr = bottom_blob_shrinked.channel(p); for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { float16x8_t _v0 = vld1q_f16(r0); float16x8_t _v1 = vld1q_f16(r0 + 16); float16x8_t _v2 = vld1q_f16(r0 + 32); float16x8_t _v3 = vld1q_f16(r0 + 48); vst1q_f16(outptr, 
_v0); vst1q_f16(outptr + 8, _v1); vst1q_f16(outptr + 16, _v2); vst1q_f16(outptr + 24, _v3); r0 += 64; outptr += 32; } for (; j + 1 < outw; j += 2) { float16x8_t _v0 = vld1q_f16(r0); float16x8_t _v1 = vld1q_f16(r0 + 16); vst1q_f16(outptr, _v0); vst1q_f16(outptr + 8, _v1); r0 += 32; outptr += 16; } for (; j < outw; j++) { float16x8_t _v = vld1q_f16(r0); vst1q_f16(outptr, _v); r0 += 16; outptr += 8; } r0 += tailstep; } } conv1x1s1_sgemm_pack8to1_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt); }
raytracing.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include "math-toolkit.h" #include "primitives.h" #include "raytracing.h" #include "idx_stack.h" #define MAX_REFLECTION_BOUNCES 3 #define MAX_DISTANCE 1000000000000.0 #define MIN_DISTANCE 0.00001 #define SAMPLES 4 #define SQUARE(x) (x * x) #define MAX(a, b) (a > b ? a : b) /* @param t t distance * @return 1 means hit, otherwise 0 */ static int raySphereIntersection(const point3 ray_e, const point3 ray_d, const sphere *sph, intersection *ip, double *t1) { point3 l; subtract_vector(sph->center, ray_e, l); double s = dot_product(l, ray_d); double l2 = dot_product(l, l); double r2 = sph->radius * sph->radius; if (s < 0 && l2 > r2) return 0; float m2 = l2 - s * s; if (m2 > r2) return 0; float q = sqrt(r2 - m2); *t1 = (l2 > r2) ? (s - q) : (s + q); /* p = e + t1 * d */ multiply_vector(ray_d, *t1, ip->point); add_vector(ray_e, ip->point, ip->point); subtract_vector(ip->point, sph->center, ip->normal); normalize(ip->normal); if (dot_product(ip->normal, ray_d) > 0.0) multiply_vector(ip->normal, -1, ip->normal); return 1; } /* @return 1 means hit, otherwise 0; */ static int rayRectangularIntersection(const point3 ray_e, const point3 ray_d, rectangular *rec, intersection *ip, double *t1) { point3 e01, e03, p; subtract_vector(rec->vertices[1], rec->vertices[0], e01); subtract_vector(rec->vertices[3], rec->vertices[0], e03); cross_product(ray_d, e03, p); double det = dot_product(e01, p); /* Reject rays orthagonal to the normal vector. * I.e. rays parallell to the plane. 
*/ if (det < 1e-4) return 0; double inv_det = 1.0 / det; point3 s; subtract_vector(ray_e, rec->vertices[0], s); double alpha = inv_det * dot_product(s, p); if ((alpha > 1.0) || (alpha < 0.0)) return 0; point3 q; cross_product(s, e01, q); double beta = inv_det * dot_product(ray_d, q); if ((beta > 1.0) || (beta < 0.0)) return 0; *t1 = inv_det * dot_product(e03, q); if (alpha + beta > 1.0f) { /* for the second triangle */ point3 e23, e21; subtract_vector(rec->vertices[3], rec->vertices[2], e23); subtract_vector(rec->vertices[1], rec->vertices[2], e21); cross_product(ray_d, e21, p); det = dot_product(e23, p); if (det < 1e-4) return 0; inv_det = 1.0 / det; subtract_vector(ray_e, rec->vertices[2], s); alpha = inv_det * dot_product(s, p); if (alpha < 0.0) return 0; cross_product(s, e23, q); beta = inv_det * dot_product(ray_d, q); if ((beta < 0.0) || (beta + alpha > 1.0)) return 0; *t1 = inv_det * dot_product(e21, q); } if (*t1 < 1e-4) return 0; COPY_POINT3(ip->normal, rec->normal); if (dot_product(ip->normal, ray_d)>0.0) multiply_vector(ip->normal, -1, ip->normal); multiply_vector(ray_d, *t1, ip->point); add_vector(ray_e, ip->point, ip->point); return 1; } static void localColor(color local_color, const color light_color, double diffuse, double specular, const object_fill *fill) { color ambi = { 0.1, 0.1, 0.1 }; color diff, spec, lightCo, surface; /* Local Color = ambient * surface + * light * ( kd * surface * diffuse + ks * specular) */ COPY_COLOR(diff, fill->fill_color); multiply_vector(diff, fill->Kd, diff); multiply_vector(diff, diffuse, diff); COPY_COLOR(lightCo, light_color); multiply_vectors(diff, lightCo, diff); COPY_COLOR(spec, light_color); multiply_vector(spec, fill->Ks, spec); multiply_vector(spec, specular, spec); COPY_COLOR(surface, fill->fill_color); multiply_vectors(ambi,surface, ambi); add_vector(diff, ambi, diff); add_vector(diff, spec, diff); add_vector(local_color, diff, local_color); } /* @param d direction of the ray into intersection * @param l 
direction of intersection to light * @param n surface normal */ static void compute_specular_diffuse(double *diffuse, double *specular, const point3 d, const point3 l, const point3 n, double phong_pow) { point3 d_copy, l_copy, middle, r; /* Calculate vector to eye V */ COPY_POINT3(d_copy, d); multiply_vector(d_copy, -1, d_copy); normalize(d_copy); /* Calculate vector to light L */ COPY_POINT3(l_copy, l); multiply_vector(l_copy, -1, l_copy); normalize(l_copy); /* Calculate reflection direction R */ double tmp = dot_product(n, l_copy); multiply_vector(n, tmp, middle); multiply_vector(middle, 2, middle); subtract_vector(middle, l_copy, r); normalize(r); /* diffuse = max(0, dot_product(n, -l)) */ *diffuse = MAX(0, dot_product(n, l_copy)); /* specular = (dot_product(r, -d))^p */ *specular = pow(MAX(0, dot_product(r, d_copy)), phong_pow); } /* @param r direction of reflected ray * @param d direction of primary ray into intersection * @param n surface normal at intersection */ static void reflection(point3 r, const point3 d, const point3 n) { /* r = d - 2(d . 
n)n */ multiply_vector(n, -2.0 * dot_product(d, n), r); add_vector(r, d, r); } /* reference: https://www.opengl.org/sdk/docs/man/html/refract.xhtml */ static void refraction(point3 t, const point3 I, const point3 N, double n1, double n2) { double eta = n1 / n2; double dot_NI = dot_product(N,I); double k = 1.0 - eta * eta * (1.0 - dot_NI * dot_NI); if (k < 0.0 || n2 <= 0.0) t[0] = t[1] = t[2] = 0.0; else { point3 tmp; multiply_vector(I, eta, t); multiply_vector(N, eta * dot_NI + sqrt(k), tmp); subtract_vector(t, tmp, t); } } /* @param i direction of incoming ray, unit vector * @param r direction of refraction ray, unit vector * @param normal unit vector * @param n1 refraction index * @param n2 refraction index * * reference: http://graphics.stanford.edu/courses/cs148-10-summer/docs/2006--degreve--reflection_refraction.pdf */ static double fresnel(const point3 r, const point3 l, const point3 normal, double n1, double n2) { /* TIR */ if (length(l) < 0.99) return 1.0; double cos_theta_i = -dot_product(r, normal); double cos_theta_t = -dot_product(l, normal); double r_vertical_root = (n1 * cos_theta_i - n2 * cos_theta_t) / (n1 * cos_theta_i + n2 * cos_theta_t); double r_parallel_root = (n2 * cos_theta_i - n1 * cos_theta_t) / (n2 * cos_theta_i + n1 * cos_theta_t); return (r_vertical_root * r_vertical_root + r_parallel_root * r_parallel_root) / 2.0; } /* @param t distance */ static intersection ray_hit_object(const point3 e, const point3 d, double t0, double t1, const rectangular_node rectangulars, rectangular_node *hit_rectangular, const sphere_node spheres, sphere_node *hit_sphere) { /* set these to not hit */ *hit_rectangular = NULL; *hit_sphere = NULL; point3 biased_e; multiply_vector(d, t0, biased_e); add_vector(biased_e, e, biased_e); double nearest = t1; intersection result, tmpresult; for (rectangular_node rec = rectangulars; rec; rec = rec->next) { if (rayRectangularIntersection(biased_e, d, &(rec->element), &tmpresult, &t1) && (t1 < nearest)) { /* hit is closest 
so far */ *hit_rectangular = rec; nearest = t1; result = tmpresult; } } /* check the spheres */ for (sphere_node sphere = spheres; sphere; sphere = sphere->next) { if (raySphereIntersection(biased_e, d, &(sphere->element), &tmpresult, &t1) && (t1 < nearest)) { *hit_sphere = sphere; *hit_rectangular = NULL; nearest = t1; result = tmpresult; } } return result; } /* @param d direction of ray * @param w basic vectors */ static void rayConstruction(point3 d, const point3 u, const point3 v, const point3 w, unsigned int i, unsigned int j, const viewpoint *view, unsigned int width, unsigned int height) { double xmin = -0.0175; double ymin = -0.0175; double xmax = 0.0175; double ymax = 0.0175; double focal = 0.05; point3 u_tmp, v_tmp, w_tmp, s; double w_s = focal; double u_s = xmin + ((xmax - xmin) * (float) i / (width - 1)); double v_s = ymax + ((ymin - ymax) * (float) j / (height - 1)); /* s = e + u_s * u + v_s * v + w_s * w */ multiply_vector(u, u_s, u_tmp); multiply_vector(v, v_s, v_tmp); multiply_vector(w, w_s, w_tmp); add_vector(view->vrp, u_tmp, s); add_vector(s, v_tmp, s); add_vector(s, w_tmp, s); /* p(t) = e + td = e + t(s - e) */ subtract_vector(s, view->vrp, d); normalize(d); } static void calculateBasisVectors(point3 u, point3 v, point3 w, const viewpoint *view) { /* w */ COPY_POINT3(w, view->vpn); normalize(w); /* u = (t x w) / (|t x w|) */ cross_product(w, view->vup, u); normalize(u); /* v = w x u */ cross_product(u, w, v); normalize(v); } /* @brief protect color value overflow */ static void protect_color_overflow(color c) { for (int i = 0; i < 3; i++) if (c[i] > 1.0) c[i] = 1.0; } static unsigned int ray_color(const point3 e, double t, const point3 d, idx_stack *stk, const rectangular_node rectangulars, const sphere_node spheres, const light_node lights, color object_color, int bounces_left) { rectangular_node hit_rec = NULL, light_hit_rec = NULL; sphere_node hit_sphere = NULL, light_hit_sphere = NULL; double diffuse, specular; point3 l, _l, r, rr; 
object_fill fill; color reflection_part; color refraction_part; /* might be a reflection ray, so check how many times we've bounced */ if (bounces_left == 0) { SET_COLOR(object_color, 0.0, 0.0, 0.0); return 0; } /* check for intersection with a sphere or a rectangular */ intersection ip= ray_hit_object(e, d, t, MAX_DISTANCE, rectangulars, &hit_rec, spheres, &hit_sphere); if (!hit_rec && !hit_sphere) return 0; /* pick the fill of the object that was hit */ fill = hit_rec ? hit_rec->element.rectangular_fill : hit_sphere->element.sphere_fill; void *hit_obj = hit_rec ? (void *) hit_rec : (void *) hit_sphere; /* assume it is a shadow */ SET_COLOR(object_color, 0.0, 0.0, 0.0); for (light_node light = lights; light; light = light->next) { /* calculate the intersection vector pointing at the light */ subtract_vector(ip.point, light->element.position, l); multiply_vector(l, -1, _l); normalize(_l); /* check for intersection with an object. use ignore_me * because we don't care about this normal */ ray_hit_object(ip.point, _l, MIN_DISTANCE, length(l), rectangulars, &light_hit_rec, spheres, &light_hit_sphere); /* the light was not block by itself(lit object) */ if (light_hit_rec || light_hit_sphere) continue; compute_specular_diffuse(&diffuse, &specular, d, l, ip.normal, fill.phong_power); localColor(object_color, light->element.light_color, diffuse, specular, &fill); } reflection(r, d, ip.normal); double idx = idx_stack_top(stk).idx, idx_pass = fill.index_of_refraction; if (idx_stack_top(stk).obj == hit_obj) { idx_stack_pop(stk); idx_pass = idx_stack_top(stk).idx; } else { idx_stack_element e = { .obj = hit_obj, .idx = fill.index_of_refraction }; idx_stack_push(stk, e); } refraction(rr, d, ip.normal, idx, idx_pass); double R = (fill.T > 0.1) ? 
fresnel(d, rr, ip.normal, idx, idx_pass) : 1.0; /* totalColor = localColor + mix((1-fill.Kd) * fill.R * reflection, T * refraction, R) */ if (fill.R > 0) { /* if we hit something, add the color */ int old_top = stk->top; if (ray_color(ip.point, MIN_DISTANCE, r, stk, rectangulars, spheres, lights, reflection_part, bounces_left - 1)) { multiply_vector(reflection_part, R * (1.0 - fill.Kd) * fill.R, reflection_part); add_vector(object_color, reflection_part, object_color); } stk->top = old_top; } /* calculate refraction ray */ if ((length(rr) > 0.0) && (fill.T > 0.0) && (fill.index_of_refraction > 0.0)) { normalize(rr); if (ray_color(ip.point, MIN_DISTANCE, rr, stk,rectangulars, spheres, lights, refraction_part, bounces_left - 1)) { multiply_vector(refraction_part, (1 - R) * fill.T, refraction_part); add_vector(object_color, refraction_part, object_color); } } protect_color_overflow(object_color); return 1; } /* @param background_color this is not ambient light */ void raytracing(uint8_t *pixels, color background_color, rectangular_node rectangulars, sphere_node spheres, light_node lights, const viewpoint *view, int width, int height) { point3 u, v, w, d; color object_color = { 0.0, 0.0, 0.0 }; /* calculate u, v, w */ calculateBasisVectors(u, v, w, view); idx_stack stk; int factor = sqrt(SAMPLES); #pragma omp parallel for num_threads(64) private(stk), private(d), private(object_color) for (int j = 0; j < height; j++) { for (int i = 0; i < width; i++) { double r = 0, g = 0, b = 0; /* MSAA */ for (int s = 0; s < SAMPLES; s++) { idx_stack_init(&stk); rayConstruction(d, u, v, w, i * factor + s / factor, j * factor + s % factor, view, width * factor, height * factor); if (ray_color(view->vrp, 0.0, d, &stk, rectangulars, spheres, lights, object_color, MAX_REFLECTION_BOUNCES)) { r += object_color[0]; g += object_color[1]; b += object_color[2]; } else { r += background_color[0]; g += background_color[1]; b += background_color[2]; } pixels[((i + (j * width)) * 3) + 0] = r * 255 
/ SAMPLES; pixels[((i + (j * width)) * 3) + 1] = g * 255 / SAMPLES; pixels[((i + (j * width)) * 3) + 2] = b * 255 / SAMPLES; } } } }
core_claset.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_zlaset.c, normal z -> c, Fri Sep 28 17:38:22 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

// for memset function
#include <string.h>

/***************************************************************************//**
 *
 * @ingroup core_laset
 *
 *  Sets the elements of the matrix A on the diagonal
 *  to beta and on the off-diagonals to alpha
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          Specifies which elements of the matrix are to be set
 *          - PlasmaUpper: Upper part of A is set;
 *          - PlasmaLower: Lower part of A is set;
 *          - PlasmaUpperLower: ALL elements of A are set.
 *
 * @param[in] m
 *          The number of rows of the matrix A.  m >= 0.
 *
 * @param[in] n
 *          The number of columns of the matrix A.  n >= 0.
 *
 * @param[in] alpha
 *          The constant to which the off-diagonal elements are to be set.
 *
 * @param[in] beta
 *          The constant to which the diagonal elements are to be set.
 *
 * @param[in,out] A
 *          On entry, the m-by-n tile A.
 *          On exit, A has been set accordingly.
 *
 * @param[in] lda
 *          The leading dimension of the array A.  lda >= max(1,m).
 *
 ******************************************************************************/
__attribute__((weak))
void plasma_core_claset(plasma_enum_t uplo, int m, int n,
                        plasma_complex32_t alpha, plasma_complex32_t beta,
                        plasma_complex32_t *A, int lda)
{
    // Fast path: a full tile, stored contiguously (m == lda), cleared to
    // all zeros can be handled by a single memset instead of a LAPACK call.
    int contiguous_zero_fill = (uplo == PlasmaGeneral) && (m == lda) &&
                               (alpha == 0.0) && (beta == 0.0);

    if (contiguous_zero_fill) {
        memset((void*)A, 0, (size_t)m*n*sizeof(plasma_complex32_t));
    }
    else {
        // General case: delegate to LAPACKE_claset_work.
        LAPACKE_claset_work(LAPACK_COL_MAJOR, lapack_const(uplo),
                            m, n, alpha, beta, A, lda);
    }
}

/******************************************************************************/
void plasma_core_omp_claset(plasma_enum_t uplo, int mb, int nb,
                            int i, int j, int m, int n,
                            plasma_complex32_t alpha, plasma_complex32_t beta,
                            plasma_complex32_t *A)
{
    // Task writes the whole mb-by-nb tile; the m-by-n subtile starting at
    // (i, j) is the part actually set.
    #pragma omp task depend(out:A[0:mb*nb])
    plasma_core_claset(uplo, m, n,
                       alpha, beta,
                       A+i+j*mb, mb);
}
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 4; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
hoTvOperator.h
#pragma once

#include "hoNDArray_math.h"
#include "generalOperator.h"
#include "vector_td_operators.h"

#ifdef USE_OMP
#include <omp.h>
#endif

namespace Gadgetron{

/// Isotropic total-variation (TV) regularization operator on a CPU
/// hoNDArray, with periodic (wrap-around) boundary handling along each
/// of the D dimensions.
template<class T, unsigned int D> class hoTvOperator
        : public generalOperator< hoNDArray<T> >
{
protected:
    typedef typename realType<T>::Type REAL;

public:
    hoTvOperator() : generalOperator< hoNDArray<T> >(){
        // Gradient magnitudes at or below this threshold are treated as
        // zero to avoid division by (near-)zero in gradient().
        limit_ = REAL(1e-8);
    }

    virtual ~hoTvOperator() {}

    /// Set the smoothing/stability threshold used in gradient().
    void set_limit(REAL limit){
        limit_ = limit;
    }

    /// Compute the gradient of the TV functional at in_array and write
    /// (or, if accumulate is true, add) weight_ * result into out_array.
    /// Throws std::runtime_error when the two arrays differ in size.
    virtual void gradient( hoNDArray<T> *in_array, hoNDArray<T> *out_array, bool accumulate=false )
    {
        if (in_array->get_number_of_elements() != out_array->get_number_of_elements()){
            throw std::runtime_error("hoTvOperator: input/output array dimensions mismatch");
        }

        T* in = in_array->get_data_ptr();
        T* out = out_array->get_data_ptr();

        auto dims = vector_td<int64_t,D>(from_std_vector<size_t, D>(in_array->dimensions()));

        if (!accumulate)
            clear(out_array);

#ifdef USE_OMP
#pragma omp parallel for
#endif
        for (int64_t idx=0; idx < in_array->get_number_of_elements(); idx++){
            T xi = in[idx];
            T result = T(0);

            vector_td<int64_t,D> co = idx_to_co(idx, dims);

            // Term from the TV cell centered at this voxel:
            // D*x_i minus each forward neighbour, normalized by the
            // local gradient magnitude (skipped when below limit_).
            REAL grad = gradient_(in,dims,co);
            if (grad > limit_) {
                result += REAL(D)*xi/grad;
                for (int i = 0; i < D; i++){
                    co[i]+=1;
                    // (co+dims)%dims implements periodic wrap-around
                    result -= in[co_to_idx((co+dims)%dims,dims)]/grad;
                    co[i]-=1;
                }
            }

            // Terms from the D cells centered at the backward neighbours,
            // each contributing (x_i - neighbour)/|grad at neighbour|.
            for (int i = 0; i < D; i++){
                co[i]-=1;
                grad = gradient_(in,dims,co);
                if (grad > limit_) {
                    result +=(xi-in[co_to_idx((co+dims)%dims,dims)])/grad;
                }
                co[i]+=1;
            }

            out[idx] += this->weight_*result;
        }
    }

    /// Evaluate the (weighted) TV functional value: the sum over all
    /// voxels of the local forward-difference gradient magnitude.
    virtual REAL magnitude( hoNDArray<T> *in_array )
    {
        T* in = in_array->get_data_ptr();

        auto dims = vector_td<int64_t,D>(from_std_vector<size_t, D>(in_array->dimensions()));

        REAL result =0;

#ifdef USE_OMP
#pragma omp parallel for reduction(+:result)
#endif
        for (int64_t idx=0; idx < in_array->get_number_of_elements(); idx++){
            auto co = idx_to_co(idx, dims);
            REAL grad = gradient_(in,dims,co);
            result += this->weight_*grad;
        }
        return result;
    }

private:
    // Euclidean norm of the forward-difference gradient at coordinate co,
    // with periodic boundary wrap via (co+dims)%dims.
    REAL inline gradient_(T* in, const vector_td<int64_t,D> dims, vector_td<int64_t,D> co)
    {
        REAL grad = REAL(0);
        T xi = in[co_to_idx((co+dims)%dims,dims)];
        for (int i = 0; i < D; i++){
            co[i]+=1;
            T dt = in[co_to_idx((co+dims)%dims,dims)];
            grad += norm(xi-dt);
            co[i]-=1;
        }
        return std::sqrt(grad);
    }

protected:
    REAL limit_;   // stability threshold for gradient normalization
};

}
GB_unop__ainv_int64_int64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__ainv_int64_int64)
// op(A') function:  GB (_unop_tran__ainv_int64_int64)

// C type:   int64_t
// A type:   int64_t
// cast:     int64_t cij = aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (additive inverse; negation of INT64_MIN wraps to itself)
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CAST(z, aij) \
    int64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int64_t z = aij ; \
    Cx [pC] = -z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__ainv_int64_int64)
(
    int64_t *Cx,            // Cx and Ax may be aliased
    const int64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every entry of Ax is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int64_t aij = Ax [p] ;
            int64_t z = aij ;
            Cx [p] = -z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            int64_t aij = Ax [p] ;
            int64_t z = aij ;
            Cx [p] = -z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__ainv_int64_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template uses the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
nlk_corpus.c
/****************************************************************************** * NLK - Neural Language Kit * * Copyright (c) 2015 Luis Rei <me@luisrei.com> http://luisrei.com @lmrei * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*****************************************************************************/ /** * @file nlk_corpus.c * Create and use corpus structures */ #include <unistd.h> #include <time.h> #include <inttypes.h> #include <omp.h> #include "nlk_text.h" #include "nlk_vocabulary.h" #include "nlk_util.h" #include "nlk_tic.h" #include "nlk.h" #include "nlk_corpus.h" /** * Displays the progress stats while building a corpus from a file * * @param start the clocks used just before starting to read the file */ static void nlk_corpus_display_progress(const size_t line_counter, const size_t total_lines, const clock_t start) { double progress; double speed; char display_str[256]; clock_t now = clock(); progress = (line_counter / (double) total_lines) * 100; speed = line_counter / ((double)(now - start + 1) / (double)CLOCKS_PER_SEC * 1000), snprintf(display_str, 256, "Corpus Progress: %.2f%% Lines/Thread/sec: %.2fK Threads: %d", progress, speed, omp_get_num_threads()); nlk_tic(display_str, false); } /** * Reads a corpus (in id-text line delimited format) * * @param file_path the path to the corpus * @param vocab the vocabulary to use * * @return a corpus structure */ struct nlk_corpus_t * nlk_corpus_read(char *file_path, struct nlk_vocab_t **vocab, const bool verbose) { struct nlk_corpus_t *corpus = NULL; size_t total_lines; const int num_threads = nlk_get_num_threads(); /* count lines */ if(verbose) { nlk_tic("Reading Corpus: ", false); printf("%s\n", file_path); } total_lines = nlk_text_count_lines(file_path); if(verbose) { printf("Lines: %zu\n", total_lines); } /* allocate corpus */ corpus = (struct nlk_corpus_t *) malloc(sizeof(struct nlk_corpus_t)); if(corpus == NULL) { NLK_ERROR_NULL("unable to allocate memory for the vocabularized file", NLK_ENOMEM); } /* allocate memory for the file (the line array) */ corpus->lines = (struct nlk_line_t *) calloc(total_lines, sizeof(struct nlk_line_t)); if(corpus->lines == NULL) { NLK_ERROR_NULL("unable to allocate memory for the vocabularized 
file", NLK_ENOMEM); /* unreachable */ } corpus->len = total_lines; struct nlk_line_t *lines = corpus->lines; struct nlk_vocab_t *replacement = nlk_vocab_find(vocab, NLK_UNK_SYMBOL); uint64_t word_count = 0; size_t line_counter = 0; size_t updated = 0; clock_t start = clock(); /** * Start of Parallel Region */ #pragma omp parallel reduction(+ : word_count) shared(line_counter, updated) { /* allocate memory for a line of text */ char **text_line = nlk_text_line_create(); char *buffer = malloc(sizeof(char) * NLK_BUFFER_SIZE); if(buffer == NULL) { NLK_ERROR_ABORT("unable to allocate memory for buffering", NLK_ENOMEM); /* unreachable */ } /* memory for vocabularizing */ struct nlk_vocab_t *varray[NLK_MAX_LINE_SIZE]; struct nlk_line_t vline; vline.varray = varray; /* open file */ int fd = nlk_open(file_path); #pragma omp for for(int thread_id = 0; thread_id < num_threads; thread_id++) { /** @subsection File Reading Position * open file and get start and end positions for thread */ const size_t line_start = nlk_text_get_split_start_line(total_lines, num_threads, thread_id); const size_t end_line = nlk_text_get_split_end_line(total_lines, num_threads, thread_id); /* go to start position */ size_t line_cur = line_start; nlk_text_goto_line(fd, line_cur); /** @subsection Read lines */ while(line_cur <= end_line) { /* display */ if(verbose) { if(line_counter - updated > 1000) { updated = line_counter; nlk_corpus_display_progress(line_counter, total_lines, start); } } /* end of display */ /* read */ nlk_vocab_read_vocabularize(fd, true, vocab, replacement, text_line, &vline, buffer); /* check for errors */ if(vline.len == 0) { lines[line_cur].varray = NULL; lines[line_cur].len = 0; lines[line_cur].line_id = (size_t)-1; /* nlk_debug("\nBad line: %zu\n", line_cur); */ line_cur++; line_counter++; continue; } /* create */ lines[line_cur].varray = (struct nlk_vocab_t **) malloc(sizeof(struct nlk_vocab_t *) * vline.len); if(lines[line_cur].varray == NULL) { NLK_ERROR_ABORT("unable to 
allocate memory for line", NLK_ENOMEM); /* unreachable */ } /* copy */ lines[line_cur].len = vline.len; lines[line_cur].line_id = vline.line_id; for(size_t ii = 0; ii < vline.len; ii++) { lines[line_cur].varray[ii] = varray[ii]; } word_count += vline.len; line_cur++; line_counter++; } /* end of lines for thread */ } /* end of for() threads */ if(verbose) { nlk_corpus_display_progress(line_counter, total_lines, start); } /* free thread memory */ nlk_text_line_free(text_line); free(buffer); buffer = NULL; close(fd); fd = 0; } /* end of parallel region */ corpus->count = word_count; if(verbose) { printf("\n"); nlk_tic("done reading corpus: ", false); printf("%"PRIu64" words\n", word_count); } return corpus; } /** * Free a corpus structure * * @param corpus the corpus structure to free */ void nlk_corpus_free(struct nlk_corpus_t *corpus) { /* free individual lines */ for(size_t ii = 0; ii < corpus->len; ii++) { if(corpus->lines[ii].varray != NULL) { free(corpus->lines[ii].varray); corpus->lines[ii].varray = NULL; } } /* free the lines array */ free(corpus->lines); corpus->lines = NULL; /* free the corpus */ free(corpus); } /** * Counts the number of word occurrences in the subset of a corpus * * @param corpus the corpus * @param ids the ids of the corpus that make up the subset * @param n_ids the size of the ids array * * @return the number of word occurrences in the corpus subset */ uint64_t nlk_corpus_subset_count(const struct nlk_corpus_t *corpus, const size_t *ids, const size_t n_ids) { uint64_t total = 0; for(size_t ii = 0; ii < corpus->len; ii++) { if(nlk_in(corpus->lines[ii].line_id, ids, n_ids)) { total += corpus->lines[ii].len; } } return total; }
openmp.c
#include <sc.h>
#include <omp.h>

/* Lock serializing the logging call below so that output lines from
 * concurrent threads are not interleaved. Initialized in main() before the
 * parallel region and destroyed after it. */
omp_lock_t          writelock;

/** Print a greeting identifying the calling OpenMP thread.
 * Access to the SC logging call is serialized via \ref writelock, which
 * must have been initialized by the caller. */
void
openmp_print_tid (void)
{
  omp_set_lock (&writelock);
  SC_PRODUCTIONF ("Hello from thread %i.\n", omp_get_thread_num ());
  omp_unset_lock (&writelock);
}

/** Demo program: initialize MPI at THREAD_MULTIPLE, then have every OpenMP
 * thread print its id.  Returns 0 on success.
 *
 * Fixes over the original:
 *  - the lock acquired with omp_init_lock is now released with
 *    omp_destroy_lock once the parallel region has joined;
 *  - sc_init/sc_MPI_Init_thread are now matched by sc_finalize and
 *    sc_MPI_Finalize — the MPI standard requires every process that calls
 *    MPI_Init(_thread) to call MPI_Finalize before exiting. */
int
main (int argc, char *argv[])
{
  int                 mpiret, mpisize;
  int                 thread_lvl, num_threads;

  mpiret = sc_MPI_Init_thread (&argc, &argv, sc_MPI_THREAD_MULTIPLE,
                               &thread_lvl);
  SC_CHECK_MPI (mpiret);
  sc_init (sc_MPI_COMM_WORLD, 1, 1, NULL, SC_LP_DEFAULT);

  if (thread_lvl < sc_MPI_THREAD_MULTIPLE) {
    /* The MPI library cannot guarantee thread safety; skip the demo. */
    SC_GLOBAL_PRODUCTIONF ("MPI only supports thread level %d\n", thread_lvl);
  }
  else {
    mpiret = sc_MPI_Comm_size (sc_MPI_COMM_WORLD, &mpisize);
    SC_CHECK_MPI (mpiret);
    num_threads = omp_get_max_threads ();
    SC_GLOBAL_PRODUCTIONF ("Running on %i processes with %i threads each.\n",
                           mpisize, num_threads);
    omp_set_num_threads (num_threads);
    omp_init_lock (&writelock);
#pragma omp parallel
    {
      openmp_print_tid ();
    }
    /* BUG FIX: release lock resources; was missing in the original. */
    omp_destroy_lock (&writelock);
  }

  /* BUG FIX: tear down sc and MPI before exiting; the original returned
   * without finalizing, which is an error per the MPI standard. */
  sc_finalize ();
  mpiret = sc_MPI_Finalize ();
  SC_CHECK_MPI (mpiret);
  return 0;
}
kiss_fft.c
/* Copyright (c) 2003-2010, Mark Borgerding All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the author nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "_kiss_fft_guts.h" /* The guts header contains all the multiplication and addition macros that are defined for fixed or floating point complex numbers. It also delares the kf_ internal functions. 
*/

/* Radix-2 butterfly: combines m pairs of outputs one DFT stage apart.
 * Fout points at the first half, Fout+m at the second; twiddles are read
 * with stride fstride.  NOTE(review): m is assumed > 0 — the do/while
 * executes at least once. */
static void kf_bfly2(
    kiss_fft_cpx *Fout,
    const size_t fstride,
    const kiss_fft_cfg st,
    int m)
{
    kiss_fft_cpx *Fout2;
    kiss_fft_cpx *tw1 = st->twiddles;
    kiss_fft_cpx t;
    Fout2 = Fout + m;
    do {
        /* C_FIXDIV is a no-op in floating point; scales in fixed point */
        C_FIXDIV(*Fout, 2);
        C_FIXDIV(*Fout2, 2);

        C_MUL(t, *Fout2, *tw1);
        tw1 += fstride;
        C_SUB(*Fout2, *Fout, t);
        C_ADDTO(*Fout, t);
        ++Fout2;
        ++Fout;
    } while (--m);
}

/* Radix-4 butterfly: combines four interleaved sub-DFTs of length m.
 * The inverse/forward branch differs only in the sign of the imaginary
 * cross terms (conjugate twiddle direction). */
static void kf_bfly4(
    kiss_fft_cpx *Fout,
    const size_t fstride,
    const kiss_fft_cfg st,
    const size_t m)
{
    kiss_fft_cpx *tw1, *tw2, *tw3;
    kiss_fft_cpx scratch[6];
    size_t k = m;
    const size_t m2 = 2 * m;
    const size_t m3 = 3 * m;

    tw3 = tw2 = tw1 = st->twiddles;

    do {
        C_FIXDIV(*Fout, 4);
        C_FIXDIV(Fout[m], 4);
        C_FIXDIV(Fout[m2], 4);
        C_FIXDIV(Fout[m3], 4);

        C_MUL(scratch[0], Fout[m], *tw1);
        C_MUL(scratch[1], Fout[m2], *tw2);
        C_MUL(scratch[2], Fout[m3], *tw3);

        C_SUB(scratch[5], *Fout, scratch[1]);
        C_ADDTO(*Fout, scratch[1]);
        C_ADD(scratch[3], scratch[0], scratch[2]);
        C_SUB(scratch[4], scratch[0], scratch[2]);
        C_SUB(Fout[m2], *Fout, scratch[3]);
        /* advance twiddle pointers: tw2/tw3 step 2x/3x faster */
        tw1 += fstride;
        tw2 += fstride * 2;
        tw3 += fstride * 3;
        C_ADDTO(*Fout, scratch[3]);

        if (st->inverse) {
            Fout[m].r = scratch[5].r - scratch[4].i;
            Fout[m].i = scratch[5].i + scratch[4].r;
            Fout[m3].r = scratch[5].r + scratch[4].i;
            Fout[m3].i = scratch[5].i - scratch[4].r;
        } else {
            Fout[m].r = scratch[5].r + scratch[4].i;
            Fout[m].i = scratch[5].i - scratch[4].r;
            Fout[m3].r = scratch[5].r - scratch[4].i;
            Fout[m3].i = scratch[5].i + scratch[4].r;
        }
        ++Fout;
    } while (--k);
}

/* Radix-3 butterfly.  epi3 is the twiddle at angle 2*pi/3 (read from the
 * precomputed table), whose imaginary part supplies the +-sin(2pi/3)
 * rotation. */
static void kf_bfly3(
    kiss_fft_cpx *Fout,
    const size_t fstride,
    const kiss_fft_cfg st,
    size_t m)
{
    size_t k = m;
    const size_t m2 = 2 * m;
    kiss_fft_cpx *tw1, *tw2;
    kiss_fft_cpx scratch[5];
    kiss_fft_cpx epi3;
    epi3 = st->twiddles[fstride * m];

    tw1 = tw2 = st->twiddles;

    do {
        C_FIXDIV(*Fout, 3);
        C_FIXDIV(Fout[m], 3);
        C_FIXDIV(Fout[m2], 3);

        C_MUL(scratch[1], Fout[m], *tw1);
        C_MUL(scratch[2], Fout[m2], *tw2);

        C_ADD(scratch[3], scratch[1], scratch[2]);
        C_SUB(scratch[0], scratch[1], scratch[2]);
        tw1 += fstride;
        tw2 += fstride * 2;

        Fout[m].r = Fout->r - HALF_OF(scratch[3].r);
        Fout[m].i = Fout->i - HALF_OF(scratch[3].i);

        C_MULBYSCALAR(scratch[0], epi3.i);

        C_ADDTO(*Fout, scratch[3]);

        Fout[m2].r = Fout[m].r + scratch[0].i;
        Fout[m2].i = Fout[m].i - scratch[0].r;

        Fout[m].r -= scratch[0].i;
        Fout[m].i += scratch[0].r;

        ++Fout;
    } while (--k);
}

/* Radix-5 butterfly.  ya/yb are the table twiddles at angles 2*pi/5 and
 * 4*pi/5; the five outputs are formed from symmetric/antisymmetric sums of
 * the four twiddled inputs (scratch[7..10]). */
static void kf_bfly5(
    kiss_fft_cpx *Fout,
    const size_t fstride,
    const kiss_fft_cfg st,
    int m)
{
    kiss_fft_cpx *Fout0, *Fout1, *Fout2, *Fout3, *Fout4;
    int u;
    kiss_fft_cpx scratch[13];
    kiss_fft_cpx *twiddles = st->twiddles;
    kiss_fft_cpx *tw;
    kiss_fft_cpx ya, yb;
    ya = twiddles[fstride * m];
    yb = twiddles[fstride * 2 * m];

    Fout0 = Fout;
    Fout1 = Fout0 + m;
    Fout2 = Fout0 + 2 * m;
    Fout3 = Fout0 + 3 * m;
    Fout4 = Fout0 + 4 * m;

    tw = st->twiddles;
    for (u = 0; u < m; ++u) {
        C_FIXDIV(*Fout0, 5);
        C_FIXDIV(*Fout1, 5);
        C_FIXDIV(*Fout2, 5);
        C_FIXDIV(*Fout3, 5);
        C_FIXDIV(*Fout4, 5);
        scratch[0] = *Fout0;

        C_MUL(scratch[1], *Fout1, tw[u * fstride]);
        C_MUL(scratch[2], *Fout2, tw[2 * u * fstride]);
        C_MUL(scratch[3], *Fout3, tw[3 * u * fstride]);
        C_MUL(scratch[4], *Fout4, tw[4 * u * fstride]);

        C_ADD(scratch[7], scratch[1], scratch[4]);
        C_SUB(scratch[10], scratch[1], scratch[4]);
        C_ADD(scratch[8], scratch[2], scratch[3]);
        C_SUB(scratch[9], scratch[2], scratch[3]);

        /* DC-like term: sum of all five inputs */
        Fout0->r += scratch[7].r + scratch[8].r;
        Fout0->i += scratch[7].i + scratch[8].i;

        scratch[5].r = scratch[0].r + S_MUL(scratch[7].r, ya.r) + S_MUL(scratch[8].r, yb.r);
        scratch[5].i = scratch[0].i + S_MUL(scratch[7].i, ya.r) + S_MUL(scratch[8].i, yb.r);

        scratch[6].r = S_MUL(scratch[10].i, ya.i) + S_MUL(scratch[9].i, yb.i);
        scratch[6].i = -S_MUL(scratch[10].r, ya.i) - S_MUL(scratch[9].r, yb.i);

        C_SUB(*Fout1, scratch[5], scratch[6]);
        C_ADD(*Fout4, scratch[5], scratch[6]);

        scratch[11].r = scratch[0].r + S_MUL(scratch[7].r, yb.r) + S_MUL(scratch[8].r, ya.r);
        scratch[11].i = scratch[0].i + S_MUL(scratch[7].i, yb.r) + S_MUL(scratch[8].i, ya.r);
        scratch[12].r = -S_MUL(scratch[10].i, yb.i) + S_MUL(scratch[9].i, ya.i);
        scratch[12].i = S_MUL(scratch[10].r, yb.i) - S_MUL(scratch[9].r, ya.i);

        C_ADD(*Fout2, scratch[11], scratch[12]);
        C_SUB(*Fout3, scratch[11], scratch[12]);

        ++Fout0;
        ++Fout1;
        ++Fout2;
        ++Fout3;
        ++Fout4;
    }
}

/* perform the butterfly for one stage of a mixed radix FFT */
/* Generic radix-p butterfly used when p is not 2,3,4,5.  Buffers each group
 * of p samples into heap scratch, then recombines them against the full
 * twiddle table (index wrapped modulo Norig). */
static void kf_bfly_generic(
    kiss_fft_cpx *Fout,
    const size_t fstride,
    const kiss_fft_cfg st,
    int m,
    int p)
{
    int u, k, q1, q;
    kiss_fft_cpx *twiddles = st->twiddles;
    kiss_fft_cpx t;
    int Norig = st->nfft;

    kiss_fft_cpx *scratch = (kiss_fft_cpx *)KISS_FFT_TMP_ALLOC(sizeof(kiss_fft_cpx) * p);

    for (u = 0; u < m; ++u) {
        k = u;
        for (q1 = 0; q1 < p; ++q1) {
            scratch[q1] = Fout[k];
            C_FIXDIV(scratch[q1], p);
            k += m;
        }

        k = u;
        for (q1 = 0; q1 < p; ++q1) {
            int twidx = 0;
            Fout[k] = scratch[0];
            for (q = 1; q < p; ++q) {
                twidx += fstride * k;
                if (twidx >= Norig)
                    twidx -= Norig;
                C_MUL(t, scratch[q], twiddles[twidx]);
                C_ADDTO(Fout[k], t);
            }
            k += m;
        }
    }
    KISS_FFT_TMP_FREE(scratch);
}

/* Recursive Cooley-Tukey work routine.  factors holds (radix, length/radix)
 * pairs; each call consumes one pair, recurses on the m sub-DFTs and then
 * recombines them with the radix-p butterfly.  The _OPENMP branch runs the
 * p top-level (fstride==1) work units in parallel before recombining. */
static void kf_work(
    kiss_fft_cpx *Fout,
    const kiss_fft_cpx *f,
    const size_t fstride,
    int in_stride,
    int *factors,
    const kiss_fft_cfg st)
{
    kiss_fft_cpx *Fout_beg = Fout;
    const int p = *factors++; /* the radix */
    const int m = *factors++; /* stage's fft length/p */
    const kiss_fft_cpx *Fout_end = Fout + p * m;

#ifdef _OPENMP
    // use openmp extensions at the
    // top-level (not recursive)
    if (fstride == 1 && p <= 5) {
        int k;

        // execute the p different work units in different threads
#pragma omp parallel for
        for (k = 0; k < p; ++k)
            kf_work(Fout + k * m, f + fstride * in_stride * k, fstride * p,
                    in_stride, factors, st);
        // all threads have joined by this point

        switch (p) {
            case 2: kf_bfly2(Fout, fstride, st, m); break;
            case 3: kf_bfly3(Fout, fstride, st, m); break;
            case 4: kf_bfly4(Fout, fstride, st, m); break;
            case 5: kf_bfly5(Fout, fstride, st, m); break;
            default: kf_bfly_generic(Fout, fstride, st, m, p); break;
        }
        return;
    }
#endif

    if (m == 1) {
        /* base case: length-1 DFT is a strided copy of the input */
        do {
            *Fout = *f;
            f += fstride * in_stride;
        } while (++Fout != Fout_end);
    } else {
        do {
            // recursive call:
            // DFT of size m*p performed by doing
            // p instances of smaller DFTs of size m,
            // each one takes a decimated version of the input
            kf_work(Fout, f, fstride * p, in_stride, factors, st);
            f += fstride * in_stride;
        } while ((Fout += m) != Fout_end);
    }

    Fout = Fout_beg;

    // recombine the p smaller DFTs
    switch (p) {
        case 2: kf_bfly2(Fout, fstride, st, m); break;
        case 3: kf_bfly3(Fout, fstride, st, m); break;
        case 4: kf_bfly4(Fout, fstride, st, m); break;
        case 5: kf_bfly5(Fout, fstride, st, m); break;
        default: kf_bfly_generic(Fout, fstride, st, m, p); break;
    }
}

/* facbuf is populated by p1,m1,p2,m2, ...
 * where
 * p[i] * m[i] = m[i-1]
 * m0 = n */
static void kf_factor(int n, int *facbuf)
{
    int p = 4;
    double floor_sqrt;
    floor_sqrt = floor(sqrt((double)n));

    /* factor out powers of 4, powers of 2, then any remaining primes */
    do {
        while (n % p) {
            switch (p) {
                case 4: p = 2; break;
                case 2: p = 3; break;
                default: p += 2; break;
            }
            if (p > floor_sqrt)
                p = n; /* no more factors, skip to end */
        }
        n /= p;
        *facbuf++ = p;
        *facbuf++ = n;
    } while (n > 1);
}

/*
 *
 * User-callable function to allocate all necessary storage space for the fft.
 *
 * The return value is a contiguous block of memory, allocated with malloc. As such,
 * It can be freed with free(), rather than a kiss_fft-specific function.
 *
 * If lenmem is non-NULL the caller supplies the buffer (mem): the required
 * size is written back through lenmem, and NULL is returned when the
 * supplied buffer is too small.
 * */
kiss_fft_cfg kiss_fft_alloc(int nfft, int inverse_fft, void *mem, size_t *lenmem)
{
    kiss_fft_cfg st = NULL;
    /* state struct already embeds one kiss_fft_cpx, hence (nfft - 1) */
    size_t memneeded = sizeof(struct kiss_fft_state) +
                       sizeof(kiss_fft_cpx) * (nfft - 1); /* twiddle factors*/

    if (lenmem == NULL) {
        st = (kiss_fft_cfg)KISS_FFT_MALLOC(memneeded);
    } else {
        if (mem != NULL && *lenmem >= memneeded)
            st = (kiss_fft_cfg)mem;
        *lenmem = memneeded;
    }
    if (st) {
        int i;
        st->nfft = nfft;
        st->inverse = inverse_fft;

        /* precompute the twiddle table; inverse transform flips the sign */
        for (i = 0; i < nfft; ++i) {
            const double pi = 3.141592653589793238462643383279502884197169399375105820974944;
            double phase = -2 * pi * i / nfft;
            if (st->inverse)
                phase *= -1;
            kf_cexp(st->twiddles + i, phase);
        }

        kf_factor(nfft, st->factors);
    }
    return st;
}

/* Strided FFT entry point.  If fin == fout a temporary buffer is used,
 * since kf_work is not an in-place algorithm. */
void kiss_fft_stride(kiss_fft_cfg st, const kiss_fft_cpx *fin, kiss_fft_cpx *fout, int in_stride)
{
    if (fin == fout) {
        //NOTE: this is not really an in-place FFT algorithm.
        //It just performs an out-of-place FFT into a temp buffer
        kiss_fft_cpx *tmpbuf =
            (kiss_fft_cpx *)KISS_FFT_TMP_ALLOC(sizeof(kiss_fft_cpx) * st->nfft);
        kf_work(tmpbuf, fin, 1, in_stride, st->factors, st);
        memcpy(fout, tmpbuf, sizeof(kiss_fft_cpx) * st->nfft);
        KISS_FFT_TMP_FREE(tmpbuf);
    } else {
        kf_work(fout, fin, 1, in_stride, st->factors, st);
    }
}

/* Convenience wrapper: contiguous (stride 1) transform. */
void kiss_fft(kiss_fft_cfg cfg, const kiss_fft_cpx *fin, kiss_fft_cpx *fout)
{
    kiss_fft_stride(cfg, fin, fout, 1);
}

void kiss_fft_cleanup(void)
{
    // nothing needed any more
}

/* Return the smallest integer >= n whose prime factors are all 2, 3, or 5
 * (sizes the mixed-radix kernels above handle efficiently). */
int kiss_fft_next_fast_size(int n)
{
    while (1) {
        int m = n;
        while ((m % 2) == 0)
            m /= 2;
        while ((m % 3) == 0)
            m /= 3;
        while ((m % 5) == 0)
            m /= 5;
        if (m <= 1)
            break; /* n is completely factorable by twos, threes, and fives */
        n++;
    }
    return n;
}
tally.h
#ifndef OPENMC_TALLIES_TALLY_H
#define OPENMC_TALLIES_TALLY_H

#include "openmc/constants.h"
#include "openmc/tallies/filter.h"
#include "openmc/tallies/trigger.h"

#include <gsl/gsl>
#include "pugixml.hpp"
#include "xtensor/xfixed.hpp"
#include "xtensor/xtensor.hpp"

#include <memory> // for unique_ptr
#include <unordered_map>
#include <string>
#include <vector>

namespace openmc {

//==============================================================================
//! A user-specified flux-weighted (or current) measurement.
//==============================================================================

class Tally {
public:
  //----------------------------------------------------------------------------
  // Constructors, destructors, factory functions

  explicit Tally(int32_t id);
  explicit Tally(pugi::xml_node node);
  ~Tally();
  //! Factory function; id == -1 presumably means "auto-assign" -- confirm in
  //! the implementation.
  static Tally* create(int32_t id = -1);

  //----------------------------------------------------------------------------
  // Accessors

  void set_id(int32_t id);

  void set_active(bool active) { active_ = active; }

  void set_writable(bool writable) { writable_ = writable; }

  void set_scores(pugi::xml_node node);
  void set_scores(const std::vector<std::string>& scores);

  void set_nuclides(pugi::xml_node node);
  void set_nuclides(const std::vector<std::string>& nuclides);

  //! All filter indices for this tally.
  const std::vector<int32_t>& filters() const {return filters_;}

  //! Index of the i-th filter (no bounds checking).
  int32_t filters(int i) const {return filters_[i];}

  void set_filters(gsl::span<Filter*> filters);

  //! Stride of the i-th filter (no bounds checking).
  int32_t strides(int i) const {return strides_[i];}

  int32_t n_filter_bins() const {return n_filter_bins_;}

  bool writable() const { return writable_;}

  //----------------------------------------------------------------------------
  // Other methods.

  //! Append one filter by wrapping the pointer in a one-element span.
  void add_filter(Filter* filter) { set_filters({&filter, 1}); }

  void init_triggers(pugi::xml_node node);

  void init_results();

  void reset();

  void accumulate();

  //----------------------------------------------------------------------------
  // Major public data members.

  int id_ {C_NONE}; //!< User-defined identifier

  std::string name_; //!< User-defined name

  int type_ {TALLY_VOLUME}; //!< e.g. volume, surface current

  //! Event type that contributes to this tally
  int estimator_ {ESTIMATOR_TRACKLENGTH};

  //! Whether this tally is currently being updated
  bool active_ {false};

  //! Number of realizations
  int n_realizations_ {0};

  std::vector<int> scores_; //!< Filter integrands (e.g. flux, fission)

  //! Index of each nuclide to be tallied.  -1 indicates total material.
  std::vector<int> nuclides_ {-1};

  //! True if this tally has a bin for every nuclide in the problem
  bool all_nuclides_ {false};

  //! Results for each bin -- the first dimension of the array is for scores
  //! (e.g. flux, total reaction rate, fission reaction rate, etc.) and the
  //! second dimension of the array is for the combination of filters
  //! (e.g. specific cell, specific energy group, etc.)
  xt::xtensor<double, 3> results_;

  //! True if this tally should be written to statepoint files
  bool writable_ {true};

  //----------------------------------------------------------------------------
  // Miscellaneous public members.

  // We need to have quick access to some filters.  The following gives indices
  // for various filters that could be in the tally or C_NONE if they are not
  // present.
  int energyout_filter_ {C_NONE};
  int delayedgroup_filter_ {C_NONE};

  bool depletion_rx_ {false}; //!< Has depletion reactions (e.g. (n,2n))

  std::vector<Trigger> triggers_;

  int deriv_ {C_NONE}; //!< Index of a TallyDerivative object for diff tallies.

private:
  //----------------------------------------------------------------------------
  // Private data.

  std::vector<int32_t> filters_; //!< Filter indices in global filters array

  //! Index strides assigned to each filter to support 1D indexing.
  std::vector<int32_t> strides_;

  int32_t n_filter_bins_ {0};

  gsl::index index_; // position of this tally in model::tallies -- TODO confirm
};

//==============================================================================
// Global variable declarations
//==============================================================================

namespace model {
  extern std::vector<std::unique_ptr<Tally>> tallies;

  extern std::vector<int> active_tallies;
  extern std::vector<int> active_analog_tallies;
  extern std::vector<int> active_tracklength_tallies;
  extern std::vector<int> active_collision_tallies;
  extern std::vector<int> active_meshsurf_tallies;
  extern std::vector<int> active_surface_tallies;
  extern std::vector<int> active_surface_track;

  extern std::unordered_map<int, int> tally_map;
}

namespace simulation {
  //! Global tallies (such as k-effective estimators)
  extern xt::xtensor_fixed<double, xt::xshape<N_GLOBAL_TALLIES, 3>>
    global_tallies;

  //! Number of realizations for global tallies
  extern "C" int32_t n_realizations;
}

// It is possible to protect accumulate operations on global tallies by using an
// atomic update.  However, when multiple threads accumulate to the same global
// tally, it can cause a higher cache miss rate due to invalidation.  Thus, we
// use threadprivate variables to accumulate global tallies and then reduce at
// the end of a generation.
extern double global_tally_absorption;
extern double global_tally_collision;
extern double global_tally_tracklength;
extern double global_tally_leakage;
#pragma omp threadprivate(global_tally_absorption, global_tally_collision, \
  global_tally_tracklength, global_tally_leakage)

//==============================================================================
// Non-member functions
//==============================================================================

//! Read tally specification from tallies.xml
void read_tallies_xml();

//! \brief Accumulate the sum of the contributions from each history within the
//! batch to a new random variable
void accumulate_tallies();

//! Determine which tallies should be active
void setup_active_tallies();

// Alias for the type returned by xt::adapt(...).  N is the dimension of the
// multidimensional array
template <std::size_t N>
using adaptor_type =
  xt::xtensor_adaptor<xt::xbuffer_adaptor<double*&, xt::no_ownership>, N>;

#ifdef OPENMC_MPI
//! Collect all tally results onto master process
void reduce_tally_results();
#endif

void free_memory_tally();

} // namespace openmc

#endif // OPENMC_TALLIES_TALLY_H
ccl_correlation.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <gsl/gsl_integration.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_roots.h>
#include <gsl/gsl_spline.h>
#include <gsl/gsl_sf_bessel.h>
#include <gsl/gsl_sf_legendre.h>
#include "fftlog.h"
#include "ccl.h"

/*--------ROUTINE: taper_cl ------
TASK: Apply cosine tapering to Cls to reduce aliasing
INPUT: number of ell bins for Cl, ell vector, C_ell vector, limits for tapering
e.g., ell_limits=[low_ell_limit_lower,low_ell_limit_upper,high_ell_limit_lower,high_ell_limit_upper]
Cls outside [ell_limits[0], ell_limits[3]] are zeroed; inside the two
transition bands a half-cosine roll-off is applied; the middle band is
left untouched.  Always returns 0.
*/
static int taper_cl(int n_ell,double *ell,double *cl, double *ell_limits)
{
  for(int i=0;i<n_ell;i++) {
    if(ell[i]<ell_limits[0] || ell[i]>ell_limits[3]) {
      cl[i]=0;//ell outside desirable range
      continue;
    }
    if(ell[i]>=ell_limits[1] && ell[i]<=ell_limits[2])
      continue;//ell within good ell range
    if(ell[i]<ell_limits[1])//tapering low ell
      cl[i]*=cos((ell[i]-ell_limits[1])/(ell_limits[1]-ell_limits[0])*M_PI/2.);
    if(ell[i]>ell_limits[2])//tapering high ell
      cl[i]*=cos((ell[i]-ell_limits[2])/(ell_limits[3]-ell_limits[2])*M_PI/2.);
  }
  return 0;
}

/*--------ROUTINE: ccl_tracer_corr_fftlog ------
TASK: For a given tracer, get the correlation function via FFTLog.
The input C_ell is resampled onto a log-spaced ell grid; beyond the last
input ell it is extrapolated as a power law fitted to the last two points
(or set to zero if that fit is ill-defined).  On failure *status is set and
an error message stored on the cosmology.
INPUT: type of correlation, number of theta values to evaluate = NL, theta vector
*/
static void ccl_tracer_corr_fftlog(ccl_cosmology *cosmo,
                                   int n_ell,double *ell,double *cls,
                                   int n_theta,double *theta,double *wtheta,
                                   int corr_type,int do_taper_cl,double *taper_cl_limits,
                                   int *status)
{
  int i;
  double *l_arr,*cl_arr,*th_arr,*wth_arr;

  /* log-spaced ell grid for the FFTLog transform */
  l_arr=ccl_log_spacing(cosmo->spline_params.ELL_MIN_CORR,cosmo->spline_params.ELL_MAX_CORR,cosmo->spline_params.N_ELL_CORR);
  if(l_arr==NULL) {
    *status=CCL_ERROR_LINSPACE;
    ccl_cosmology_set_status_message(cosmo,
        "ccl_correlation.c: ccl_tracer_corr_fftlog ran out of memory\n");
    return;
  }
  cl_arr=malloc(cosmo->spline_params.N_ELL_CORR*sizeof(double));
  if(cl_arr==NULL) {
    free(l_arr);
    *status=CCL_ERROR_MEMORY;
    ccl_cosmology_set_status_message(cosmo,
        "ccl_correlation.c: ccl_tracer_corr_fftlog ran out of memory\n");
    return;
  }

  //Interpolate input Cl into array needed for FFTLog
  ccl_f1d_t *cl_spl=ccl_f1d_t_new(n_ell,ell,cls,cls[0],0);
  if(cl_spl==NULL) {
    free(l_arr);
    free(cl_arr);
    *status=CCL_ERROR_MEMORY;
    ccl_cosmology_set_status_message(cosmo,
        "ccl_correlation.c: ccl_tracer_corr_fftlog ran out of memory\n");
    return;
  }

  /* power-law extrapolation past the last input ell; disabled (tilt=0,
   * edge=0) when the last two Cls change sign or the fit would divide by 0 */
  double cl_tilt,l_edge,cl_edge;
  l_edge=ell[n_ell-1];
  if((cls[n_ell-1]*cls[n_ell-2]<0) || (cls[n_ell-2]==0)) {
    cl_tilt=0;
    cl_edge=0;
  }
  else {
    cl_tilt=log(cls[n_ell-1]/cls[n_ell-2])/log(ell[n_ell-1]/ell[n_ell-2]);
    cl_edge=cls[n_ell-1];
  }
  for(i=0;i<cosmo->spline_params.N_ELL_CORR;i++) {
    if(l_arr[i]>=l_edge)
      cl_arr[i]=cl_edge*pow(l_arr[i]/l_edge,cl_tilt);
    else
      cl_arr[i]=ccl_f1d_t_eval(cl_spl,l_arr[i]);
  }
  ccl_f1d_t_free(cl_spl);

  if (do_taper_cl)
    taper_cl(cosmo->spline_params.N_ELL_CORR,l_arr,cl_arr,taper_cl_limits);

  th_arr=malloc(sizeof(double)*cosmo->spline_params.N_ELL_CORR);
  if(th_arr==NULL) {
    free(l_arr);
    free(cl_arr);
    *status=CCL_ERROR_MEMORY;
    ccl_cosmology_set_status_message(cosmo,
        "ccl_correlation.c: ccl_tracer_corr_fftlog ran out of memory\n");
    return;
  }
  wth_arr=(double *)malloc(sizeof(double)*cosmo->spline_params.N_ELL_CORR);
  if(wth_arr==NULL) {
    free(l_arr);
    free(cl_arr);
    free(th_arr);
    *status=CCL_ERROR_MEMORY;
    ccl_cosmology_set_status_message(cosmo,
        "ccl_correlation.c: ccl_tracer_corr_fftlog ran out of memory\n");
    return;
  }

  for(i=0;i<cosmo->spline_params.N_ELL_CORR;i++)
    th_arr[i]=0;
  //Although set here to 0, theta is modified by FFTlog to obtain the correlation at ~1/l

  /* Bessel order for the Hankel transform depends on the correlation type */
  int i_bessel=0;
  if(corr_type==CCL_CORR_GG) i_bessel=0;
  if(corr_type==CCL_CORR_GL) i_bessel=2;
  if(corr_type==CCL_CORR_LP) i_bessel=0;
  if(corr_type==CCL_CORR_LM) i_bessel=4;

  fftlog_ComputeXi2D(i_bessel,cosmo->spline_params.N_ELL_CORR,l_arr,cl_arr,th_arr,wth_arr);

  // Interpolate to output values of theta
  ccl_f1d_t *wth_spl=ccl_f1d_t_new(cosmo->spline_params.N_ELL_CORR,th_arr,wth_arr,wth_arr[0],0);
  if (wth_spl == NULL) {
    free(l_arr);
    free(cl_arr);
    free(th_arr);
    free(wth_arr);
    *status = CCL_ERROR_MEMORY;
    ccl_cosmology_set_status_message(cosmo,
        "ccl_correlation.c: ccl_tracer_corr_fftlog ran out of memory\n");
    return;
  }
  /* output theta is in degrees; FFTLog grid is in radians */
  for(i=0;i<n_theta;i++)
    wtheta[i]=ccl_f1d_t_eval(wth_spl,theta[i]*M_PI/180.);
  ccl_f1d_t_free(wth_spl);

  free(l_arr);
  free(cl_arr);
  free(th_arr);
  free(wth_arr);

  return;
}

/* Parameters for the direct Bessel-integral method below. */
typedef struct {
  int nell;          /* number of input ell samples */
  double ell0;       /* first input ell */
  double ellf;       /* last input ell */
  double cl0;        /* Cl at ell0 */
  double clf;        /* Cl at ellf */
  int extrapol_0;    /* extrapolate below ell0 with a power law? */
  int extrapol_f;    /* extrapolate above ellf with a power law? */
  double tilt0;      /* low-end power-law slope */
  double tiltf;      /* high-end power-law slope */
  ccl_f1d_t *cl_spl; /* spline of Cl inside [ell0, ellf] */
  int i_bessel;      /* Bessel order for this correlation type */
  double th;         /* current angle in radians */
} corr_int_par;

/* Integrand l * J_n(l*theta) * Cl(l) for gsl_integration_qag; Cl outside
 * the tabulated range is power-law extrapolated or zero (see flags). */
static double corr_bessel_integrand(double l,void *params)
{
  double cl,jbes;
  corr_int_par *p=(corr_int_par *)params;
  double x=l*p->th;
  if(l<p->ell0) {
    if(p->extrapol_0)
      cl=p->cl0*pow(l/p->ell0,p->tilt0);
    else
      cl=0;
  }
  else if(l>p->ellf) {
    if(p->extrapol_f)
      cl=p->clf*pow(l/p->ellf,p->tiltf);
    else
      cl=0;
  }
  else
    cl=ccl_f1d_t_eval(p->cl_spl,l);
  jbes=gsl_sf_bessel_Jn(p->i_bessel,x);
  return l*jbes*cl;
}

/* Correlation function by direct quadrature of the Bessel integral,
 * parallelized over theta with OpenMP.  Each thread keeps its own GSL
 * workspace and corr_int_par; errors are merged into *status via an atomic
 * write at the end of the parallel region. */
static void ccl_tracer_corr_bessel(ccl_cosmology *cosmo,
                                   int n_ell,double *ell,double *cls,
                                   int n_theta,double *theta,double *wtheta,
                                   int corr_type,int *status)
{
  corr_int_par cp;
  ccl_f1d_t *cl_spl = NULL;
  cl_spl = ccl_f1d_t_new(n_ell, ell, cls, cls[0], 0);
  if(cl_spl == NULL) {
    *status = CCL_ERROR_MEMORY;
    ccl_cosmology_set_status_message(
      cosmo,
      "ccl_correlation.c: ccl_tracer_corr_bessel ran out of memory\n");
    return;
  }

  int ith, gslstatus;
  double result,eresult;
  gsl_function F;
  gsl_integration_workspace *w = NULL;
  int local_status;

  #pragma omp parallel default(none) \
    shared(cosmo, status, wtheta, n_ell, ell, cls, \
           corr_type, cl_spl, theta, n_theta) \
    private(w, F, result, eresult, local_status, ith, \
            gslstatus, cp)
  {
    local_status = *status;

    /* per-thread copy of the integrand parameters */
    cp.nell = n_ell;
    cp.ell0 = ell[0];
    cp.ellf = ell[n_ell-1];
    cp.cl0 = cls[0];
    cp.clf = cls[n_ell-1];

    switch(corr_type) {
      case CCL_CORR_GG:
        cp.i_bessel = 0;
        break;
      case CCL_CORR_GL:
        cp.i_bessel = 2;
        break;
      case CCL_CORR_LP:
        cp.i_bessel = 0;
        break;
      case CCL_CORR_LM:
        cp.i_bessel = 4;
        break;
    }

    /* power-law extrapolation slopes; disabled when the endpoint Cls
     * change sign (log of a non-positive ratio) */
    if (cls[0]*cls[1] <= 0)
      cp.extrapol_0 = 0;
    else {
      cp.extrapol_0 = 1;
      cp.tilt0 = log10(cls[1]/cls[0])/log10(ell[1]/ell[0]);
    }

    if (cls[n_ell-2]*cls[n_ell-1] <= 0)
      cp.extrapol_f = 0;
    else {
      cp.extrapol_f = 1;
      cp.tiltf = log10(cls[n_ell-1]/cls[n_ell-2])/log10(ell[n_ell-1]/ell[n_ell-2]);
    }
    cp.cl_spl = cl_spl;

    w = gsl_integration_workspace_alloc(cosmo->gsl_params.N_ITERATION);
    if (w == NULL) {
      local_status = CCL_ERROR_MEMORY;
    }

    F.function = &corr_bessel_integrand;
    F.params = &cp;

    #pragma omp for schedule(dynamic)
    for(ith=0; ith < n_theta; ith++) {
      if (local_status == 0) {
        cp.th = theta[ith]*M_PI/180; /* degrees -> radians */

        //TODO: Split into intervals between first bessel zeros before integrating
        //This will help both speed and accuracy of the integral.
        gslstatus = gsl_integration_qag(&F, 0, cosmo->spline_params.ELL_MAX_CORR,
                                        0, cosmo->gsl_params.INTEGRATION_EPSREL,
                                        cosmo->gsl_params.N_ITERATION,
                                        cosmo->gsl_params.INTEGRATION_GAUSS_KRONROD_POINTS,
                                        w, &result, &eresult);
        if(gslstatus != GSL_SUCCESS) {
          ccl_raise_gsl_warning(gslstatus, "ccl_correlation.c: ccl_tracer_corr_bessel():");
          local_status |= gslstatus;
        }
        wtheta[ith] = result/(2*M_PI);
      }
    }

    if (local_status) {
      #pragma omp atomic write
      *status = local_status;
    }

    gsl_integration_workspace_free(w);
  }

  ccl_f1d_t_free(cl_spl);
}

/*--------ROUTINE: ccl_compute_legendre_polynomial ------
TASK: Compute input factor for ccl_tracer_corr_legendre
INPUT: correlation type, theta (degrees), L_max, output Pl_theta
Fills Pl_theta[0..ell_max] with (2l+1)-weighted Legendre polynomials for GG,
or normalized associated Legendre P_l^2 for GL; other types leave zeros.
*/
static void ccl_compute_legendre_polynomial(int corr_type,double theta,int ell_max,double *Pl_theta)
{
  int i,j;
  double k=0;
  double cth=cos(theta*M_PI/180);

  //Initialize Pl_theta
  for (j=0;j<=ell_max;j++)
    Pl_theta[j]=0.;

  if(corr_type==CCL_CORR_GG) {
    gsl_sf_legendre_Pl_array(ell_max,cth,Pl_theta);
    for (j=0;j<=ell_max;j++)
      Pl_theta[j]*=(2*j+1);
  }
  else if(corr_type==CCL_CORR_GL) {
    for (j=2;j<=ell_max;j++) {//https://arxiv.org/pdf/1007.4809.pdf
      Pl_theta[j]=gsl_sf_legendre_Plm(j,2,cth);
      Pl_theta[j]*=(2*j+1.)/((j+0.)*(j+1.));
    }
  }
}

/*--------ROUTINE: ccl_tracer_corr_legendre ------
TASK: Compute correlation function via Legendre polynomials
INPUT: cosmology, number of theta bins, theta array, tracer 1, tracer 2,
i_bessel, boolean for tapering, vector of tapering limits, correlation
vector, angular_cl function.
Cl is interpolated onto every integer ell up to ELL_MAX_CORR (power-law
extrapolated past the last input point), then summed against the Legendre
coefficients per theta; the theta loop is OpenMP-parallel with a
per-thread Pl_theta buffer.  Not implemented for xi+- (LP/LM).
*/
static void ccl_tracer_corr_legendre(ccl_cosmology *cosmo,
                                     int n_ell,double *ell,double *cls,
                                     int n_theta,double *theta,double *wtheta,
                                     int corr_type,int do_taper_cl,double *taper_cl_limits,
                                     int *status)
{
  int i;
  double *l_arr = NULL, *cl_arr = NULL, *Pl_theta = NULL;
  ccl_f1d_t *cl_spl;

  if(corr_type==CCL_CORR_LM || corr_type==CCL_CORR_LP){
    *status=CCL_ERROR_NOT_IMPLEMENTED;
    ccl_cosmology_set_status_message(cosmo,
        "ccl_correlation.c: CCL does not support full-sky xi+- calcuations.\nhttps://arxiv.org/abs/1702.05301 indicates flat-sky to be sufficient.\n");
  }

  if(*status==0) {
    l_arr=malloc(((int)(cosmo->spline_params.ELL_MAX_CORR)+1)*sizeof(double));
    if(l_arr==NULL) {
      *status=CCL_ERROR_MEMORY;
      ccl_cosmology_set_status_message(cosmo,
          "ccl_correlation.c: ccl_tracer_corr_legendre ran out of memory\n");
    }
  }

  if(*status==0) {
    cl_arr=malloc(((int)(cosmo->spline_params.ELL_MAX_CORR)+1)*sizeof(double));
    if(cl_arr==NULL) {
      *status=CCL_ERROR_MEMORY;
      ccl_cosmology_set_status_message(cosmo,
          "ccl_correlation.c: ccl_tracer_corr_legendre ran out of memory\n");
    }
  }

  if(*status==0) {
    //Interpolate input Cl into
    cl_spl=ccl_f1d_t_new(n_ell,ell,cls,cls[0],0);
    if(cl_spl==NULL) {
      *status=CCL_ERROR_MEMORY;
      ccl_cosmology_set_status_message(cosmo,
          "ccl_correlation.c: ccl_tracer_corr_legendre ran out of memory\n");
    }
  }

  if(*status==0) {
    /* same endpoint power-law extrapolation as the FFTLog path */
    double cl_tilt,l_edge,cl_edge;
    l_edge=ell[n_ell-1];
    if((cls[n_ell-1]*cls[n_ell-2]<0) || (cls[n_ell-2]==0)) {
      cl_tilt=0;
      cl_edge=0;
    }
    else {
      cl_tilt=log(cls[n_ell-1]/cls[n_ell-2])/log(ell[n_ell-1]/ell[n_ell-2]);
      cl_edge=cls[n_ell-1];
    }
    for(i=0;i<=(int)(cosmo->spline_params.ELL_MAX_CORR);i++) {
      double l=(double)i;
      l_arr[i]=l;
      if(l>=l_edge)
        cl_arr[i]=cl_edge*pow(l/l_edge,cl_tilt);
      else
        cl_arr[i]=ccl_f1d_t_eval(cl_spl,l);
    }
    ccl_f1d_t_free(cl_spl);

    if (do_taper_cl)
      *status=taper_cl((int)(cosmo->spline_params.ELL_MAX_CORR)+1,l_arr,cl_arr,taper_cl_limits);
  }

  int local_status, i_L;
  #pragma omp parallel default(none) \
    shared(cosmo, theta, cl_arr, wtheta, n_theta, status, corr_type) \
    private(Pl_theta, i, i_L, local_status)
  {
    Pl_theta = NULL;
    local_status = *status;

    if (local_status == 0) {
      /* per-thread Legendre coefficient buffer */
      Pl_theta = malloc(sizeof(double)*((int)(cosmo->spline_params.ELL_MAX_CORR)+1));
      if (Pl_theta == NULL) {
        local_status = CCL_ERROR_MEMORY;
      }
    }

    #pragma omp for schedule(dynamic)
    for (int i=0; i < n_theta; i++) {
      if (local_status == 0) {
        wtheta[i] = 0;
        ccl_compute_legendre_polynomial(corr_type, theta[i],
                                        (int)(cosmo->spline_params.ELL_MAX_CORR),
                                        Pl_theta);
        for (i_L=1; i_L < (int)(cosmo->spline_params.ELL_MAX_CORR); i_L+=1)
          wtheta[i] += cl_arr[i_L]*Pl_theta[i_L];
        wtheta[i] /= (M_PI*4);
      }
    }

    if (local_status) {
      #pragma omp atomic write
      *status = local_status;
    }

    free(Pl_theta);
  }

  free(l_arr);
  free(cl_arr);
}

/*--------ROUTINE: ccl_tracer_corr ------
TASK: For a given tracer, get the correlation function.  Do so by running
ccl_angular_cls.  If you already have Cls calculated, go to the next
function to pass them directly.
INPUT: cosmology, number of theta values to evaluate = NL, theta vector,
tracer 1, tracer 2, i_bessel, key for tapering, limits of tapering,
correlation function.
Dispatches on flag_method to one of the three implementations above;
unknown methods set CCL_ERROR_INCONSISTENT.
*/
void ccl_correlation(ccl_cosmology *cosmo,
                     int n_ell,double *ell,double *cls,
                     int n_theta,double *theta,double *wtheta,
                     int corr_type,int do_taper_cl,double *taper_cl_limits,int flag_method,
                     int *status)
{
  switch(flag_method) {
  case CCL_CORR_FFTLOG :
    ccl_tracer_corr_fftlog(cosmo,n_ell,ell,cls,n_theta,theta,wtheta,corr_type,
                           do_taper_cl,taper_cl_limits,status);
    break;
  case CCL_CORR_LGNDRE :
    ccl_tracer_corr_legendre(cosmo,n_ell,ell,cls,n_theta,theta,wtheta,corr_type,
                             do_taper_cl,taper_cl_limits,status);
    break;
  case CCL_CORR_BESSEL :
    ccl_tracer_corr_bessel(cosmo,n_ell,ell,cls,n_theta,theta,wtheta,corr_type,status);
    break;
  default :
    *status=CCL_ERROR_INCONSISTENT;
    ccl_cosmology_set_status_message(cosmo,
        "ccl_correlation.c: ccl_correlation. Unknown algorithm\n");
  }
}

/*--------ROUTINE: ccl_correlation_3d ------
TASK: Calculate the 3d-correlation function.
Do so by using FFTLog. INPUT: cosmology, scale factor a, number of r values, r values, key for tapering, limits of tapering Correlation function result will be in array xi */ void ccl_correlation_3d(ccl_cosmology *cosmo, double a, int n_r,double *r,double *xi, int do_taper_pk,double *taper_pk_limits, int *status) { int i,N_ARR; double *k_arr,*pk_arr,*r_arr,*xi_arr; if (!cosmo->computed_nonlin_power) { *status = CCL_ERROR_NONLIN_POWER_INIT; ccl_cosmology_set_status_message( cosmo, "ccl_correlation.c: ccl_correlation_3d(): non-linear power spctrum has not been computed!"); return; } //number of data points for k and pk array N_ARR=(int)(cosmo->spline_params.N_K_3DCOR*log10(cosmo->spline_params.K_MAX/cosmo->spline_params.K_MIN)); k_arr=ccl_log_spacing(cosmo->spline_params.K_MIN,cosmo->spline_params.K_MAX,N_ARR); if(k_arr==NULL) { *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_3d ran out of memory\n"); return; } pk_arr=malloc(N_ARR*sizeof(double)); if(pk_arr==NULL) { free(k_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_3d ran out of memory\n"); return; } for (i=0; i<N_ARR; i++){ pk_arr[i] = ccl_nonlin_matter_power(cosmo, k_arr[i], a, status); } if (do_taper_pk) taper_cl(N_ARR,k_arr,pk_arr,taper_pk_limits); r_arr=malloc(sizeof(double)*N_ARR); if(r_arr==NULL) { free(k_arr); free(pk_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_3d ran out of memory\n"); return; } xi_arr=malloc(sizeof(double)*N_ARR); if(xi_arr==NULL) { free(k_arr); free(pk_arr); free(r_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_3d ran out of memory\n"); return; } for(i=0;i<N_ARR;i++) r_arr[i]=0; pk2xi(N_ARR,k_arr,pk_arr,r_arr,xi_arr); // Interpolate to output values of r ccl_f1d_t *xi_spl=ccl_f1d_t_new(N_ARR,r_arr,xi_arr,xi_arr[0],0); if (xi_spl == NULL) { 
free(k_arr); free(pk_arr); free(r_arr); free(xi_arr); *status=CCL_ERROR_MEMORY; ccl_cosmology_set_status_message(cosmo, "ccl_correlation.c: ccl_correlation_3d ran out of memory\n"); return; } for(i=0;i<n_r;i++) xi[i]=ccl_f1d_t_eval(xi_spl,r[i]); ccl_f1d_t_free(xi_spl); free(k_arr); free(pk_arr); free(r_arr); free(xi_arr); return; } /*--------ROUTINE: ccl_correlation_multipole ------ TASK: Calculate multipole of the redshift space correlation function. Do so using FFTLog. INPUT: cosmology, scale factor a, beta (= growth rate / bias), multipole order l = 0, 2, or 4, number of s values, s values Multipole function result will be in array xi */ void ccl_correlation_multipole(ccl_cosmology *cosmo, double a, double beta, int l, int n_s, double *s, double *xi, int *status) { int i, N_ARR; double *k_arr, *pk_arr, *s_arr, *xi_arr, *xi_arr0; if (!cosmo->computed_nonlin_power) { *status = CCL_ERROR_NONLIN_POWER_INIT; ccl_cosmology_set_status_message( cosmo, "ccl_correlation.c: ccl_correlation_multipole(): non-linear power spctrum has not been computed!"); return; } N_ARR = (int)(cosmo->spline_params.N_K_3DCOR * log10(cosmo->spline_params.K_MAX / cosmo->spline_params.K_MIN)); k_arr = ccl_log_spacing(cosmo->spline_params.K_MIN, cosmo->spline_params.K_MAX, N_ARR); if (k_arr == NULL) { *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole ran out of memory\n"); return; } pk_arr = malloc(N_ARR * sizeof(double)); if (pk_arr == NULL) { free(k_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole ran out of memory\n"); return; } for (i = 0; i < N_ARR; i++) pk_arr[i] = ccl_nonlin_matter_power(cosmo, k_arr[i], a, status); s_arr = malloc(sizeof(double) * N_ARR); if (s_arr == NULL) { free(k_arr); free(pk_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole ran out of memory\n"); return; } xi_arr = malloc(sizeof(double) * 
N_ARR); if (xi_arr == NULL) { free(k_arr); free(pk_arr); free(s_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole ran out of memory\n"); return; } xi_arr0 = malloc(sizeof(double) * N_ARR); if (xi_arr0 == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole ran out of memory\n"); return; } for (i = 0; i < N_ARR; i++) s_arr[i] = 0; // Calculate multipoles if (l == 0) { fftlog_ComputeXiLM(0, 2, N_ARR, k_arr, pk_arr, s_arr, xi_arr0); for (i = 0; i < N_ARR; i++) xi_arr[i] = (1. + 2. / 3 * beta + 1. / 5 * beta * beta) * xi_arr0[i]; } else if (l == 2) { fftlog_ComputeXiLM(2, 2, N_ARR, k_arr, pk_arr, s_arr, xi_arr0); for (i = 0; i < N_ARR; i++) xi_arr[i] = -(4. / 3 * beta + 4. / 7 * beta * beta) * xi_arr0[i]; } else if (l == 4) { fftlog_ComputeXiLM(4, 2, N_ARR, k_arr, pk_arr, s_arr, xi_arr0); for (i = 0; i < N_ARR; i++) xi_arr[i] = 8. 
/ 35 * beta * beta * xi_arr0[i]; } else { strcpy(cosmo->status_message, "unavailable value of l\n"); return; } // Interpolate to output values of s ccl_f1d_t *xi_spl = ccl_f1d_t_new(N_ARR, s_arr, xi_arr, xi_arr[0], 0); if (xi_spl == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole ran out of memory\n"); } for (i = 0; i < n_s; i++) xi[i] = ccl_f1d_t_eval(xi_spl,s[i]); ccl_f1d_t_free(xi_spl); free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); return; } /*--------ROUTINE: ccl_correlation_multipole_spline ------ TASK: Store multipoles of the redshift-space correlation in global splines INPUT: cosmology, scale factor a Result is stored in cosmo->data.rsd_splines[] */ void ccl_correlation_multipole_spline(ccl_cosmology *cosmo, double a, int *status) { int i, N_ARR; double *k_arr, *pk_arr, *s_arr, *xi_arr, *xi_arr0, *xi_arr2, *xi_arr4; if (!cosmo->computed_nonlin_power) { *status = CCL_ERROR_NONLIN_POWER_INIT; ccl_cosmology_set_status_message( cosmo, "ccl_correlation.c: ccl_correlation_multipole_spline(): non-linear power spctrum has not been computed!"); return; } N_ARR = (int)(cosmo->spline_params.N_K_3DCOR * log10(cosmo->spline_params.K_MAX / cosmo->spline_params.K_MIN)); k_arr = ccl_log_spacing(cosmo->spline_params.K_MIN, cosmo->spline_params.K_MAX, N_ARR); if (k_arr == NULL) { *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } pk_arr = malloc(N_ARR * sizeof(double)); if (pk_arr == NULL) { free(k_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } for (i = 0; i < N_ARR; i++) pk_arr[i] = ccl_nonlin_matter_power(cosmo, k_arr[i], a, status); s_arr = malloc(sizeof(double) * N_ARR); if (s_arr == NULL) { free(k_arr); free(pk_arr); 
*status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } xi_arr = malloc(sizeof(double) * N_ARR); if (xi_arr == NULL) { free(k_arr); free(pk_arr); free(s_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } xi_arr0 = malloc(sizeof(double) * N_ARR); if (xi_arr0 == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } xi_arr2 = malloc(sizeof(double) * N_ARR); if (xi_arr2 == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } xi_arr4 = malloc(sizeof(double) * N_ARR); if (xi_arr4 == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); free(xi_arr2); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } for (i = 0; i < N_ARR; i++) s_arr[i] = 0; // Calculate multipoles fftlog_ComputeXiLM(0, 2, N_ARR, k_arr, pk_arr, s_arr, xi_arr0); fftlog_ComputeXiLM(2, 2, N_ARR, k_arr, pk_arr, s_arr, xi_arr2); fftlog_ComputeXiLM(4, 2, N_ARR, k_arr, pk_arr, s_arr, xi_arr4); // free any memory that may have been allocated ccl_f1d_t_free(cosmo->data.rsd_splines[0]); ccl_f1d_t_free(cosmo->data.rsd_splines[1]); ccl_f1d_t_free(cosmo->data.rsd_splines[2]); cosmo->data.rsd_splines[0] = NULL; cosmo->data.rsd_splines[1] = NULL; cosmo->data.rsd_splines[1] = NULL; // Interpolate to output values of s cosmo->data.rsd_splines[0] = ccl_f1d_t_new(N_ARR, s_arr, xi_arr0, xi_arr0[0], 0); if (cosmo->data.rsd_splines[0] == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); 
free(xi_arr0); free(xi_arr2); free(xi_arr4); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } cosmo->data.rsd_splines[1] = ccl_f1d_t_new(N_ARR, s_arr, xi_arr2, xi_arr2[0], 0); if (cosmo->data.rsd_splines[1] == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); free(xi_arr2); free(xi_arr4); ccl_f1d_t_free(cosmo->data.rsd_splines[0]); cosmo->data.rsd_splines[0] = NULL; *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } cosmo->data.rsd_splines[2] = ccl_f1d_t_new(N_ARR, s_arr, xi_arr4, xi_arr4[0], 0); if (cosmo->data.rsd_splines[2] == NULL) { free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); free(xi_arr2); free(xi_arr4); ccl_f1d_t_free(cosmo->data.rsd_splines[0]); cosmo->data.rsd_splines[0] = NULL; ccl_f1d_t_free(cosmo->data.rsd_splines[1]); cosmo->data.rsd_splines[1] = NULL; *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_multipole_spline ran out of " "memory\n"); return; } // set the scale factor cosmo->data.rsd_splines_scalefactor = a; free(k_arr); free(pk_arr); free(s_arr); free(xi_arr); free(xi_arr0); free(xi_arr2); free(xi_arr4); return; } /*--------ROUTINE: ccl_correlation_3dRsd ------ TASK: Calculate the redshift-space correlation function. INPUT: cosmology, scale factor a, number of s values, s values, mu = cosine of galaxy separation angle w.r.t. 
line of sight, beta (= growth rate / bias), key for using spline Correlation function result will be in array xi */ void ccl_correlation_3dRsd(ccl_cosmology *cosmo, double a, int n_s, double *s, double mu, double beta, double *xi, int use_spline, int *status) { int i; double *xi_arr0, *xi_arr2, *xi_arr4; if (!cosmo->computed_nonlin_power) { *status = CCL_ERROR_NONLIN_POWER_INIT; ccl_cosmology_set_status_message( cosmo, "ccl_correlation.c: ccl_correlation_3dRsd(): non-linear power spctrum has not been computed!"); return; } if (use_spline == 0) { xi_arr0 = malloc(sizeof(double) * n_s); if (xi_arr0 == NULL) { *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_3dRsd ran out of memory\n"); return; } xi_arr2 = malloc(sizeof(double) * n_s); if (xi_arr2 == NULL) { free(xi_arr0); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_3dRsd ran out of memory\n"); return; } xi_arr4 = malloc(sizeof(double) * n_s); if (xi_arr4 == NULL) { free(xi_arr0); free(xi_arr2); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_3dRsd ran out of memory\n"); return; } ccl_correlation_multipole(cosmo, a, beta, 0, n_s, s, xi_arr0, status); ccl_correlation_multipole(cosmo, a, beta, 2, n_s, s, xi_arr2, status); ccl_correlation_multipole(cosmo, a, beta, 4, n_s, s, xi_arr4, status); for (i = 0; i < n_s; i++) xi[i] = xi_arr0[i] + xi_arr2[i] * gsl_sf_legendre_Pl(2, mu) + xi_arr4[i] * gsl_sf_legendre_Pl(4, mu); free(xi_arr0); free(xi_arr2); free(xi_arr4); } else { if ((cosmo->data.rsd_splines[0] == NULL) || (cosmo->data.rsd_splines[1] == NULL) || (cosmo->data.rsd_splines[2] == NULL) || (cosmo->data.rsd_splines_scalefactor != a)) ccl_correlation_multipole_spline(cosmo, a, status); for (i = 0; i < n_s; i++) xi[i] = (1. + 2. / 3 * beta + 1. / 5 * beta * beta) * ccl_f1d_t_eval(cosmo->data.rsd_splines[0],s[i]) - (4. / 3 * beta + 4. 
/ 7 * beta * beta) * ccl_f1d_t_eval(cosmo->data.rsd_splines[1],s[i]) * gsl_sf_legendre_Pl(2, mu) + 8. / 35 * beta * beta * ccl_f1d_t_eval(cosmo->data.rsd_splines[2],s[i]) * gsl_sf_legendre_Pl(4, mu); } return; } /*--------ROUTINE: ccl_correlation_3dRsd_avgmu ------ TASK: Calculate the average of redshift-space correlation function xi(s,mu) over mu at constant s INPUT: cosmology, scale factor a, number of s values, s values, beta (= growth rate / bias) The result will be in array xi */ void ccl_correlation_3dRsd_avgmu(ccl_cosmology *cosmo, double a, int n_s, double *s, double beta, double *xi, int *status) { // The average is just the l=0 multipole - the higher multiples inetegrate to zero. ccl_correlation_multipole(cosmo, a, beta, 0, n_s, s, xi, status); return; } /*--------ROUTINE: ccl_correlation_pi_sigma ------ TASK: Calculate the redshift-space correlation function using longitudinal and transverse coordinates pi and sigma. INPUT: cosmology, scale factor a, beta (= growth rate / bias), pi, number of sigma values, sigma values, key for using spline Correlation function result will be in array xi */ void ccl_correlation_pi_sigma(ccl_cosmology *cosmo, double a, double beta, double pi, int n_sig, double *sig, double *xi, int use_spline, int *status) { int i; double *mu_arr, *s_arr, *xi_arr; if (!cosmo->computed_nonlin_power) { *status = CCL_ERROR_NONLIN_POWER_INIT; ccl_cosmology_set_status_message( cosmo, "ccl_correlation.c: ccl_correlation_pi_sigma(): non-linear power spctrum has not been computed!"); return; } mu_arr = malloc(sizeof(double) * n_sig); if (mu_arr == NULL) { *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_pi_sigma ran out of memory\n"); return; } s_arr = malloc(sizeof(double) * n_sig); if (s_arr == NULL) { free(mu_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_pi_sigma ran out of memory\n"); return; } xi_arr = malloc(sizeof(double) * n_sig); if (xi_arr 
== NULL) { free(mu_arr); free(s_arr); *status = CCL_ERROR_MEMORY; strcpy(cosmo->status_message, "ccl_correlation.c: ccl_correlation_pi_sigma ran out of memory\n"); return; } for (i = 0; i < n_sig; i++) { s_arr[i] = sqrt(pi * pi + sig[i] * sig[i]); mu_arr[i] = pi / s_arr[i]; } for (i = 0; i < n_sig; i++) { ccl_correlation_3dRsd(cosmo, a, n_sig, s_arr, mu_arr[i], beta, xi_arr, use_spline, status); xi[i] = xi_arr[i]; } free(mu_arr); free(xi_arr); free(s_arr); return; }
mkldnn_graph.h
// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include <map>
#include <string>
#include <vector>
#include <memory>
#include <cpp_interfaces/impl/ie_executable_network_thread_safe_default.hpp>
#include "ie_parallel.hpp"
#include "mkldnn_memory.h"
#include "config.h"
#include "perf_count.h"
#include "mkldnn_dims.h"
#include "mean_image.h"
#include "mkldnn_node.h"
#include "mkldnn_edge.h"
#include "mkldnn_extension_utils.h"
#include "mkldnn_streams.h"

namespace MKLDNNPlugin {

// Executable MKL-DNN graph: holds the nodes/edges replicated from an
// InferenceEngine network, owns the mkldnn engine, and drives inference.
class MKLDNNGraph {
public:
    typedef std::shared_ptr<MKLDNNGraph> Ptr;
    // NUMA socket this graph is pinned to (0 by default).
    int socket;

    // Lifecycle state: the graph can run inference only once Ready.
    enum Status {
        NotReady = 0,
        Ready = 1,
    };

    // Constructs an empty, NotReady graph bound to CPU engine 0.
    MKLDNNGraph(): status(NotReady), eng(mkldnn::engine(mkldnn::engine::kind::cpu, 0)), socket(0) {}

    Status GetStatus() {
        return status;
    }

    // True once CreateGraph/InitGraph has completed.
    bool IsReady() {
        return (GetStatus() == Ready);
    }

    void setConfig(const Config &cfg);
    void setProperty(const std::map<std::string, std::string> &properties);
    Config getProperty();

    // Expose the graph's input/output blobs by name for the infer request.
    void getInputBlobs(InferenceEngine::BlobMap &in_map);
    void getOutputBlobs(InferenceEngine::BlobMap &out_map);

    // Builds the internal graph from either a full network or a
    // TensorIterator body (see the two Replicate overloads below).
    template<typename NET>
    void CreateGraph(const NET &network, const MKLDNNExtensionManager::Ptr& extMgr, int socket = 0);

    // True if a mean-image preprocessing entry exists for this input name.
    bool hasMeanImageFor(const std::string& name) {
        return _meanImages.find(name) != _meanImages.end();
    }

    void PushInputData(const std::string& name, const InferenceEngine::Blob::Ptr &in);
    void PullOutputData(InferenceEngine::BlobMap &out);

    // Runs one inference pass; batch == -1 means "use the full batch".
    void Infer(int batch = -1);

    std::vector<MKLDNNNodePtr>& GetNodes() {
        return graphNodes;
    }

    std::vector<MKLDNNEdgePtr>& GetEdges() {
        return graphEdges;
    }

    std::vector<MKLDNNNodePtr>& GetOutputNodes() {
        return outputNodes;
    }

    mkldnn::engine getEngine() const {
        return eng;
    }

    void GetPerfData(std::map<std::string, InferenceEngine::InferenceEngineProfileInfo> &perfMap) const;

    // Graph-surgery helpers used by the optimization passes.
    void RemoveDroppedNodes();
    void RemoveDroppedEdges();
    void DropNode(const MKLDNNNodePtr& node);

    // Sets up the per-stream execution arena: with OMP this just caps the
    // thread count; with TBB it creates a dedicated task_arena.
    void CreateArena(int threads_per_stream) {
#if IE_THREAD == IE_THREAD_OMP
        omp_set_num_threads(threads_per_stream);
#elif(IE_THREAD == IE_THREAD_TBB || IE_THREAD == IE_THREAD_TBB_AUTO)
        ptrArena = std::unique_ptr<tbb::task_arena>(new tbb::task_arena(threads_per_stream));
#endif
    }

    // Pins this stream's worker threads to cores. With TBB a scheduler
    // observer does the pinning lazily; with OMP/SEQ the threads are pinned
    // here directly against the process affinity mask.
    void CreateObserver(int _stream_id, int _threads_per_stream, int _pinning_step = 1) {
#if (IE_THREAD == IE_THREAD_TBB || IE_THREAD == IE_THREAD_TBB_AUTO)
        ptrObserver = std::unique_ptr<tbb::task_scheduler_observer>(
                new pinning_observer(*ptrArena.get(), _stream_id, _threads_per_stream, _pinning_step));
#else
        cpu_set_t *process_mask = nullptr;
        int ncpus = 0;
        get_process_mask(ncpus, process_mask);
#if IE_THREAD == IE_THREAD_OMP
        #pragma omp parallel for
        for (int thread_index = 0; thread_index < _threads_per_stream; thread_index++) {
            pin_thread_to_vacant_core(_stream_id * _threads_per_stream + thread_index, 1, ncpus, process_mask);
        }
#elif IE_THREAD == IE_THREAD_SEQ
        pin_thread_to_vacant_core(_stream_id * _threads_per_stream, 1, ncpus, process_mask);
#endif
        CPU_FREE(process_mask);
#endif
    }

    // Serializes the executed graph back into an ICNNNetwork (for
    // GetExecGraphInfo / debugging).
    InferenceEngine::ICNNNetwork::Ptr dump() const;

    template<typename NET>
    static void ApplyUnrollPasses(NET &net);

    void ResetInferCount() { infer_count = 0; }

protected:
    void VisitNode(MKLDNNNodePtr node, std::vector<MKLDNNNodePtr>& sortedNodes);
    void SortTopologically();

    // Resets the graph to its freshly-constructed state.
    void ForgetGraphData() {
        status = NotReady;
        eng = mkldnn::engine(mkldnn::engine::kind::cpu, 0);

        inputNodes.clear();
        outputNodes.clear();
        graphNodes.clear();
        graphEdges.clear();
        _meanImages.clear();
    }
    Status status;
    Config config;

    // For dumping purposes. -1 - no counting, all other positive
    // values mean increment it within each Infer() call
    int infer_count = -1;

    bool reuse_io_tensors = true;

    MKLDNNMemoryPtr memWorkspace;

    std::map<std::string, MKLDNNNodePtr> inputNodes;
    std::vector<MKLDNNNodePtr> outputNodes;
    std::vector<MKLDNNNodePtr> graphNodes;
    std::vector<MKLDNNEdgePtr> graphEdges;

    // Per-input mean-image preprocessing data, keyed by input name.
    std::map<std::string, MeanImage> _meanImages;
    std::string _name;

#if (IE_THREAD == IE_THREAD_TBB || IE_THREAD == IE_THREAD_TBB_AUTO)
    std::unique_ptr<tbb::task_arena> ptrArena;
    std::unique_ptr<tbb::task_scheduler_observer> ptrObserver;
#endif
    mkldnn::engine eng;

    // Build the internal node/edge lists from a network or subgraph.
    void Replicate(const ICNNNetwork &network, const MKLDNNExtensionManager::Ptr& extMgr);
    void Replicate(const TensorIterator::Body &subgraph, const MKLDNNExtensionManager::Ptr& extMgr);
    void InitGraph();
    void InitNodes();
    void InitEdges();
    void Allocate();
    void AllocateWithReuse();
    void CreatePrimitives();

    // Per-node dump hooks invoked around each node's execution.
    void do_before(const std::string &dir, const MKLDNNNodePtr &node);
    void do_after(const std::string &dir, const MKLDNNNodePtr &node);

    friend class MKLDNNInferRequest;
    friend class MKLDNNGraphlessInferRequest;
    friend std::shared_ptr<InferenceEngine::ICNNNetwork> dump_graph_as_ie_net(const MKLDNNGraph &graph);

private:
    void dumpToDotFile(std::string file) const;
    struct ParsedLayer {
        MKLDNNNodePtr parent;
        InferenceEngine::CNNLayerPtr cnnLayer;
        size_t outIdx;
    };
};

// ExecutableNetwork implementation backed by one MKLDNNGraph per stream.
class MKLDNNExecNetwork: public InferenceEngine::ExecutableNetworkThreadSafeDefault {
public:
    typedef std::shared_ptr<MKLDNNExecNetwork> Ptr;

    InferenceEngine::InferRequestInternal::Ptr
    CreateInferRequestImpl(InferenceEngine::InputsDataMap networkInputs,
                           InferenceEngine::OutputsDataMap networkOutputs) override;

    void CreateInferRequest(InferenceEngine::IInferRequest::Ptr &asyncRequest) override;

    MKLDNNExecNetwork(const InferenceEngine::ICNNNetwork &network, const Config &cfg,
                      const MKLDNNExtensionManager::Ptr& extMgr);

    // Release graphs before the extension manager they may reference.
    ~MKLDNNExecNetwork() {
        graphs.clear();
        extensionManager.reset();
    }

    void setProperty(const std::map<std::string, std::string> &properties);

    void GetConfig(const std::string &name, Parameter &result, ResponseDesc *resp) const override;

    void GetMetric(const std::string &name, Parameter &result, ResponseDesc *resp) const override;

    void GetExecGraphInfo(InferenceEngine::ICNNNetwork::Ptr &graphPtr) override;

protected:
    // One graph per execution stream.
    std::vector<MKLDNNGraph::Ptr> graphs;
    MKLDNNExtensionManager::Ptr extensionManager;

    // Checks whether every layer supports dynamic batching.
    bool CanProcessDynBatch(const InferenceEngine::ICNNNetwork &network) const;
};

}  // namespace MKLDNNPlugin
mandel-omp-taskloop-Point.c
/*
 * Mandelbrot program (OpenMP taskloop version)
 *
 * This program computes and displays all or part of the Mandelbrot
 * set. By default, it examines all points in the complex plane
 * that have both real and imaginary parts between -2 and 2.
 * Command-line parameters allow zooming in on a specific part of
 * this range.
 *
 * Usage:
 *   mandel [-i maxiter -c x0 y0 -s size -w windowsize]
 * where
 *   maxiter denotes the maximum number of iterations at each point -- by default 1000
 *   x0, y0, and size specify the range to examine (a square
 *     centered at (x0 + iy0) of size 2*size by 2*size -- by default,
 *     a square of size 4 by 4 centered at the origin)
 *   windowsize denotes the size of the image (diplay window) to compute
 *
 * Input: none, except the optional command-line arguments
 * Output: a graphical display as described in Wilkinson & Allen,
 *   displayed using the X Window system, plus text output to
 *   standard output showing the above parameters, plus execution
 *   time in seconds.
 *
 * Code based on the original code from Web site for Wilkinson and Allen's
 * text on parallel programming:
 * http://www.cs.uncc.edu/~abw/parallel/par_prog/
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>

#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif

#include <sys/time.h>

/* Wall-clock time in microseconds. */
double getusec_() {
        struct timeval time;
        gettimeofday(&time, NULL);
        return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}

#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
                        stamp = stamp/1e6;\
                        printf ("%s: %0.6fs\n",(_m), stamp);

/* Default values for things. */
#define N           2           /* size of problem space (x, y from -N to N) */
#define NPIXELS     800         /* size of display window in pixels */

/* BUG FIX: the original declared file-scope `int row, col;` and used them
 * as the loop variables inside the parallel region below. Variables with
 * static storage duration are SHARED in OpenMP task regions, so deferred
 * taskloop tasks read `row` after the single thread had already advanced
 * it -- a data race producing wrong rows. The loop variables are now local
 * (declared in the for-init), so each task captures its own copy. */

/* Structure definition for complex numbers */
typedef struct {
        double real, imag;
} complex;

#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h"     /* has setup(), interact() */
#endif

/* Computes the escape-iteration count for every pixel of a height x width
 * image of the region starting at (real_min, imag_min) with the given
 * per-pixel scales, and either draws each point (display build) or stores
 * the counts in output[row][col]. */
void mandelbrot(int height, int width, double real_min, double imag_min,
                double scale_real, double scale_imag, int maxiter,
#if _DISPLAY_
                int setup_return,
                Display *display, Window win, GC gc,
                double scale_color, double min_color)
#else
                int ** output)
#endif
{
    /* Calculate points and save/display.
     * One thread generates a taskloop per row; `row` is a block-local
     * variable so the generated tasks capture it firstprivate. */
    #pragma omp parallel
    #pragma omp single
    for (int row = 0; row < height; ++row) {
        #pragma omp taskloop grainsize(8)
        for (int col = 0; col < width; ++col) {
            complex z, c;

            z.real = z.imag = 0;

            /* Scale display coordinates to actual region */
            c.real = real_min + ((double) col * scale_real);
            c.imag = imag_min + ((double) (height-1-row) * scale_imag);
                                        /* height-1-row so y axis displays
                                         * with larger values at top */

            /* Calculate z0, z1, .... until divergence or maximum iterations */
            int k = 0;
            double lengthsq, temp;
            do {
                temp = z.real*z.real - z.imag*z.imag + c.real;
                z.imag = 2*z.real*z.imag + c.imag;
                z.real = temp;
                lengthsq = z.real*z.real + z.imag*z.imag;
                ++k;
            } while (lengthsq < (N*N) && k < maxiter);

#if _DISPLAY_
            /* Scale color and display point */
            long color = (long) ((k-1) * scale_color) + min_color;
            if (setup_return == EXIT_SUCCESS) {
                /* Xlib is not thread-safe: serialize the drawing calls. */
                #pragma omp critical
                {
                    XSetForeground (display, gc, color);
                    XDrawPoint (display, win, gc, col, row);
                }
            }
#else
            output[row][col]=k;
#endif
        }
    }
}

int main(int argc, char *argv[]) {
    int maxiter = 1000;
    double real_min;
    double real_max;
    double imag_min;
    double imag_max;
    int width = NPIXELS;         /* dimensions of display window */
    int height = NPIXELS;
    double size=N, x0 = 0, y0 = 0;
#if _DISPLAY_
    Display *display;
    Window win;
    GC gc;
    int setup_return;
    long min_color = 0, max_color = 0;
    double scale_color;
#else
    int ** output;
    FILE *fp = NULL;
#endif
    double scale_real, scale_imag;

    /* Process command-line arguments */
    for (int i=1; i<argc; i++) {
        if (strcmp(argv[i], "-i")==0) {
            maxiter = atoi(argv[++i]);
        } else if (strcmp(argv[i], "-w")==0) {
            width = atoi(argv[++i]);
            height = width;
        } else if (strcmp(argv[i], "-s")==0) {
            size = atof(argv[++i]);
        }
#if !_DISPLAY_
        else if (strcmp(argv[i], "-o")==0) {
            if((fp=fopen("mandel.out", "wb"))==NULL) {
                fprintf(stderr, "Unable to open file\n");
                return EXIT_FAILURE;
            }
        }
#endif
        else if (strcmp(argv[i], "-c")==0) {
            x0 = atof(argv[++i]);
            y0 = atof(argv[++i]);
        } else {
#if _DISPLAY_
            fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
            fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
            fprintf(stderr, " -o to write computed image to disk (default no file generated)\n");
#endif
            fprintf(stderr, " -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
            fprintf(stderr, " -w to specify the size of the display window (default 800x800 pixels)\n");
#else
            fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
            fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n");
            fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
            return EXIT_FAILURE;
        }
    }

    real_min = x0 - size;
    real_max = x0 + size;
    imag_min = y0 - size;
    imag_max = y0 + size;

    /* Produce text output */
    fprintf(stdout, "\n");
    fprintf(stdout, "Mandelbrot program\n");
    fprintf(stdout, "center = (%g, %g), size = %g\n",
            (real_max + real_min)/2, (imag_max + imag_min)/2,
            (real_max - real_min)/2);
    fprintf(stdout, "maximum iterations = %d\n", maxiter);
    fprintf(stdout, "\n");

#if _DISPLAY_
    /* Initialize for graphical display */
    setup_return = setup(width, height, &display, &win, &gc, &min_color, &max_color);
    if (setup_return != EXIT_SUCCESS) {
        fprintf(stderr, "Unable to initialize display, continuing\n");
        return EXIT_FAILURE;
    }
#else
    /* Allocate the result image; check each row (the original did not). */
    output = malloc(height*sizeof(int *));
    if (output == NULL) {
        fprintf(stderr, "Unable to allocate image\n");
        return EXIT_FAILURE;
    }
    for (int row = 0; row < height; ++row) {
        output[row] = malloc(width*sizeof(int));
        if (output[row] == NULL) {
            fprintf(stderr, "Unable to allocate image\n");
            return EXIT_FAILURE;
        }
    }
#endif

    /* Compute factors to scale computational region to window */
    scale_real = (double) (real_max - real_min) / (double) width;
    scale_imag = (double) (imag_max - imag_min) / (double) height;

#if _DISPLAY_
    /* Compute factor for color scaling */
    scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif

    /* Start timing */
    double stamp;
    START_COUNT_TIME;

#if _DISPLAY_
    mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
               setup_return, display, win, gc, scale_color, min_color);
#else
    mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter, output);
#endif

    /* End timing */
    STOP_COUNT_TIME("Total execution time");

    /* Be sure all output is written */
#if _DISPLAY_
    if (setup_return == EXIT_SUCCESS) {
        XFlush (display);
    }
#else
    if (fp != NULL) {
        for (int row = 0; row < height; ++row)
            if(fwrite(output[row], sizeof(int), width, fp) != width) {
                fprintf(stderr, "Output file not written correctly\n");
            }
    }
#endif

#if _DISPLAY_
    /* Wait for user response, then exit program */
    if (setup_return == EXIT_SUCCESS) {
        interact(display, &win, width, height, real_min, real_max, imag_min, imag_max);
    }
#endif
    return EXIT_SUCCESS;
}
GB_subref_phase0.c
//------------------------------------------------------------------------------ // GB_subref_phase0: find vectors of C = A(I,J) and determine I,J properties //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB_subref.h" #define GB_Ai(p) GBI_UNFLIP (Ai, p, avlen) //------------------------------------------------------------------------------ // GB_find_Ap_start_end //------------------------------------------------------------------------------ // Find pA and pA_end so that Ai,Ax [pA:pA_end-1] contains the vector // A(imin:imax,kA). If A(:,kA) is dense, [pA:pA_end-1] is the entire dense // vector (it is not trimmed). Otherwise, if A(imin:imax,kA) is empty, then // pA and pA_end are set to -1 to denote an empty list. The resulting pointers // are then returned in Ap_start [kC] and Ap_end [kC]. 
//------------------------------------------------------------------------------
// GB_find_Ap_start_end: find the part of one vector A(:,kA) inside imin:imax
//------------------------------------------------------------------------------

// Computes Ap_start [kC] and Ap_end [kC] so that Ai [Ap_start [kC] ...
// Ap_end [kC]-1] holds exactly the entries of A (imin:imax,kA).  If the
// intersection of A(:,kA) with imin:imax is empty, both outputs are set
// to -1.  The search is zombie-aware (GB_SPLIT_BINARY_SEARCH_ZOMBIE), so it
// works on a matrix with pending deletions.
// NOTE(review): GBP and GB_Ai are macros defined elsewhere; GBP presumably
// handles the case of a full matrix (Ap == NULL, as also tested in the
// GB_DEBUG block below) -- confirm against their definitions.

static inline void GB_find_Ap_start_end
(
    // input, not modified
    const int64_t kA,               // vector of A to search
    const int64_t *restrict Ap,     // vector pointers of A (may be NULL; see GB_DEBUG block)
    const int64_t *restrict Ai,     // indices of A (may contain zombies)
    const int64_t avlen,            // vector length of A
    const int64_t imin,             // lower bound of index range, inclusive
    const int64_t imax,             // upper bound of index range, inclusive
    const int64_t kC,               // position in Ap_start/Ap_end to write
    const int64_t nzombies,         // # of zombies in A
    // output: Ap_start [kC] and Ap_end [kC]:
    int64_t *restrict Ap_start,
    int64_t *restrict Ap_end
)
{

    //--------------------------------------------------------------------------
    // get A(:,kA)
    //--------------------------------------------------------------------------

    int64_t pA     = GBP (Ap, kA, avlen) ;
    int64_t pA_end = GBP (Ap, kA+1, avlen) ;
    int64_t ajnz = pA_end - pA ;        // # of entries in A(:,kA)

    //--------------------------------------------------------------------------
    // trim it to A(imin:imax,kA)
    //--------------------------------------------------------------------------

    if (ajnz == avlen)
    {

        //----------------------------------------------------------------------
        // A (:,kA) is dense; use pA and pA_end as-is
        //----------------------------------------------------------------------

        // no trim needed: the caller can index the dense vector directly
        ;

    }
    else if (ajnz == 0 || GB_Ai (pA) > imax || GB_Ai (pA_end-1) < imin)
    {

        //----------------------------------------------------------------------
        // intersection of A(:,kA) and imin:imax is empty
        //----------------------------------------------------------------------

        // either no entries at all, or all indices fall outside imin:imax
        pA = -1 ;
        pA_end = -1 ;

    }
    else
    {

        //----------------------------------------------------------------------
        // A (:,kA) is sparse, with at least one entry
        //----------------------------------------------------------------------

        // trim the leading part of A(:,kA)
        if (GB_Ai (pA) < imin)
        {
            // advance pA to the first entry with index >= imin
            bool found, is_zombie ;
            int64_t pright = pA_end - 1 ;
            GB_SPLIT_BINARY_SEARCH_ZOMBIE (imin, Ai, pA, pright, found,
                nzombies, is_zombie) ;
        }

        // trim the trailing part of A (:,kA)
        if (imin == imax)
        {
            // the range is a single index i == imin == imax
            if (GB_Ai (pA) == imin)
            {
                // found the single entry A (i,kA)
                pA_end = pA + 1 ;
            }
            else
            {
                // A (i,kA) has not been found
                pA = -1 ;
                pA_end = -1 ;
            }
        }
        else if (imax < GB_Ai (pA_end-1))
        {
            // shrink pA_end to just past the last entry with index <= imax
            bool found, is_zombie ;
            int64_t pleft = pA ;
            int64_t pright = pA_end - 1 ;
            GB_SPLIT_BINARY_SEARCH_ZOMBIE (imax, Ai, pleft, pright, found,
                nzombies, is_zombie) ;
            pA_end = (found) ? (pleft + 1) : pleft ;
        }

        #ifdef GB_DEBUG
        ajnz = pA_end - pA ;
        if (ajnz > 0 && Ap != NULL)
        {
            // A(imin:imax,kA) is now in Ai [pA:pA_end-1]
            ASSERT (GB_IMPLIES (Ap [kA] < pA, GB_Ai (pA-1) < imin)) ;
            ASSERT (GB_IMPLIES (pA_end < Ap [kA+1], imax < GB_Ai (pA_end))) ;
            ASSERT (imin <= GB_Ai (pA)) ;
            ASSERT (GB_Ai (pA_end-1) <= imax) ;
        }
        #endif
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // The result [pA:pA_end-1] defines the range of entries that need to be
    // accessed for constructing C(:,kC).

    Ap_start [kC] = pA ;
    Ap_end   [kC] = pA_end ;
}

//------------------------------------------------------------------------------
// GB_subref_phase0
//------------------------------------------------------------------------------

// frees the Count workspace allocated below; used on all exit paths
#define GB_FREE_WORKSPACE                   \
{                                           \
    GB_WERK_POP (Count, int64_t) ;          \
}

// Symbolic (phase-0) analysis for C = A(I,J): determines the vectors of C
// (Cnvec and the hyperlist Ch, if C is hypersparse), and for each vector
// C(:,kC) the range Ap_start [kC] .. Ap_end [kC]-1 of entries of A that
// contribute to it.  No entries of C are computed here.  On success the
// caller owns the returned Ch, Ap_start, and Ap_end arrays.

GrB_Info GB_subref_phase0
(
    // output
    int64_t *restrict *p_Ch,        // Ch = C->h hyperlist, or NULL standard
    size_t *p_Ch_size,
    int64_t *restrict *p_Ap_start,  // A(:,kA) starts at Ap_start [kC]
    size_t *p_Ap_start_size,
    int64_t *restrict *p_Ap_end,    // ... and ends at Ap_end [kC] - 1
    size_t *p_Ap_end_size,
    int64_t *p_Cnvec,               // # of vectors in C
    bool *p_need_qsort,             // true if C must be sorted
    int *p_Ikind,                   // kind of I
    int64_t *p_nI,                  // length of I
    int64_t Icolon [3],             // for GB_RANGE, GB_STRIDE
    int64_t *p_nJ,                  // length of J
    // input, not modified
    const GrB_Matrix A,
    const GrB_Index *I,             // index list for C = A(I,J), or GrB_ALL, etc.
    const int64_t ni,               // length of I, or special
    const GrB_Index *J,             // index list for C = A(I,J), or GrB_ALL, etc.
    const int64_t nj,               // length of J, or special
//  const bool must_sort,           // true if C must be returned sorted
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT_MATRIX_OK (A, "A for subref phase 0", GB0) ;
    ASSERT (!GB_IS_BITMAP (A)) ;    // GB_bitmap_subref is used instead
    ASSERT (p_Ch != NULL) ;
    ASSERT (p_Ap_start != NULL) ;
    ASSERT (p_Ap_end != NULL) ;
    ASSERT (p_Cnvec != NULL) ;
    ASSERT (p_nJ != NULL) ;
    ASSERT (p_Ikind != NULL) ;
    ASSERT (p_nI != NULL) ;
    ASSERT (Icolon != NULL) ;
    ASSERT (I != NULL) ;
    ASSERT (J != NULL) ;

    GrB_Info info ;

    // clear all outputs first, so they are well-defined on any early error
    // return below (NOTE(review): *p_Ch_size is not cleared here, unlike the
    // others -- confirm callers do not read it on error)
    (*p_Ch        ) = NULL ;
    (*p_Ap_start  ) = NULL ;
    (*p_Ap_end    ) = NULL ;
    (*p_Cnvec     ) = 0 ;
    (*p_need_qsort) = false ;
    (*p_Ikind     ) = 0 ;
    (*p_nI        ) = 0 ;
    (*p_nJ        ) = 0 ;

    //--------------------------------------------------------------------------
    // get A
    //--------------------------------------------------------------------------

    int64_t *restrict Ap = A->p ;   // Ap (but not A->p) may be trimmed
    int64_t *restrict Ah = A->h ;   // Ah (but not A->h) may be trimmed
    int64_t *restrict Ai = A->i ;
    int64_t anvec = A->nvec ;       // may be trimmed
    int64_t avlen = A->vlen ;
    int64_t avdim = A->vdim ;
    int64_t nzombies = A->nzombies ;

    //--------------------------------------------------------------------------
    // check the properties of I and J
    //--------------------------------------------------------------------------

    // C = A(I,J) so I is in range 0:avlen-1 and J is in range 0:avdim-1
    int64_t nI, nJ, Jcolon [3] ;
    int Ikind, Jkind ;
    GB_ijlength (I, ni, avlen, &nI, &Ikind, Icolon) ;
    GB_ijlength (J, nj, avdim, &nJ, &Jkind, Jcolon) ;

    bool I_unsorted, I_has_dupl, I_contig, J_unsorted, J_has_dupl, J_contig ;
    int64_t imin, imax, jmin, jmax ;

    info = GB_ijproperties (I, ni, nI, avlen, &Ikind, Icolon,
        &I_unsorted, &I_has_dupl, &I_contig, &imin, &imax, Context) ;
    if (info != GrB_SUCCESS)
    {
        // I invalid or out of memory
        return (info) ;
    }

    info = GB_ijproperties (J, nj, nJ, avdim, &Jkind, Jcolon,
        &J_unsorted, &J_has_dupl, &J_contig, &jmin, &jmax, Context) ;
    if (info != GrB_SUCCESS)
    {
        // J invalid or out of memory
        return (info) ;
    }

    // C must be sorted later if the row index list I is unsorted
    bool need_qsort = I_unsorted ;

    //--------------------------------------------------------------------------
    // determine if C is empty
    //--------------------------------------------------------------------------

    bool C_empty = (nI == 0 || nJ == 0) ;

    //--------------------------------------------------------------------------
    // trim the hyperlist of A
    //--------------------------------------------------------------------------

    // Ah, Ap, and anvec are modified to include just the vectors in range
    // jmin:jmax, inclusive.  A itself is not modified, just the Ah and Ap
    // pointers, and the scalar anvec.  If J is ":", then jmin is zero and
    // jmax is avdim-1, so there is nothing to trim from Ah.  If C is empty,
    // then Ah and Ap will not be accessed at all, so this can be skipped.

    bool A_is_hyper = (Ah != NULL) ;

    if (A_is_hyper && !C_empty)
    {

        //----------------------------------------------------------------------
        // trim the leading end of Ah so that it starts with jmin:...
        //----------------------------------------------------------------------

        if (jmin > 0)
        {
            bool found ;
            int64_t kleft = 0 ;
            int64_t kright = anvec-1 ;
            GB_SPLIT_BINARY_SEARCH (jmin, Ah, kleft, kright, found) ;
            // advance the local pointers past the trimmed prefix; A->h and
            // A->p themselves are untouched
            Ah += kleft ;
            Ap += kleft ;
            anvec -= kleft ;
        }

        //----------------------------------------------------------------------
        // trim the trailing end of Ah so that it ends with ..:jmax
        //----------------------------------------------------------------------

        if (jmax < avdim-1)
        {
            bool found ;
            int64_t kleft = 0 ;
            int64_t kright = anvec-1 ;
            GB_SPLIT_BINARY_SEARCH (jmax, Ah, kleft, kright, found) ;
            // keep jmax itself if present (kleft+1), else stop just before it
            anvec = (found) ? (kleft + 1) : kleft ;
        }

        // Ah has been trimmed
        ASSERT (GB_IMPLIES (anvec > 0, jmin <= Ah [0] && Ah [anvec-1] <= jmax));
    }

    // Ah may now be empty, after being trimmed
    C_empty = C_empty || (anvec == 0) ;

    //--------------------------------------------------------------------------
    // determine # of threads to use
    //--------------------------------------------------------------------------

    #define NTASKS_PER_THREAD 8
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = 1, ntasks = 1 ;
    int ntasks_max = nthreads_max * NTASKS_PER_THREAD ;

    // pick nthreads/ntasks for a given amount of work; ntasks is clamped to
    // [1, work] so every task has something to do
    #define GB_GET_NTHREADS_AND_NTASKS(work)                            \
    {                                                                   \
        nthreads = GB_nthreads (work, chunk, nthreads_max) ;            \
        ntasks = (nthreads == 1) ? 1 : (NTASKS_PER_THREAD * nthreads) ; \
        ntasks = GB_IMIN (ntasks, work) ;                               \
        ntasks = GB_IMAX (ntasks, 1) ;                                  \
    }

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    // Count [tid] holds each task's vector count, later turned into the
    // cumulative offsets at which each task writes its part of Ch
    GB_WERK_DECLARE (Count, int64_t) ;
    GB_WERK_PUSH (Count, ntasks_max+1, int64_t) ;
    if (Count == NULL)
    {
        // out of memory
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // compute Cnvec and determine the format of Ch
    //--------------------------------------------------------------------------

    // Ch is an explicit or implicit array of size Cnvec <= nJ.  jC = Ch [kC]
    // if C(:,jC) is the (kC)th vector of C.  If NULL, then C is standard, and
    // jC == kC.  jC is in the range 0 to nJ-1.

    int64_t *restrict Ch       = NULL ; size_t Ch_size = 0 ;
    int64_t *restrict Ap_start = NULL ; size_t Ap_start_size = 0 ;
    int64_t *restrict Ap_end   = NULL ; size_t Ap_end_size = 0 ;

    int64_t Cnvec = 0 ;

    int64_t jbegin = Jcolon [GxB_BEGIN] ;
    int64_t jinc   = Jcolon [GxB_INC  ] ;

    if (C_empty)
    {

        //----------------------------------------------------------------------
        // C is an empty hypersparse matrix
        //----------------------------------------------------------------------

        ;

    }
    else if (!A_is_hyper)
    {

        //----------------------------------------------------------------------
        // both C and A are standard matrices
        //----------------------------------------------------------------------

        // one vector of C per entry of J; Ch stays implicit (NULL)
        Cnvec = nJ ;
        GB_GET_NTHREADS_AND_NTASKS (nJ) ;

    }
    else if (Jkind == GB_ALL || Jkind == GB_RANGE)
    {

        //----------------------------------------------------------------------
        // J is ":" or jbegin:jend
        //----------------------------------------------------------------------

        // Ch is a shifted copy of the trimmed Ah, of length Cnvec = anvec.
        // so kA = kC, and jC = Ch [kC] = jA - jmin.  Ap has also been trimmed.
        Cnvec = anvec ;
        ASSERT (Cnvec <= nJ) ;
        GB_GET_NTHREADS_AND_NTASKS (anvec) ;

    }
    else if (Jkind == GB_STRIDE && anvec < nJ * 64)
    {

        //----------------------------------------------------------------------
        // J is jbegin:jinc:jend, but J is large
        //----------------------------------------------------------------------

        // The case for Jkind == GB_STRIDE can be done by either this method,
        // or the one below.  This takes O(anvec) time, and the one below
        // takes O(nj*log2(anvec)), so use this method if anvec < nj * 64.

        // Ch is a list of length Cnvec, where Cnvec is the length of
        // the intersection of Ah and jbegin:jinc:jend.

        // count the length of Ch
        Cnvec = 0 ;
        GB_GET_NTHREADS_AND_NTASKS (anvec) ;

        // scan all of Ah and check each entry if it appears in J
        // (each task counts its own partition into Count [tid]; the
        // partition index is reversed when jinc < 0 so that Count's order
        // matches the construction pass below)
        int tid ;
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            int64_t kA_start, kA_end, my_Cnvec = 0 ;
            GB_PARTITION (kA_start, kA_end, anvec,
                (jinc > 0) ? tid : (ntasks-tid-1), ntasks) ;
            for (int64_t kA = kA_start ; kA < kA_end ; kA++)
            {
                int64_t jA = Ah [kA] ;
                if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon))
                {
                    my_Cnvec++ ;
                }
            }
            Count [tid] = my_Cnvec ;
        }

        // Count becomes the cumulative sum; total is in Count [ntasks]
        GB_cumsum (Count, ntasks, NULL, 1, NULL) ;
        Cnvec = Count [ntasks] ;

    }
    else // Jkind == GB_LIST or GB_STRIDE
    {

        //----------------------------------------------------------------------
        // J is an explicit list, or jbegin:jinc:end
        //----------------------------------------------------------------------

        // Ch is an explicit list: the intersection of Ah and J

        // count the length of Ch
        Cnvec = 0 ;
        GB_GET_NTHREADS_AND_NTASKS (nJ) ;

        // scan all of J and check each entry if it appears in Ah
        int tid ;
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            int64_t jC_start, jC_end, my_Cnvec = 0 ;
            GB_PARTITION (jC_start, jC_end, nJ, tid, ntasks) ;
            for (int64_t jC = jC_start ; jC < jC_end ; jC++)
            {
                int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
                bool found ;
                int64_t kA = 0 ;
                int64_t kright = anvec-1 ;
                GB_BINARY_SEARCH (jA, Ah, kA, kright, found) ;
                if (found) my_Cnvec++ ;
            }
            Count [tid] = my_Cnvec ;
        }

        // Count becomes the cumulative sum; total is in Count [ntasks]
        GB_cumsum (Count, ntasks, NULL, 1, NULL) ;
        Cnvec = Count [ntasks] ;
    }

    //--------------------------------------------------------------------------
    // allocate Ch, Ap_start, and Ap_end
    //--------------------------------------------------------------------------

    C_empty = C_empty || (Cnvec == 0) ;

    // C is hypersparse if A is hypersparse, or if C is empty
    bool C_is_hyper = A_is_hyper || C_empty ;

    if (C_is_hyper)
    {
        Ch = GB_MALLOC (Cnvec, int64_t, &Ch_size) ;
        if (Ch == NULL)
        {
            // out of memory
            GB_FREE_WORKSPACE ;
            return (GrB_OUT_OF_MEMORY) ;
        }
    }

    if (Cnvec > 0)
    {
        Ap_start = GB_MALLOC_WORK (Cnvec, int64_t, &Ap_start_size) ;
        Ap_end   = GB_MALLOC_WORK (Cnvec, int64_t, &Ap_end_size) ;
        if (Ap_start == NULL || Ap_end == NULL)
        {
            // out of memory
            GB_FREE_WORKSPACE ;
            GB_FREE (&Ch, Ch_size) ;
            GB_FREE_WORK (&Ap_start, Ap_start_size) ;
            GB_FREE_WORK (&Ap_end, Ap_end_size) ;
            return (GrB_OUT_OF_MEMORY) ;
        }
    }

    //--------------------------------------------------------------------------
    // create Ch, Ap_start, and Ap_end
    //--------------------------------------------------------------------------

    // For the (kC)th vector of C, which corresponds to the (kA)th vector of A,
    // pA = Ap_start [kC] and pA_end = Ap_end [kC] are pointers to the range
    // of entries in A(imin:imax,kA).

    if (C_empty)
    {

        //----------------------------------------------------------------------
        // C is an empty hypersparse matrix
        //----------------------------------------------------------------------

        ;

    }
    else if (!A_is_hyper)
    {

        //----------------------------------------------------------------------
        // both C and A are standard matrices
        //----------------------------------------------------------------------

        int64_t jC ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (jC = 0 ; jC < nJ ; jC++)
        {
            int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
            GB_find_Ap_start_end (jA, Ap, Ai, avlen, imin, imax,
                jC, nzombies, Ap_start, Ap_end) ;
        }

    }
    else if (Jkind == GB_ALL || Jkind == GB_RANGE)
    {

        //----------------------------------------------------------------------
        // J is ":" or jbegin:jend
        //----------------------------------------------------------------------

        // C and A are both hypersparse.  Ch is a shifted copy of the trimmed
        // Ah, of length Cnvec = anvec.  so kA = kC.  Ap has also been trimmed.

        int64_t kC ;
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (kC = 0 ; kC < Cnvec ; kC++)
        {
            int64_t kA = kC ;
            int64_t jA = Ah [kA] ;
            int64_t jC = jA - jmin ;
            Ch [kC] = jC ;
            GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
                kC, nzombies, Ap_start, Ap_end) ;
        }

    }
    else if (Jkind == GB_STRIDE && anvec < nJ * 64)
    {

        //----------------------------------------------------------------------
        // J is jbegin:jinc:jend where jinc may be positive or negative
        //----------------------------------------------------------------------

        // C and A are both hypersparse.  Ch is constructed by scanning all
        // vectors in Ah [0..anvec-1] and checking if they appear in the
        // jbegin:jinc:jend sequence.  Count [tid] (the cumulative sum from
        // the counting pass above) gives each task its write offset into Ch,
        // so Ch comes out in jC order without further sorting.

        if (jinc > 0)
        {
            int tid ;
            #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
            for (tid = 0 ; tid < ntasks ; tid++)
            {
                int64_t kA_start, kA_end ;
                GB_PARTITION (kA_start, kA_end, anvec, tid, ntasks) ;
                int64_t kC = Count [tid] ;
                for (int64_t kA = kA_start ; kA < kA_end ; kA++)
                {
                    int64_t jA = Ah [kA] ;
                    if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon))
                    {
                        int64_t jC = (jA - jbegin) / jinc ;
                        Ch [kC] = jC ;
                        GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
                            kC, nzombies, Ap_start, Ap_end) ;
                        kC++ ;
                    }
                }
            }
        }
        else
        {
            // jinc < 0: increasing jC corresponds to decreasing jA, so each
            // task takes a reversed partition and scans Ah backwards
            int tid;
            #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
            for (tid = 0 ; tid < ntasks ; tid++)
            {
                int64_t kA_start, kA_end ;
                GB_PARTITION (kA_start, kA_end, anvec, ntasks-tid-1, ntasks) ;
                int64_t kC = Count [tid] ;
                for (int64_t kA = kA_end-1 ; kA >= kA_start ; kA--)
                {
                    int64_t jA = Ah [kA] ;
                    if (GB_ij_is_in_list (J, nJ, jA, GB_STRIDE, Jcolon))
                    {
                        int64_t jC = (jA - jbegin) / jinc ;
                        Ch [kC] = jC ;
                        GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
                            kC, nzombies, Ap_start, Ap_end) ;
                        kC++ ;
                    }
                }
            }
        }

    }
    else // Jkind == GB_LIST or GB_STRIDE
    {

        //----------------------------------------------------------------------
        // J is an explicit list, or jbegin:jinc:jend
        //----------------------------------------------------------------------

        // C and A are both hypersparse.  Ch is constructed by scanning the
        // list J, or the entire jbegin:jinc:jend sequence.  Each vector is
        // then found in Ah, via binary search.

        int tid ;
        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            int64_t jC_start, jC_end ;
            GB_PARTITION (jC_start, jC_end, nJ, tid, ntasks) ;
            int64_t kC = Count [tid] ;
            for (int64_t jC = jC_start ; jC < jC_end ; jC++)
            {
                int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
                bool found ;
                int64_t kA = 0 ;
                int64_t kright = anvec-1 ;
                GB_BINARY_SEARCH (jA, Ah, kA, kright, found) ;
                if (found)
                {
                    ASSERT (jA == Ah [kA]) ;
                    Ch [kC] = jC ;
                    GB_find_Ap_start_end (kA, Ap, Ai, avlen, imin, imax,
                        kC, nzombies, Ap_start, Ap_end) ;
                    kC++ ;
                }
            }
        }
    }

    //--------------------------------------------------------------------------
    // check result
    //--------------------------------------------------------------------------

    #ifdef GB_DEBUG
    for (int64_t kC = 0 ; kC < Cnvec ; kC++)
    {
        // jC is the (kC)th vector of C = A(I,J)
        int64_t jC = GBH (Ch, kC) ;
        int64_t jA = GB_ijlist (J, jC, Jkind, Jcolon) ;
        // jA is the corresponding (kA)th vector of A.
        int64_t kA = 0 ;
        int64_t pright = A->nvec - 1 ;
        int64_t pA_start_all, pA_end_all ;
        bool found = GB_lookup (A->h != NULL, A->h, A->p, A->vlen, &kA,
            pright, jA, &pA_start_all, &pA_end_all) ;
        if (found && A->h != NULL)
        {
            ASSERT (jA == A->h [kA]) ;
        }
        int64_t pA      = Ap_start [kC] ;
        int64_t pA_end  = Ap_end [kC] ;
        int64_t ajnz = pA_end - pA ;
        if (ajnz == avlen)
        {
            // A(:,kA) is dense; Ai [pA:pA_end-1] is the entire vector.
            // C(:,kC) will have exactly nI entries.
            ASSERT (pA     == pA_start_all) ;
            ASSERT (pA_end == pA_end_all  ) ;
            ;
        }
        else if (ajnz > 0)
        {
            // A(imin:imax,kA) has at least one entry, in Ai [pA:pA_end-1]
            ASSERT (imin <= GB_Ai (pA)) ;
            ASSERT (GB_Ai (pA_end-1) <= imax) ;
            ASSERT (pA_start_all <= pA && pA < pA_end && pA_end <= pA_end_all) ;
        }
        else
        {
            // A(imin:imax,kA) and C(:,kC) are empty
            ;
        }
    }
    #endif

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    // ownership of Ch, Ap_start, and Ap_end transfers to the caller
    (*p_Ch        ) = Ch ;          (*p_Ch_size) = Ch_size ;
    (*p_Ap_start  ) = Ap_start ;    (*p_Ap_start_size) = Ap_start_size ;
    (*p_Ap_end    ) = Ap_end ;      (*p_Ap_end_size) = Ap_end_size ;
    (*p_Cnvec     ) = Cnvec ;
    (*p_need_qsort) = need_qsort ;
    (*p_Ikind     ) = Ikind ;
    (*p_nI        ) = nI ;
    (*p_nJ        ) = nJ ;
    return (GrB_SUCCESS) ;
}
Compiler.c
// this is autogenerated file, do not edit it. #include "ficus/ficus.h" struct _fx_Nt6option1N10Ast__typ_t_data_t; static void _fx_free_Nt6option1N10Ast__typ_t(struct _fx_Nt6option1N10Ast__typ_t_data_t** dst); struct _fx_Nt6option1N10Ast__exp_t_data_t; static void _fx_free_Nt6option1N10Ast__exp_t(struct _fx_Nt6option1N10Ast__exp_t_data_t** dst); struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t; static void _fx_free_Nt9Dynvec__t1N14Ast__id_info_t(struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t** dst); struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t; static void _fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t( struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t** dst); struct _fx_N10Ast__typ_t_data_t; static void _fx_free_N10Ast__typ_t(struct _fx_N10Ast__typ_t_data_t** dst); struct _fx_N13Ast__binary_t_data_t; static void _fx_free_N13Ast__binary_t(struct _fx_N13Ast__binary_t_data_t** dst); struct _fx_N10Ast__exp_t_data_t; static void _fx_free_N10Ast__exp_t(struct _fx_N10Ast__exp_t_data_t** dst); struct _fx_N10Ast__pat_t_data_t; static void _fx_free_N10Ast__pat_t(struct _fx_N10Ast__pat_t_data_t** dst); struct _fx_N16Ast__env_entry_t_data_t; static void _fx_free_N16Ast__env_entry_t(struct _fx_N16Ast__env_entry_t_data_t** dst); struct _fx_N16Ast__defmodule_t_data_t; static void _fx_free_N16Ast__defmodule_t(struct _fx_N16Ast__defmodule_t_data_t** dst); struct _fx_N14K_form__ktyp_t_data_t; static void _fx_free_N14K_form__ktyp_t(struct _fx_N14K_form__ktyp_t_data_t** dst); struct _fx_N14K_form__kexp_t_data_t; static void _fx_free_N14K_form__kexp_t(struct _fx_N14K_form__kexp_t_data_t** dst); struct _fx_N14C_form__ctyp_t_data_t; static void _fx_free_N14C_form__ctyp_t(struct _fx_N14C_form__ctyp_t_data_t** dst); struct _fx_N14C_form__cexp_t_data_t; static void _fx_free_N14C_form__cexp_t(struct _fx_N14C_form__cexp_t_data_t** dst); struct _fx_N15C_form__cstmt_t_data_t; static void _fx_free_N15C_form__cstmt_t(struct 
_fx_N15C_form__cstmt_t_data_t** dst); typedef struct _fx_Nt6option1N10Ast__typ_t_data_t { int_ rc; union { struct _fx_N10Ast__typ_t_data_t* Some; } u; } _fx_Nt6option1N10Ast__typ_t_data_t, *_fx_Nt6option1N10Ast__typ_t; typedef struct _fx_LS_data_t { int_ rc; struct _fx_LS_data_t* tl; fx_str_t hd; } _fx_LS_data_t, *_fx_LS; typedef struct _fx_FPS1B { int (*fp)(bool, fx_str_t*, void*); fx_fcv_t* fcv; } _fx_FPS1B; typedef struct _fx_N17Options__optval_t { int tag; union { bool OptBool; int_ OptInt; fx_str_t OptString; } u; } _fx_N17Options__optval_t; typedef struct _fx_T2SN17Options__optval_t { fx_str_t t0; struct _fx_N17Options__optval_t t1; } _fx_T2SN17Options__optval_t; typedef struct _fx_LT2SN17Options__optval_t_data_t { int_ rc; struct _fx_LT2SN17Options__optval_t_data_t* tl; struct _fx_T2SN17Options__optval_t hd; } _fx_LT2SN17Options__optval_t_data_t, *_fx_LT2SN17Options__optval_t; typedef struct _fx_R18Options__options_t { struct _fx_LS_data_t* app_args; fx_str_t app_filename; bool arch64; bool force_rebuild; fx_str_t build_dir; fx_str_t build_rootdir; fx_str_t cflags; fx_str_t clibs; bool compile_by_cpp; fx_str_t filename; bool gen_c; struct _fx_LS_data_t* include_path; bool debug; struct _fx_LT2SN17Options__optval_t_data_t* defines; int_ optim_iters; int_ inline_thresh; bool enable_openmp; bool relax; bool use_preamble; bool make_app; int_ optimize_level; fx_str_t output_name; bool print_ast0; bool print_ast; bool print_k0; bool print_k; bool print_tokens; bool run_app; bool verbose; bool W_unused; } _fx_R18Options__options_t; typedef struct _fx_Ta2i { int_ t0; int_ t1; } _fx_Ta2i; typedef struct _fx_T2Ta2iS { struct _fx_Ta2i t0; fx_str_t t1; } _fx_T2Ta2iS; typedef struct _fx_R9Ast__id_t { int_ m; int_ i; int_ j; } _fx_R9Ast__id_t; typedef struct _fx_R10Ast__loc_t { int_ m_idx; int_ line0; int_ col0; int_ line1; int_ col1; } _fx_R10Ast__loc_t; typedef struct _fx_T2R9Ast__id_ti { struct _fx_R9Ast__id_t t0; int_ t1; } _fx_T2R9Ast__id_ti; typedef struct _fx_T2Bi 
{ bool t0; int_ t1; } _fx_T2Bi; typedef struct _fx_N12Ast__scope_t { int tag; union { int_ ScBlock; struct _fx_T2Bi ScLoop; int_ ScFold; int_ ScArrMap; int_ ScMap; int_ ScTry; struct _fx_R9Ast__id_t ScFun; struct _fx_R9Ast__id_t ScClass; struct _fx_R9Ast__id_t ScInterface; int_ ScModule; } u; } _fx_N12Ast__scope_t; typedef struct _fx_LN12Ast__scope_t_data_t { int_ rc; struct _fx_LN12Ast__scope_t_data_t* tl; struct _fx_N12Ast__scope_t hd; } _fx_LN12Ast__scope_t_data_t, *_fx_LN12Ast__scope_t; typedef struct _fx_R16Ast__val_flags_t { bool val_flag_arg; bool val_flag_mutable; bool val_flag_temp; bool val_flag_tempref; bool val_flag_private; bool val_flag_subarray; bool val_flag_instance; struct _fx_T2R9Ast__id_ti val_flag_method; int_ val_flag_ctor; struct _fx_LN12Ast__scope_t_data_t* val_flag_global; } _fx_R16Ast__val_flags_t; typedef struct _fx_T2R9Ast__id_tN14C_form__ctyp_t { struct _fx_R9Ast__id_t t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2R9Ast__id_tN14C_form__ctyp_t; typedef struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* tl; struct _fx_T2R9Ast__id_tN14C_form__ctyp_t hd; } _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t, *_fx_LT2R9Ast__id_tN14C_form__ctyp_t; typedef struct _fx_R23C_form__cdefinterface_t { struct _fx_R9Ast__id_t ci_name; fx_str_t ci_cname; struct _fx_R9Ast__id_t ci_id; struct _fx_R9Ast__id_t ci_vtbl; struct _fx_R9Ast__id_t ci_base; struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* ci_all_methods; struct _fx_LN12Ast__scope_t_data_t* ci_scope; struct _fx_R10Ast__loc_t ci_loc; } _fx_R23C_form__cdefinterface_t; typedef struct _fx_rR23C_form__cdefinterface_t_data_t { int_ rc; struct _fx_R23C_form__cdefinterface_t data; } _fx_rR23C_form__cdefinterface_t_data_t, *_fx_rR23C_form__cdefinterface_t; typedef struct _fx_N17Ast__fun_constr_t { int tag; union { int_ CtorVariant; struct _fx_R9Ast__id_t CtorFP; struct _fx_R9Ast__id_t CtorExn; } u; } _fx_N17Ast__fun_constr_t; typedef struct 
_fx_R16Ast__fun_flags_t { int_ fun_flag_pure; bool fun_flag_ccode; bool fun_flag_have_keywords; bool fun_flag_inline; bool fun_flag_nothrow; bool fun_flag_really_nothrow; bool fun_flag_private; struct _fx_N17Ast__fun_constr_t fun_flag_ctor; struct _fx_R9Ast__id_t fun_flag_method_of; bool fun_flag_uses_fv; bool fun_flag_recursive; bool fun_flag_instance; } _fx_R16Ast__fun_flags_t; typedef struct _fx_LN15C_form__cstmt_t_data_t { int_ rc; struct _fx_LN15C_form__cstmt_t_data_t* tl; struct _fx_N15C_form__cstmt_t_data_t* hd; } _fx_LN15C_form__cstmt_t_data_t, *_fx_LN15C_form__cstmt_t; typedef struct _fx_N19C_form__carg_attr_t { int tag; } _fx_N19C_form__carg_attr_t; typedef struct _fx_LN19C_form__carg_attr_t_data_t { int_ rc; struct _fx_LN19C_form__carg_attr_t_data_t* tl; struct _fx_N19C_form__carg_attr_t hd; } _fx_LN19C_form__carg_attr_t_data_t, *_fx_LN19C_form__carg_attr_t; typedef struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t { struct _fx_R9Ast__id_t t0; struct _fx_N14C_form__ctyp_t_data_t* t1; struct _fx_LN19C_form__carg_attr_t_data_t* t2; } _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t; typedef struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t { int_ rc; struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* tl; struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t hd; } _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t, *_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t; typedef struct _fx_R17C_form__cdeffun_t { struct _fx_R9Ast__id_t cf_name; fx_str_t cf_cname; struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* cf_args; struct _fx_N14C_form__ctyp_t_data_t* cf_rt; struct _fx_LN15C_form__cstmt_t_data_t* cf_body; struct _fx_R16Ast__fun_flags_t cf_flags; struct _fx_LN12Ast__scope_t_data_t* cf_scope; struct _fx_R10Ast__loc_t cf_loc; } _fx_R17C_form__cdeffun_t; typedef struct _fx_rR17C_form__cdeffun_t_data_t { int_ rc; struct 
_fx_R17C_form__cdeffun_t data; } _fx_rR17C_form__cdeffun_t_data_t, *_fx_rR17C_form__cdeffun_t; typedef struct _fx_Ta2R9Ast__id_t { struct _fx_R9Ast__id_t t0; struct _fx_R9Ast__id_t t1; } _fx_Ta2R9Ast__id_t; typedef struct _fx_LR9Ast__id_t_data_t { int_ rc; struct _fx_LR9Ast__id_t_data_t* tl; struct _fx_R9Ast__id_t hd; } _fx_LR9Ast__id_t_data_t, *_fx_LR9Ast__id_t; typedef struct _fx_R17C_form__ctprops_t { bool ctp_scalar; bool ctp_complex; bool ctp_ptr; bool ctp_pass_by_ref; struct _fx_LR9Ast__id_t_data_t* ctp_make; struct _fx_Ta2R9Ast__id_t ctp_free; struct _fx_Ta2R9Ast__id_t ctp_copy; } _fx_R17C_form__ctprops_t; typedef struct _fx_R17C_form__cdeftyp_t { struct _fx_R9Ast__id_t ct_name; struct _fx_N14C_form__ctyp_t_data_t* ct_typ; fx_str_t ct_cname; struct _fx_R17C_form__ctprops_t ct_props; int_ ct_data_start; struct _fx_R9Ast__id_t ct_enum; struct _fx_LR9Ast__id_t_data_t* ct_ifaces; struct _fx_R9Ast__id_t ct_ifaces_id; struct _fx_LN12Ast__scope_t_data_t* ct_scope; struct _fx_R10Ast__loc_t ct_loc; } _fx_R17C_form__cdeftyp_t; typedef struct _fx_rR17C_form__cdeftyp_t_data_t { int_ rc; struct _fx_R17C_form__cdeftyp_t data; } _fx_rR17C_form__cdeftyp_t_data_t, *_fx_rR17C_form__cdeftyp_t; typedef struct _fx_Nt6option1N14C_form__cexp_t { int tag; union { struct _fx_N14C_form__cexp_t_data_t* Some; } u; } _fx_Nt6option1N14C_form__cexp_t; typedef struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t { struct _fx_R9Ast__id_t t0; struct _fx_Nt6option1N14C_form__cexp_t t1; } _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t; typedef struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* tl; struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t hd; } _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t, *_fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t; typedef struct _fx_R18C_form__cdefenum_t { struct _fx_R9Ast__id_t cenum_name; struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* cenum_members; fx_str_t 
cenum_cname; struct _fx_LN12Ast__scope_t_data_t* cenum_scope; struct _fx_R10Ast__loc_t cenum_loc; } _fx_R18C_form__cdefenum_t; typedef struct _fx_rR18C_form__cdefenum_t_data_t { int_ rc; struct _fx_R18C_form__cdefenum_t data; } _fx_rR18C_form__cdefenum_t_data_t, *_fx_rR18C_form__cdefenum_t; typedef struct _fx_R19C_form__cdefmacro_t { struct _fx_R9Ast__id_t cm_name; fx_str_t cm_cname; struct _fx_LR9Ast__id_t_data_t* cm_args; struct _fx_LN15C_form__cstmt_t_data_t* cm_body; struct _fx_LN12Ast__scope_t_data_t* cm_scope; struct _fx_R10Ast__loc_t cm_loc; } _fx_R19C_form__cdefmacro_t; typedef struct _fx_rR19C_form__cdefmacro_t_data_t { int_ rc; struct _fx_R19C_form__cdefmacro_t data; } _fx_rR19C_form__cdefmacro_t_data_t, *_fx_rR19C_form__cdefmacro_t; typedef struct _fx_T2R9Ast__id_tN14K_form__ktyp_t { struct _fx_R9Ast__id_t t0; struct _fx_N14K_form__ktyp_t_data_t* t1; } _fx_T2R9Ast__id_tN14K_form__ktyp_t; typedef struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* tl; struct _fx_T2R9Ast__id_tN14K_form__ktyp_t hd; } _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t, *_fx_LT2R9Ast__id_tN14K_form__ktyp_t; typedef struct _fx_R23K_form__kdefinterface_t { struct _fx_R9Ast__id_t ki_name; struct _fx_R9Ast__id_t ki_base; fx_str_t ki_cname; struct _fx_R9Ast__id_t ki_id; struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* ki_all_methods; struct _fx_LN12Ast__scope_t_data_t* ki_scope; struct _fx_R10Ast__loc_t ki_loc; } _fx_R23K_form__kdefinterface_t; typedef struct _fx_rR23K_form__kdefinterface_t_data_t { int_ rc; struct _fx_R23K_form__kdefinterface_t data; } _fx_rR23K_form__kdefinterface_t_data_t, *_fx_rR23K_form__kdefinterface_t; typedef struct _fx_R25K_form__kdefclosureinfo_t { struct _fx_R9Ast__id_t kci_arg; struct _fx_R9Ast__id_t kci_fcv_t; struct _fx_R9Ast__id_t kci_fp_typ; struct _fx_R9Ast__id_t kci_make_fp; struct _fx_R9Ast__id_t kci_wrap_f; } _fx_R25K_form__kdefclosureinfo_t; typedef struct _fx_R17K_form__kdeffun_t { struct 
_fx_R9Ast__id_t kf_name; fx_str_t kf_cname; struct _fx_LR9Ast__id_t_data_t* kf_params; struct _fx_N14K_form__ktyp_t_data_t* kf_rt; struct _fx_N14K_form__kexp_t_data_t* kf_body; struct _fx_R16Ast__fun_flags_t kf_flags; struct _fx_R25K_form__kdefclosureinfo_t kf_closure; struct _fx_LN12Ast__scope_t_data_t* kf_scope; struct _fx_R10Ast__loc_t kf_loc; } _fx_R17K_form__kdeffun_t; typedef struct _fx_rR17K_form__kdeffun_t_data_t { int_ rc; struct _fx_R17K_form__kdeffun_t data; } _fx_rR17K_form__kdeffun_t_data_t, *_fx_rR17K_form__kdeffun_t; typedef struct _fx_R17K_form__kdefexn_t { struct _fx_R9Ast__id_t ke_name; fx_str_t ke_cname; fx_str_t ke_base_cname; struct _fx_N14K_form__ktyp_t_data_t* ke_typ; bool ke_std; struct _fx_R9Ast__id_t ke_tag; struct _fx_R9Ast__id_t ke_make; struct _fx_LN12Ast__scope_t_data_t* ke_scope; struct _fx_R10Ast__loc_t ke_loc; } _fx_R17K_form__kdefexn_t; typedef struct _fx_rR17K_form__kdefexn_t_data_t { int_ rc; struct _fx_R17K_form__kdefexn_t data; } _fx_rR17K_form__kdefexn_t_data_t, *_fx_rR17K_form__kdefexn_t; typedef struct _fx_R17K_form__ktprops_t { bool ktp_complex; bool ktp_scalar; bool ktp_ptr; bool ktp_pass_by_ref; bool ktp_custom_free; bool ktp_custom_copy; } _fx_R17K_form__ktprops_t; typedef struct _fx_Nt6option1R17K_form__ktprops_t { int tag; union { struct _fx_R17K_form__ktprops_t Some; } u; } _fx_Nt6option1R17K_form__ktprops_t; typedef struct _fx_R16Ast__var_flags_t { int_ var_flag_class_from; bool var_flag_record; bool var_flag_recursive; bool var_flag_have_tag; bool var_flag_have_mutable; bool var_flag_opt; bool var_flag_instance; } _fx_R16Ast__var_flags_t; typedef struct _fx_LN14K_form__ktyp_t_data_t { int_ rc; struct _fx_LN14K_form__ktyp_t_data_t* tl; struct _fx_N14K_form__ktyp_t_data_t* hd; } _fx_LN14K_form__ktyp_t_data_t, *_fx_LN14K_form__ktyp_t; typedef struct _fx_T2R9Ast__id_tLR9Ast__id_t { struct _fx_R9Ast__id_t t0; struct _fx_LR9Ast__id_t_data_t* t1; } _fx_T2R9Ast__id_tLR9Ast__id_t; typedef struct 
/* NOTE(review): machine-generated C — the `_fx_` identifier mangling looks like the
 * output of the Ficus compiler's C backend (TODO confirm). Do not hand-edit these
 * declarations; regenerate from the compiler instead. This span begins mid-typedef:
 * the `typedef struct` introducing the first declaration sits on a preceding line.
 *
 * Mangling scheme (inferred from the shapes below — verify against the generator):
 *   L<T>  = cons list of T      (fields: rc, tl, hd)
 *   T<n>… = n-tuple             (fields: t0 .. t<n-1>)
 *   N…    = tagged union        (int tag; union u)
 *   R…    = plain record
 *   r…    = ref-boxed value     (fields: rc, data)
 *   Nt6option1<T> = option<T>
 * The leading `int_ rc;` member on every `*_data_t` type is presumably a reference
 * count — all such types are accessed through the companion pointer typedef. */
/* K-form definition records (kvar_* / kt_* / kcv_*) and Ast definition records
 * (dv_* / df_* / dexn_* / dt_* / dvar_* / di_*), each paired with its ref-boxed
 * `r…_data_t` wrapper. */
_fx_LT2R9Ast__id_tLR9Ast__id_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t* tl; struct _fx_T2R9Ast__id_tLR9Ast__id_t hd; } _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t, *_fx_LT2R9Ast__id_tLR9Ast__id_t; typedef struct _fx_R21K_form__kdefvariant_t { struct _fx_R9Ast__id_t kvar_name; fx_str_t kvar_cname; struct _fx_R9Ast__id_t kvar_proto; struct _fx_Nt6option1R17K_form__ktprops_t kvar_props; struct _fx_LN14K_form__ktyp_t_data_t* kvar_targs; struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* kvar_cases; struct _fx_LR9Ast__id_t_data_t* kvar_ctors; struct _fx_R16Ast__var_flags_t kvar_flags; struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t* kvar_ifaces; struct _fx_LN12Ast__scope_t_data_t* kvar_scope; struct _fx_R10Ast__loc_t kvar_loc; } _fx_R21K_form__kdefvariant_t; typedef struct _fx_rR21K_form__kdefvariant_t_data_t { int_ rc; struct _fx_R21K_form__kdefvariant_t data; } _fx_rR21K_form__kdefvariant_t_data_t, *_fx_rR21K_form__kdefvariant_t; typedef struct _fx_R17K_form__kdeftyp_t { struct _fx_R9Ast__id_t kt_name; fx_str_t kt_cname; struct _fx_R9Ast__id_t kt_proto; struct _fx_Nt6option1R17K_form__ktprops_t kt_props; struct _fx_LN14K_form__ktyp_t_data_t* kt_targs; struct _fx_N14K_form__ktyp_t_data_t* kt_typ; struct _fx_LN12Ast__scope_t_data_t* kt_scope; struct _fx_R10Ast__loc_t kt_loc; } _fx_R17K_form__kdeftyp_t; typedef struct _fx_rR17K_form__kdeftyp_t_data_t { int_ rc; struct _fx_R17K_form__kdeftyp_t data; } _fx_rR17K_form__kdeftyp_t_data_t, *_fx_rR17K_form__kdeftyp_t; typedef struct _fx_R25K_form__kdefclosurevars_t { struct _fx_R9Ast__id_t kcv_name; fx_str_t kcv_cname; struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* kcv_freevars; struct _fx_LR9Ast__id_t_data_t* kcv_orig_freevars; struct _fx_LN12Ast__scope_t_data_t* kcv_scope; struct _fx_R10Ast__loc_t kcv_loc; } _fx_R25K_form__kdefclosurevars_t; typedef struct _fx_rR25K_form__kdefclosurevars_t_data_t { int_ rc; struct _fx_R25K_form__kdefclosurevars_t data; } _fx_rR25K_form__kdefclosurevars_t_data_t, 
*_fx_rR25K_form__kdefclosurevars_t; typedef struct _fx_Nt6option1R9Ast__id_t { int tag; union { struct _fx_R9Ast__id_t Some; } u; } _fx_Nt6option1R9Ast__id_t; typedef struct _fx_Nt6option1N10Ast__exp_t_data_t { int_ rc; union { struct _fx_N10Ast__exp_t_data_t* Some; } u; } _fx_Nt6option1N10Ast__exp_t_data_t, *_fx_Nt6option1N10Ast__exp_t; typedef struct _fx_R13Ast__defval_t { struct _fx_R9Ast__id_t dv_name; struct _fx_N10Ast__typ_t_data_t* dv_typ; struct _fx_R16Ast__val_flags_t dv_flags; struct _fx_LN12Ast__scope_t_data_t* dv_scope; struct _fx_R10Ast__loc_t dv_loc; } _fx_R13Ast__defval_t; typedef struct _fx_FPi2R9Ast__id_tR9Ast__id_t { int (*fp)(struct _fx_R9Ast__id_t*, struct _fx_R9Ast__id_t*, int_*, void*); fx_fcv_t* fcv; } _fx_FPi2R9Ast__id_tR9Ast__id_t; typedef struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t { struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* root; struct _fx_FPi2R9Ast__id_tR9Ast__id_t cmp; } _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t; typedef struct _fx_LN10Ast__pat_t_data_t { int_ rc; struct _fx_LN10Ast__pat_t_data_t* tl; struct _fx_N10Ast__pat_t_data_t* hd; } _fx_LN10Ast__pat_t_data_t, *_fx_LN10Ast__pat_t; typedef struct _fx_rLR9Ast__id_t_data_t { int_ rc; struct _fx_LR9Ast__id_t_data_t* data; } _fx_rLR9Ast__id_t_data_t, *_fx_rLR9Ast__id_t; typedef struct _fx_R13Ast__deffun_t { struct _fx_R9Ast__id_t df_name; struct _fx_LR9Ast__id_t_data_t* df_templ_args; struct _fx_LN10Ast__pat_t_data_t* df_args; struct _fx_N10Ast__typ_t_data_t* df_typ; struct _fx_N10Ast__exp_t_data_t* df_body; struct _fx_R16Ast__fun_flags_t df_flags; struct _fx_LN12Ast__scope_t_data_t* df_scope; struct _fx_R10Ast__loc_t df_loc; struct _fx_rLR9Ast__id_t_data_t* df_templ_inst; struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t df_env; } _fx_R13Ast__deffun_t; typedef struct _fx_rR13Ast__deffun_t_data_t { int_ rc; struct _fx_R13Ast__deffun_t data; } _fx_rR13Ast__deffun_t_data_t, *_fx_rR13Ast__deffun_t; typedef struct _fx_R13Ast__defexn_t { struct 
_fx_R9Ast__id_t dexn_name; struct _fx_N10Ast__typ_t_data_t* dexn_typ; struct _fx_LN12Ast__scope_t_data_t* dexn_scope; struct _fx_R10Ast__loc_t dexn_loc; } _fx_R13Ast__defexn_t; typedef struct _fx_rR13Ast__defexn_t_data_t { int_ rc; struct _fx_R13Ast__defexn_t data; } _fx_rR13Ast__defexn_t_data_t, *_fx_rR13Ast__defexn_t; typedef struct _fx_R13Ast__deftyp_t { struct _fx_R9Ast__id_t dt_name; struct _fx_LR9Ast__id_t_data_t* dt_templ_args; struct _fx_N10Ast__typ_t_data_t* dt_typ; bool dt_finalized; struct _fx_LN12Ast__scope_t_data_t* dt_scope; struct _fx_R10Ast__loc_t dt_loc; } _fx_R13Ast__deftyp_t; typedef struct _fx_rR13Ast__deftyp_t_data_t { int_ rc; struct _fx_R13Ast__deftyp_t data; } _fx_rR13Ast__deftyp_t_data_t, *_fx_rR13Ast__deftyp_t; typedef struct _fx_T2R9Ast__id_tN10Ast__typ_t { struct _fx_R9Ast__id_t t0; struct _fx_N10Ast__typ_t_data_t* t1; } _fx_T2R9Ast__id_tN10Ast__typ_t; typedef struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t* tl; struct _fx_T2R9Ast__id_tN10Ast__typ_t hd; } _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t, *_fx_LT2R9Ast__id_tN10Ast__typ_t; typedef struct _fx_LTa2R9Ast__id_t_data_t { int_ rc; struct _fx_LTa2R9Ast__id_t_data_t* tl; struct _fx_Ta2R9Ast__id_t hd; } _fx_LTa2R9Ast__id_t_data_t, *_fx_LTa2R9Ast__id_t; typedef struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t { struct _fx_R9Ast__id_t t0; struct _fx_LTa2R9Ast__id_t_data_t* t1; } _fx_T2R9Ast__id_tLTa2R9Ast__id_t; typedef struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t* tl; struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t hd; } _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t, *_fx_LT2R9Ast__id_tLTa2R9Ast__id_t; typedef struct _fx_R17Ast__defvariant_t { struct _fx_R9Ast__id_t dvar_name; struct _fx_LR9Ast__id_t_data_t* dvar_templ_args; struct _fx_N10Ast__typ_t_data_t* dvar_alias; struct _fx_R16Ast__var_flags_t dvar_flags; struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t* dvar_cases; struct 
_fx_LR9Ast__id_t_data_t* dvar_ctors; struct _fx_rLR9Ast__id_t_data_t* dvar_templ_inst; struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t* dvar_ifaces; struct _fx_LN12Ast__scope_t_data_t* dvar_scope; struct _fx_R10Ast__loc_t dvar_loc; } _fx_R17Ast__defvariant_t; typedef struct _fx_rR17Ast__defvariant_t_data_t { int_ rc; struct _fx_R17Ast__defvariant_t data; } _fx_rR17Ast__defvariant_t_data_t, *_fx_rR17Ast__defvariant_t; typedef struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t { struct _fx_R9Ast__id_t t0; struct _fx_N10Ast__typ_t_data_t* t1; struct _fx_R16Ast__fun_flags_t t2; } _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t; typedef struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t { int_ rc; struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* tl; struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t hd; } _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t, *_fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t; typedef struct _fx_R19Ast__definterface_t { struct _fx_R9Ast__id_t di_name; struct _fx_R9Ast__id_t di_base; struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* di_new_methods; struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* di_all_methods; struct _fx_LN12Ast__scope_t_data_t* di_scope; struct _fx_R10Ast__loc_t di_loc; } _fx_R19Ast__definterface_t; typedef struct _fx_rR19Ast__definterface_t_data_t { int_ rc; struct _fx_R19Ast__definterface_t data; } _fx_rR19Ast__definterface_t_data_t, *_fx_rR19Ast__definterface_t; typedef struct _fx_N14Ast__id_info_t { int tag; union { struct _fx_R13Ast__defval_t IdDVal; struct _fx_rR13Ast__deffun_t_data_t* IdFun; struct _fx_rR13Ast__defexn_t_data_t* IdExn; struct _fx_rR13Ast__deftyp_t_data_t* IdTyp; struct _fx_rR17Ast__defvariant_t_data_t* IdVariant; struct _fx_rR19Ast__definterface_t_data_t* IdInterface; int_ IdModule; } u; } _fx_N14Ast__id_info_t; typedef struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t { int_ t0; fx_arr_t t1; struct 
_fx_N14Ast__id_info_t t2; } _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t; typedef struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t { int_ rc; union { struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t t; } u; } _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t, *_fx_Nt9Dynvec__t1N14Ast__id_info_t; typedef struct _fx_N12Map__color_t { int tag; } _fx_N12Map__color_t; typedef struct _fx_LN16Ast__env_entry_t_data_t { int_ rc; struct _fx_LN16Ast__env_entry_t_data_t* tl; struct _fx_N16Ast__env_entry_t_data_t* hd; } _fx_LN16Ast__env_entry_t_data_t, *_fx_LN16Ast__env_entry_t; typedef struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t { struct _fx_N12Map__color_t t0; struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* t1; struct _fx_R9Ast__id_t t2; struct _fx_LN16Ast__env_entry_t_data_t* t3; struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* t4; } _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t; typedef struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t { int_ rc; union { struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t Node; } u; } _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t, *_fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t; typedef struct _fx_T2R10Ast__loc_tS { struct _fx_R10Ast__loc_t t0; fx_str_t t1; } _fx_T2R10Ast__loc_tS; typedef struct _fx_T2il { int_ t0; int64_t t1; } _fx_T2il; typedef struct _fx_T2iq { int_ t0; uint64_t t1; } _fx_T2iq; typedef struct _fx_T2id { int_ t0; double t1; } _fx_T2id; typedef struct _fx_N10Ast__lit_t { int tag; union { int64_t LitInt; struct _fx_T2il LitSInt; struct _fx_T2iq LitUInt; struct _fx_T2id LitFloat; fx_str_t LitString; char_ LitChar; bool LitBool; } u; } _fx_N10Ast__lit_t; 
/* NOTE(review): machine-generated C (Ficus C backend mangling, presumably — do not
 * hand-edit; regenerate instead). This section declares the Ast type, expression
 * and pattern node representations:
 *   - _fx_N10Ast__typ_t_data_t  — tagged union of type constructors (TypVar, TypFun,
 *     TypTuple, TypRecord, TypApp, …); `tag` selects the active `u` member.
 *   - _fx_N10Ast__exp_t_data_t  — tagged union of expression forms (ExpLit, ExpCall,
 *     ExpIf, ExpFor, DefVal/DefFun/…, DirImport/DirPragma); its declaration is split
 *     across several physical lines below.
 *   - _fx_N10Ast__pat_t_data_t  — tagged union of pattern forms (PatAny … PatRef).
 * Tuple structs `T…` carry positional fields t0..tN; `L…_data_t` types are cons
 * cells (rc/tl/hd). The `int_ rc` member is presumably a reference count — TODO
 * confirm against the fx runtime. */
typedef struct _fx_rNt6option1N10Ast__typ_t_data_t { int_ rc; struct _fx_Nt6option1N10Ast__typ_t_data_t* data; } _fx_rNt6option1N10Ast__typ_t_data_t, *_fx_rNt6option1N10Ast__typ_t; typedef struct _fx_LN10Ast__typ_t_data_t { int_ rc; struct _fx_LN10Ast__typ_t_data_t* tl; struct _fx_N10Ast__typ_t_data_t* hd; } _fx_LN10Ast__typ_t_data_t, *_fx_LN10Ast__typ_t; typedef struct _fx_T2LN10Ast__typ_tN10Ast__typ_t { struct _fx_LN10Ast__typ_t_data_t* t0; struct _fx_N10Ast__typ_t_data_t* t1; } _fx_T2LN10Ast__typ_tN10Ast__typ_t; typedef struct _fx_T2iN10Ast__typ_t { int_ t0; struct _fx_N10Ast__typ_t_data_t* t1; } _fx_T2iN10Ast__typ_t; typedef struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t { struct _fx_R16Ast__val_flags_t t0; struct _fx_R9Ast__id_t t1; struct _fx_N10Ast__typ_t_data_t* t2; struct _fx_N10Ast__exp_t_data_t* t3; } _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t; typedef struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t { int_ rc; struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t* tl; struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t hd; } _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t, *_fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t; typedef struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB { struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t* t0; bool t1; } _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB; typedef struct _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t { int_ rc; struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB data; } _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t, *_fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB; typedef struct _fx_T2LN10Ast__typ_tR9Ast__id_t { struct _fx_LN10Ast__typ_t_data_t* t0; struct _fx_R9Ast__id_t t1; } 
_fx_T2LN10Ast__typ_tR9Ast__id_t; typedef struct _fx_N10Ast__typ_t_data_t { int_ rc; int tag; union { struct _fx_rNt6option1N10Ast__typ_t_data_t* TypVar; struct _fx_Nt6option1N10Ast__typ_t_data_t* TypVarTuple; struct _fx_N10Ast__typ_t_data_t* TypVarArray; int_ TypSInt; int_ TypUInt; int_ TypFloat; struct _fx_T2LN10Ast__typ_tN10Ast__typ_t TypFun; struct _fx_N10Ast__typ_t_data_t* TypList; struct _fx_N10Ast__typ_t_data_t* TypVector; struct _fx_LN10Ast__typ_t_data_t* TypTuple; struct _fx_N10Ast__typ_t_data_t* TypRef; struct _fx_T2iN10Ast__typ_t TypArray; struct _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t* TypRecord; struct _fx_T2LN10Ast__typ_tR9Ast__id_t TypApp; } u; } _fx_N10Ast__typ_t_data_t, *_fx_N10Ast__typ_t; typedef struct _fx_N12Ast__cmpop_t { int tag; } _fx_N12Ast__cmpop_t; typedef struct _fx_N13Ast__binary_t_data_t { int_ rc; int tag; union { struct _fx_N12Ast__cmpop_t OpCmp; struct _fx_N12Ast__cmpop_t OpDotCmp; struct _fx_N13Ast__binary_t_data_t* OpAugBinary; } u; } _fx_N13Ast__binary_t_data_t, *_fx_N13Ast__binary_t; typedef struct _fx_N12Ast__unary_t { int tag; } _fx_N12Ast__unary_t; typedef struct _fx_N13Ast__intrin_t { int tag; union { struct _fx_R9Ast__id_t IntrinMath; } u; } _fx_N13Ast__intrin_t; typedef struct _fx_N15Ast__for_make_t { int tag; } _fx_N15Ast__for_make_t; typedef struct _fx_R16Ast__for_flags_t { bool for_flag_parallel; struct _fx_N15Ast__for_make_t for_flag_make; bool for_flag_unzip; bool for_flag_fold; bool for_flag_nested; } _fx_R16Ast__for_flags_t; typedef struct _fx_N13Ast__border_t { int tag; } _fx_N13Ast__border_t; typedef struct _fx_N18Ast__interpolate_t { int tag; } _fx_N18Ast__interpolate_t; typedef struct _fx_T2BR10Ast__loc_t { bool t0; struct _fx_R10Ast__loc_t t1; } _fx_T2BR10Ast__loc_t; typedef struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t { struct _fx_Nt6option1N10Ast__exp_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t; typedef struct 
_fx_T2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__typ_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_Nt6option1N10Ast__exp_t_data_t* t0; struct _fx_Nt6option1N10Ast__exp_t_data_t* t1; struct _fx_Nt6option1N10Ast__exp_t_data_t* t2; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t3; } _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__lit_t t0; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1; } _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1; } _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N13Ast__binary_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_N10Ast__exp_t_data_t* t2; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t3; } _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N12Ast__unary_t t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_LN10Ast__exp_t_data_t { int_ rc; struct _fx_LN10Ast__exp_t_data_t* tl; struct _fx_N10Ast__exp_t_data_t* hd; } _fx_LN10Ast__exp_t_data_t, *_fx_LN10Ast__exp_t; typedef struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N13Ast__intrin_t t0; struct _fx_LN10Ast__exp_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tN10Ast__exp_t 
{ struct _fx_R9Ast__id_t t0; struct _fx_N10Ast__exp_t_data_t* t1; } _fx_T2R9Ast__id_tN10Ast__exp_t; typedef struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_LN10Ast__exp_t_data_t* t0; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1; } _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_LLN10Ast__exp_t_data_t { int_ rc; struct _fx_LLN10Ast__exp_t_data_t* tl; struct _fx_LN10Ast__exp_t_data_t* hd; } _fx_LLN10Ast__exp_t_data_t, *_fx_LLN10Ast__exp_t; typedef struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_LLN10Ast__exp_t_data_t* t0; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1; } _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t* tl; struct _fx_T2R9Ast__id_tN10Ast__exp_t hd; } _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t, *_fx_LT2R9Ast__id_tN10Ast__exp_t; typedef struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_LN10Ast__exp_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_N13Ast__border_t t1; struct _fx_N18Ast__interpolate_t t2; struct _fx_LN10Ast__exp_t_data_t* t3; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t4; } _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct 
_fx_N10Ast__exp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T2N10Ast__exp_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N10Ast__exp_tR10Ast__loc_t; typedef struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_N10Ast__exp_t_data_t* t2; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t3; } _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T2N10Ast__pat_tN10Ast__exp_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; } _fx_T2N10Ast__pat_tN10Ast__exp_t; typedef struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t { int_ rc; struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* tl; struct _fx_T2N10Ast__pat_tN10Ast__exp_t hd; } _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t, *_fx_LT2N10Ast__pat_tN10Ast__exp_t; typedef struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t { struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t0; struct _fx_N10Ast__pat_t_data_t* t1; struct _fx_N10Ast__exp_t_data_t* t2; struct _fx_R16Ast__for_flags_t t3; struct _fx_R10Ast__loc_t t4; } _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t; typedef struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t { struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t0; struct _fx_N10Ast__pat_t_data_t* t1; } _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t; typedef struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t { int_ rc; struct 
_fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t* tl; struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t hd; } _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t, *_fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t; typedef struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_R16Ast__for_flags_t t2; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t3; } _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__exp_t_data_t* t0; struct _fx_N10Ast__typ_t_data_t* t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t { fx_str_t t0; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t1; } _fx_T2ST2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t { fx_str_t t0; fx_str_t t1; struct _fx_T2N10Ast__typ_tR10Ast__loc_t t2; } _fx_T3SST2N10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_R16Ast__val_flags_t t2; struct _fx_R10Ast__loc_t t3; } _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t; typedef struct _fx_T2iR9Ast__id_t { int_ t0; struct _fx_R9Ast__id_t t1; } _fx_T2iR9Ast__id_t; typedef struct _fx_LT2iR9Ast__id_t_data_t { int_ rc; struct 
_fx_LT2iR9Ast__id_t_data_t* tl; struct _fx_T2iR9Ast__id_t hd; } _fx_LT2iR9Ast__id_t_data_t, *_fx_LT2iR9Ast__id_t; typedef struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t { struct _fx_LT2iR9Ast__id_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2LT2iR9Ast__id_tR10Ast__loc_t; typedef struct _fx_T3iLR9Ast__id_tR10Ast__loc_t { int_ t0; struct _fx_LR9Ast__id_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3iLR9Ast__id_tR10Ast__loc_t; typedef struct _fx_T2LSR10Ast__loc_t { struct _fx_LS_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2LSR10Ast__loc_t; typedef struct _fx_N10Ast__exp_t_data_t { int_ rc; int tag; union { struct _fx_R10Ast__loc_t ExpNop; struct _fx_T2BR10Ast__loc_t ExpBreak; struct _fx_R10Ast__loc_t ExpContinue; struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t ExpReturn; struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpRange; struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t ExpLit; struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t ExpIdent; struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpBinary; struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpUnary; struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpIntrin; struct _fx_T2R9Ast__id_tN10Ast__exp_t ExpSync; struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpSeq; struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMkTuple; struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMkArray; struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMkVector; struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMkRecord; struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpUpdateRecord; struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpCall; struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpAt; 
struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t ExpAssign; struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMem; struct _fx_T2N10Ast__exp_tR10Ast__loc_t ExpThrow; struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpIf; struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t ExpWhile; struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t ExpDoWhile; struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t ExpFor; struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t ExpMap; struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpTryCatch; struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t ExpMatch; struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t ExpCast; struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t ExpTyped; struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t ExpCCode; struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t ExpData; struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t DefVal; struct _fx_rR13Ast__deffun_t_data_t* DefFun; struct _fx_rR13Ast__defexn_t_data_t* DefExn; struct _fx_rR13Ast__deftyp_t_data_t* DefTyp; struct _fx_rR17Ast__defvariant_t_data_t* DefVariant; struct _fx_rR19Ast__definterface_t_data_t* DefInterface; struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t DirImport; struct _fx_T3iLR9Ast__id_tR10Ast__loc_t DirImportFrom; struct _fx_T2LSR10Ast__loc_t DirPragma; } u; } _fx_N10Ast__exp_t_data_t, *_fx_N10Ast__exp_t; typedef struct _fx_T2N10Ast__lit_tR10Ast__loc_t { struct _fx_N10Ast__lit_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N10Ast__lit_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2R9Ast__id_tR10Ast__loc_t; typedef struct _fx_T2LN10Ast__pat_tR10Ast__loc_t { struct _fx_LN10Ast__pat_t_data_t* t0; struct 
_fx_R10Ast__loc_t t1; } _fx_T2LN10Ast__pat_tR10Ast__loc_t; typedef struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_LN10Ast__pat_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tN10Ast__pat_t { struct _fx_R9Ast__id_t t0; struct _fx_N10Ast__pat_t_data_t* t1; } _fx_T2R9Ast__id_tN10Ast__pat_t; typedef struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t* tl; struct _fx_T2R9Ast__id_tN10Ast__pat_t hd; } _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t, *_fx_LT2R9Ast__id_tN10Ast__pat_t; typedef struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t { struct _fx_Nt6option1R9Ast__id_t t0; struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_N10Ast__pat_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_R9Ast__id_t t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_N10Ast__typ_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t; typedef struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_N10Ast__exp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t; typedef struct _fx_T2N10Ast__pat_tR10Ast__loc_t { struct _fx_N10Ast__pat_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N10Ast__pat_tR10Ast__loc_t; typedef struct _fx_N10Ast__pat_t_data_t { int_ rc; int tag; union { struct 
_fx_R10Ast__loc_t PatAny; struct _fx_T2N10Ast__lit_tR10Ast__loc_t PatLit; struct _fx_T2R9Ast__id_tR10Ast__loc_t PatIdent; struct _fx_T2LN10Ast__pat_tR10Ast__loc_t PatTuple; struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t PatVariant; struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t PatRecord; struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t PatCons; struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t PatAs; struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t PatTyped; struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t PatWhen; struct _fx_T2LN10Ast__pat_tR10Ast__loc_t PatAlt; struct _fx_T2N10Ast__pat_tR10Ast__loc_t PatRef; } u; } _fx_N10Ast__pat_t_data_t, *_fx_N10Ast__pat_t; typedef struct _fx_N16Ast__env_entry_t_data_t { int_ rc; int tag; union { struct _fx_R9Ast__id_t EnvId; struct _fx_N10Ast__typ_t_data_t* EnvTyp; } u; } _fx_N16Ast__env_entry_t_data_t, *_fx_N16Ast__env_entry_t; typedef struct _fx_T2SR10Ast__loc_t { fx_str_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2SR10Ast__loc_t; typedef struct _fx_LT2SR10Ast__loc_t_data_t { int_ rc; struct _fx_LT2SR10Ast__loc_t_data_t* tl; struct _fx_T2SR10Ast__loc_t hd; } _fx_LT2SR10Ast__loc_t_data_t, *_fx_LT2SR10Ast__loc_t; typedef struct _fx_Li_data_t { int_ rc; struct _fx_Li_data_t* tl; int_ hd; } _fx_Li_data_t, *_fx_Li; typedef struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t { struct _fx_R9Ast__id_t t0; fx_str_t t1; int_ t2; bool t3; struct _fx_LN10Ast__exp_t_data_t* t4; struct _fx_Li_data_t* t5; struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t t6; bool t7; int_ t8; struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t* t9; } _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t; typedef struct _fx_N16Ast__defmodule_t_data_t { int_ rc; union { struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t 
defmodule_t; } u; } _fx_N16Ast__defmodule_t_data_t, *_fx_N16Ast__defmodule_t; typedef struct _fx_LE_data_t { int_ rc; struct _fx_LE_data_t* tl; fx_exn_t hd; } _fx_LE_data_t, *_fx_LE; typedef struct _fx_T2BS { bool t0; fx_str_t t1; } _fx_T2BS; typedef struct _fx_N14Lexer__token_t { int tag; union { struct _fx_N10Ast__lit_t LITERAL; struct _fx_T2BS IDENT; fx_str_t TYVAR; fx_str_t DATA; bool FOR; bool IMPORT; bool REF; bool RETURN; bool WHILE; bool LPAREN; bool LSQUARE; bool BACKSLASH; bool MINUS; bool PLUS; bool STAR; bool DOT_PLUS; bool DOT_MINUS; struct _fx_N13Ast__binary_t_data_t* AUG_BINOP; struct _fx_N12Ast__cmpop_t CMP; struct _fx_N12Ast__cmpop_t DOT_CMP; fx_str_t RESERVED; } u; } _fx_N14Lexer__token_t; typedef struct _fx_LN14Lexer__token_t_data_t { int_ rc; struct _fx_LN14Lexer__token_t_data_t* tl; struct _fx_N14Lexer__token_t hd; } _fx_LN14Lexer__token_t_data_t, *_fx_LN14Lexer__token_t; typedef struct _fx_N14K_form__klit_t { int tag; union { int64_t KLitInt; struct _fx_T2il KLitSInt; struct _fx_T2iq KLitUInt; struct _fx_T2id KLitFloat; fx_str_t KLitString; char_ KLitChar; bool KLitBool; struct _fx_N14K_form__ktyp_t_data_t* KLitNil; } u; } _fx_N14K_form__klit_t; typedef struct _fx_N14K_form__atom_t { int tag; union { struct _fx_R9Ast__id_t AtomId; struct _fx_N14K_form__klit_t AtomLit; } u; } _fx_N14K_form__atom_t; typedef struct _fx_Nt6option1N14K_form__atom_t { int tag; union { struct _fx_N14K_form__atom_t Some; } u; } _fx_Nt6option1N14K_form__atom_t; typedef struct _fx_LN14K_form__kexp_t_data_t { int_ rc; struct _fx_LN14K_form__kexp_t_data_t* tl; struct _fx_N14K_form__kexp_t_data_t* hd; } _fx_LN14K_form__kexp_t_data_t, *_fx_LN14K_form__kexp_t; typedef struct _fx_T2BN14K_form__atom_t { bool t0; struct _fx_N14K_form__atom_t t1; } _fx_T2BN14K_form__atom_t; typedef struct _fx_LT2BN14K_form__atom_t_data_t { int_ rc; struct _fx_LT2BN14K_form__atom_t_data_t* tl; struct _fx_T2BN14K_form__atom_t hd; } _fx_LT2BN14K_form__atom_t_data_t, *_fx_LT2BN14K_form__atom_t; 
/* NOTE(review): machine-generated C (Ficus C backend mangling, presumably — do not
 * hand-edit; regenerate instead). K_form typedefs: the compiler's intermediate
 * representation after the Ast stage (the `K_form` module name suggests a K-normal
 * form IR — TODO confirm). Declares the ktyp_t tagged union (KTypSInt … KTypRef),
 * atom/dom variants, and the tuple/list shells used by kexp_t nodes.
 * The final typedef in this span is truncated at the chunk boundary — its name
 * continues on the following line of the file. */
typedef struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t { struct _fx_LN14K_form__ktyp_t_data_t* t0; struct _fx_N14K_form__ktyp_t_data_t* t1; } _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t; typedef struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t { struct _fx_R9Ast__id_t t0; struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* t1; } _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t; typedef struct _fx_T2iN14K_form__ktyp_t { int_ t0; struct _fx_N14K_form__ktyp_t_data_t* t1; } _fx_T2iN14K_form__ktyp_t; typedef struct _fx_N14K_form__ktyp_t_data_t { int_ rc; int tag; union { int_ KTypSInt; int_ KTypUInt; int_ KTypFloat; struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t KTypFun; struct _fx_LN14K_form__ktyp_t_data_t* KTypTuple; struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t KTypRecord; struct _fx_R9Ast__id_t KTypName; struct _fx_T2iN14K_form__ktyp_t KTypArray; struct _fx_N14K_form__ktyp_t_data_t* KTypVector; struct _fx_N14K_form__ktyp_t_data_t* KTypList; struct _fx_N14K_form__ktyp_t_data_t* KTypRef; } u; } _fx_N14K_form__ktyp_t_data_t, *_fx_N14K_form__ktyp_t; typedef struct _fx_Ta3N14K_form__atom_t { struct _fx_N14K_form__atom_t t0; struct _fx_N14K_form__atom_t t1; struct _fx_N14K_form__atom_t t2; } _fx_Ta3N14K_form__atom_t; typedef struct _fx_N13K_form__dom_t { int tag; union { struct _fx_N14K_form__atom_t DomainElem; struct _fx_N14K_form__atom_t DomainFast; struct _fx_Ta3N14K_form__atom_t DomainRange; } u; } _fx_N13K_form__dom_t; typedef struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t { struct _fx_Nt6option1N14K_form__atom_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t; typedef struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N14K_form__ktyp_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N14K_form__atom_t t0; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1; } 
_fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N13Ast__binary_t_data_t* t0; struct _fx_N14K_form__atom_t t1; struct _fx_N14K_form__atom_t t2; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3; } _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N12Ast__unary_t t0; struct _fx_N14K_form__atom_t t1; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2; } _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_LN14K_form__atom_t_data_t { int_ rc; struct _fx_LN14K_form__atom_t_data_t* tl; struct _fx_N14K_form__atom_t hd; } _fx_LN14K_form__atom_t_data_t, *_fx_LN14K_form__atom_t; typedef struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N13Ast__intrin_t t0; struct _fx_LN14K_form__atom_t_data_t* t1; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2; } _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tN14K_form__kexp_t { struct _fx_R9Ast__id_t t0; struct _fx_N14K_form__kexp_t_data_t* t1; } _fx_T2R9Ast__id_tN14K_form__kexp_t; typedef struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_LN14K_form__kexp_t_data_t* t0; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1; } _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N14K_form__kexp_t_data_t* t0; struct _fx_N14K_form__kexp_t_data_t* t1; struct _fx_N14K_form__kexp_t_data_t* t2; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3; } _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { 
struct _fx_R9Ast__id_t t0; struct _fx_LN14K_form__atom_t_data_t* t1; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2; } _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; int_ t1; struct _fx_LN14K_form__atom_t_data_t* t2; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3; } _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_LN14K_form__atom_t_data_t* t0; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1; } _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_R9Ast__id_t t1; struct _fx_LN14K_form__atom_t_data_t* t2; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3; } _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_LLT2BN14K_form__atom_t_data_t { int_ rc; struct _fx_LLT2BN14K_form__atom_t_data_t* tl; struct _fx_LT2BN14K_form__atom_t_data_t* hd; } _fx_LLT2BN14K_form__atom_t_data_t, *_fx_LLT2BN14K_form__atom_t; typedef struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { bool t0; struct _fx_LLT2BN14K_form__atom_t_data_t* t1; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2; } _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_LT2BN14K_form__atom_t_data_t* t0; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1; } _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_LN13K_form__dom_t_data_t { int_ rc; struct _fx_LN13K_form__dom_t_data_t* tl; struct _fx_N13K_form__dom_t hd; } _fx_LN13K_form__dom_t_data_t, *_fx_LN13K_form__dom_t; typedef struct 
_fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N14K_form__atom_t t0; struct _fx_N13Ast__border_t t1; struct _fx_N18Ast__interpolate_t t2; struct _fx_LN13K_form__dom_t_data_t* t3; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t4; } _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; int_ t1; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2; } _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_N14K_form__atom_t t1; struct _fx_R10Ast__loc_t t2; } _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t; typedef struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t { struct _fx_LN14K_form__kexp_t_data_t* t0; struct _fx_N14K_form__kexp_t_data_t* t1; } _fx_T2LN14K_form__kexp_tN14K_form__kexp_t; typedef struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t { int_ rc; struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t* tl; struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t hd; } _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t, *_fx_LT2LN14K_form__kexp_tN14K_form__kexp_t; typedef struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t* t0; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1; } _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_N14K_form__kexp_t_data_t* t0; struct _fx_N14K_form__kexp_t_data_t* t1; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t2; } _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3R9Ast__id_tBR10Ast__loc_t { struct _fx_R9Ast__id_t t0; bool t1; struct _fx_R10Ast__loc_t t2; } 
_fx_T3R9Ast__id_tBR10Ast__loc_t; typedef struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t { struct _fx_N14K_form__atom_t t0; struct _fx_N14K_form__ktyp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tN13K_form__dom_t { struct _fx_R9Ast__id_t t0; struct _fx_N13K_form__dom_t t1; } _fx_T2R9Ast__id_tN13K_form__dom_t; typedef struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t { int_ rc; struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* tl; struct _fx_T2R9Ast__id_tN13K_form__dom_t hd; } _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t, *_fx_LT2R9Ast__id_tN13K_form__dom_t; typedef struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t { struct _fx_N14K_form__kexp_t_data_t* t0; struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* t1; struct _fx_LR9Ast__id_t_data_t* t2; } _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t; typedef struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t { int_ rc; struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t* tl; struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t hd; } _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t, *_fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t; typedef struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t { struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t* t0; struct _fx_N14K_form__kexp_t_data_t* t1; struct _fx_R16Ast__for_flags_t t2; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t3; } _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t { struct 
_fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* t0; struct _fx_LR9Ast__id_t_data_t* t1; struct _fx_N14K_form__kexp_t_data_t* t2; struct _fx_R16Ast__for_flags_t t3; struct _fx_R10Ast__loc_t t4; } _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t; typedef struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t { struct _fx_N14K_form__kexp_t_data_t* t0; struct _fx_N14K_form__kexp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t; typedef struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t { fx_str_t t0; struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t t1; } _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t; typedef struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_N14K_form__kexp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t; typedef struct _fx_N14K_form__kexp_t_data_t { int_ rc; int tag; union { struct _fx_R10Ast__loc_t KExpNop; struct _fx_R10Ast__loc_t KExpBreak; struct _fx_R10Ast__loc_t KExpContinue; struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t KExpReturn; struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpAtom; struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpBinary; struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpUnary; struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpIntrin; struct _fx_T2R9Ast__id_tN14K_form__kexp_t KExpSync; struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t KExpSeq; struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t KExpIf; struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpCall; struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpICall; struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkTuple; struct 
_fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkRecord; struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkClosure; struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkArray; struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMkVector; struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t KExpAt; struct _fx_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t KExpMem; struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t KExpAssign; struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMatch; struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t KExpTryCatch; struct _fx_T3R9Ast__id_tBR10Ast__loc_t KExpThrow; struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t KExpCast; struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t KExpMap; struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t KExpFor; struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t KExpWhile; struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t KExpDoWhile; struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t KExpCCode; struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t KDefVal; struct _fx_rR17K_form__kdeffun_t_data_t* KDefFun; struct _fx_rR17K_form__kdefexn_t_data_t* KDefExn; struct _fx_rR21K_form__kdefvariant_t_data_t* KDefVariant; struct _fx_rR23K_form__kdefinterface_t_data_t* KDefInterface; struct _fx_rR17K_form__kdeftyp_t_data_t* KDefTyp; struct _fx_rR25K_form__kdefclosurevars_t_data_t* KDefClosureVars; } u; } _fx_N14K_form__kexp_t_data_t, *_fx_N14K_form__kexp_t; typedef struct _fx_R14Ast__pragmas_t { bool pragma_cpp; struct _fx_LT2SR10Ast__loc_t_data_t* pragma_clibs; } _fx_R14Ast__pragmas_t; typedef struct 
_fx_R17K_form__kmodule_t { struct _fx_R9Ast__id_t km_name; int_ km_idx; int_ km_toposort_idx; fx_str_t km_cname; struct _fx_LN14K_form__kexp_t_data_t* km_top; struct _fx_Li_data_t* km_deps; bool km_skip; bool km_main; struct _fx_R14Ast__pragmas_t km_pragmas; } _fx_R17K_form__kmodule_t; typedef struct _fx_LR17K_form__kmodule_t_data_t { int_ rc; struct _fx_LR17K_form__kmodule_t_data_t* tl; struct _fx_R17K_form__kmodule_t hd; } _fx_LR17K_form__kmodule_t_data_t, *_fx_LR17K_form__kmodule_t; typedef struct _fx_Nt6option1N14C_form__ctyp_t { int tag; union { struct _fx_N14C_form__ctyp_t_data_t* Some; } u; } _fx_Nt6option1N14C_form__ctyp_t; typedef struct _fx_N17C_form__cbinary_t { int tag; union { struct _fx_N12Ast__cmpop_t COpCmp; } u; } _fx_N17C_form__cbinary_t; typedef struct _fx_N16C_form__cunary_t { int tag; } _fx_N16C_form__cunary_t; typedef struct _fx_N19C_form__ctyp_attr_t { int tag; } _fx_N19C_form__ctyp_attr_t; typedef struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t { struct _fx_Nt6option1R9Ast__id_t t0; struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* t1; } _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t; typedef struct _fx_LN14C_form__ctyp_t_data_t { int_ rc; struct _fx_LN14C_form__ctyp_t_data_t* tl; struct _fx_N14C_form__ctyp_t_data_t* hd; } _fx_LN14C_form__ctyp_t_data_t, *_fx_LN14C_form__ctyp_t; typedef struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t { struct _fx_LN14C_form__ctyp_t_data_t* t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t; typedef struct _fx_LN19C_form__ctyp_attr_t_data_t { int_ rc; struct _fx_LN19C_form__ctyp_attr_t_data_t* tl; struct _fx_N19C_form__ctyp_attr_t hd; } _fx_LN19C_form__ctyp_attr_t_data_t, *_fx_LN19C_form__ctyp_attr_t; typedef struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t { struct _fx_LN19C_form__ctyp_attr_t_data_t* t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t; typedef struct _fx_T2iN14C_form__ctyp_t { 
int_ t0; struct _fx_N14C_form__ctyp_t_data_t* t1; } _fx_T2iN14C_form__ctyp_t; typedef struct _fx_N14C_form__ctyp_t_data_t { int_ rc; int tag; union { int_ CTypSInt; int_ CTypUInt; int_ CTypFloat; struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t CTypStruct; struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t CTypUnion; struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t CTypFunRawPtr; struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t CTypRawPtr; struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t CTypRawArray; struct _fx_T2iN14C_form__ctyp_t CTypArray; struct _fx_N14C_form__ctyp_t_data_t* CTypVector; struct _fx_R9Ast__id_t CTypName; } u; } _fx_N14C_form__ctyp_t_data_t, *_fx_N14C_form__ctyp_t; typedef struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__ctyp_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_R9Ast__id_t t0; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t1; } _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14K_form__klit_t t0; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t1; } _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N17C_form__cbinary_t t0; struct _fx_N14C_form__cexp_t_data_t* t1; struct _fx_N14C_form__cexp_t_data_t* t2; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t3; } _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N16C_form__cunary_t t0; struct _fx_N14C_form__cexp_t_data_t* t1; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t2; } _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct 
_fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_R9Ast__id_t t1; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_N14C_form__ctyp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_N14C_form__cexp_t_data_t* t1; struct _fx_N14C_form__cexp_t_data_t* t2; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t3; } _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_LN14C_form__cexp_t_data_t { int_ rc; struct _fx_LN14C_form__cexp_t_data_t* tl; struct _fx_N14C_form__cexp_t_data_t* hd; } _fx_LN14C_form__cexp_t_data_t, *_fx_LN14C_form__cexp_t; typedef struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_LN14C_form__cexp_t_data_t* t1; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t { struct _fx_LN14C_form__cexp_t_data_t* t0; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t t1; } _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t; typedef struct _fx_N14C_form__cexp_t_data_t { int_ rc; int tag; union { struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t CExpIdent; struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t CExpLit; struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpBinary; struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t 
CExpUnary; struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t CExpMem; struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t CExpArrow; struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t CExpCast; struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpTernary; struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpCall; struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t CExpInit; struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t CExpTyp; struct _fx_T2SR10Ast__loc_t CExpCCode; } u; } _fx_N14C_form__cexp_t_data_t, *_fx_N14C_form__cexp_t; typedef struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t { struct _fx_Nt6option1N14C_form__cexp_t t0; struct _fx_R10Ast__loc_t t1; } _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t; typedef struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t { struct _fx_LN15C_form__cstmt_t_data_t* t0; struct _fx_R10Ast__loc_t t1; } _fx_T2LN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T2R9Ast__id_tN15C_form__cstmt_t { struct _fx_R9Ast__id_t t0; struct _fx_N15C_form__cstmt_t_data_t* t1; } _fx_T2R9Ast__id_tN15C_form__cstmt_t; typedef struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_N15C_form__cstmt_t_data_t* t1; struct _fx_N15C_form__cstmt_t_data_t* t2; struct _fx_R10Ast__loc_t t3; } _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t { struct _fx_Nt6option1N14C_form__ctyp_t t0; struct _fx_LN14C_form__cexp_t_data_t* t1; struct _fx_Nt6option1N14C_form__cexp_t t2; struct _fx_LN14C_form__cexp_t_data_t* t3; struct _fx_N15C_form__cstmt_t_data_t* t4; struct _fx_R10Ast__loc_t t5; } 
_fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_N15C_form__cstmt_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t { struct _fx_N15C_form__cstmt_t_data_t* t0; struct _fx_N14C_form__cexp_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t; typedef struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t { struct _fx_LN14C_form__cexp_t_data_t* t0; struct _fx_LN15C_form__cstmt_t_data_t* t1; } _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t; typedef struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t { int_ rc; struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl; struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t hd; } _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t, *_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t; typedef struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t { struct _fx_N14C_form__ctyp_t_data_t* t0; struct _fx_R9Ast__id_t t1; struct _fx_Nt6option1N14C_form__cexp_t t2; struct _fx_R10Ast__loc_t t3; } _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t; typedef struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t { struct _fx_N14C_form__cexp_t_data_t* t0; struct _fx_LN15C_form__cstmt_t_data_t* t1; } _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t; typedef struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t { int_ rc; struct 
_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl; struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t hd; } _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t, *_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t; typedef struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t { struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* t0; struct _fx_LN15C_form__cstmt_t_data_t* t1; struct _fx_R10Ast__loc_t t2; } _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t; typedef struct _fx_N15C_form__cstmt_t_data_t { int_ rc; int tag; union { struct _fx_R10Ast__loc_t CStmtNop; struct _fx_T2SR10Ast__loc_t CComment; struct _fx_N14C_form__cexp_t_data_t* CExp; struct _fx_R10Ast__loc_t CStmtBreak; struct _fx_R10Ast__loc_t CStmtContinue; struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t CStmtReturn; struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t CStmtBlock; struct _fx_T2R9Ast__id_tN15C_form__cstmt_t CStmtSync; struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t CStmtIf; struct _fx_T2R9Ast__id_tR10Ast__loc_t CStmtGoto; struct _fx_T2R9Ast__id_tR10Ast__loc_t CStmtLabel; struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t CStmtFor; struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t CStmtWhile; struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t CStmtDoWhile; struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t CStmtSwitch; struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t CDefVal; struct _fx_rR17C_form__cdeffun_t_data_t* CDefFun; struct _fx_rR17C_form__cdeftyp_t_data_t* CDefTyp; struct _fx_T2R9Ast__id_tR10Ast__loc_t CDefForwardSym; struct _fx_T2R9Ast__id_tR10Ast__loc_t CDefForwardTyp; struct _fx_rR18C_form__cdefenum_t_data_t* CDefEnum; struct _fx_rR23C_form__cdefinterface_t_data_t* CDefInterface; struct 
_fx_rR19C_form__cdefmacro_t_data_t* CMacroDef; struct _fx_T2R9Ast__id_tR10Ast__loc_t CMacroUndef; struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t CMacroIf; struct _fx_T2SR10Ast__loc_t CMacroInclude; struct _fx_T2SR10Ast__loc_t CMacroPragma; } u; } _fx_N15C_form__cstmt_t_data_t, *_fx_N15C_form__cstmt_t; typedef struct _fx_R17C_form__cmodule_t { struct _fx_R9Ast__id_t cmod_name; fx_str_t cmod_cname; struct _fx_LN15C_form__cstmt_t_data_t* cmod_ccode; bool cmod_main; bool cmod_recompile; bool cmod_skip; struct _fx_R14Ast__pragmas_t cmod_pragmas; } _fx_R17C_form__cmodule_t; typedef struct _fx_LR17C_form__cmodule_t_data_t { int_ rc; struct _fx_LR17C_form__cmodule_t_data_t* tl; struct _fx_R17C_form__cmodule_t hd; } _fx_LR17C_form__cmodule_t_data_t, *_fx_LR17C_form__cmodule_t; typedef struct _fx_N20Compiler__msgcolor_t { int tag; } _fx_N20Compiler__msgcolor_t; typedef struct _fx_T2LN14Lexer__token_tB { struct _fx_LN14Lexer__token_t_data_t* t0; bool t1; } _fx_T2LN14Lexer__token_tB; typedef struct _fx_T2SB { fx_str_t t0; bool t1; } _fx_T2SB; typedef struct _fx_LT2SB_data_t { int_ rc; struct _fx_LT2SB_data_t* tl; struct _fx_T2SB hd; } _fx_LT2SB_data_t, *_fx_LT2SB; typedef struct _fx_T2SLS { fx_str_t t0; struct _fx_LS_data_t* t1; } _fx_T2SLS; typedef struct _fx_Ta2LS { struct _fx_LS_data_t* t0; struct _fx_LS_data_t* t1; } _fx_Ta2LS; typedef struct _fx_T2iLi { int_ t0; struct _fx_Li_data_t* t1; } _fx_T2iLi; typedef struct _fx_LT2iLi_data_t { int_ rc; struct _fx_LT2iLi_data_t* tl; struct _fx_T2iLi hd; } _fx_LT2iLi_data_t, *_fx_LT2iLi; typedef struct _fx_rLi_data_t { int_ rc; struct _fx_Li_data_t* data; } _fx_rLi_data_t, *_fx_rLi; typedef struct _fx_T3BBS { bool t0; bool t1; fx_str_t t2; } _fx_T3BBS; typedef struct _fx_T2LR17K_form__kmodule_tB { struct _fx_LR17K_form__kmodule_t_data_t* t0; bool t1; } _fx_T2LR17K_form__kmodule_tB; typedef struct _fx_Ta9S { fx_str_t t0; fx_str_t t1; fx_str_t t2; fx_str_t t3; fx_str_t t4; fx_str_t t5; fx_str_t 
t6; fx_str_t t7; fx_str_t t8; } _fx_Ta9S; typedef struct _fx_Ta2S { fx_str_t t0; fx_str_t t1; } _fx_Ta2S; typedef struct _fx_Ta3S { fx_str_t t0; fx_str_t t1; fx_str_t t2; } _fx_Ta3S; typedef struct _fx_Ta4S { fx_str_t t0; fx_str_t t1; fx_str_t t2; fx_str_t t3; } _fx_Ta4S; typedef struct _fx_T5BBLSBS { bool t0; bool t1; struct _fx_LS_data_t* t2; bool t3; fx_str_t t4; } _fx_T5BBLSBS; typedef struct _fx_T5BBLSBLS { bool t0; bool t1; struct _fx_LS_data_t* t2; bool t3; struct _fx_LS_data_t* t4; } _fx_T5BBLSBLS; typedef struct _fx_T2LR17C_form__cmodule_tB { struct _fx_LR17C_form__cmodule_t_data_t* t0; bool t1; } _fx_T2LR17C_form__cmodule_tB; typedef struct { int_ rc; int_ data; } _fx_E4Exit_data_t; typedef struct { int_ rc; fx_str_t data; } _fx_E4Fail_data_t; typedef struct { int_ rc; struct _fx_T2Ta2iS data; } _fx_E22LexerUtils__LexerError_data_t; typedef struct { int_ rc; struct _fx_T2R10Ast__loc_tS data; } _fx_E17Ast__CompileError_data_t; typedef struct { int_ rc; struct _fx_T2R10Ast__loc_tS data; } _fx_E18Parser__ParseError_data_t; static void _fx_free_Nt6option1N10Ast__typ_t(struct _fx_Nt6option1N10Ast__typ_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { _fx_free_N10Ast__typ_t(&(*dst)->u.Some); fx_free(*dst); } *dst = 0; } static void _fx_free_LS(struct _fx_LS_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LS, fx_free_str); } static int _fx_cons_LS(fx_str_t* hd, struct _fx_LS_data_t* tl, bool addref_tl, struct _fx_LS_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LS, fx_copy_str); } static void _fx_free_N17Options__optval_t(struct _fx_N17Options__optval_t* dst) { switch (dst->tag) { case 3: fx_free_str(&dst->u.OptString); break; default: ; } dst->tag = 0; } static void _fx_copy_N17Options__optval_t(struct _fx_N17Options__optval_t* src, struct _fx_N17Options__optval_t* dst) { dst->tag = src->tag; switch (src->tag) { case 3: fx_copy_str(&src->u.OptString, &dst->u.OptString); break; default: dst->u = src->u; } } static void _fx_free_T2SN17Options__optval_t(struct 
_fx_T2SN17Options__optval_t* dst) { fx_free_str(&dst->t0); _fx_free_N17Options__optval_t(&dst->t1); } static void _fx_copy_T2SN17Options__optval_t(struct _fx_T2SN17Options__optval_t* src, struct _fx_T2SN17Options__optval_t* dst) { fx_copy_str(&src->t0, &dst->t0); _fx_copy_N17Options__optval_t(&src->t1, &dst->t1); } static void _fx_make_T2SN17Options__optval_t( fx_str_t* t0, struct _fx_N17Options__optval_t* t1, struct _fx_T2SN17Options__optval_t* fx_result) { fx_copy_str(t0, &fx_result->t0); _fx_copy_N17Options__optval_t(t1, &fx_result->t1); } static void _fx_free_LT2SN17Options__optval_t(struct _fx_LT2SN17Options__optval_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2SN17Options__optval_t, _fx_free_T2SN17Options__optval_t); } static int _fx_cons_LT2SN17Options__optval_t( struct _fx_T2SN17Options__optval_t* hd, struct _fx_LT2SN17Options__optval_t_data_t* tl, bool addref_tl, struct _fx_LT2SN17Options__optval_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2SN17Options__optval_t, _fx_copy_T2SN17Options__optval_t); } static void _fx_free_R18Options__options_t(struct _fx_R18Options__options_t* dst) { _fx_free_LS(&dst->app_args); fx_free_str(&dst->app_filename); fx_free_str(&dst->build_dir); fx_free_str(&dst->build_rootdir); fx_free_str(&dst->cflags); fx_free_str(&dst->clibs); fx_free_str(&dst->filename); _fx_free_LS(&dst->include_path); _fx_free_LT2SN17Options__optval_t(&dst->defines); fx_free_str(&dst->output_name); } static void _fx_copy_R18Options__options_t(struct _fx_R18Options__options_t* src, struct _fx_R18Options__options_t* dst) { FX_COPY_PTR(src->app_args, &dst->app_args); fx_copy_str(&src->app_filename, &dst->app_filename); dst->arch64 = src->arch64; dst->force_rebuild = src->force_rebuild; fx_copy_str(&src->build_dir, &dst->build_dir); fx_copy_str(&src->build_rootdir, &dst->build_rootdir); fx_copy_str(&src->cflags, &dst->cflags); fx_copy_str(&src->clibs, &dst->clibs); dst->compile_by_cpp = src->compile_by_cpp; fx_copy_str(&src->filename, &dst->filename); 
dst->gen_c = src->gen_c; FX_COPY_PTR(src->include_path, &dst->include_path); dst->debug = src->debug; FX_COPY_PTR(src->defines, &dst->defines); dst->optim_iters = src->optim_iters; dst->inline_thresh = src->inline_thresh; dst->enable_openmp = src->enable_openmp; dst->relax = src->relax; dst->use_preamble = src->use_preamble; dst->make_app = src->make_app; dst->optimize_level = src->optimize_level; fx_copy_str(&src->output_name, &dst->output_name); dst->print_ast0 = src->print_ast0; dst->print_ast = src->print_ast; dst->print_k0 = src->print_k0; dst->print_k = src->print_k; dst->print_tokens = src->print_tokens; dst->run_app = src->run_app; dst->verbose = src->verbose; dst->W_unused = src->W_unused; } static void _fx_make_R18Options__options_t( struct _fx_LS_data_t* r_app_args, fx_str_t* r_app_filename, bool r_arch64, bool r_force_rebuild, fx_str_t* r_build_dir, fx_str_t* r_build_rootdir, fx_str_t* r_cflags, fx_str_t* r_clibs, bool r_compile_by_cpp, fx_str_t* r_filename, bool r_gen_c, struct _fx_LS_data_t* r_include_path, bool r_debug, struct _fx_LT2SN17Options__optval_t_data_t* r_defines, int_ r_optim_iters, int_ r_inline_thresh, bool r_enable_openmp, bool r_relax, bool r_use_preamble, bool r_make_app, int_ r_optimize_level, fx_str_t* r_output_name, bool r_print_ast0, bool r_print_ast, bool r_print_k0, bool r_print_k, bool r_print_tokens, bool r_run_app, bool r_verbose, bool r_W_unused, struct _fx_R18Options__options_t* fx_result) { FX_COPY_PTR(r_app_args, &fx_result->app_args); fx_copy_str(r_app_filename, &fx_result->app_filename); fx_result->arch64 = r_arch64; fx_result->force_rebuild = r_force_rebuild; fx_copy_str(r_build_dir, &fx_result->build_dir); fx_copy_str(r_build_rootdir, &fx_result->build_rootdir); fx_copy_str(r_cflags, &fx_result->cflags); fx_copy_str(r_clibs, &fx_result->clibs); fx_result->compile_by_cpp = r_compile_by_cpp; fx_copy_str(r_filename, &fx_result->filename); fx_result->gen_c = r_gen_c; FX_COPY_PTR(r_include_path, &fx_result->include_path); 
/* NOTE(review): machine-generated runtime glue (the "_fx_" prefixes and FX_*
 * macros suggest Ficus compiler output -- confirm against the generator).
 * For each compiled type T there are _fx_free_T / _fx_copy_T / _fx_make_T
 * helpers; list types additionally get a _fx_cons_* node constructor, and
 * boxed ("ref") types get _fx_free_r* / _fx_make_r* wrappers.  The FX_*IMPL
 * macros use the surrounding parameter names (hd/tl/addref_tl/arg/dst/
 * fx_result) implicitly, so no code tokens are touched in this review;
 * only comments and line-wrapping are added. */

/* Tail of a record constructor that begins before this chunk: copies the
 * remaining compiler-option fields into *fx_result.  FX_COPY_PTR / fx_copy_str
 * presumably add a reference / share the buffer rather than deep-copy --
 * TODO confirm against the runtime header. */
fx_result->debug = r_debug;
FX_COPY_PTR(r_defines, &fx_result->defines);
fx_result->optim_iters = r_optim_iters;
fx_result->inline_thresh = r_inline_thresh;
fx_result->enable_openmp = r_enable_openmp;
fx_result->relax = r_relax;
fx_result->use_preamble = r_use_preamble;
fx_result->make_app = r_make_app;
fx_result->optimize_level = r_optimize_level;
fx_copy_str(r_output_name, &fx_result->output_name);
fx_result->print_ast0 = r_print_ast0;
fx_result->print_ast = r_print_ast;
fx_result->print_k0 = r_print_k0;
fx_result->print_k = r_print_k;
fx_result->print_tokens = r_print_tokens;
fx_result->run_app = r_run_app;
fx_result->verbose = r_verbose;
fx_result->W_unused = r_W_unused;
}

/* (Ta2i, string) tuple: only t1 (the string) owns heap data; t0 is a plain value. */
static void _fx_free_T2Ta2iS(struct _fx_T2Ta2iS* dst)
{
    fx_free_str(&dst->t1);
}

static void _fx_copy_T2Ta2iS(struct _fx_T2Ta2iS* src, struct _fx_T2Ta2iS* dst)
{
    dst->t0 = src->t0;
    fx_copy_str(&src->t1, &dst->t1);
}

static void _fx_make_T2Ta2iS(struct _fx_Ta2i* t0, fx_str_t* t1, struct _fx_T2Ta2iS* fx_result)
{
    fx_result->t0 = *t0;
    fx_copy_str(t1, &fx_result->t1);
}

/* List-node constructor for Ast.scope_t lists; whole body is the macro,
 * which uses hd/tl/addref_tl/fx_result implicitly. */
static int _fx_cons_LN12Ast__scope_t(
    struct _fx_N12Ast__scope_t* hd,
    struct _fx_LN12Ast__scope_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN12Ast__scope_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN12Ast__scope_t, FX_COPY_SIMPLE_BY_PTR);
}

/* Ast.val_flags_t record: only val_flag_global (a simple list) owns heap data. */
static void _fx_free_R16Ast__val_flags_t(struct _fx_R16Ast__val_flags_t* dst)
{
    fx_free_list_simple(&dst->val_flag_global);
}

static void _fx_copy_R16Ast__val_flags_t(struct _fx_R16Ast__val_flags_t* src, struct _fx_R16Ast__val_flags_t* dst)
{
    dst->val_flag_arg = src->val_flag_arg;
    dst->val_flag_mutable = src->val_flag_mutable;
    dst->val_flag_temp = src->val_flag_temp;
    dst->val_flag_tempref = src->val_flag_tempref;
    dst->val_flag_private = src->val_flag_private;
    dst->val_flag_subarray = src->val_flag_subarray;
    dst->val_flag_instance = src->val_flag_instance;
    dst->val_flag_method = src->val_flag_method;
    dst->val_flag_ctor = src->val_flag_ctor;
    FX_COPY_PTR(src->val_flag_global, &dst->val_flag_global);
}

static void _fx_make_R16Ast__val_flags_t(
    bool r_val_flag_arg,
    bool r_val_flag_mutable,
    bool r_val_flag_temp,
    bool r_val_flag_tempref,
    bool r_val_flag_private,
    bool r_val_flag_subarray,
    bool r_val_flag_instance,
    struct _fx_T2R9Ast__id_ti* r_val_flag_method,
    int_ r_val_flag_ctor,
    struct _fx_LN12Ast__scope_t_data_t* r_val_flag_global,
    struct _fx_R16Ast__val_flags_t* fx_result)
{
    fx_result->val_flag_arg = r_val_flag_arg;
    fx_result->val_flag_mutable = r_val_flag_mutable;
    fx_result->val_flag_temp = r_val_flag_temp;
    fx_result->val_flag_tempref = r_val_flag_tempref;
    fx_result->val_flag_private = r_val_flag_private;
    fx_result->val_flag_subarray = r_val_flag_subarray;
    fx_result->val_flag_instance = r_val_flag_instance;
    fx_result->val_flag_method = *r_val_flag_method;
    fx_result->val_flag_ctor = r_val_flag_ctor;
    FX_COPY_PTR(r_val_flag_global, &fx_result->val_flag_global);
}

/* (Ast.id_t, C_form.ctyp_t) tuple helpers; t1 is freed through its own
 * type-specific destructor. */
static void _fx_free_T2R9Ast__id_tN14C_form__ctyp_t(struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* dst)
{
    _fx_free_N14C_form__ctyp_t(&dst->t1);
}

static void _fx_copy_T2R9Ast__id_tN14C_form__ctyp_t(
    struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* src,
    struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}

static void _fx_make_T2R9Ast__id_tN14C_form__ctyp_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N14C_form__ctyp_t_data_t* t1,
    struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}

/* List of the tuple above: destructor + node constructor. */
static void _fx_free_LT2R9Ast__id_tN14C_form__ctyp_t(struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN14C_form__ctyp_t,
        _fx_free_T2R9Ast__id_tN14C_form__ctyp_t);
}

static int _fx_cons_LT2R9Ast__id_tN14C_form__ctyp_t(
    struct _fx_T2R9Ast__id_tN14C_form__ctyp_t* hd,
    struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN14C_form__ctyp_t,
        _fx_copy_T2R9Ast__id_tN14C_form__ctyp_t);
}

/* C_form.cdefinterface_t record helpers. */
static void _fx_free_R23C_form__cdefinterface_t(struct _fx_R23C_form__cdefinterface_t* dst)
{
    fx_free_str(&dst->ci_cname);
    _fx_free_LT2R9Ast__id_tN14C_form__ctyp_t(&dst->ci_all_methods);
    fx_free_list_simple(&dst->ci_scope);
}

static void _fx_copy_R23C_form__cdefinterface_t(
    struct _fx_R23C_form__cdefinterface_t* src,
    struct _fx_R23C_form__cdefinterface_t* dst)
{
    dst->ci_name = src->ci_name;
    fx_copy_str(&src->ci_cname, &dst->ci_cname);
    dst->ci_id = src->ci_id;
    dst->ci_vtbl = src->ci_vtbl;
    dst->ci_base = src->ci_base;
    FX_COPY_PTR(src->ci_all_methods, &dst->ci_all_methods);
    FX_COPY_PTR(src->ci_scope, &dst->ci_scope);
    dst->ci_loc = src->ci_loc;
}

static void _fx_make_R23C_form__cdefinterface_t(
    struct _fx_R9Ast__id_t* r_ci_name,
    fx_str_t* r_ci_cname,
    struct _fx_R9Ast__id_t* r_ci_id,
    struct _fx_R9Ast__id_t* r_ci_vtbl,
    struct _fx_R9Ast__id_t* r_ci_base,
    struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* r_ci_all_methods,
    struct _fx_LN12Ast__scope_t_data_t* r_ci_scope,
    struct _fx_R10Ast__loc_t* r_ci_loc,
    struct _fx_R23C_form__cdefinterface_t* fx_result)
{
    fx_result->ci_name = *r_ci_name;
    fx_copy_str(r_ci_cname, &fx_result->ci_cname);
    fx_result->ci_id = *r_ci_id;
    fx_result->ci_vtbl = *r_ci_vtbl;
    fx_result->ci_base = *r_ci_base;
    FX_COPY_PTR(r_ci_all_methods, &fx_result->ci_all_methods);
    FX_COPY_PTR(r_ci_scope, &fx_result->ci_scope);
    fx_result->ci_loc = *r_ci_loc;
}

/* Boxed (ref) wrapper around cdefinterface_t. */
static void _fx_free_rR23C_form__cdefinterface_t(struct _fx_rR23C_form__cdefinterface_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR23C_form__cdefinterface_t, _fx_free_R23C_form__cdefinterface_t);
}

static int _fx_make_rR23C_form__cdefinterface_t(
    struct _fx_R23C_form__cdefinterface_t* arg,
    struct _fx_rR23C_form__cdefinterface_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR23C_form__cdefinterface_t, _fx_copy_R23C_form__cdefinterface_t);
}

/* Destructor for C_form.cstmt_t lists (definition continues on the next
 * source line). */
static void _fx_free_LN15C_form__cstmt_t(struct _fx_LN15C_form__cstmt_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LN15C_form__cstmt_t,
_fx_free_N15C_form__cstmt_t); }  /* end of _fx_free_LN15C_form__cstmt_t (header on previous line) */

/* Node constructor for C_form.cstmt_t lists (elements are ref-counted
 * pointers, copied via FX_COPY_PTR). */
static int _fx_cons_LN15C_form__cstmt_t(
    struct _fx_N15C_form__cstmt_t_data_t* hd,
    struct _fx_LN15C_form__cstmt_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN15C_form__cstmt_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN15C_form__cstmt_t, FX_COPY_PTR);
}

/* Node constructor for carg_attr lists (plain-value elements). */
static int _fx_cons_LN19C_form__carg_attr_t(
    struct _fx_N19C_form__carg_attr_t* hd,
    struct _fx_LN19C_form__carg_attr_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN19C_form__carg_attr_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN19C_form__carg_attr_t, FX_COPY_SIMPLE_BY_PTR);
}

/* (id_t, ctyp_t, carg_attr list) triple -- a C function argument descriptor. */
static void _fx_free_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(
    struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* dst)
{
    _fx_free_N14C_form__ctyp_t(&dst->t1);
    fx_free_list_simple(&dst->t2);
}

static void _fx_copy_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(
    struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* src,
    struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    FX_COPY_PTR(src->t2, &dst->t2);
}

static void _fx_make_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N14C_form__ctyp_t_data_t* t1,
    struct _fx_LN19C_form__carg_attr_t_data_t* t2,
    struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    FX_COPY_PTR(t2, &fx_result->t2);
}

/* List of the argument-descriptor triple above. */
static void _fx_free_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(
    struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t,
        _fx_free_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t);
}

static int _fx_cons_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(
    struct _fx_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t* hd,
    struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t,
        _fx_copy_T3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t);
}

/* C_form.cdeffun_t -- a C-level function definition record. */
static void _fx_free_R17C_form__cdeffun_t(struct _fx_R17C_form__cdeffun_t* dst)
{
    fx_free_str(&dst->cf_cname);
    _fx_free_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t(&dst->cf_args);
    _fx_free_N14C_form__ctyp_t(&dst->cf_rt);
    _fx_free_LN15C_form__cstmt_t(&dst->cf_body);
    fx_free_list_simple(&dst->cf_scope);
}

static void _fx_copy_R17C_form__cdeffun_t(struct _fx_R17C_form__cdeffun_t* src, struct _fx_R17C_form__cdeffun_t* dst)
{
    dst->cf_name = src->cf_name;
    fx_copy_str(&src->cf_cname, &dst->cf_cname);
    FX_COPY_PTR(src->cf_args, &dst->cf_args);
    FX_COPY_PTR(src->cf_rt, &dst->cf_rt);
    FX_COPY_PTR(src->cf_body, &dst->cf_body);
    dst->cf_flags = src->cf_flags;
    FX_COPY_PTR(src->cf_scope, &dst->cf_scope);
    dst->cf_loc = src->cf_loc;
}

static void _fx_make_R17C_form__cdeffun_t(
    struct _fx_R9Ast__id_t* r_cf_name,
    fx_str_t* r_cf_cname,
    struct _fx_LT3R9Ast__id_tN14C_form__ctyp_tLN19C_form__carg_attr_t_data_t* r_cf_args,
    struct _fx_N14C_form__ctyp_t_data_t* r_cf_rt,
    struct _fx_LN15C_form__cstmt_t_data_t* r_cf_body,
    struct _fx_R16Ast__fun_flags_t* r_cf_flags,
    struct _fx_LN12Ast__scope_t_data_t* r_cf_scope,
    struct _fx_R10Ast__loc_t* r_cf_loc,
    struct _fx_R17C_form__cdeffun_t* fx_result)
{
    fx_result->cf_name = *r_cf_name;
    fx_copy_str(r_cf_cname, &fx_result->cf_cname);
    FX_COPY_PTR(r_cf_args, &fx_result->cf_args);
    FX_COPY_PTR(r_cf_rt, &fx_result->cf_rt);
    FX_COPY_PTR(r_cf_body, &fx_result->cf_body);
    fx_result->cf_flags = *r_cf_flags;
    FX_COPY_PTR(r_cf_scope, &fx_result->cf_scope);
    fx_result->cf_loc = *r_cf_loc;
}

/* Boxed (ref) wrapper around cdeffun_t. */
static void _fx_free_rR17C_form__cdeffun_t(struct _fx_rR17C_form__cdeffun_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR17C_form__cdeffun_t, _fx_free_R17C_form__cdeffun_t);
}

static int _fx_make_rR17C_form__cdeffun_t(
    struct _fx_R17C_form__cdeffun_t* arg,
    struct _fx_rR17C_form__cdeffun_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR17C_form__cdeffun_t, _fx_copy_R17C_form__cdeffun_t);
}

/* Node constructor for plain Ast.id_t lists. */
static int _fx_cons_LR9Ast__id_t(
    struct _fx_R9Ast__id_t* hd,
    struct _fx_LR9Ast__id_t_data_t* tl,
    bool addref_tl,
    struct _fx_LR9Ast__id_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LR9Ast__id_t, FX_COPY_SIMPLE_BY_PTR);
}

/* C_form.ctprops_t -- per-type properties; only ctp_make (a list) owns heap data. */
static void _fx_free_R17C_form__ctprops_t(struct _fx_R17C_form__ctprops_t* dst)
{
    fx_free_list_simple(&dst->ctp_make);
}

static void _fx_copy_R17C_form__ctprops_t(struct _fx_R17C_form__ctprops_t* src, struct _fx_R17C_form__ctprops_t* dst)
{
    dst->ctp_scalar = src->ctp_scalar;
    dst->ctp_complex = src->ctp_complex;
    dst->ctp_ptr = src->ctp_ptr;
    dst->ctp_pass_by_ref = src->ctp_pass_by_ref;
    FX_COPY_PTR(src->ctp_make, &dst->ctp_make);
    dst->ctp_free = src->ctp_free;
    dst->ctp_copy = src->ctp_copy;
}

static void _fx_make_R17C_form__ctprops_t(
    bool r_ctp_scalar,
    bool r_ctp_complex,
    bool r_ctp_ptr,
    bool r_ctp_pass_by_ref,
    struct _fx_LR9Ast__id_t_data_t* r_ctp_make,
    struct _fx_Ta2R9Ast__id_t* r_ctp_free,
    struct _fx_Ta2R9Ast__id_t* r_ctp_copy,
    struct _fx_R17C_form__ctprops_t* fx_result)
{
    fx_result->ctp_scalar = r_ctp_scalar;
    fx_result->ctp_complex = r_ctp_complex;
    fx_result->ctp_ptr = r_ctp_ptr;
    fx_result->ctp_pass_by_ref = r_ctp_pass_by_ref;
    FX_COPY_PTR(r_ctp_make, &fx_result->ctp_make);
    fx_result->ctp_free = *r_ctp_free;
    fx_result->ctp_copy = *r_ctp_copy;
}

/* C_form.cdeftyp_t -- a C-level type definition record. */
static void _fx_free_R17C_form__cdeftyp_t(struct _fx_R17C_form__cdeftyp_t* dst)
{
    _fx_free_N14C_form__ctyp_t(&dst->ct_typ);
    fx_free_str(&dst->ct_cname);
    _fx_free_R17C_form__ctprops_t(&dst->ct_props);
    fx_free_list_simple(&dst->ct_ifaces);
    fx_free_list_simple(&dst->ct_scope);
}

/* copy continues on the next source line */
static void _fx_copy_R17C_form__cdeftyp_t(struct _fx_R17C_form__cdeftyp_t* src, struct _fx_R17C_form__cdeftyp_t* dst)
{
    dst->ct_name = src->ct_name;
    FX_COPY_PTR(src->ct_typ, &dst->ct_typ);
    fx_copy_str(&src->ct_cname, &dst->ct_cname);
    _fx_copy_R17C_form__ctprops_t(&src->ct_props, &dst->ct_props);
/* remainder of _fx_copy_R17C_form__cdeftyp_t (starts on previous line) */
dst->ct_data_start = src->ct_data_start;
    dst->ct_enum = src->ct_enum;
    FX_COPY_PTR(src->ct_ifaces, &dst->ct_ifaces);
    dst->ct_ifaces_id = src->ct_ifaces_id;
    FX_COPY_PTR(src->ct_scope, &dst->ct_scope);
    dst->ct_loc = src->ct_loc;
}

static void _fx_make_R17C_form__cdeftyp_t(
    struct _fx_R9Ast__id_t* r_ct_name,
    struct _fx_N14C_form__ctyp_t_data_t* r_ct_typ,
    fx_str_t* r_ct_cname,
    struct _fx_R17C_form__ctprops_t* r_ct_props,
    int_ r_ct_data_start,
    struct _fx_R9Ast__id_t* r_ct_enum,
    struct _fx_LR9Ast__id_t_data_t* r_ct_ifaces,
    struct _fx_R9Ast__id_t* r_ct_ifaces_id,
    struct _fx_LN12Ast__scope_t_data_t* r_ct_scope,
    struct _fx_R10Ast__loc_t* r_ct_loc,
    struct _fx_R17C_form__cdeftyp_t* fx_result)
{
    fx_result->ct_name = *r_ct_name;
    FX_COPY_PTR(r_ct_typ, &fx_result->ct_typ);
    fx_copy_str(r_ct_cname, &fx_result->ct_cname);
    _fx_copy_R17C_form__ctprops_t(r_ct_props, &fx_result->ct_props);
    fx_result->ct_data_start = r_ct_data_start;
    fx_result->ct_enum = *r_ct_enum;
    FX_COPY_PTR(r_ct_ifaces, &fx_result->ct_ifaces);
    fx_result->ct_ifaces_id = *r_ct_ifaces_id;
    FX_COPY_PTR(r_ct_scope, &fx_result->ct_scope);
    fx_result->ct_loc = *r_ct_loc;
}

/* Boxed (ref) wrapper around cdeftyp_t. */
static void _fx_free_rR17C_form__cdeftyp_t(struct _fx_rR17C_form__cdeftyp_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR17C_form__cdeftyp_t, _fx_free_R17C_form__cdeftyp_t);
}

static int _fx_make_rR17C_form__cdeftyp_t(
    struct _fx_R17C_form__cdeftyp_t* arg,
    struct _fx_rR17C_form__cdeftyp_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR17C_form__cdeftyp_t, _fx_copy_R17C_form__cdeftyp_t);
}

/* Option-of-cexp variant: tag 2 is "Some" and owns a heap payload;
 * the default branch (presumably "None") holds no heap data. */
static void _fx_free_Nt6option1N14C_form__cexp_t(struct _fx_Nt6option1N14C_form__cexp_t* dst)
{
    switch (dst->tag) {
    case 2:
        _fx_free_N14C_form__cexp_t(&dst->u.Some);
        break;
    default: ;
    }
    dst->tag = 0;
}

static void _fx_copy_Nt6option1N14C_form__cexp_t(
    struct _fx_Nt6option1N14C_form__cexp_t* src,
    struct _fx_Nt6option1N14C_form__cexp_t* dst)
{
    dst->tag = src->tag;
    switch (src->tag) {
    case 2:
        FX_COPY_PTR(src->u.Some, &dst->u.Some);
        break;
    default:
        dst->u = src->u;
    }
}

/* (id_t, cexp option) tuple -- an enum member with optional initializer. */
static void
_fx_free_T2R9Ast__id_tNt6option1N14C_form__cexp_t(struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* dst)
{
    _fx_free_Nt6option1N14C_form__cexp_t(&dst->t1);
}

static void _fx_copy_T2R9Ast__id_tNt6option1N14C_form__cexp_t(
    struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* src,
    struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* dst)
{
    dst->t0 = src->t0;
    _fx_copy_Nt6option1N14C_form__cexp_t(&src->t1, &dst->t1);
}

static void _fx_make_T2R9Ast__id_tNt6option1N14C_form__cexp_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_Nt6option1N14C_form__cexp_t* t1,
    struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* fx_result)
{
    fx_result->t0 = *t0;
    _fx_copy_Nt6option1N14C_form__cexp_t(t1, &fx_result->t1);
}

/* List of the enum-member tuple above. */
static void _fx_free_LT2R9Ast__id_tNt6option1N14C_form__cexp_t(
    struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t,
        _fx_free_T2R9Ast__id_tNt6option1N14C_form__cexp_t);
}

static int _fx_cons_LT2R9Ast__id_tNt6option1N14C_form__cexp_t(
    struct _fx_T2R9Ast__id_tNt6option1N14C_form__cexp_t* hd,
    struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t,
        _fx_copy_T2R9Ast__id_tNt6option1N14C_form__cexp_t);
}

/* C_form.cdefenum_t -- a C-level enum definition record. */
static void _fx_free_R18C_form__cdefenum_t(struct _fx_R18C_form__cdefenum_t* dst)
{
    _fx_free_LT2R9Ast__id_tNt6option1N14C_form__cexp_t(&dst->cenum_members);
    fx_free_str(&dst->cenum_cname);
    fx_free_list_simple(&dst->cenum_scope);
}

static void _fx_copy_R18C_form__cdefenum_t(struct _fx_R18C_form__cdefenum_t* src, struct _fx_R18C_form__cdefenum_t* dst)
{
    dst->cenum_name = src->cenum_name;
    FX_COPY_PTR(src->cenum_members, &dst->cenum_members);
    fx_copy_str(&src->cenum_cname, &dst->cenum_cname);
    FX_COPY_PTR(src->cenum_scope, &dst->cenum_scope);
    dst->cenum_loc = src->cenum_loc;
}

static void _fx_make_R18C_form__cdefenum_t(
    struct _fx_R9Ast__id_t* r_cenum_name,
    struct _fx_LT2R9Ast__id_tNt6option1N14C_form__cexp_t_data_t* r_cenum_members,
    fx_str_t* r_cenum_cname,
    struct _fx_LN12Ast__scope_t_data_t* r_cenum_scope,
    struct _fx_R10Ast__loc_t* r_cenum_loc,
    struct _fx_R18C_form__cdefenum_t* fx_result)
{
    fx_result->cenum_name = *r_cenum_name;
    FX_COPY_PTR(r_cenum_members, &fx_result->cenum_members);
    fx_copy_str(r_cenum_cname, &fx_result->cenum_cname);
    FX_COPY_PTR(r_cenum_scope, &fx_result->cenum_scope);
    fx_result->cenum_loc = *r_cenum_loc;
}

/* Boxed (ref) wrapper around cdefenum_t. */
static void _fx_free_rR18C_form__cdefenum_t(struct _fx_rR18C_form__cdefenum_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR18C_form__cdefenum_t, _fx_free_R18C_form__cdefenum_t);
}

static int _fx_make_rR18C_form__cdefenum_t(
    struct _fx_R18C_form__cdefenum_t* arg,
    struct _fx_rR18C_form__cdefenum_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR18C_form__cdefenum_t, _fx_copy_R18C_form__cdefenum_t);
}

/* C_form.cdefmacro_t -- a C-level macro definition record. */
static void _fx_free_R19C_form__cdefmacro_t(struct _fx_R19C_form__cdefmacro_t* dst)
{
    fx_free_str(&dst->cm_cname);
    fx_free_list_simple(&dst->cm_args);
    _fx_free_LN15C_form__cstmt_t(&dst->cm_body);
    fx_free_list_simple(&dst->cm_scope);
}

static void _fx_copy_R19C_form__cdefmacro_t(struct _fx_R19C_form__cdefmacro_t* src, struct _fx_R19C_form__cdefmacro_t* dst)
{
    dst->cm_name = src->cm_name;
    fx_copy_str(&src->cm_cname, &dst->cm_cname);
    FX_COPY_PTR(src->cm_args, &dst->cm_args);
    FX_COPY_PTR(src->cm_body, &dst->cm_body);
    FX_COPY_PTR(src->cm_scope, &dst->cm_scope);
    dst->cm_loc = src->cm_loc;
}

/* constructor continues on the next source line */
static void _fx_make_R19C_form__cdefmacro_t(
    struct _fx_R9Ast__id_t* r_cm_name,
    fx_str_t* r_cm_cname,
    struct _fx_LR9Ast__id_t_data_t* r_cm_args,
    struct _fx_LN15C_form__cstmt_t_data_t* r_cm_body,
    struct _fx_LN12Ast__scope_t_data_t* r_cm_scope,
    struct _fx_R10Ast__loc_t* r_cm_loc,
    struct _fx_R19C_form__cdefmacro_t* fx_result)
{
    fx_result->cm_name = *r_cm_name;
    fx_copy_str(r_cm_cname, &fx_result->cm_cname);
    FX_COPY_PTR(r_cm_args, &fx_result->cm_args);
    FX_COPY_PTR(r_cm_body, &fx_result->cm_body);
/* remainder of _fx_make_R19C_form__cdefmacro_t (starts on previous line) */
FX_COPY_PTR(r_cm_scope, &fx_result->cm_scope);
    fx_result->cm_loc = *r_cm_loc;
}

/* Boxed (ref) wrapper around cdefmacro_t. */
static void _fx_free_rR19C_form__cdefmacro_t(struct _fx_rR19C_form__cdefmacro_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR19C_form__cdefmacro_t, _fx_free_R19C_form__cdefmacro_t);
}

static int _fx_make_rR19C_form__cdefmacro_t(
    struct _fx_R19C_form__cdefmacro_t* arg,
    struct _fx_rR19C_form__cdefmacro_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR19C_form__cdefmacro_t, _fx_copy_R19C_form__cdefmacro_t);
}

/* (Ast.id_t, K_form.ktyp_t) tuple helpers. */
static void _fx_free_T2R9Ast__id_tN14K_form__ktyp_t(struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* dst)
{
    _fx_free_N14K_form__ktyp_t(&dst->t1);
}

static void _fx_copy_T2R9Ast__id_tN14K_form__ktyp_t(
    struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* src,
    struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}

static void _fx_make_T2R9Ast__id_tN14K_form__ktyp_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N14K_form__ktyp_t_data_t* t1,
    struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}

/* List of the (id_t, ktyp_t) tuple above. */
static void _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN14K_form__ktyp_t,
        _fx_free_T2R9Ast__id_tN14K_form__ktyp_t);
}

static int _fx_cons_LT2R9Ast__id_tN14K_form__ktyp_t(
    struct _fx_T2R9Ast__id_tN14K_form__ktyp_t* hd,
    struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN14K_form__ktyp_t,
        _fx_copy_T2R9Ast__id_tN14K_form__ktyp_t);
}

/* K_form.kdefinterface_t -- a K-level interface definition record. */
static void _fx_free_R23K_form__kdefinterface_t(struct _fx_R23K_form__kdefinterface_t* dst)
{
    fx_free_str(&dst->ki_cname);
    _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(&dst->ki_all_methods);
    fx_free_list_simple(&dst->ki_scope);
}

static void _fx_copy_R23K_form__kdefinterface_t(
    struct _fx_R23K_form__kdefinterface_t* src,
    struct _fx_R23K_form__kdefinterface_t* dst)
{
    dst->ki_name = src->ki_name;
    dst->ki_base = src->ki_base;
    fx_copy_str(&src->ki_cname, &dst->ki_cname);
    dst->ki_id = src->ki_id;
    FX_COPY_PTR(src->ki_all_methods, &dst->ki_all_methods);
    FX_COPY_PTR(src->ki_scope, &dst->ki_scope);
    dst->ki_loc = src->ki_loc;
}

static void _fx_make_R23K_form__kdefinterface_t(
    struct _fx_R9Ast__id_t* r_ki_name,
    struct _fx_R9Ast__id_t* r_ki_base,
    fx_str_t* r_ki_cname,
    struct _fx_R9Ast__id_t* r_ki_id,
    struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* r_ki_all_methods,
    struct _fx_LN12Ast__scope_t_data_t* r_ki_scope,
    struct _fx_R10Ast__loc_t* r_ki_loc,
    struct _fx_R23K_form__kdefinterface_t* fx_result)
{
    fx_result->ki_name = *r_ki_name;
    fx_result->ki_base = *r_ki_base;
    fx_copy_str(r_ki_cname, &fx_result->ki_cname);
    fx_result->ki_id = *r_ki_id;
    FX_COPY_PTR(r_ki_all_methods, &fx_result->ki_all_methods);
    FX_COPY_PTR(r_ki_scope, &fx_result->ki_scope);
    fx_result->ki_loc = *r_ki_loc;
}

/* Boxed (ref) wrapper around kdefinterface_t. */
static void _fx_free_rR23K_form__kdefinterface_t(struct _fx_rR23K_form__kdefinterface_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR23K_form__kdefinterface_t, _fx_free_R23K_form__kdefinterface_t);
}

static int _fx_make_rR23K_form__kdefinterface_t(
    struct _fx_R23K_form__kdefinterface_t* arg,
    struct _fx_rR23K_form__kdefinterface_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR23K_form__kdefinterface_t, _fx_copy_R23K_form__kdefinterface_t);
}

/* K_form.kdeffun_t -- a K-level function definition record. */
static void _fx_free_R17K_form__kdeffun_t(struct _fx_R17K_form__kdeffun_t* dst)
{
    fx_free_str(&dst->kf_cname);
    fx_free_list_simple(&dst->kf_params);
    _fx_free_N14K_form__ktyp_t(&dst->kf_rt);
    _fx_free_N14K_form__kexp_t(&dst->kf_body);
    fx_free_list_simple(&dst->kf_scope);
}

static void _fx_copy_R17K_form__kdeffun_t(struct _fx_R17K_form__kdeffun_t* src, struct _fx_R17K_form__kdeffun_t* dst)
{
    dst->kf_name = src->kf_name;
    fx_copy_str(&src->kf_cname, &dst->kf_cname);
    FX_COPY_PTR(src->kf_params, &dst->kf_params);
    FX_COPY_PTR(src->kf_rt, &dst->kf_rt);
    FX_COPY_PTR(src->kf_body, &dst->kf_body);
    dst->kf_flags = src->kf_flags;
    dst->kf_closure = src->kf_closure;
    FX_COPY_PTR(src->kf_scope, &dst->kf_scope);
    dst->kf_loc = src->kf_loc;
}

static void _fx_make_R17K_form__kdeffun_t(
    struct _fx_R9Ast__id_t* r_kf_name,
    fx_str_t* r_kf_cname,
    struct _fx_LR9Ast__id_t_data_t* r_kf_params,
    struct _fx_N14K_form__ktyp_t_data_t* r_kf_rt,
    struct _fx_N14K_form__kexp_t_data_t* r_kf_body,
    struct _fx_R16Ast__fun_flags_t* r_kf_flags,
    struct _fx_R25K_form__kdefclosureinfo_t* r_kf_closure,
    struct _fx_LN12Ast__scope_t_data_t* r_kf_scope,
    struct _fx_R10Ast__loc_t* r_kf_loc,
    struct _fx_R17K_form__kdeffun_t* fx_result)
{
    fx_result->kf_name = *r_kf_name;
    fx_copy_str(r_kf_cname, &fx_result->kf_cname);
    FX_COPY_PTR(r_kf_params, &fx_result->kf_params);
    FX_COPY_PTR(r_kf_rt, &fx_result->kf_rt);
    FX_COPY_PTR(r_kf_body, &fx_result->kf_body);
    fx_result->kf_flags = *r_kf_flags;
    fx_result->kf_closure = *r_kf_closure;
    FX_COPY_PTR(r_kf_scope, &fx_result->kf_scope);
    fx_result->kf_loc = *r_kf_loc;
}

/* Boxed (ref) wrapper around kdeffun_t. */
static void _fx_free_rR17K_form__kdeffun_t(struct _fx_rR17K_form__kdeffun_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR17K_form__kdeffun_t, _fx_free_R17K_form__kdeffun_t);
}

static int _fx_make_rR17K_form__kdeffun_t(
    struct _fx_R17K_form__kdeffun_t* arg,
    struct _fx_rR17K_form__kdeffun_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR17K_form__kdeffun_t, _fx_copy_R17K_form__kdeffun_t);
}

/* K_form.kdefexn_t -- a K-level exception definition record. */
static void _fx_free_R17K_form__kdefexn_t(struct _fx_R17K_form__kdefexn_t* dst)
{
    fx_free_str(&dst->ke_cname);
    fx_free_str(&dst->ke_base_cname);
    _fx_free_N14K_form__ktyp_t(&dst->ke_typ);
    fx_free_list_simple(&dst->ke_scope);
}

static void _fx_copy_R17K_form__kdefexn_t(struct _fx_R17K_form__kdefexn_t* src, struct _fx_R17K_form__kdefexn_t* dst)
{
    dst->ke_name = src->ke_name;
    fx_copy_str(&src->ke_cname, &dst->ke_cname);
    fx_copy_str(&src->ke_base_cname, &dst->ke_base_cname);
    FX_COPY_PTR(src->ke_typ, &dst->ke_typ);
    dst->ke_std = src->ke_std;
    dst->ke_tag = src->ke_tag;
    dst->ke_make = src->ke_make;
    FX_COPY_PTR(src->ke_scope, &dst->ke_scope);
    dst->ke_loc = src->ke_loc;
}

/* constructor continues on the next source line */
static void
_fx_make_R17K_form__kdefexn_t(  /* "static void" header is on the previous line */
    struct _fx_R9Ast__id_t* r_ke_name,
    fx_str_t* r_ke_cname,
    fx_str_t* r_ke_base_cname,
    struct _fx_N14K_form__ktyp_t_data_t* r_ke_typ,
    bool r_ke_std,
    struct _fx_R9Ast__id_t* r_ke_tag,
    struct _fx_R9Ast__id_t* r_ke_make,
    struct _fx_LN12Ast__scope_t_data_t* r_ke_scope,
    struct _fx_R10Ast__loc_t* r_ke_loc,
    struct _fx_R17K_form__kdefexn_t* fx_result)
{
    fx_result->ke_name = *r_ke_name;
    fx_copy_str(r_ke_cname, &fx_result->ke_cname);
    fx_copy_str(r_ke_base_cname, &fx_result->ke_base_cname);
    FX_COPY_PTR(r_ke_typ, &fx_result->ke_typ);
    fx_result->ke_std = r_ke_std;
    fx_result->ke_tag = *r_ke_tag;
    fx_result->ke_make = *r_ke_make;
    FX_COPY_PTR(r_ke_scope, &fx_result->ke_scope);
    fx_result->ke_loc = *r_ke_loc;
}

/* Boxed (ref) wrapper around kdefexn_t. */
static void _fx_free_rR17K_form__kdefexn_t(struct _fx_rR17K_form__kdefexn_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR17K_form__kdefexn_t, _fx_free_R17K_form__kdefexn_t);
}

static int _fx_make_rR17K_form__kdefexn_t(
    struct _fx_R17K_form__kdefexn_t* arg,
    struct _fx_rR17K_form__kdefexn_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR17K_form__kdefexn_t, _fx_copy_R17K_form__kdefexn_t);
}

/* K_form.ktyp_t lists: destructor + node constructor. */
static void _fx_free_LN14K_form__ktyp_t(struct _fx_LN14K_form__ktyp_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LN14K_form__ktyp_t, _fx_free_N14K_form__ktyp_t);
}

static int _fx_cons_LN14K_form__ktyp_t(
    struct _fx_N14K_form__ktyp_t_data_t* hd,
    struct _fx_LN14K_form__ktyp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN14K_form__ktyp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN14K_form__ktyp_t, FX_COPY_PTR);
}

/* (Ast.id_t, id_t list) tuple helpers. */
static void _fx_free_T2R9Ast__id_tLR9Ast__id_t(struct _fx_T2R9Ast__id_tLR9Ast__id_t* dst)
{
    fx_free_list_simple(&dst->t1);
}

static void _fx_copy_T2R9Ast__id_tLR9Ast__id_t(
    struct _fx_T2R9Ast__id_tLR9Ast__id_t* src,
    struct _fx_T2R9Ast__id_tLR9Ast__id_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}

static void _fx_make_T2R9Ast__id_tLR9Ast__id_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_LR9Ast__id_t_data_t* t1,
    struct _fx_T2R9Ast__id_tLR9Ast__id_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}

/* List of the (id_t, id_t list) tuple above. */
static void _fx_free_LT2R9Ast__id_tLR9Ast__id_t(struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tLR9Ast__id_t, _fx_free_T2R9Ast__id_tLR9Ast__id_t);
}

static int _fx_cons_LT2R9Ast__id_tLR9Ast__id_t(
    struct _fx_T2R9Ast__id_tLR9Ast__id_t* hd,
    struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tLR9Ast__id_t, _fx_copy_T2R9Ast__id_tLR9Ast__id_t);
}

/* K_form.kdefvariant_t -- a K-level variant (sum type) definition record. */
static void _fx_free_R21K_form__kdefvariant_t(struct _fx_R21K_form__kdefvariant_t* dst)
{
    fx_free_str(&dst->kvar_cname);
    _fx_free_LN14K_form__ktyp_t(&dst->kvar_targs);
    _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(&dst->kvar_cases);
    fx_free_list_simple(&dst->kvar_ctors);
    _fx_free_LT2R9Ast__id_tLR9Ast__id_t(&dst->kvar_ifaces);
    fx_free_list_simple(&dst->kvar_scope);
}

static void _fx_copy_R21K_form__kdefvariant_t(
    struct _fx_R21K_form__kdefvariant_t* src,
    struct _fx_R21K_form__kdefvariant_t* dst)
{
    dst->kvar_name = src->kvar_name;
    fx_copy_str(&src->kvar_cname, &dst->kvar_cname);
    dst->kvar_proto = src->kvar_proto;
    dst->kvar_props = src->kvar_props;
    FX_COPY_PTR(src->kvar_targs, &dst->kvar_targs);
    FX_COPY_PTR(src->kvar_cases, &dst->kvar_cases);
    FX_COPY_PTR(src->kvar_ctors, &dst->kvar_ctors);
    dst->kvar_flags = src->kvar_flags;
    FX_COPY_PTR(src->kvar_ifaces, &dst->kvar_ifaces);
    FX_COPY_PTR(src->kvar_scope, &dst->kvar_scope);
    dst->kvar_loc = src->kvar_loc;
}

static void _fx_make_R21K_form__kdefvariant_t(
    struct _fx_R9Ast__id_t* r_kvar_name,
    fx_str_t* r_kvar_cname,
    struct _fx_R9Ast__id_t* r_kvar_proto,
    struct _fx_Nt6option1R17K_form__ktprops_t* r_kvar_props,
    struct _fx_LN14K_form__ktyp_t_data_t* r_kvar_targs,
    struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* r_kvar_cases,
    struct _fx_LR9Ast__id_t_data_t* r_kvar_ctors,
    struct _fx_R16Ast__var_flags_t* r_kvar_flags,
    struct _fx_LT2R9Ast__id_tLR9Ast__id_t_data_t* r_kvar_ifaces,
    struct _fx_LN12Ast__scope_t_data_t* r_kvar_scope,
    struct _fx_R10Ast__loc_t* r_kvar_loc,
    struct _fx_R21K_form__kdefvariant_t* fx_result)
{
    fx_result->kvar_name = *r_kvar_name;
    fx_copy_str(r_kvar_cname, &fx_result->kvar_cname);
    fx_result->kvar_proto = *r_kvar_proto;
    fx_result->kvar_props = *r_kvar_props;
    FX_COPY_PTR(r_kvar_targs, &fx_result->kvar_targs);
    FX_COPY_PTR(r_kvar_cases, &fx_result->kvar_cases);
    FX_COPY_PTR(r_kvar_ctors, &fx_result->kvar_ctors);
    fx_result->kvar_flags = *r_kvar_flags;
    FX_COPY_PTR(r_kvar_ifaces, &fx_result->kvar_ifaces);
    FX_COPY_PTR(r_kvar_scope, &fx_result->kvar_scope);
    fx_result->kvar_loc = *r_kvar_loc;
}

/* Boxed (ref) wrapper around kdefvariant_t. */
static void _fx_free_rR21K_form__kdefvariant_t(struct _fx_rR21K_form__kdefvariant_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR21K_form__kdefvariant_t, _fx_free_R21K_form__kdefvariant_t);
}

static int _fx_make_rR21K_form__kdefvariant_t(
    struct _fx_R21K_form__kdefvariant_t* arg,
    struct _fx_rR21K_form__kdefvariant_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR21K_form__kdefvariant_t, _fx_copy_R21K_form__kdefvariant_t);
}

/* K_form.kdeftyp_t -- a K-level type definition record. */
static void _fx_free_R17K_form__kdeftyp_t(struct _fx_R17K_form__kdeftyp_t* dst)
{
    fx_free_str(&dst->kt_cname);
    _fx_free_LN14K_form__ktyp_t(&dst->kt_targs);
    _fx_free_N14K_form__ktyp_t(&dst->kt_typ);
    fx_free_list_simple(&dst->kt_scope);
}

static void _fx_copy_R17K_form__kdeftyp_t(struct _fx_R17K_form__kdeftyp_t* src, struct _fx_R17K_form__kdeftyp_t* dst)
{
    dst->kt_name = src->kt_name;
    fx_copy_str(&src->kt_cname, &dst->kt_cname);
    dst->kt_proto = src->kt_proto;
    dst->kt_props = src->kt_props;
    FX_COPY_PTR(src->kt_targs, &dst->kt_targs);
    FX_COPY_PTR(src->kt_typ, &dst->kt_typ);
    FX_COPY_PTR(src->kt_scope, &dst->kt_scope);
    dst->kt_loc = src->kt_loc;
}

/* constructor's parameter list continues on the next source line */
static void _fx_make_R17K_form__kdeftyp_t(
    struct _fx_R9Ast__id_t* r_kt_name,
    fx_str_t* r_kt_cname,
    struct _fx_R9Ast__id_t* r_kt_proto,
    struct _fx_Nt6option1R17K_form__ktprops_t* r_kt_props,
    struct _fx_LN14K_form__ktyp_t_data_t* r_kt_targs,
    struct _fx_N14K_form__ktyp_t_data_t* r_kt_typ, struct
_fx_LN12Ast__scope_t_data_t* r_kt_scope,  /* remainder of _fx_make_R17K_form__kdeftyp_t */
    struct _fx_R10Ast__loc_t* r_kt_loc,
    struct _fx_R17K_form__kdeftyp_t* fx_result)
{
    fx_result->kt_name = *r_kt_name;
    fx_copy_str(r_kt_cname, &fx_result->kt_cname);
    fx_result->kt_proto = *r_kt_proto;
    fx_result->kt_props = *r_kt_props;
    FX_COPY_PTR(r_kt_targs, &fx_result->kt_targs);
    FX_COPY_PTR(r_kt_typ, &fx_result->kt_typ);
    FX_COPY_PTR(r_kt_scope, &fx_result->kt_scope);
    fx_result->kt_loc = *r_kt_loc;
}

/* Boxed (ref) wrapper around kdeftyp_t. */
static void _fx_free_rR17K_form__kdeftyp_t(struct _fx_rR17K_form__kdeftyp_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR17K_form__kdeftyp_t, _fx_free_R17K_form__kdeftyp_t);
}

static int _fx_make_rR17K_form__kdeftyp_t(
    struct _fx_R17K_form__kdeftyp_t* arg,
    struct _fx_rR17K_form__kdeftyp_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR17K_form__kdeftyp_t, _fx_copy_R17K_form__kdeftyp_t);
}

/* K_form.kdefclosurevars_t -- the free-variable set captured by a closure. */
static void _fx_free_R25K_form__kdefclosurevars_t(struct _fx_R25K_form__kdefclosurevars_t* dst)
{
    fx_free_str(&dst->kcv_cname);
    _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(&dst->kcv_freevars);
    fx_free_list_simple(&dst->kcv_orig_freevars);
    fx_free_list_simple(&dst->kcv_scope);
}

static void _fx_copy_R25K_form__kdefclosurevars_t(
    struct _fx_R25K_form__kdefclosurevars_t* src,
    struct _fx_R25K_form__kdefclosurevars_t* dst)
{
    dst->kcv_name = src->kcv_name;
    fx_copy_str(&src->kcv_cname, &dst->kcv_cname);
    FX_COPY_PTR(src->kcv_freevars, &dst->kcv_freevars);
    FX_COPY_PTR(src->kcv_orig_freevars, &dst->kcv_orig_freevars);
    FX_COPY_PTR(src->kcv_scope, &dst->kcv_scope);
    dst->kcv_loc = src->kcv_loc;
}

static void _fx_make_R25K_form__kdefclosurevars_t(
    struct _fx_R9Ast__id_t* r_kcv_name,
    fx_str_t* r_kcv_cname,
    struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* r_kcv_freevars,
    struct _fx_LR9Ast__id_t_data_t* r_kcv_orig_freevars,
    struct _fx_LN12Ast__scope_t_data_t* r_kcv_scope,
    struct _fx_R10Ast__loc_t* r_kcv_loc,
    struct _fx_R25K_form__kdefclosurevars_t* fx_result)
{
    fx_result->kcv_name = *r_kcv_name;
    fx_copy_str(r_kcv_cname, &fx_result->kcv_cname);
    FX_COPY_PTR(r_kcv_freevars, &fx_result->kcv_freevars);
    FX_COPY_PTR(r_kcv_orig_freevars, &fx_result->kcv_orig_freevars);
    FX_COPY_PTR(r_kcv_scope, &fx_result->kcv_scope);
    fx_result->kcv_loc = *r_kcv_loc;
}

/* Boxed (ref) wrapper around kdefclosurevars_t. */
static void _fx_free_rR25K_form__kdefclosurevars_t(struct _fx_rR25K_form__kdefclosurevars_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR25K_form__kdefclosurevars_t, _fx_free_R25K_form__kdefclosurevars_t);
}

static int _fx_make_rR25K_form__kdefclosurevars_t(
    struct _fx_R25K_form__kdefclosurevars_t* arg,
    struct _fx_rR25K_form__kdefclosurevars_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR25K_form__kdefclosurevars_t, _fx_copy_R25K_form__kdefclosurevars_t);
}

/* Option-of-exp destructor, hand-rolled instead of a macro: drop one
 * reference; when this was the last reference (FX_DECREF returns the
 * previous count == 1), free the payload and the box itself. */
static void _fx_free_Nt6option1N10Ast__exp_t(struct _fx_Nt6option1N10Ast__exp_t_data_t** dst)
{
    if (*dst && FX_DECREF((*dst)->rc) == 1) {
        _fx_free_N10Ast__exp_t(&(*dst)->u.Some);
        fx_free(*dst);
    }
    *dst = 0;
}

/* Ast.defval_t -- a value (binding) definition record. */
static void _fx_free_R13Ast__defval_t(struct _fx_R13Ast__defval_t* dst)
{
    _fx_free_N10Ast__typ_t(&dst->dv_typ);
    _fx_free_R16Ast__val_flags_t(&dst->dv_flags);
    fx_free_list_simple(&dst->dv_scope);
}

static void _fx_copy_R13Ast__defval_t(struct _fx_R13Ast__defval_t* src, struct _fx_R13Ast__defval_t* dst)
{
    dst->dv_name = src->dv_name;
    FX_COPY_PTR(src->dv_typ, &dst->dv_typ);
    _fx_copy_R16Ast__val_flags_t(&src->dv_flags, &dst->dv_flags);
    FX_COPY_PTR(src->dv_scope, &dst->dv_scope);
    dst->dv_loc = src->dv_loc;
}

static void _fx_make_R13Ast__defval_t(
    struct _fx_R9Ast__id_t* r_dv_name,
    struct _fx_N10Ast__typ_t_data_t* r_dv_typ,
    struct _fx_R16Ast__val_flags_t* r_dv_flags,
    struct _fx_LN12Ast__scope_t_data_t* r_dv_scope,
    struct _fx_R10Ast__loc_t* r_dv_loc,
    struct _fx_R13Ast__defval_t* fx_result)
{
    fx_result->dv_name = *r_dv_name;
    FX_COPY_PTR(r_dv_typ, &fx_result->dv_typ);
    _fx_copy_R16Ast__val_flags_t(r_dv_flags, &fx_result->dv_flags);
    FX_COPY_PTR(r_dv_scope, &fx_result->dv_scope);
    fx_result->dv_loc = *r_dv_loc;
}

/* Map from Ast.id_t to env-entry lists: tree root + comparator function pointer. */
static void _fx_free_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* dst)
{
    _fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t(&dst->root);
    fx_free_fp(&dst->cmp);
}

static void _fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(
    struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* src,
    struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* dst)
{
    FX_COPY_PTR(src->root, &dst->root);
    FX_COPY_FP(&src->cmp, &dst->cmp);
}

static void _fx_make_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(
    struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* r_root,
    struct _fx_FPi2R9Ast__id_tR9Ast__id_t* r_cmp,
    struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* fx_result)
{
    FX_COPY_PTR(r_root, &fx_result->root);
    FX_COPY_FP(r_cmp, &fx_result->cmp);
}

/* Ast.pat_t lists: destructor + node constructor. */
static void _fx_free_LN10Ast__pat_t(struct _fx_LN10Ast__pat_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LN10Ast__pat_t, _fx_free_N10Ast__pat_t);
}

static int _fx_cons_LN10Ast__pat_t(
    struct _fx_N10Ast__pat_t_data_t* hd,
    struct _fx_LN10Ast__pat_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN10Ast__pat_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN10Ast__pat_t, FX_COPY_PTR);
}

/* Boxed (ref) wrapper around an id_t list. */
static void _fx_free_rLR9Ast__id_t(struct _fx_rLR9Ast__id_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rLR9Ast__id_t, fx_free_list_simple);
}

static int _fx_make_rLR9Ast__id_t(struct _fx_LR9Ast__id_t_data_t* arg, struct _fx_rLR9Ast__id_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rLR9Ast__id_t, FX_COPY_PTR);
}

/* Ast.deffun_t -- an AST-level function definition record. */
static void _fx_free_R13Ast__deffun_t(struct _fx_R13Ast__deffun_t* dst)
{
    fx_free_list_simple(&dst->df_templ_args);
    _fx_free_LN10Ast__pat_t(&dst->df_args);
    _fx_free_N10Ast__typ_t(&dst->df_typ);
    _fx_free_N10Ast__exp_t(&dst->df_body);
    fx_free_list_simple(&dst->df_scope);
    _fx_free_rLR9Ast__id_t(&dst->df_templ_inst);
    _fx_free_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(&dst->df_env);
}

/* copy continues on the next source line */
static void _fx_copy_R13Ast__deffun_t(struct _fx_R13Ast__deffun_t* src, struct _fx_R13Ast__deffun_t* dst)
{
    dst->df_name = src->df_name;
    FX_COPY_PTR(src->df_templ_args, &dst->df_templ_args);
    FX_COPY_PTR(src->df_args, &dst->df_args);
    FX_COPY_PTR(src->df_typ,
/* remainder of _fx_copy_R13Ast__deffun_t (starts on previous line) */
&dst->df_typ);
    FX_COPY_PTR(src->df_body, &dst->df_body);
    dst->df_flags = src->df_flags;
    FX_COPY_PTR(src->df_scope, &dst->df_scope);
    dst->df_loc = src->df_loc;
    FX_COPY_PTR(src->df_templ_inst, &dst->df_templ_inst);
    _fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(&src->df_env, &dst->df_env);
}

static void _fx_make_R13Ast__deffun_t(
    struct _fx_R9Ast__id_t* r_df_name,
    struct _fx_LR9Ast__id_t_data_t* r_df_templ_args,
    struct _fx_LN10Ast__pat_t_data_t* r_df_args,
    struct _fx_N10Ast__typ_t_data_t* r_df_typ,
    struct _fx_N10Ast__exp_t_data_t* r_df_body,
    struct _fx_R16Ast__fun_flags_t* r_df_flags,
    struct _fx_LN12Ast__scope_t_data_t* r_df_scope,
    struct _fx_R10Ast__loc_t* r_df_loc,
    struct _fx_rLR9Ast__id_t_data_t* r_df_templ_inst,
    struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* r_df_env,
    struct _fx_R13Ast__deffun_t* fx_result)
{
    fx_result->df_name = *r_df_name;
    FX_COPY_PTR(r_df_templ_args, &fx_result->df_templ_args);
    FX_COPY_PTR(r_df_args, &fx_result->df_args);
    FX_COPY_PTR(r_df_typ, &fx_result->df_typ);
    FX_COPY_PTR(r_df_body, &fx_result->df_body);
    fx_result->df_flags = *r_df_flags;
    FX_COPY_PTR(r_df_scope, &fx_result->df_scope);
    fx_result->df_loc = *r_df_loc;
    FX_COPY_PTR(r_df_templ_inst, &fx_result->df_templ_inst);
    _fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(r_df_env, &fx_result->df_env);
}

/* Boxed (ref) wrapper around deffun_t. */
static void _fx_free_rR13Ast__deffun_t(struct _fx_rR13Ast__deffun_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR13Ast__deffun_t, _fx_free_R13Ast__deffun_t);
}

static int _fx_make_rR13Ast__deffun_t(struct _fx_R13Ast__deffun_t* arg, struct _fx_rR13Ast__deffun_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR13Ast__deffun_t, _fx_copy_R13Ast__deffun_t);
}

/* Ast.defexn_t -- an AST-level exception definition record. */
static void _fx_free_R13Ast__defexn_t(struct _fx_R13Ast__defexn_t* dst)
{
    _fx_free_N10Ast__typ_t(&dst->dexn_typ);
    fx_free_list_simple(&dst->dexn_scope);
}

static void _fx_copy_R13Ast__defexn_t(struct _fx_R13Ast__defexn_t* src, struct _fx_R13Ast__defexn_t* dst)
{
    dst->dexn_name = src->dexn_name;
    FX_COPY_PTR(src->dexn_typ, &dst->dexn_typ);
    FX_COPY_PTR(src->dexn_scope, &dst->dexn_scope);
    dst->dexn_loc = src->dexn_loc;
}

static void _fx_make_R13Ast__defexn_t(
    struct _fx_R9Ast__id_t* r_dexn_name,
    struct _fx_N10Ast__typ_t_data_t* r_dexn_typ,
    struct _fx_LN12Ast__scope_t_data_t* r_dexn_scope,
    struct _fx_R10Ast__loc_t* r_dexn_loc,
    struct _fx_R13Ast__defexn_t* fx_result)
{
    fx_result->dexn_name = *r_dexn_name;
    FX_COPY_PTR(r_dexn_typ, &fx_result->dexn_typ);
    FX_COPY_PTR(r_dexn_scope, &fx_result->dexn_scope);
    fx_result->dexn_loc = *r_dexn_loc;
}

/* Boxed (ref) wrapper around defexn_t. */
static void _fx_free_rR13Ast__defexn_t(struct _fx_rR13Ast__defexn_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR13Ast__defexn_t, _fx_free_R13Ast__defexn_t);
}

static int _fx_make_rR13Ast__defexn_t(struct _fx_R13Ast__defexn_t* arg, struct _fx_rR13Ast__defexn_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR13Ast__defexn_t, _fx_copy_R13Ast__defexn_t);
}

/* Ast.deftyp_t -- an AST-level type definition record. */
static void _fx_free_R13Ast__deftyp_t(struct _fx_R13Ast__deftyp_t* dst)
{
    fx_free_list_simple(&dst->dt_templ_args);
    _fx_free_N10Ast__typ_t(&dst->dt_typ);
    fx_free_list_simple(&dst->dt_scope);
}

static void _fx_copy_R13Ast__deftyp_t(struct _fx_R13Ast__deftyp_t* src, struct _fx_R13Ast__deftyp_t* dst)
{
    dst->dt_name = src->dt_name;
    FX_COPY_PTR(src->dt_templ_args, &dst->dt_templ_args);
    FX_COPY_PTR(src->dt_typ, &dst->dt_typ);
    dst->dt_finalized = src->dt_finalized;
    FX_COPY_PTR(src->dt_scope, &dst->dt_scope);
    dst->dt_loc = src->dt_loc;
}

static void _fx_make_R13Ast__deftyp_t(
    struct _fx_R9Ast__id_t* r_dt_name,
    struct _fx_LR9Ast__id_t_data_t* r_dt_templ_args,
    struct _fx_N10Ast__typ_t_data_t* r_dt_typ,
    bool r_dt_finalized,
    struct _fx_LN12Ast__scope_t_data_t* r_dt_scope,
    struct _fx_R10Ast__loc_t* r_dt_loc,
    struct _fx_R13Ast__deftyp_t* fx_result)
{
    fx_result->dt_name = *r_dt_name;
    FX_COPY_PTR(r_dt_templ_args, &fx_result->dt_templ_args);
    FX_COPY_PTR(r_dt_typ, &fx_result->dt_typ);
    fx_result->dt_finalized = r_dt_finalized;
    FX_COPY_PTR(r_dt_scope, &fx_result->dt_scope);
    fx_result->dt_loc = *r_dt_loc;
}

/* Boxed (ref) wrapper around deftyp_t. */
static void
_fx_free_rR13Ast__deftyp_t(struct _fx_rR13Ast__deftyp_t_data_t** dst)
{
    FX_FREE_REF_IMPL(_fx_rR13Ast__deftyp_t, _fx_free_R13Ast__deftyp_t);
}

static int _fx_make_rR13Ast__deftyp_t(struct _fx_R13Ast__deftyp_t* arg, struct _fx_rR13Ast__deftyp_t_data_t** fx_result)
{
    FX_MAKE_REF_IMPL(_fx_rR13Ast__deftyp_t, _fx_copy_R13Ast__deftyp_t);
}

/* (Ast.id_t, Ast.typ_t) tuple helpers. */
static void _fx_free_T2R9Ast__id_tN10Ast__typ_t(struct _fx_T2R9Ast__id_tN10Ast__typ_t* dst)
{
    _fx_free_N10Ast__typ_t(&dst->t1);
}

static void _fx_copy_T2R9Ast__id_tN10Ast__typ_t(
    struct _fx_T2R9Ast__id_tN10Ast__typ_t* src,
    struct _fx_T2R9Ast__id_tN10Ast__typ_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}

static void _fx_make_T2R9Ast__id_tN10Ast__typ_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N10Ast__typ_t_data_t* t1,
    struct _fx_T2R9Ast__id_tN10Ast__typ_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}

/* List of the (id_t, typ_t) tuple above. */
static void _fx_free_LT2R9Ast__id_tN10Ast__typ_t(struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__typ_t, _fx_free_T2R9Ast__id_tN10Ast__typ_t);
}

static int _fx_cons_LT2R9Ast__id_tN10Ast__typ_t(
    struct _fx_T2R9Ast__id_tN10Ast__typ_t* hd,
    struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2R9Ast__id_tN10Ast__typ_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__typ_t, _fx_copy_T2R9Ast__id_tN10Ast__typ_t);
}

/* Node constructor for (id_t, id_t) pair lists (plain-value elements). */
static int _fx_cons_LTa2R9Ast__id_t(
    struct _fx_Ta2R9Ast__id_t* hd,
    struct _fx_LTa2R9Ast__id_t_data_t* tl,
    bool addref_tl,
    struct _fx_LTa2R9Ast__id_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LTa2R9Ast__id_t, FX_COPY_SIMPLE_BY_PTR);
}

/* (Ast.id_t, (id_t, id_t) list) tuple helpers. */
static void _fx_free_T2R9Ast__id_tLTa2R9Ast__id_t(struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* dst)
{
    fx_free_list_simple(&dst->t1);
}

static void _fx_copy_T2R9Ast__id_tLTa2R9Ast__id_t(
    struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* src,
    struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}

/* next definition continues past this chunk */
static void
_fx_make_T2R9Ast__id_tLTa2R9Ast__id_t( struct _fx_R9Ast__id_t* t0, struct _fx_LTa2R9Ast__id_t_data_t* t1, struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2R9Ast__id_tLTa2R9Ast__id_t(struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tLTa2R9Ast__id_t, _fx_free_T2R9Ast__id_tLTa2R9Ast__id_t); } static int _fx_cons_LT2R9Ast__id_tLTa2R9Ast__id_t( struct _fx_T2R9Ast__id_tLTa2R9Ast__id_t* hd, struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tLTa2R9Ast__id_t, _fx_copy_T2R9Ast__id_tLTa2R9Ast__id_t); } static void _fx_free_R17Ast__defvariant_t(struct _fx_R17Ast__defvariant_t* dst) { fx_free_list_simple(&dst->dvar_templ_args); _fx_free_N10Ast__typ_t(&dst->dvar_alias); _fx_free_LT2R9Ast__id_tN10Ast__typ_t(&dst->dvar_cases); fx_free_list_simple(&dst->dvar_ctors); _fx_free_rLR9Ast__id_t(&dst->dvar_templ_inst); _fx_free_LT2R9Ast__id_tLTa2R9Ast__id_t(&dst->dvar_ifaces); fx_free_list_simple(&dst->dvar_scope); } static void _fx_copy_R17Ast__defvariant_t(struct _fx_R17Ast__defvariant_t* src, struct _fx_R17Ast__defvariant_t* dst) { dst->dvar_name = src->dvar_name; FX_COPY_PTR(src->dvar_templ_args, &dst->dvar_templ_args); FX_COPY_PTR(src->dvar_alias, &dst->dvar_alias); dst->dvar_flags = src->dvar_flags; FX_COPY_PTR(src->dvar_cases, &dst->dvar_cases); FX_COPY_PTR(src->dvar_ctors, &dst->dvar_ctors); FX_COPY_PTR(src->dvar_templ_inst, &dst->dvar_templ_inst); FX_COPY_PTR(src->dvar_ifaces, &dst->dvar_ifaces); FX_COPY_PTR(src->dvar_scope, &dst->dvar_scope); dst->dvar_loc = src->dvar_loc; } static void _fx_make_R17Ast__defvariant_t( struct _fx_R9Ast__id_t* r_dvar_name, struct _fx_LR9Ast__id_t_data_t* r_dvar_templ_args, struct _fx_N10Ast__typ_t_data_t* r_dvar_alias, struct _fx_R16Ast__var_flags_t* r_dvar_flags, struct 
_fx_LT2R9Ast__id_tN10Ast__typ_t_data_t* r_dvar_cases, struct _fx_LR9Ast__id_t_data_t* r_dvar_ctors, struct _fx_rLR9Ast__id_t_data_t* r_dvar_templ_inst, struct _fx_LT2R9Ast__id_tLTa2R9Ast__id_t_data_t* r_dvar_ifaces, struct _fx_LN12Ast__scope_t_data_t* r_dvar_scope, struct _fx_R10Ast__loc_t* r_dvar_loc, struct _fx_R17Ast__defvariant_t* fx_result) { fx_result->dvar_name = *r_dvar_name; FX_COPY_PTR(r_dvar_templ_args, &fx_result->dvar_templ_args); FX_COPY_PTR(r_dvar_alias, &fx_result->dvar_alias); fx_result->dvar_flags = *r_dvar_flags; FX_COPY_PTR(r_dvar_cases, &fx_result->dvar_cases); FX_COPY_PTR(r_dvar_ctors, &fx_result->dvar_ctors); FX_COPY_PTR(r_dvar_templ_inst, &fx_result->dvar_templ_inst); FX_COPY_PTR(r_dvar_ifaces, &fx_result->dvar_ifaces); FX_COPY_PTR(r_dvar_scope, &fx_result->dvar_scope); fx_result->dvar_loc = *r_dvar_loc; } static void _fx_free_rR17Ast__defvariant_t(struct _fx_rR17Ast__defvariant_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR17Ast__defvariant_t, _fx_free_R17Ast__defvariant_t); } static int _fx_make_rR17Ast__defvariant_t( struct _fx_R17Ast__defvariant_t* arg, struct _fx_rR17Ast__defvariant_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR17Ast__defvariant_t, _fx_copy_R17Ast__defvariant_t); } static void _fx_free_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t( struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* dst) { _fx_free_N10Ast__typ_t(&dst->t1); } static void _fx_copy_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t( struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* src, struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t( struct _fx_R9Ast__id_t* t0, struct _fx_N10Ast__typ_t_data_t* t1, struct _fx_R16Ast__fun_flags_t* t2, struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void 
_fx_free_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t( struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t, _fx_free_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t); } static int _fx_cons_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t( struct _fx_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t* hd, struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* tl, bool addref_tl, struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t, _fx_copy_T3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t); } static void _fx_free_R19Ast__definterface_t(struct _fx_R19Ast__definterface_t* dst) { _fx_free_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t(&dst->di_new_methods); _fx_free_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t(&dst->di_all_methods); fx_free_list_simple(&dst->di_scope); } static void _fx_copy_R19Ast__definterface_t(struct _fx_R19Ast__definterface_t* src, struct _fx_R19Ast__definterface_t* dst) { dst->di_name = src->di_name; dst->di_base = src->di_base; FX_COPY_PTR(src->di_new_methods, &dst->di_new_methods); FX_COPY_PTR(src->di_all_methods, &dst->di_all_methods); FX_COPY_PTR(src->di_scope, &dst->di_scope); dst->di_loc = src->di_loc; } static void _fx_make_R19Ast__definterface_t( struct _fx_R9Ast__id_t* r_di_name, struct _fx_R9Ast__id_t* r_di_base, struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* r_di_new_methods, struct _fx_LT3R9Ast__id_tN10Ast__typ_tR16Ast__fun_flags_t_data_t* r_di_all_methods, struct _fx_LN12Ast__scope_t_data_t* r_di_scope, struct _fx_R10Ast__loc_t* r_di_loc, struct _fx_R19Ast__definterface_t* fx_result) { fx_result->di_name = *r_di_name; fx_result->di_base = *r_di_base; FX_COPY_PTR(r_di_new_methods, &fx_result->di_new_methods); FX_COPY_PTR(r_di_all_methods, &fx_result->di_all_methods); FX_COPY_PTR(r_di_scope, &fx_result->di_scope); 
fx_result->di_loc = *r_di_loc; } static void _fx_free_rR19Ast__definterface_t(struct _fx_rR19Ast__definterface_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rR19Ast__definterface_t, _fx_free_R19Ast__definterface_t); } static int _fx_make_rR19Ast__definterface_t( struct _fx_R19Ast__definterface_t* arg, struct _fx_rR19Ast__definterface_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rR19Ast__definterface_t, _fx_copy_R19Ast__definterface_t); } static void _fx_free_N14Ast__id_info_t(struct _fx_N14Ast__id_info_t* dst) { switch (dst->tag) { case 2: _fx_free_R13Ast__defval_t(&dst->u.IdDVal); break; case 3: _fx_free_rR13Ast__deffun_t(&dst->u.IdFun); break; case 4: _fx_free_rR13Ast__defexn_t(&dst->u.IdExn); break; case 5: _fx_free_rR13Ast__deftyp_t(&dst->u.IdTyp); break; case 6: _fx_free_rR17Ast__defvariant_t(&dst->u.IdVariant); break; case 7: _fx_free_rR19Ast__definterface_t(&dst->u.IdInterface); break; default: ; } dst->tag = 0; } static void _fx_copy_N14Ast__id_info_t(struct _fx_N14Ast__id_info_t* src, struct _fx_N14Ast__id_info_t* dst) { dst->tag = src->tag; switch (src->tag) { case 2: _fx_copy_R13Ast__defval_t(&src->u.IdDVal, &dst->u.IdDVal); break; case 3: FX_COPY_PTR(src->u.IdFun, &dst->u.IdFun); break; case 4: FX_COPY_PTR(src->u.IdExn, &dst->u.IdExn); break; case 5: FX_COPY_PTR(src->u.IdTyp, &dst->u.IdTyp); break; case 6: FX_COPY_PTR(src->u.IdVariant, &dst->u.IdVariant); break; case 7: FX_COPY_PTR(src->u.IdInterface, &dst->u.IdInterface); break; default: dst->u = src->u; } } static void _fx_free_T3iA1N14Ast__id_info_tN14Ast__id_info_t(struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t* dst) { fx_free_arr(&dst->t1); _fx_free_N14Ast__id_info_t(&dst->t2); } static void _fx_copy_T3iA1N14Ast__id_info_tN14Ast__id_info_t( struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t* src, struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t* dst) { dst->t0 = src->t0; fx_copy_arr(&src->t1, &dst->t1); _fx_copy_N14Ast__id_info_t(&src->t2, &dst->t2); } static void 
_fx_make_T3iA1N14Ast__id_info_tN14Ast__id_info_t( int_ t0, fx_arr_t* t1, struct _fx_N14Ast__id_info_t* t2, struct _fx_T3iA1N14Ast__id_info_tN14Ast__id_info_t* fx_result) { fx_result->t0 = t0; fx_copy_arr(t1, &fx_result->t1); _fx_copy_N14Ast__id_info_t(t2, &fx_result->t2); } static void _fx_free_Nt9Dynvec__t1N14Ast__id_info_t(struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { _fx_free_T3iA1N14Ast__id_info_tN14Ast__id_info_t(&(*dst)->u.t); fx_free(*dst); } *dst = 0; } static void _fx_free_LN16Ast__env_entry_t(struct _fx_LN16Ast__env_entry_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN16Ast__env_entry_t, _fx_free_N16Ast__env_entry_t); } static int _fx_cons_LN16Ast__env_entry_t( struct _fx_N16Ast__env_entry_t_data_t* hd, struct _fx_LN16Ast__env_entry_t_data_t* tl, bool addref_tl, struct _fx_LN16Ast__env_entry_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN16Ast__env_entry_t, FX_COPY_PTR); } static void _fx_free_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t( struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t* dst) { _fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t(&dst->t1); _fx_free_LN16Ast__env_entry_t(&dst->t3); _fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t(&dst->t4); } static void _fx_copy_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t( struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t* src, struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = 
src->t2; FX_COPY_PTR(src->t3, &dst->t3); FX_COPY_PTR(src->t4, &dst->t4); } static void _fx_make_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t( struct _fx_N12Map__color_t* t0, struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* t1, struct _fx_R9Ast__id_t* t2, struct _fx_LN16Ast__env_entry_t_data_t* t3, struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t* t4, struct _fx_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; FX_COPY_PTR(t3, &fx_result->t3); FX_COPY_PTR(t4, &fx_result->t4); } static void _fx_free_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t( struct _fx_Nt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { _fx_free_T5N12Map__color_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_tR9Ast__id_tLN16Ast__env_entry_tNt11Map__tree_t2R9Ast__id_tLN16Ast__env_entry_t( &(*dst)->u.Node); fx_free(*dst); } *dst = 0; } static void _fx_free_T2R10Ast__loc_tS(struct _fx_T2R10Ast__loc_tS* dst) { fx_free_str(&dst->t1); } static void _fx_copy_T2R10Ast__loc_tS(struct _fx_T2R10Ast__loc_tS* src, struct _fx_T2R10Ast__loc_tS* dst) { dst->t0 = src->t0; fx_copy_str(&src->t1, &dst->t1); } static void _fx_make_T2R10Ast__loc_tS(struct _fx_R10Ast__loc_t* t0, fx_str_t* t1, struct _fx_T2R10Ast__loc_tS* fx_result) { fx_result->t0 = *t0; fx_copy_str(t1, &fx_result->t1); } static void _fx_free_N10Ast__lit_t(struct _fx_N10Ast__lit_t* dst) { switch (dst->tag) { case 5: fx_free_str(&dst->u.LitString); break; default: ; } dst->tag = 0; } static void _fx_copy_N10Ast__lit_t(struct _fx_N10Ast__lit_t* src, struct _fx_N10Ast__lit_t* dst) { dst->tag = src->tag; switch (src->tag) { case 5: fx_copy_str(&src->u.LitString, &dst->u.LitString); break; 
default: dst->u = src->u; } } static void _fx_free_rNt6option1N10Ast__typ_t(struct _fx_rNt6option1N10Ast__typ_t_data_t** dst) { FX_FREE_REF_IMPL(_fx_rNt6option1N10Ast__typ_t, _fx_free_Nt6option1N10Ast__typ_t); } static int _fx_make_rNt6option1N10Ast__typ_t( struct _fx_Nt6option1N10Ast__typ_t_data_t* arg, struct _fx_rNt6option1N10Ast__typ_t_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rNt6option1N10Ast__typ_t, FX_COPY_PTR); } static void _fx_free_LN10Ast__typ_t(struct _fx_LN10Ast__typ_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN10Ast__typ_t, _fx_free_N10Ast__typ_t); } static int _fx_cons_LN10Ast__typ_t( struct _fx_N10Ast__typ_t_data_t* hd, struct _fx_LN10Ast__typ_t_data_t* tl, bool addref_tl, struct _fx_LN10Ast__typ_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN10Ast__typ_t, FX_COPY_PTR); } static void _fx_free_T2LN10Ast__typ_tN10Ast__typ_t(struct _fx_T2LN10Ast__typ_tN10Ast__typ_t* dst) { _fx_free_LN10Ast__typ_t(&dst->t0); _fx_free_N10Ast__typ_t(&dst->t1); } static void _fx_copy_T2LN10Ast__typ_tN10Ast__typ_t( struct _fx_T2LN10Ast__typ_tN10Ast__typ_t* src, struct _fx_T2LN10Ast__typ_tN10Ast__typ_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN10Ast__typ_tN10Ast__typ_t( struct _fx_LN10Ast__typ_t_data_t* t0, struct _fx_N10Ast__typ_t_data_t* t1, struct _fx_T2LN10Ast__typ_tN10Ast__typ_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T2iN10Ast__typ_t(struct _fx_T2iN10Ast__typ_t* dst) { _fx_free_N10Ast__typ_t(&dst->t1); } static void _fx_copy_T2iN10Ast__typ_t(struct _fx_T2iN10Ast__typ_t* src, struct _fx_T2iN10Ast__typ_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2iN10Ast__typ_t(int_ t0, struct _fx_N10Ast__typ_t_data_t* t1, struct _fx_T2iN10Ast__typ_t* fx_result) { fx_result->t0 = t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t( struct 
_fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* dst) { _fx_free_R16Ast__val_flags_t(&dst->t0); _fx_free_N10Ast__typ_t(&dst->t2); _fx_free_N10Ast__exp_t(&dst->t3); } static void _fx_copy_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t( struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* src, struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* dst) { _fx_copy_R16Ast__val_flags_t(&src->t0, &dst->t0); dst->t1 = src->t1; FX_COPY_PTR(src->t2, &dst->t2); FX_COPY_PTR(src->t3, &dst->t3); } static void _fx_make_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t( struct _fx_R16Ast__val_flags_t* t0, struct _fx_R9Ast__id_t* t1, struct _fx_N10Ast__typ_t_data_t* t2, struct _fx_N10Ast__exp_t_data_t* t3, struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* fx_result) { _fx_copy_R16Ast__val_flags_t(t0, &fx_result->t0); fx_result->t1 = *t1; FX_COPY_PTR(t2, &fx_result->t2); FX_COPY_PTR(t3, &fx_result->t3); } static void _fx_free_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t( struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t, _fx_free_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t); } static int _fx_cons_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t( struct _fx_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t* hd, struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t* tl, bool addref_tl, struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t, _fx_copy_T4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t); } static void _fx_free_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB( struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* dst) { 
_fx_free_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t(&dst->t0); } static void _fx_copy_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB( struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* src, struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB( struct _fx_LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_t_data_t* t0, bool t1, struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = t1; } static void _fx_free_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB( struct _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t** dst) { FX_FREE_REF_IMPL(_fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB, _fx_free_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB); } static int _fx_make_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB( struct _fx_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB* arg, struct _fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB, _fx_copy_T2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB); } static void _fx_free_T2LN10Ast__typ_tR9Ast__id_t(struct _fx_T2LN10Ast__typ_tR9Ast__id_t* dst) { _fx_free_LN10Ast__typ_t(&dst->t0); } static void _fx_copy_T2LN10Ast__typ_tR9Ast__id_t( struct _fx_T2LN10Ast__typ_tR9Ast__id_t* src, struct _fx_T2LN10Ast__typ_tR9Ast__id_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LN10Ast__typ_tR9Ast__id_t( struct _fx_LN10Ast__typ_t_data_t* t0, struct _fx_R9Ast__id_t* t1, struct _fx_T2LN10Ast__typ_tR9Ast__id_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); 
fx_result->t1 = *t1; } static void _fx_free_N10Ast__typ_t(struct _fx_N10Ast__typ_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 1: _fx_free_rNt6option1N10Ast__typ_t(&(*dst)->u.TypVar); break; case 2: _fx_free_Nt6option1N10Ast__typ_t(&(*dst)->u.TypVarTuple); break; case 3: _fx_free_N10Ast__typ_t(&(*dst)->u.TypVarArray); break; case 13: _fx_free_T2LN10Ast__typ_tN10Ast__typ_t(&(*dst)->u.TypFun); break; case 14: _fx_free_N10Ast__typ_t(&(*dst)->u.TypList); break; case 15: _fx_free_N10Ast__typ_t(&(*dst)->u.TypVector); break; case 16: _fx_free_LN10Ast__typ_t(&(*dst)->u.TypTuple); break; case 17: _fx_free_N10Ast__typ_t(&(*dst)->u.TypRef); break; case 18: _fx_free_T2iN10Ast__typ_t(&(*dst)->u.TypArray); break; case 19: _fx_free_rT2LT4R16Ast__val_flags_tR9Ast__id_tN10Ast__typ_tN10Ast__exp_tB(&(*dst)->u.TypRecord); break; case 23: _fx_free_T2LN10Ast__typ_tR9Ast__id_t(&(*dst)->u.TypApp); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_N13Ast__binary_t(struct _fx_N13Ast__binary_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 27: _fx_free_N13Ast__binary_t(&(*dst)->u.OpAugBinary); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_T2Nt6option1N10Ast__exp_tR10Ast__loc_t(struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t* dst) { _fx_free_Nt6option1N10Ast__exp_t(&dst->t0); } static void _fx_copy_T2Nt6option1N10Ast__exp_tR10Ast__loc_t( struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t* src, struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2Nt6option1N10Ast__exp_tR10Ast__loc_t( struct _fx_Nt6option1N10Ast__exp_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2Nt6option1N10Ast__exp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2N10Ast__typ_tR10Ast__loc_t* dst) { 
_fx_free_N10Ast__typ_t(&dst->t0); } static void _fx_copy_T2N10Ast__typ_tR10Ast__loc_t( struct _fx_T2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__typ_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_Nt6option1N10Ast__exp_t(&dst->t0); _fx_free_Nt6option1N10Ast__exp_t(&dst->t1); _fx_free_Nt6option1N10Ast__exp_t(&dst->t2); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t3); } static void _fx_copy_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t3, &dst->t3); } static void _fx_make_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_Nt6option1N10Ast__exp_t_data_t* t0, struct _fx_Nt6option1N10Ast__exp_t_data_t* t1, struct _fx_Nt6option1N10Ast__exp_t_data_t* t2, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t3, struct _fx_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t3, &fx_result->t3); } 
static void _fx_free_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__lit_t(&dst->t0); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_copy_N10Ast__lit_t(&src->t0, &dst->t0); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__lit_t* t0, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t1, struct _fx_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { _fx_copy_N10Ast__lit_t(t0, &fx_result->t0); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t* dst) { dst->t0 = src->t0; _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_R9Ast__id_t* t0, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t1, struct _fx_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N13Ast__binary_t(&dst->t0); _fx_free_N10Ast__exp_t(&dst->t1); _fx_free_N10Ast__exp_t(&dst->t2); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t3); } static void _fx_copy_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct 
_fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t3, &dst->t3); } static void _fx_make_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N13Ast__binary_t_data_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_N10Ast__exp_t_data_t* t2, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t3, struct _fx_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t3, &fx_result->t3); } static void _fx_free_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t1); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N12Ast__unary_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2, struct _fx_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_LN10Ast__exp_t(struct _fx_LN10Ast__exp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN10Ast__exp_t, _fx_free_N10Ast__exp_t); } static int 
_fx_cons_LN10Ast__exp_t( struct _fx_N10Ast__exp_t_data_t* hd, struct _fx_LN10Ast__exp_t_data_t* tl, bool addref_tl, struct _fx_LN10Ast__exp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN10Ast__exp_t, FX_COPY_PTR); } static void _fx_free_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_LN10Ast__exp_t(&dst->t1); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N13Ast__intrin_t* t0, struct _fx_LN10Ast__exp_t_data_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2, struct _fx_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T2R9Ast__id_tN10Ast__exp_t(struct _fx_T2R9Ast__id_tN10Ast__exp_t* dst) { _fx_free_N10Ast__exp_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tN10Ast__exp_t( struct _fx_T2R9Ast__id_tN10Ast__exp_t* src, struct _fx_T2R9Ast__id_tN10Ast__exp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tN10Ast__exp_t( struct _fx_R9Ast__id_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_T2R9Ast__id_tN10Ast__exp_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_LN10Ast__exp_t(&dst->t0); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1); } static void 
_fx_copy_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_LN10Ast__exp_t_data_t* t0, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t1, struct _fx_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_LLN10Ast__exp_t(struct _fx_LLN10Ast__exp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LLN10Ast__exp_t, _fx_free_LN10Ast__exp_t); } static int _fx_cons_LLN10Ast__exp_t( struct _fx_LN10Ast__exp_t_data_t* hd, struct _fx_LLN10Ast__exp_t_data_t* tl, bool addref_tl, struct _fx_LLN10Ast__exp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LLN10Ast__exp_t, FX_COPY_PTR); } static void _fx_free_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_LLN10Ast__exp_t(&dst->t0); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_LLN10Ast__exp_t_data_t* t0, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t1, struct _fx_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_LT2R9Ast__id_tN10Ast__exp_t(struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__exp_t, _fx_free_T2R9Ast__id_tN10Ast__exp_t); } static 
int _fx_cons_LT2R9Ast__id_tN10Ast__exp_t( struct _fx_T2R9Ast__id_tN10Ast__exp_t* hd, struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__exp_t, _fx_copy_T2R9Ast__id_tN10Ast__exp_t); } static void _fx_free_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t0); _fx_free_LT2R9Ast__id_tN10Ast__exp_t(&dst->t1); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__exp_t_data_t* t0, struct _fx_LT2R9Ast__id_tN10Ast__exp_t_data_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2, struct _fx_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t0); _fx_free_LN10Ast__exp_t(&dst->t1); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, 
&dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__exp_t_data_t* t0, struct _fx_LN10Ast__exp_t_data_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2, struct _fx_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t0); _fx_free_LN10Ast__exp_t(&dst->t3); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t4); } static void _fx_copy_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; dst->t2 = src->t2; FX_COPY_PTR(src->t3, &dst->t3); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t4, &dst->t4); } static void _fx_make_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__exp_t_data_t* t0, struct _fx_N13Ast__border_t* t1, struct _fx_N18Ast__interpolate_t* t2, struct _fx_LN10Ast__exp_t_data_t* t3, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t4, struct _fx_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; fx_result->t2 = *t2; FX_COPY_PTR(t3, &fx_result->t3); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t4, &fx_result->t4); } static void 
_fx_free_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t0); _fx_free_N10Ast__exp_t(&dst->t1); } static void _fx_copy_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t* src, struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t( struct _fx_N10Ast__exp_t_data_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t0); _fx_free_N10Ast__exp_t(&dst->t1); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__exp_t_data_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2, struct _fx_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T2N10Ast__exp_tR10Ast__loc_t(struct _fx_T2N10Ast__exp_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t0); } static void _fx_copy_T2N10Ast__exp_tR10Ast__loc_t( 
struct _fx_T2N10Ast__exp_tR10Ast__loc_t* src, struct _fx_T2N10Ast__exp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2N10Ast__exp_tR10Ast__loc_t( struct _fx_N10Ast__exp_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2N10Ast__exp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t0); _fx_free_N10Ast__exp_t(&dst->t1); _fx_free_N10Ast__exp_t(&dst->t2); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t3); } static void _fx_copy_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t3, &dst->t3); } static void _fx_make_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__exp_t_data_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_N10Ast__exp_t_data_t* t2, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t3, struct _fx_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t3, &fx_result->t3); } static void _fx_free_T2N10Ast__pat_tN10Ast__exp_t(struct _fx_T2N10Ast__pat_tN10Ast__exp_t* dst) { _fx_free_N10Ast__pat_t(&dst->t0); _fx_free_N10Ast__exp_t(&dst->t1); } static void _fx_copy_T2N10Ast__pat_tN10Ast__exp_t( struct _fx_T2N10Ast__pat_tN10Ast__exp_t* src, struct _fx_T2N10Ast__pat_tN10Ast__exp_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); 
FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2N10Ast__pat_tN10Ast__exp_t( struct _fx_N10Ast__pat_t_data_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_T2N10Ast__pat_tN10Ast__exp_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2N10Ast__pat_tN10Ast__exp_t(struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2N10Ast__pat_tN10Ast__exp_t, _fx_free_T2N10Ast__pat_tN10Ast__exp_t); } static int _fx_cons_LT2N10Ast__pat_tN10Ast__exp_t( struct _fx_T2N10Ast__pat_tN10Ast__exp_t* hd, struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* tl, bool addref_tl, struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2N10Ast__pat_tN10Ast__exp_t, _fx_copy_T2N10Ast__pat_tN10Ast__exp_t); } static void _fx_free_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t( struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t* dst) { _fx_free_LT2N10Ast__pat_tN10Ast__exp_t(&dst->t0); _fx_free_N10Ast__pat_t(&dst->t1); _fx_free_N10Ast__exp_t(&dst->t2); } static void _fx_copy_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t( struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t* src, struct _fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); dst->t3 = src->t3; dst->t4 = src->t4; } static void _fx_make_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t( struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t0, struct _fx_N10Ast__pat_t_data_t* t1, struct _fx_N10Ast__exp_t_data_t* t2, struct _fx_R16Ast__for_flags_t* t3, struct _fx_R10Ast__loc_t* t4, struct 
_fx_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); fx_result->t3 = *t3; fx_result->t4 = *t4; } static void _fx_free_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* dst) { _fx_free_LT2N10Ast__pat_tN10Ast__exp_t(&dst->t0); _fx_free_N10Ast__pat_t(&dst->t1); } static void _fx_copy_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t( struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* src, struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t( struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t0, struct _fx_N10Ast__pat_t_data_t* t1, struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t( struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t, _fx_free_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t); } static int _fx_cons_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t( struct _fx_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t* hd, struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t* tl, bool addref_tl, struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t, _fx_copy_T2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t); } static void _fx_free_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t* dst) { 
_fx_free_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t(&dst->t0); _fx_free_N10Ast__exp_t(&dst->t1); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t3); } static void _fx_copy_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t3, &dst->t3); } static void _fx_make_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_t_data_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_R16Ast__for_flags_t* t2, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t3, struct _fx_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t3, &fx_result->t3); } static void _fx_free_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t0); _fx_free_LT2N10Ast__pat_tN10Ast__exp_t(&dst->t1); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2); } 
static void _fx_make_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__exp_t_data_t* t0, struct _fx_LT2N10Ast__pat_tN10Ast__exp_t_data_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2, struct _fx_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__exp_t(&dst->t0); _fx_free_N10Ast__typ_t(&dst->t1); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__exp_t_data_t* t0, struct _fx_N10Ast__typ_t_data_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2, struct _fx_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T2ST2N10Ast__typ_tR10Ast__loc_t(struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t* dst) { fx_free_str(&dst->t0); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2ST2N10Ast__typ_tR10Ast__loc_t( struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t* dst) { fx_copy_str(&src->t0, &dst->t0); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2ST2N10Ast__typ_tR10Ast__loc_t( fx_str_t* t0, struct 
_fx_T2N10Ast__typ_tR10Ast__loc_t* t1, struct _fx_T2ST2N10Ast__typ_tR10Ast__loc_t* fx_result) { fx_copy_str(t0, &fx_result->t0); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_T3SST2N10Ast__typ_tR10Ast__loc_t(struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t* dst) { fx_free_str(&dst->t0); fx_free_str(&dst->t1); _fx_free_T2N10Ast__typ_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3SST2N10Ast__typ_tR10Ast__loc_t( struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t* src, struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t* dst) { fx_copy_str(&src->t0, &dst->t0); fx_copy_str(&src->t1, &dst->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3SST2N10Ast__typ_tR10Ast__loc_t( fx_str_t* t0, fx_str_t* t1, struct _fx_T2N10Ast__typ_tR10Ast__loc_t* t2, struct _fx_T3SST2N10Ast__typ_tR10Ast__loc_t* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_copy_str(t1, &fx_result->t1); _fx_copy_T2N10Ast__typ_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t( struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t* dst) { _fx_free_N10Ast__pat_t(&dst->t0); _fx_free_N10Ast__exp_t(&dst->t1); _fx_free_R16Ast__val_flags_t(&dst->t2); } static void _fx_copy_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t( struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t* src, struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_R16Ast__val_flags_t(&src->t2, &dst->t2); dst->t3 = src->t3; } static void _fx_make_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t( struct _fx_N10Ast__pat_t_data_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_R16Ast__val_flags_t* t2, struct _fx_R10Ast__loc_t* t3, struct _fx_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, 
&fx_result->t1); _fx_copy_R16Ast__val_flags_t(t2, &fx_result->t2); fx_result->t3 = *t3; } static int _fx_cons_LT2iR9Ast__id_t( struct _fx_T2iR9Ast__id_t* hd, struct _fx_LT2iR9Ast__id_t_data_t* tl, bool addref_tl, struct _fx_LT2iR9Ast__id_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2iR9Ast__id_t, FX_COPY_SIMPLE_BY_PTR); } static void _fx_free_T2LT2iR9Ast__id_tR10Ast__loc_t(struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t* dst) { fx_free_list_simple(&dst->t0); } static void _fx_copy_T2LT2iR9Ast__id_tR10Ast__loc_t( struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t* src, struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LT2iR9Ast__id_tR10Ast__loc_t( struct _fx_LT2iR9Ast__id_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2LT2iR9Ast__id_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T3iLR9Ast__id_tR10Ast__loc_t(struct _fx_T3iLR9Ast__id_tR10Ast__loc_t* dst) { fx_free_list_simple(&dst->t1); } static void _fx_copy_T3iLR9Ast__id_tR10Ast__loc_t( struct _fx_T3iLR9Ast__id_tR10Ast__loc_t* src, struct _fx_T3iLR9Ast__id_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3iLR9Ast__id_tR10Ast__loc_t( int_ t0, struct _fx_LR9Ast__id_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3iLR9Ast__id_tR10Ast__loc_t* fx_result) { fx_result->t0 = t0; FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T2LSR10Ast__loc_t(struct _fx_T2LSR10Ast__loc_t* dst) { _fx_free_LS(&dst->t0); } static void _fx_copy_T2LSR10Ast__loc_t(struct _fx_T2LSR10Ast__loc_t* src, struct _fx_T2LSR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LSR10Ast__loc_t( struct _fx_LS_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2LSR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void 
_fx_free_N10Ast__exp_t(struct _fx_N10Ast__exp_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 4: _fx_free_T2Nt6option1N10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpReturn); break; case 5: _fx_free_T4Nt6option1N10Ast__exp_tNt6option1N10Ast__exp_tNt6option1N10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( &(*dst)->u.ExpRange); break; case 6: _fx_free_T2N10Ast__lit_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpLit); break; case 7: _fx_free_T2R9Ast__id_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpIdent); break; case 8: _fx_free_T4N13Ast__binary_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpBinary); break; case 9: _fx_free_T3N12Ast__unary_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpUnary); break; case 10: _fx_free_T3N13Ast__intrin_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpIntrin); break; case 11: _fx_free_T2R9Ast__id_tN10Ast__exp_t(&(*dst)->u.ExpSync); break; case 12: _fx_free_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpSeq); break; case 13: _fx_free_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMkTuple); break; case 14: _fx_free_T2LLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMkArray); break; case 15: _fx_free_T2LN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMkVector); break; case 16: _fx_free_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMkRecord); break; case 17: _fx_free_T3N10Ast__exp_tLT2R9Ast__id_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpUpdateRecord); break; case 18: _fx_free_T3N10Ast__exp_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpCall); break; case 19: _fx_free_T5N10Ast__exp_tN13Ast__border_tN18Ast__interpolate_tLN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t( &(*dst)->u.ExpAt); break; case 20: _fx_free_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpAssign); break; case 21: _fx_free_T3N10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMem); 
break; case 22: _fx_free_T2N10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpThrow); break; case 23: _fx_free_T4N10Ast__exp_tN10Ast__exp_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpIf); break; case 24: _fx_free_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpWhile); break; case 25: _fx_free_T3N10Ast__exp_tN10Ast__exp_tR10Ast__loc_t(&(*dst)->u.ExpDoWhile); break; case 26: _fx_free_T5LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tR10Ast__loc_t(&(*dst)->u.ExpFor); break; case 27: _fx_free_T4LT2LT2N10Ast__pat_tN10Ast__exp_tN10Ast__pat_tN10Ast__exp_tR16Ast__for_flags_tT2N10Ast__typ_tR10Ast__loc_t( &(*dst)->u.ExpMap); break; case 28: _fx_free_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpTryCatch); break; case 29: _fx_free_T3N10Ast__exp_tLT2N10Ast__pat_tN10Ast__exp_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpMatch); break; case 30: _fx_free_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpCast); break; case 31: _fx_free_T3N10Ast__exp_tN10Ast__typ_tT2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpTyped); break; case 32: _fx_free_T2ST2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpCCode); break; case 33: _fx_free_T3SST2N10Ast__typ_tR10Ast__loc_t(&(*dst)->u.ExpData); break; case 34: _fx_free_T4N10Ast__pat_tN10Ast__exp_tR16Ast__val_flags_tR10Ast__loc_t(&(*dst)->u.DefVal); break; case 35: _fx_free_rR13Ast__deffun_t(&(*dst)->u.DefFun); break; case 36: _fx_free_rR13Ast__defexn_t(&(*dst)->u.DefExn); break; case 37: _fx_free_rR13Ast__deftyp_t(&(*dst)->u.DefTyp); break; case 38: _fx_free_rR17Ast__defvariant_t(&(*dst)->u.DefVariant); break; case 39: _fx_free_rR19Ast__definterface_t(&(*dst)->u.DefInterface); break; case 40: _fx_free_T2LT2iR9Ast__id_tR10Ast__loc_t(&(*dst)->u.DirImport); break; case 41: _fx_free_T3iLR9Ast__id_tR10Ast__loc_t(&(*dst)->u.DirImportFrom); break; case 42: _fx_free_T2LSR10Ast__loc_t(&(*dst)->u.DirPragma); break; default: ; } fx_free(*dst); } *dst = 0; } static void 
_fx_free_T2N10Ast__lit_tR10Ast__loc_t(struct _fx_T2N10Ast__lit_tR10Ast__loc_t* dst) { _fx_free_N10Ast__lit_t(&dst->t0); } static void _fx_copy_T2N10Ast__lit_tR10Ast__loc_t( struct _fx_T2N10Ast__lit_tR10Ast__loc_t* src, struct _fx_T2N10Ast__lit_tR10Ast__loc_t* dst) { _fx_copy_N10Ast__lit_t(&src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2N10Ast__lit_tR10Ast__loc_t( struct _fx_N10Ast__lit_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2N10Ast__lit_tR10Ast__loc_t* fx_result) { _fx_copy_N10Ast__lit_t(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2LN10Ast__pat_tR10Ast__loc_t(struct _fx_T2LN10Ast__pat_tR10Ast__loc_t* dst) { _fx_free_LN10Ast__pat_t(&dst->t0); } static void _fx_copy_T2LN10Ast__pat_tR10Ast__loc_t( struct _fx_T2LN10Ast__pat_tR10Ast__loc_t* src, struct _fx_T2LN10Ast__pat_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LN10Ast__pat_tR10Ast__loc_t( struct _fx_LN10Ast__pat_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2LN10Ast__pat_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t(struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t* dst) { _fx_free_LN10Ast__pat_t(&dst->t1); } static void _fx_copy_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t( struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t* src, struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t( struct _fx_R9Ast__id_t* t0, struct _fx_LN10Ast__pat_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T2R9Ast__id_tN10Ast__pat_t(struct _fx_T2R9Ast__id_tN10Ast__pat_t* dst) { _fx_free_N10Ast__pat_t(&dst->t1); } static void 
_fx_copy_T2R9Ast__id_tN10Ast__pat_t( struct _fx_T2R9Ast__id_tN10Ast__pat_t* src, struct _fx_T2R9Ast__id_tN10Ast__pat_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tN10Ast__pat_t( struct _fx_R9Ast__id_t* t0, struct _fx_N10Ast__pat_t_data_t* t1, struct _fx_T2R9Ast__id_tN10Ast__pat_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2R9Ast__id_tN10Ast__pat_t(struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__pat_t, _fx_free_T2R9Ast__id_tN10Ast__pat_t); } static int _fx_cons_LT2R9Ast__id_tN10Ast__pat_t( struct _fx_T2R9Ast__id_tN10Ast__pat_t* hd, struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t* tl, bool addref_tl, struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN10Ast__pat_t, _fx_copy_T2R9Ast__id_tN10Ast__pat_t); } static void _fx_free_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t( struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t* dst) { _fx_free_LT2R9Ast__id_tN10Ast__pat_t(&dst->t1); } static void _fx_copy_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t( struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t* src, struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t( struct _fx_Nt6option1R9Ast__id_t* t0, struct _fx_LT2R9Ast__id_tN10Ast__pat_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t(struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t* dst) { _fx_free_N10Ast__pat_t(&dst->t0); 
_fx_free_N10Ast__pat_t(&dst->t1); } static void _fx_copy_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t( struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t* src, struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t( struct _fx_N10Ast__pat_t_data_t* t0, struct _fx_N10Ast__pat_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t(struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t* dst) { _fx_free_N10Ast__pat_t(&dst->t0); } static void _fx_copy_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t( struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t* src, struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; dst->t2 = src->t2; } static void _fx_make_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t( struct _fx_N10Ast__pat_t_data_t* t0, struct _fx_R9Ast__id_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; fx_result->t2 = *t2; } static void _fx_free_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t(struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t* dst) { _fx_free_N10Ast__pat_t(&dst->t0); _fx_free_N10Ast__typ_t(&dst->t1); } static void _fx_copy_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t( struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t* src, struct _fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t( struct _fx_N10Ast__pat_t_data_t* t0, struct _fx_N10Ast__typ_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct 
/* NOTE(review): auto-generated reference-counting helpers (apparently Ficus compiler
 * C backend output — mangled names encode tuple/variant element types; confirm against
 * the generator before hand-editing). Pattern throughout: _fx_free_* releases owned
 * members, _fx_copy_* bumps refcounts via FX_COPY_PTR / copies plain fields by value,
 * _fx_make_* initializes a result tuple from its components. */
_fx_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t(struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t* dst) { _fx_free_N10Ast__pat_t(&dst->t0); _fx_free_N10Ast__exp_t(&dst->t1); } static void _fx_copy_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t( struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t* src, struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t( struct _fx_N10Ast__pat_t_data_t* t0, struct _fx_N10Ast__exp_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T2N10Ast__pat_tR10Ast__loc_t(struct _fx_T2N10Ast__pat_tR10Ast__loc_t* dst) { _fx_free_N10Ast__pat_t(&dst->t0); } static void _fx_copy_T2N10Ast__pat_tR10Ast__loc_t( struct _fx_T2N10Ast__pat_tR10Ast__loc_t* src, struct _fx_T2N10Ast__pat_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2N10Ast__pat_tR10Ast__loc_t( struct _fx_N10Ast__pat_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2N10Ast__pat_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_N10Ast__pat_t(struct _fx_N10Ast__pat_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 2: _fx_free_T2N10Ast__lit_tR10Ast__loc_t(&(*dst)->u.PatLit); break; case 4: _fx_free_T2LN10Ast__pat_tR10Ast__loc_t(&(*dst)->u.PatTuple); break; case 5: _fx_free_T3R9Ast__id_tLN10Ast__pat_tR10Ast__loc_t(&(*dst)->u.PatVariant); break; case 6: _fx_free_T3Nt6option1R9Ast__id_tLT2R9Ast__id_tN10Ast__pat_tR10Ast__loc_t(&(*dst)->u.PatRecord); break; case 7:
/* (continuation of _fx_free_N10Ast__pat_t) When FX_DECREF drops the refcount to its
 * last reference, the tag selects which union member to free, then the heap cell
 * itself is released and *dst is nulled to prevent use-after-free / double-free.
 * Tags with no case (e.g. PatAny-style payload-free variants) need no cleanup. */
_fx_free_T3N10Ast__pat_tN10Ast__pat_tR10Ast__loc_t(&(*dst)->u.PatCons); break; case 8: _fx_free_T3N10Ast__pat_tR9Ast__id_tR10Ast__loc_t(&(*dst)->u.PatAs); break; case 9: _fx_free_T3N10Ast__pat_tN10Ast__typ_tR10Ast__loc_t(&(*dst)->u.PatTyped); break; case 10: _fx_free_T3N10Ast__pat_tN10Ast__exp_tR10Ast__loc_t(&(*dst)->u.PatWhen); break; case 11: _fx_free_T2LN10Ast__pat_tR10Ast__loc_t(&(*dst)->u.PatAlt); break; case 12: _fx_free_T2N10Ast__pat_tR10Ast__loc_t(&(*dst)->u.PatRef); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_N16Ast__env_entry_t(struct _fx_N16Ast__env_entry_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 2: _fx_free_N10Ast__typ_t(&(*dst)->u.EnvTyp); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_T2SR10Ast__loc_t(struct _fx_T2SR10Ast__loc_t* dst) { fx_free_str(&dst->t0); } static void _fx_copy_T2SR10Ast__loc_t(struct _fx_T2SR10Ast__loc_t* src, struct _fx_T2SR10Ast__loc_t* dst) { fx_copy_str(&src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2SR10Ast__loc_t(fx_str_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2SR10Ast__loc_t* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_LT2SR10Ast__loc_t(struct _fx_LT2SR10Ast__loc_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2SR10Ast__loc_t, _fx_free_T2SR10Ast__loc_t); } static int _fx_cons_LT2SR10Ast__loc_t( struct _fx_T2SR10Ast__loc_t* hd, struct _fx_LT2SR10Ast__loc_t_data_t* tl, bool addref_tl, struct _fx_LT2SR10Ast__loc_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2SR10Ast__loc_t, _fx_copy_T2SR10Ast__loc_t); } static int _fx_cons_Li(int_ hd, struct _fx_Li_data_t* tl, bool addref_tl, struct _fx_Li_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_Li, FX_COPY_SIMPLE); } static void _fx_free_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t( struct
/* 10-field module-descriptor tuple: only heap-backed fields (string t1, lists t4/t5,
 * map t6, dynvec t9) need explicit free/copy; scalar and bool fields are value-copied. */
_fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t* dst) { fx_free_str(&dst->t1); _fx_free_LN10Ast__exp_t(&dst->t4); fx_free_list_simple(&dst->t5); _fx_free_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(&dst->t6); _fx_free_Nt9Dynvec__t1N14Ast__id_info_t(&dst->t9); } static void _fx_copy_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t( struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t* src, struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t* dst) { dst->t0 = src->t0; fx_copy_str(&src->t1, &dst->t1); dst->t2 = src->t2; dst->t3 = src->t3; FX_COPY_PTR(src->t4, &dst->t4); FX_COPY_PTR(src->t5, &dst->t5); _fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(&src->t6, &dst->t6); dst->t7 = src->t7; dst->t8 = src->t8; FX_COPY_PTR(src->t9, &dst->t9); } static void _fx_make_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t( struct _fx_R9Ast__id_t* t0, fx_str_t* t1, int_ t2, bool t3, struct _fx_LN10Ast__exp_t_data_t* t4, struct _fx_Li_data_t* t5, struct _fx_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t* t6, bool t7, int_ t8, struct _fx_Nt9Dynvec__t1N14Ast__id_info_t_data_t* t9, struct _fx_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t* fx_result) { fx_result->t0 = *t0; fx_copy_str(t1, &fx_result->t1); fx_result->t2 = t2; fx_result->t3 = t3; FX_COPY_PTR(t4, &fx_result->t4); FX_COPY_PTR(t5, &fx_result->t5); _fx_copy_Rt6Map__t2R9Ast__id_tLN16Ast__env_entry_t(t6, &fx_result->t6); fx_result->t7 = t7; fx_result->t8 = t8; FX_COPY_PTR(t9, &fx_result->t9); } static void _fx_free_N16Ast__defmodule_t(struct _fx_N16Ast__defmodule_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) {
/* Lexer token variant: by-value (non-refcounted) tagged union — free/copy switch on
 * tag; payload-free tags fall to default (raw union bit-copy on copy, no-op on free). */
_fx_free_T10R9Ast__id_tSiBLN10Ast__exp_tLiRt6Map__t2R9Ast__id_tLN16Ast__env_entry_tBiNt9Dynvec__t1N14Ast__id_info_t( &(*dst)->u.defmodule_t); fx_free(*dst); } *dst = 0; } static void _fx_free_LE(struct _fx_LE_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LE, fx_free_exn); } static int _fx_cons_LE(fx_exn_t* hd, struct _fx_LE_data_t* tl, bool addref_tl, struct _fx_LE_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LE, fx_copy_exn); } static void _fx_free_T2BS(struct _fx_T2BS* dst) { fx_free_str(&dst->t1); } static void _fx_copy_T2BS(struct _fx_T2BS* src, struct _fx_T2BS* dst) { dst->t0 = src->t0; fx_copy_str(&src->t1, &dst->t1); } static void _fx_make_T2BS(bool t0, fx_str_t* t1, struct _fx_T2BS* fx_result) { fx_result->t0 = t0; fx_copy_str(t1, &fx_result->t1); } static void _fx_free_N14Lexer__token_t(struct _fx_N14Lexer__token_t* dst) { switch (dst->tag) { case 1: _fx_free_N10Ast__lit_t(&dst->u.LITERAL); break; case 2: _fx_free_T2BS(&dst->u.IDENT); break; case 3: fx_free_str(&dst->u.TYVAR); break; case 13: fx_free_str(&dst->u.DATA); break; case 94: _fx_free_N13Ast__binary_t(&dst->u.AUG_BINOP); break; case 100: fx_free_str(&dst->u.RESERVED); break; default: ; } dst->tag = 0; } static void _fx_copy_N14Lexer__token_t(struct _fx_N14Lexer__token_t* src, struct _fx_N14Lexer__token_t* dst) { dst->tag = src->tag; switch (src->tag) { case 1: _fx_copy_N10Ast__lit_t(&src->u.LITERAL, &dst->u.LITERAL); break; case 2: _fx_copy_T2BS(&src->u.IDENT, &dst->u.IDENT); break; case 3: fx_copy_str(&src->u.TYVAR, &dst->u.TYVAR); break; case 13: fx_copy_str(&src->u.DATA, &dst->u.DATA); break; case 94: FX_COPY_PTR(src->u.AUG_BINOP, &dst->u.AUG_BINOP); break; case 100: fx_copy_str(&src->u.RESERVED, &dst->u.RESERVED); break; default: dst->u = src->u; } } static void _fx_free_LN14Lexer__token_t(struct _fx_LN14Lexer__token_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN14Lexer__token_t, _fx_free_N14Lexer__token_t); } static int _fx_cons_LN14Lexer__token_t( struct _fx_N14Lexer__token_t* hd, struct
/* Generated helpers for K-form (lowered IR) value types: klit/atom variants are
 * by-value tagged unions; kexp/ktyp are refcounted heap variants handled via
 * FX_COPY_PTR and the free-on-last-DECREF pattern. */
_fx_LN14Lexer__token_t_data_t* tl, bool addref_tl, struct _fx_LN14Lexer__token_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN14Lexer__token_t, _fx_copy_N14Lexer__token_t); } static void _fx_free_N14K_form__klit_t(struct _fx_N14K_form__klit_t* dst) { switch (dst->tag) { case 5: fx_free_str(&dst->u.KLitString); break; case 8: _fx_free_N14K_form__ktyp_t(&dst->u.KLitNil); break; default: ; } dst->tag = 0; } static void _fx_copy_N14K_form__klit_t(struct _fx_N14K_form__klit_t* src, struct _fx_N14K_form__klit_t* dst) { dst->tag = src->tag; switch (src->tag) { case 5: fx_copy_str(&src->u.KLitString, &dst->u.KLitString); break; case 8: FX_COPY_PTR(src->u.KLitNil, &dst->u.KLitNil); break; default: dst->u = src->u; } } static void _fx_free_N14K_form__atom_t(struct _fx_N14K_form__atom_t* dst) { switch (dst->tag) { case 2: _fx_free_N14K_form__klit_t(&dst->u.AtomLit); break; default: ; } dst->tag = 0; } static void _fx_copy_N14K_form__atom_t(struct _fx_N14K_form__atom_t* src, struct _fx_N14K_form__atom_t* dst) { dst->tag = src->tag; switch (src->tag) { case 2: _fx_copy_N14K_form__klit_t(&src->u.AtomLit, &dst->u.AtomLit); break; default: dst->u = src->u; } } static void _fx_free_Nt6option1N14K_form__atom_t(struct _fx_Nt6option1N14K_form__atom_t* dst) { switch (dst->tag) { case 2: _fx_free_N14K_form__atom_t(&dst->u.Some); break; default: ; } dst->tag = 0; } static void _fx_copy_Nt6option1N14K_form__atom_t( struct _fx_Nt6option1N14K_form__atom_t* src, struct _fx_Nt6option1N14K_form__atom_t* dst) { dst->tag = src->tag; switch (src->tag) { case 2: _fx_copy_N14K_form__atom_t(&src->u.Some, &dst->u.Some); break; default: dst->u = src->u; } } static void _fx_free_LN14K_form__kexp_t(struct _fx_LN14K_form__kexp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN14K_form__kexp_t, _fx_free_N14K_form__kexp_t); } static int _fx_cons_LN14K_form__kexp_t( struct _fx_N14K_form__kexp_t_data_t* hd, struct _fx_LN14K_form__kexp_t_data_t* tl, bool addref_tl, struct _fx_LN14K_form__kexp_t_data_t** fx_result) {
_fx_
/* (continuation: cons for kexp list — elements are refcounted pointers, so the list
 * impl copies heads with FX_COPY_PTR rather than a structural copy function.) */
MAKE_LIST_IMPL(_fx_LN14K_form__kexp_t, FX_COPY_PTR); } static void _fx_free_T2BN14K_form__atom_t(struct _fx_T2BN14K_form__atom_t* dst) { _fx_free_N14K_form__atom_t(&dst->t1); } static void _fx_copy_T2BN14K_form__atom_t(struct _fx_T2BN14K_form__atom_t* src, struct _fx_T2BN14K_form__atom_t* dst) { dst->t0 = src->t0; _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1); } static void _fx_make_T2BN14K_form__atom_t(bool t0, struct _fx_N14K_form__atom_t* t1, struct _fx_T2BN14K_form__atom_t* fx_result) { fx_result->t0 = t0; _fx_copy_N14K_form__atom_t(t1, &fx_result->t1); } static void _fx_free_LT2BN14K_form__atom_t(struct _fx_LT2BN14K_form__atom_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2BN14K_form__atom_t, _fx_free_T2BN14K_form__atom_t); } static int _fx_cons_LT2BN14K_form__atom_t( struct _fx_T2BN14K_form__atom_t* hd, struct _fx_LT2BN14K_form__atom_t_data_t* tl, bool addref_tl, struct _fx_LT2BN14K_form__atom_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2BN14K_form__atom_t, _fx_copy_T2BN14K_form__atom_t); } static void _fx_free_T2LN14K_form__ktyp_tN14K_form__ktyp_t(struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* dst) { _fx_free_LN14K_form__ktyp_t(&dst->t0); _fx_free_N14K_form__ktyp_t(&dst->t1); } static void _fx_copy_T2LN14K_form__ktyp_tN14K_form__ktyp_t( struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* src, struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN14K_form__ktyp_tN14K_form__ktyp_t( struct _fx_LN14K_form__ktyp_t_data_t* t0, struct _fx_N14K_form__ktyp_t_data_t* t1, struct _fx_T2LN14K_form__ktyp_tN14K_form__ktyp_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t(struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* dst) { _fx_free_LT2R9Ast__id_tN14K_form__ktyp_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t( struct
/* K-form type (ktyp) destructor below: refcounted variant; compound type
 * constructors (fun/tuple/record/array/vector/list/ref) free their payloads. */
_fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* src, struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t( struct _fx_R9Ast__id_t* t0, struct _fx_LT2R9Ast__id_tN14K_form__ktyp_t_data_t* t1, struct _fx_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T2iN14K_form__ktyp_t(struct _fx_T2iN14K_form__ktyp_t* dst) { _fx_free_N14K_form__ktyp_t(&dst->t1); } static void _fx_copy_T2iN14K_form__ktyp_t(struct _fx_T2iN14K_form__ktyp_t* src, struct _fx_T2iN14K_form__ktyp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2iN14K_form__ktyp_t( int_ t0, struct _fx_N14K_form__ktyp_t_data_t* t1, struct _fx_T2iN14K_form__ktyp_t* fx_result) { fx_result->t0 = t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_N14K_form__ktyp_t(struct _fx_N14K_form__ktyp_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 11: _fx_free_T2LN14K_form__ktyp_tN14K_form__ktyp_t(&(*dst)->u.KTypFun); break; case 12: _fx_free_LN14K_form__ktyp_t(&(*dst)->u.KTypTuple); break; case 13: _fx_free_T2R9Ast__id_tLT2R9Ast__id_tN14K_form__ktyp_t(&(*dst)->u.KTypRecord); break; case 15: _fx_free_T2iN14K_form__ktyp_t(&(*dst)->u.KTypArray); break; case 16: _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypVector); break; case 17: _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypList); break; case 18: _fx_free_N14K_form__ktyp_t(&(*dst)->u.KTypRef); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_Ta3N14K_form__atom_t(struct _fx_Ta3N14K_form__atom_t* dst) { _fx_free_N14K_form__atom_t(&dst->t0); _fx_free_N14K_form__atom_t(&dst->t1); _fx_free_N14K_form__atom_t(&dst->t2); } static void _fx_copy_Ta3N14K_form__atom_t(struct _fx_Ta3N14K_form__atom_t* src, struct _fx_Ta3N14K_form__atom_t* dst) { _fx_copy_N14K_form__atom_t(&src->t0,
/* (continuation: atom triple copy + dom_t variant — DomainRange carries an atom
 * triple, presumably (start, end, step) of a range; confirm against the K-form IR.) */
&dst->t0); _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1); _fx_copy_N14K_form__atom_t(&src->t2, &dst->t2); } static void _fx_make_Ta3N14K_form__atom_t( struct _fx_N14K_form__atom_t* t0, struct _fx_N14K_form__atom_t* t1, struct _fx_N14K_form__atom_t* t2, struct _fx_Ta3N14K_form__atom_t* fx_result) { _fx_copy_N14K_form__atom_t(t0, &fx_result->t0); _fx_copy_N14K_form__atom_t(t1, &fx_result->t1); _fx_copy_N14K_form__atom_t(t2, &fx_result->t2); } static void _fx_free_N13K_form__dom_t(struct _fx_N13K_form__dom_t* dst) { switch (dst->tag) { case 1: _fx_free_N14K_form__atom_t(&dst->u.DomainElem); break; case 2: _fx_free_N14K_form__atom_t(&dst->u.DomainFast); break; case 3: _fx_free_Ta3N14K_form__atom_t(&dst->u.DomainRange); break; default: ; } dst->tag = 0; } static void _fx_copy_N13K_form__dom_t(struct _fx_N13K_form__dom_t* src, struct _fx_N13K_form__dom_t* dst) { dst->tag = src->tag; switch (src->tag) { case 1: _fx_copy_N14K_form__atom_t(&src->u.DomainElem, &dst->u.DomainElem); break; case 2: _fx_copy_N14K_form__atom_t(&src->u.DomainFast, &dst->u.DomainFast); break; case 3: _fx_copy_Ta3N14K_form__atom_t(&src->u.DomainRange, &dst->u.DomainRange); break; default: dst->u = src->u; } } static void _fx_free_T2Nt6option1N14K_form__atom_tR10Ast__loc_t(struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t* dst) { _fx_free_Nt6option1N14K_form__atom_t(&dst->t0); } static void _fx_copy_T2Nt6option1N14K_form__atom_tR10Ast__loc_t( struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t* src, struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t* dst) { _fx_copy_Nt6option1N14K_form__atom_t(&src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2Nt6option1N14K_form__atom_tR10Ast__loc_t( struct _fx_Nt6option1N14K_form__atom_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2Nt6option1N14K_form__atom_tR10Ast__loc_t* fx_result) { _fx_copy_Nt6option1N14K_form__atom_t(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(struct
/* (ktyp, loc) pair helpers — the (type, source-location) pair is the common
 * trailing payload of most K-form expression nodes below. Location (R10Ast__loc_t)
 * is a plain value record: copied by assignment, never freed. */
_fx_T2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_N14K_form__ktyp_t(&dst->t0); } static void _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_N14K_form__ktyp_t_data_t* t0, struct _fx_R10Ast__loc_t* t1, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; } static void _fx_free_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_N14K_form__atom_t(&dst->t0); _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_copy_N14K_form__atom_t(&src->t0, &dst->t0); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_N14K_form__atom_t* t0, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1, struct _fx_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { _fx_copy_N14K_form__atom_t(t0, &fx_result->t0); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_N13Ast__binary_t(&dst->t0); _fx_free_N14K_form__atom_t(&dst->t1); _fx_free_N14K_form__atom_t(&dst->t2); _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3); } static void _fx_copy_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct
/* (binary-op node payload: op, lhs atom, rhs atom, (type, loc); unary-op payload
 * follows the same shape with a single operand.) */
_fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1); _fx_copy_N14K_form__atom_t(&src->t2, &dst->t2); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3); } static void _fx_make_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_N13Ast__binary_t_data_t* t0, struct _fx_N14K_form__atom_t* t1, struct _fx_N14K_form__atom_t* t2, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3, struct _fx_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); _fx_copy_N14K_form__atom_t(t1, &fx_result->t1); _fx_copy_N14K_form__atom_t(t2, &fx_result->t2); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3); } static void _fx_free_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_N14K_form__atom_t(&dst->t1); _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { dst->t0 = src->t0; _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_N12Ast__unary_t* t0, struct _fx_N14K_form__atom_t* t1, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2, struct _fx_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; _fx_copy_N14K_form__atom_t(t1, &fx_result->t1);
_fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_LN14K_form__atom_t(struct _fx_LN14K_form__atom_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN14K_form__atom_t, _fx_free_N14K_form__atom_t); } static int _fx_cons_LN14K_form__atom_t( struct _fx_N14K_form__atom_t* hd, struct _fx_LN14K_form__atom_t_data_t* tl, bool addref_tl, struct _fx_LN14K_form__atom_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN14K_form__atom_t, _fx_copy_N14K_form__atom_t); } static void _fx_free_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_LN14K_form__atom_t(&dst->t1); _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_N13Ast__intrin_t* t0, struct _fx_LN14K_form__atom_t_data_t* t1, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2, struct _fx_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T2R9Ast__id_tN14K_form__kexp_t(struct _fx_T2R9Ast__id_tN14K_form__kexp_t* dst) { _fx_free_N14K_form__kexp_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tN14K_form__kexp_t( struct _fx_T2R9Ast__id_tN14K_form__kexp_t* src, struct _fx_T2R9Ast__id_tN14K_form__kexp_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tN14K_form__kexp_t( struct _fx_R9Ast__id_t* t0, struct
/* (kexp-sequence and if-expression payloads: lists of kexp pointers plus the
 * shared (type, loc) trailer; all pointer members are refcount-bumped on copy.) */
_fx_N14K_form__kexp_t_data_t* t1, struct _fx_T2R9Ast__id_tN14K_form__kexp_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_LN14K_form__kexp_t(&dst->t0); _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_LN14K_form__kexp_t_data_t* t0, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1, struct _fx_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_N14K_form__kexp_t(&dst->t0); _fx_free_N14K_form__kexp_t(&dst->t1); _fx_free_N14K_form__kexp_t(&dst->t2); _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3); } static void _fx_copy_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); FX_COPY_PTR(src->t2, &dst->t2); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3); } static void
_fx_make_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_N14K_form__kexp_t_data_t* t0, struct _fx_N14K_form__kexp_t_data_t* t1, struct _fx_N14K_form__kexp_t_data_t* t2, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3, struct _fx_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); FX_COPY_PTR(t2, &fx_result->t2); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3); } static void _fx_free_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_LN14K_form__atom_t(&dst->t1); _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_R9Ast__id_t* t0, struct _fx_LN14K_form__atom_t_data_t* t1, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2, struct _fx_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_LN14K_form__atom_t(&dst->t2); _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3); } static void _fx_copy_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct
/* (id, int, atom-list, (type, loc)) and (id, id, atom-list, (type, loc)) tuple
 * helpers — id records and int fields are plain values (assignment-copied);
 * the atom list and the typed-location pair carry ownership. */
_fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { dst->t0 = src->t0; dst->t1 = src->t1; FX_COPY_PTR(src->t2, &dst->t2); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3); } static void _fx_make_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_R9Ast__id_t* t0, int_ t1, struct _fx_LN14K_form__atom_t_data_t* t2, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3, struct _fx_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; fx_result->t1 = t1; FX_COPY_PTR(t2, &fx_result->t2); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3); } static void _fx_free_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_LN14K_form__atom_t(&dst->t0); _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_LN14K_form__atom_t_data_t* t0, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1, struct _fx_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_LN14K_form__atom_t(&dst->t2); _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3); } static void
_fx_copy_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { dst->t0 = src->t0; dst->t1 = src->t1; FX_COPY_PTR(src->t2, &dst->t2); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3); } static void _fx_make_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_R9Ast__id_t* t0, struct _fx_R9Ast__id_t* t1, struct _fx_LN14K_form__atom_t_data_t* t2, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3, struct _fx_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; fx_result->t1 = *t1; FX_COPY_PTR(t2, &fx_result->t2); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3); } static void _fx_free_LLT2BN14K_form__atom_t(struct _fx_LLT2BN14K_form__atom_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LLT2BN14K_form__atom_t, _fx_free_LT2BN14K_form__atom_t); } static int _fx_cons_LLT2BN14K_form__atom_t( struct _fx_LT2BN14K_form__atom_t_data_t* hd, struct _fx_LLT2BN14K_form__atom_t_data_t* tl, bool addref_tl, struct _fx_LLT2BN14K_form__atom_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LLT2BN14K_form__atom_t, FX_COPY_PTR); } static void _fx_free_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_LLT2BN14K_form__atom_t(&dst->t1); _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2); } static void
/* (nested list-of-list of (bool, atom) pairs — outer list cons copies inner list
 * heads by refcount bump (FX_COPY_PTR), not by deep copy.) */
_fx_make_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( bool t0, struct _fx_LLT2BN14K_form__atom_t_data_t* t1, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2, struct _fx_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { fx_result->t0 = t0; FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_LT2BN14K_form__atom_t(&dst->t0); _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_LT2BN14K_form__atom_t_data_t* t0, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1, struct _fx_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_LN13K_form__dom_t(struct _fx_LN13K_form__dom_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LN13K_form__dom_t, _fx_free_N13K_form__dom_t); } static int _fx_cons_LN13K_form__dom_t( struct _fx_N13K_form__dom_t* hd, struct _fx_LN13K_form__dom_t_data_t* tl, bool addref_tl, struct _fx_LN13K_form__dom_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LN13K_form__dom_t, _fx_copy_N13K_form__dom_t); } static void _fx_free_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_N14K_form__atom_t(&dst->t0);
fx_result) { fx_result->t0 = *t0; fx_result->t1 = t1; _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t(struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t* dst) { _fx_free_N14K_form__atom_t(&dst->t1); } static void _fx_copy_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t( struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t* src, struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t* dst) { dst->t0 = src->t0; _fx_copy_N14K_form__atom_t(&src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t( struct _fx_R9Ast__id_t* t0, struct _fx_N14K_form__atom_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t* fx_result) { fx_result->t0 = *t0; _fx_copy_N14K_form__atom_t(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T2LN14K_form__kexp_tN14K_form__kexp_t(struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* dst) { _fx_free_LN14K_form__kexp_t(&dst->t0); _fx_free_N14K_form__kexp_t(&dst->t1); } static void _fx_copy_T2LN14K_form__kexp_tN14K_form__kexp_t( struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* src, struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN14K_form__kexp_tN14K_form__kexp_t( struct _fx_LN14K_form__kexp_t_data_t* t0, struct _fx_N14K_form__kexp_t_data_t* t1, struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2LN14K_form__kexp_tN14K_form__kexp_t(struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2LN14K_form__kexp_tN14K_form__kexp_t, _fx_free_T2LN14K_form__kexp_tN14K_form__kexp_t); } static int _fx_cons_LT2LN14K_form__kexp_tN14K_form__kexp_t( struct _fx_T2LN14K_form__kexp_tN14K_form__kexp_t* hd, struct 
_fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t* tl, bool addref_tl, struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2LN14K_form__kexp_tN14K_form__kexp_t, _fx_copy_T2LN14K_form__kexp_tN14K_form__kexp_t); } static void _fx_free_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_LT2LN14K_form__kexp_tN14K_form__kexp_t(&dst->t0); _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1); } static void _fx_copy_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1); } static void _fx_make_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_LT2LN14K_form__kexp_tN14K_form__kexp_t_data_t* t0, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1, struct _fx_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1); } static void _fx_free_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_N14K_form__kexp_t(&dst->t0); _fx_free_N14K_form__kexp_t(&dst->t1); _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t2); } static void _fx_copy_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); 
_fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t2, &dst->t2); } static void _fx_make_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t( struct _fx_N14K_form__kexp_t_data_t* t0, struct _fx_N14K_form__kexp_t_data_t* t1, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t2, struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t2, &fx_result->t2); } static void _fx_free_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t( struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_free_N14K_form__atom_t(&dst->t0); _fx_free_N14K_form__ktyp_t(&dst->t1); } static void _fx_copy_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t( struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t* src, struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t* dst) { _fx_copy_N14K_form__atom_t(&src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t( struct _fx_N14K_form__atom_t* t0, struct _fx_N14K_form__ktyp_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t* fx_result) { _fx_copy_N14K_form__atom_t(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T2R9Ast__id_tN13K_form__dom_t(struct _fx_T2R9Ast__id_tN13K_form__dom_t* dst) { _fx_free_N13K_form__dom_t(&dst->t1); } static void _fx_copy_T2R9Ast__id_tN13K_form__dom_t( struct _fx_T2R9Ast__id_tN13K_form__dom_t* src, struct _fx_T2R9Ast__id_tN13K_form__dom_t* dst) { dst->t0 = src->t0; _fx_copy_N13K_form__dom_t(&src->t1, &dst->t1); } static void _fx_make_T2R9Ast__id_tN13K_form__dom_t( struct _fx_R9Ast__id_t* t0, struct _fx_N13K_form__dom_t* t1, struct _fx_T2R9Ast__id_tN13K_form__dom_t* fx_result) { fx_result->t0 = *t0; _fx_copy_N13K_form__dom_t(t1, &fx_result->t1); } static 
/* Continuation: 'static' for this function sits at the end of the previous chunk line.
 * List of (id, dom) pairs — destructor and cons (for-loop iteration clauses). */
void _fx_free_LT2R9Ast__id_tN13K_form__dom_t(struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT2R9Ast__id_tN13K_form__dom_t,
                      _fx_free_T2R9Ast__id_tN13K_form__dom_t);
}
static int _fx_cons_LT2R9Ast__id_tN13K_form__dom_t(
    struct _fx_T2R9Ast__id_tN13K_form__dom_t* hd,
    struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT2R9Ast__id_tN13K_form__dom_t,
                      _fx_copy_T2R9Ast__id_tN13K_form__dom_t);
}
/* (kexp, (id, dom) list, id list) triple — free/copy/make (one for-clause). */
static void _fx_free_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
    struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* dst)
{
    _fx_free_N14K_form__kexp_t(&dst->t0);
    _fx_free_LT2R9Ast__id_tN13K_form__dom_t(&dst->t1);
    fx_free_list_simple(&dst->t2);  /* id list has no per-element destructor */
}
static void _fx_copy_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
    struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* src,
    struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    FX_COPY_PTR(src->t2, &dst->t2);
}
static void _fx_make_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
    struct _fx_N14K_form__kexp_t_data_t* t0,
    struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* t1,
    struct _fx_LR9Ast__id_t_data_t* t2,
    struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    FX_COPY_PTR(t2, &fx_result->t2);
}
/* List of the above triples — destructor and cons. */
static void _fx_free_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
    struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t,
                      _fx_free_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t);
}
static int _fx_cons_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(
    struct _fx_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t* hd,
    struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t* tl,
    bool addref_tl,
    struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t,
                      _fx_copy_T3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t);
}
/* 4-tuple (for-clauses, body kexp, for_flags, (ktyp, loc)) — KExpMap payload.
 * t2 (flags) is plain data: assigned, never freed. */
static void _fx_free_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    _fx_free_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t(&dst->t0);
    _fx_free_N14K_form__kexp_t(&dst->t1);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t3);
}
static void _fx_copy_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}
/* Constructor; its parameter list and body continue on the next chunk line. */
static void _fx_make_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_t_data_t* t0,
    struct _fx_N14K_form__kexp_t_data_t* t1,
    struct _fx_R16Ast__for_flags_t* t2,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t3, struct
/* Tail of the KExpMap 4-tuple constructor started on the previous chunk line. */
_fx_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t3, &fx_result->t3);
}
/* 5-tuple ((id, dom) list, id list, body kexp, for_flags, loc) — KExpFor payload.
 * t3/t4 are plain data: assigned, never freed. */
static void _fx_free_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t(
    struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t* dst)
{
    _fx_free_LT2R9Ast__id_tN13K_form__dom_t(&dst->t0);
    fx_free_list_simple(&dst->t1);
    _fx_free_N14K_form__kexp_t(&dst->t2);
}
static void _fx_copy_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t(
    struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t* src,
    struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    FX_COPY_PTR(src->t2, &dst->t2);
    dst->t3 = src->t3;
    dst->t4 = src->t4;
}
static void _fx_make_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t(
    struct _fx_LT2R9Ast__id_tN13K_form__dom_t_data_t* t0,
    struct _fx_LR9Ast__id_t_data_t* t1,
    struct _fx_N14K_form__kexp_t_data_t* t2,
    struct _fx_R16Ast__for_flags_t* t3,
    struct _fx_R10Ast__loc_t* t4,
    struct _fx_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    FX_COPY_PTR(t2, &fx_result->t2);
    fx_result->t3 = *t3;
    fx_result->t4 = *t4;
}
/* (kexp, kexp, loc) triple — free/copy/make (KExpWhile / KExpDoWhile payload). */
static void _fx_free_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t(
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__kexp_t(&dst->t0);
    _fx_free_N14K_form__kexp_t(&dst->t1);
}
static void _fx_copy_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t(
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t* src,
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}
static void _fx_make_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t(
    struct _fx_N14K_form__kexp_t_data_t* t0,
    struct _fx_N14K_form__kexp_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}
/* (string, (ktyp, loc)) pair — free/copy/make (KExpCCode payload). */
static void _fx_free_T2ST2N14K_form__ktyp_tR10Ast__loc_t(struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    fx_free_str(&dst->t0);
    _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&dst->t1);
}
static void _fx_copy_T2ST2N14K_form__ktyp_tR10Ast__loc_t(
    struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t* src,
    struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t* dst)
{
    fx_copy_str(&src->t0, &dst->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}
static void _fx_make_T2ST2N14K_form__ktyp_tR10Ast__loc_t(
    fx_str_t* t0,
    struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* t1,
    struct _fx_T2ST2N14K_form__ktyp_tR10Ast__loc_t* fx_result)
{
    fx_copy_str(t0, &fx_result->t0);
    _fx_copy_T2N14K_form__ktyp_tR10Ast__loc_t(t1, &fx_result->t1);
}
/* (id, kexp, loc) triple — free/copy/make (KDefVal payload); make continues on
 * the next chunk line. */
static void _fx_free_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t(struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__kexp_t(&dst->t1);
}
static void _fx_copy_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t(
    struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t* src,
    struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}
static void _fx_make_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N14K_form__kexp_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
/* Tail of _fx_make_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t from previous line. */
FX_COPY_PTR(t1, &fx_result->t1);
fx_result->t2 = *t2;
}
/* Destructor for the K-form expression variant (kexp). Decrements the refcount;
 * when the last reference drops, dispatches on the tag to free the payload of
 * the active union member, then frees the cell itself. Tags with no managed
 * payload fall through to the empty default. Always nulls *dst. */
static void _fx_free_N14K_form__kexp_t(struct _fx_N14K_form__kexp_t_data_t** dst)
{
    if (*dst && FX_DECREF((*dst)->rc) == 1) {
        switch ((*dst)->tag) {
        case 4:
            _fx_free_T2Nt6option1N14K_form__atom_tR10Ast__loc_t(&(*dst)->u.KExpReturn);
            break;
        case 5:
            _fx_free_T2N14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpAtom);
            break;
        case 6:
            _fx_free_T4N13Ast__binary_tN14K_form__atom_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpBinary);
            break;
        case 7:
            _fx_free_T3N12Ast__unary_tN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpUnary);
            break;
        case 8:
            _fx_free_T3N13Ast__intrin_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpIntrin);
            break;
        case 9:
            _fx_free_T2R9Ast__id_tN14K_form__kexp_t(&(*dst)->u.KExpSync);
            break;
        case 10:
            _fx_free_T2LN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpSeq);
            break;
        case 11:
            _fx_free_T4N14K_form__kexp_tN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpIf);
            break;
        case 12:
            _fx_free_T3R9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpCall);
            break;
        case 13:
            _fx_free_T4R9Ast__id_tiLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpICall);
            break;
        case 14:
            _fx_free_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkTuple);
            break;
        case 15:
            _fx_free_T2LN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkRecord);
            break;
        case 16:
            _fx_free_T4R9Ast__id_tR9Ast__id_tLN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkClosure);
            break;
        case 17:
            _fx_free_T3BLLT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkArray);
            break;
        case 18:
            _fx_free_T2LT2BN14K_form__atom_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMkVector);
            break;
        case 19:
            _fx_free_T5N14K_form__atom_tN13Ast__border_tN18Ast__interpolate_tLN13K_form__dom_tT2N14K_form__ktyp_tR10Ast__loc_t(
                &(*dst)->u.KExpAt);
            break;
        case 20:
            _fx_free_T3R9Ast__id_tiT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMem);
            break;
        case 21:
            _fx_free_T3R9Ast__id_tN14K_form__atom_tR10Ast__loc_t(&(*dst)->u.KExpAssign);
            break;
        case 22:
            _fx_free_T2LT2LN14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpMatch);
            break;
        case 23:
            _fx_free_T3N14K_form__kexp_tN14K_form__kexp_tT2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpTryCatch);
            break;
        /* NOTE(review): tag 24 is absent — presumably a payload-free constructor
         * handled by the default branch; confirm against the K_form definition. */
        case 25:
            _fx_free_T3N14K_form__atom_tN14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpCast);
            break;
        case 26:
            _fx_free_T4LT3N14K_form__kexp_tLT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tT2N14K_form__ktyp_tR10Ast__loc_t(
                &(*dst)->u.KExpMap);
            break;
        case 27:
            _fx_free_T5LT2R9Ast__id_tN13K_form__dom_tLR9Ast__id_tN14K_form__kexp_tR16Ast__for_flags_tR10Ast__loc_t(
                &(*dst)->u.KExpFor);
            break;
        case 28:
            _fx_free_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t(&(*dst)->u.KExpWhile);
            break;
        case 29:
            _fx_free_T3N14K_form__kexp_tN14K_form__kexp_tR10Ast__loc_t(&(*dst)->u.KExpDoWhile);
            break;
        case 30:
            _fx_free_T2ST2N14K_form__ktyp_tR10Ast__loc_t(&(*dst)->u.KExpCCode);
            break;
        case 31:
            _fx_free_T3R9Ast__id_tN14K_form__kexp_tR10Ast__loc_t(&(*dst)->u.KDefVal);
            break;
        case 32:
            _fx_free_rR17K_form__kdeffun_t(&(*dst)->u.KDefFun);
            break;
        case 33:
            _fx_free_rR17K_form__kdefexn_t(&(*dst)->u.KDefExn);
            break;
        case 34:
            _fx_free_rR21K_form__kdefvariant_t(&(*dst)->u.KDefVariant);
            break;
        case 35:
            _fx_free_rR23K_form__kdefinterface_t(&(*dst)->u.KDefInterface);
            break;
        case 36:
            _fx_free_rR17K_form__kdeftyp_t(&(*dst)->u.KDefTyp);
            break;
        case 37:
            _fx_free_rR25K_form__kdefclosurevars_t(&(*dst)->u.KDefClosureVars);
            break;
        default:
            ;  /* tags with no heap-managed payload */
        }
        fx_free(*dst);
    }
    *dst = 0;
}
/* pragmas record: only pragma_clibs is heap-managed; pragma_cpp is a plain flag. */
static void _fx_free_R14Ast__pragmas_t(struct _fx_R14Ast__pragmas_t* dst)
{
    _fx_free_LT2SR10Ast__loc_t(&dst->pragma_clibs);
}
/* Copy; the FX_COPY_PTR call's second argument continues on the next chunk line. */
static void _fx_copy_R14Ast__pragmas_t(struct _fx_R14Ast__pragmas_t* src, struct _fx_R14Ast__pragmas_t* dst)
{
    dst->pragma_cpp = src->pragma_cpp;
    FX_COPY_PTR(src->pragma_clibs,
&dst->pragma_clibs); } static void _fx_make_R14Ast__pragmas_t( bool r_pragma_cpp, struct _fx_LT2SR10Ast__loc_t_data_t* r_pragma_clibs, struct _fx_R14Ast__pragmas_t* fx_result) { fx_result->pragma_cpp = r_pragma_cpp; FX_COPY_PTR(r_pragma_clibs, &fx_result->pragma_clibs); } static void _fx_free_R17K_form__kmodule_t(struct _fx_R17K_form__kmodule_t* dst) { fx_free_str(&dst->km_cname); _fx_free_LN14K_form__kexp_t(&dst->km_top); fx_free_list_simple(&dst->km_deps); _fx_free_R14Ast__pragmas_t(&dst->km_pragmas); } static void _fx_copy_R17K_form__kmodule_t(struct _fx_R17K_form__kmodule_t* src, struct _fx_R17K_form__kmodule_t* dst) { dst->km_name = src->km_name; dst->km_idx = src->km_idx; dst->km_toposort_idx = src->km_toposort_idx; fx_copy_str(&src->km_cname, &dst->km_cname); FX_COPY_PTR(src->km_top, &dst->km_top); FX_COPY_PTR(src->km_deps, &dst->km_deps); dst->km_skip = src->km_skip; dst->km_main = src->km_main; _fx_copy_R14Ast__pragmas_t(&src->km_pragmas, &dst->km_pragmas); } static void _fx_make_R17K_form__kmodule_t( struct _fx_R9Ast__id_t* r_km_name, int_ r_km_idx, int_ r_km_toposort_idx, fx_str_t* r_km_cname, struct _fx_LN14K_form__kexp_t_data_t* r_km_top, struct _fx_Li_data_t* r_km_deps, bool r_km_skip, bool r_km_main, struct _fx_R14Ast__pragmas_t* r_km_pragmas, struct _fx_R17K_form__kmodule_t* fx_result) { fx_result->km_name = *r_km_name; fx_result->km_idx = r_km_idx; fx_result->km_toposort_idx = r_km_toposort_idx; fx_copy_str(r_km_cname, &fx_result->km_cname); FX_COPY_PTR(r_km_top, &fx_result->km_top); FX_COPY_PTR(r_km_deps, &fx_result->km_deps); fx_result->km_skip = r_km_skip; fx_result->km_main = r_km_main; _fx_copy_R14Ast__pragmas_t(r_km_pragmas, &fx_result->km_pragmas); } static void _fx_free_LR17K_form__kmodule_t(struct _fx_LR17K_form__kmodule_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LR17K_form__kmodule_t, _fx_free_R17K_form__kmodule_t); } static int _fx_cons_LR17K_form__kmodule_t( struct _fx_R17K_form__kmodule_t* hd, struct _fx_LR17K_form__kmodule_t_data_t* tl, 
/* Tail of _fx_cons_LR17K_form__kmodule_t begun on the previous chunk line. */
bool addref_tl, struct _fx_LR17K_form__kmodule_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LR17K_form__kmodule_t, _fx_copy_R17K_form__kmodule_t);
}
/* option<ctyp>: tag 2 = Some(payload); any other tag has no managed payload.
 * The destructor resets the tag to 0 (presumably "None"/empty — confirm). */
static void _fx_free_Nt6option1N14C_form__ctyp_t(struct _fx_Nt6option1N14C_form__ctyp_t* dst)
{
    switch (dst->tag) {
    case 2:
        _fx_free_N14C_form__ctyp_t(&dst->u.Some);
        break;
    default:
        ;
    }
    dst->tag = 0;
}
static void _fx_copy_Nt6option1N14C_form__ctyp_t(
    struct _fx_Nt6option1N14C_form__ctyp_t* src,
    struct _fx_Nt6option1N14C_form__ctyp_t* dst)
{
    dst->tag = src->tag;
    switch (src->tag) {
    case 2:
        FX_COPY_PTR(src->u.Some, &dst->u.Some);
        break;
    default:
        dst->u = src->u;  /* non-managed payload: plain union assignment */
    }
}
/* (id option, (id, ctyp) list) pair — CTypStruct/CTypUnion payload. */
static void _fx_free_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t(
    struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* dst)
{
    _fx_free_LT2R9Ast__id_tN14C_form__ctyp_t(&dst->t1);
}
static void _fx_copy_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t(
    struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* src,
    struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t(
    struct _fx_Nt6option1R9Ast__id_t* t0,
    struct _fx_LT2R9Ast__id_tN14C_form__ctyp_t_data_t* t1,
    struct _fx_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}
/* ctyp list — destructor and cons (elements shared by FX_COPY_PTR). */
static void _fx_free_LN14C_form__ctyp_t(struct _fx_LN14C_form__ctyp_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LN14C_form__ctyp_t, _fx_free_N14C_form__ctyp_t);
}
static int _fx_cons_LN14C_form__ctyp_t(
    struct _fx_N14C_form__ctyp_t_data_t* hd,
    struct _fx_LN14C_form__ctyp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN14C_form__ctyp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN14C_form__ctyp_t, FX_COPY_PTR);
}
/* (ctyp list, ctyp) pair — CTypFunRawPtr payload: argument types + return type. */
static void _fx_free_T2LN14C_form__ctyp_tN14C_form__ctyp_t(struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* dst)
{
    _fx_free_LN14C_form__ctyp_t(&dst->t0);
    _fx_free_N14C_form__ctyp_t(&dst->t1);
}
static void _fx_copy_T2LN14C_form__ctyp_tN14C_form__ctyp_t(
    struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* src,
    struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2LN14C_form__ctyp_tN14C_form__ctyp_t(
    struct _fx_LN14C_form__ctyp_t_data_t* t0,
    struct _fx_N14C_form__ctyp_t_data_t* t1,
    struct _fx_T2LN14C_form__ctyp_tN14C_form__ctyp_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
}
/* ctyp_attr list cons — elements are simple values (FX_COPY_SIMPLE_BY_PTR). */
static int _fx_cons_LN19C_form__ctyp_attr_t(
    struct _fx_N19C_form__ctyp_attr_t* hd,
    struct _fx_LN19C_form__ctyp_attr_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN19C_form__ctyp_attr_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN19C_form__ctyp_attr_t, FX_COPY_SIMPLE_BY_PTR);
}
/* (ctyp_attr list, ctyp) pair — CTypRawPtr/CTypRawArray payload. */
static void _fx_free_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* dst)
{
    fx_free_list_simple(&dst->t0);
    _fx_free_N14C_form__ctyp_t(&dst->t1);
}
static void _fx_copy_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(
    struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* src,
    struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(
    struct _fx_LN19C_form__ctyp_attr_t_data_t* t0,
    struct _fx_N14C_form__ctyp_t_data_t* t1,
    struct _fx_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
}
/* (int, ctyp) pair — CTypArray payload; make's body continues on the next line. */
static void _fx_free_T2iN14C_form__ctyp_t(struct _fx_T2iN14C_form__ctyp_t* dst)
{
    _fx_free_N14C_form__ctyp_t(&dst->t1);
}
static void _fx_copy_T2iN14C_form__ctyp_t(struct _fx_T2iN14C_form__ctyp_t* src, struct _fx_T2iN14C_form__ctyp_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2iN14C_form__ctyp_t(
    int_ t0,
    struct _fx_N14C_form__ctyp_t_data_t* t1,
    struct _fx_T2iN14C_form__ctyp_t* fx_result)
/* Body of _fx_make_T2iN14C_form__ctyp_t whose signature is on the previous line. */
{
    fx_result->t0 = t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}
/* Destructor for the C-form type variant (ctyp): refcounted; on last release,
 * dispatches on the tag to free the active union payload, frees the cell,
 * and nulls *dst. Tags with no managed payload hit the empty default. */
static void _fx_free_N14C_form__ctyp_t(struct _fx_N14C_form__ctyp_t_data_t** dst)
{
    if (*dst && FX_DECREF((*dst)->rc) == 1) {
        switch ((*dst)->tag) {
        case 13:
            _fx_free_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t(&(*dst)->u.CTypStruct);
            break;
        case 14:
            _fx_free_T2Nt6option1R9Ast__id_tLT2R9Ast__id_tN14C_form__ctyp_t(&(*dst)->u.CTypUnion);
            break;
        case 15:
            _fx_free_T2LN14C_form__ctyp_tN14C_form__ctyp_t(&(*dst)->u.CTypFunRawPtr);
            break;
        case 16:
            _fx_free_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(&(*dst)->u.CTypRawPtr);
            break;
        case 17:
            _fx_free_T2LN19C_form__ctyp_attr_tN14C_form__ctyp_t(&(*dst)->u.CTypRawArray);
            break;
        case 18:
            _fx_free_T2iN14C_form__ctyp_t(&(*dst)->u.CTypArray);
            break;
        case 19:
            _fx_free_N14C_form__ctyp_t(&(*dst)->u.CTypVector);  /* recursive element type */
            break;
        default:
            ;  /* tags 1..12: no heap-managed payload */
        }
        fx_free(*dst);
    }
    *dst = 0;
}
/* (ctyp, loc) pair — free/copy/make; loc is plain data. */
static void _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14C_form__ctyp_t(&dst->t0);
}
static void _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* src,
    struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    dst->t1 = src->t1;
}
static void _fx_make_T2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_N14C_form__ctyp_t_data_t* t0,
    struct _fx_R10Ast__loc_t* t1,
    struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    fx_result->t1 = *t1;
}
/* (id, (ctyp, loc)) pair — CExpIdent payload. */
static void _fx_free_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
    _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t1);
}
static void _fx_copy_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* src,
    struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}
static void _fx_make_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t1,
    struct _fx_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t1, &fx_result->t1);
}
/* (klit, (ctyp, loc)) pair — CExpLit payload. */
static void _fx_free_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14K_form__klit_t(&dst->t0);
    _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t1);
}
static void _fx_copy_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* src,
    struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
    _fx_copy_N14K_form__klit_t(&src->t0, &dst->t0);
    _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}
static void _fx_make_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_N14K_form__klit_t* t0,
    struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t1,
    struct _fx_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
    _fx_copy_N14K_form__klit_t(t0, &fx_result->t0);
    _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t1, &fx_result->t1);
}
/* (cbinary op, cexp, cexp, (ctyp, loc)) 4-tuple — CExpBinary payload;
 * t0 (operator) is plain data. The make function continues on the next line. */
static void _fx_free_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14C_form__cexp_t(&dst->t1);
    _fx_free_N14C_form__cexp_t(&dst->t2);
    _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t3);
}
static void _fx_copy_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src,
    struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    FX_COPY_PTR(src->t2, &dst->t2);
    _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}
static void
/* Constructor for the CExpBinary 4-tuple; 'static void' is on the previous line. */
_fx_make_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_N17C_form__cbinary_t* t0,
    struct _fx_N14C_form__cexp_t_data_t* t1,
    struct _fx_N14C_form__cexp_t_data_t* t2,
    struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t3,
    struct _fx_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    FX_COPY_PTR(t2, &fx_result->t2);
    _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t3, &fx_result->t3);
}
/* (cunary op, cexp, (ctyp, loc)) triple — CExpUnary payload; t0 is plain data. */
static void _fx_free_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14C_form__cexp_t(&dst->t1);
    _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t2);
}
static void _fx_copy_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src,
    struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}
static void _fx_make_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_N16C_form__cunary_t* t0,
    struct _fx_N14C_form__cexp_t_data_t* t1,
    struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t2,
    struct _fx_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t2, &fx_result->t2);
}
/* (cexp, id, (ctyp, loc)) triple — CExpMem / CExpArrow payload; t1 is plain data. */
static void _fx_free_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14C_form__cexp_t(&dst->t0);
    _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t2);
}
static void _fx_copy_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* src,
    struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    dst->t1 = src->t1;
    _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}
static void _fx_make_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_N14C_form__cexp_t_data_t* t0,
    struct _fx_R9Ast__id_t* t1,
    struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t2,
    struct _fx_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    fx_result->t1 = *t1;
    _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t2, &fx_result->t2);
}
/* (cexp, ctyp, loc) triple — CExpCast payload; t2 (loc) is plain data. */
static void _fx_free_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14C_form__cexp_t(&dst->t0);
    _fx_free_N14C_form__ctyp_t(&dst->t1);
}
static void _fx_copy_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* src,
    struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}
static void _fx_make_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_N14C_form__cexp_t_data_t* t0,
    struct _fx_N14C_form__ctyp_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}
/* (cexp, cexp, cexp, (ctyp, loc)) 4-tuple — CExpTernary payload. */
static void _fx_free_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14C_form__cexp_t(&dst->t0);
    _fx_free_N14C_form__cexp_t(&dst->t1);
    _fx_free_N14C_form__cexp_t(&dst->t2);
    _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t3);
}
static void
_fx_copy_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src,
    struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    FX_COPY_PTR(src->t2, &dst->t2);
    _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t3, &dst->t3);
}
static void
_fx_make_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_N14C_form__cexp_t_data_t* t0,
    struct _fx_N14C_form__cexp_t_data_t* t1,
    struct _fx_N14C_form__cexp_t_data_t* t2,
    struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t3,
    struct _fx_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    FX_COPY_PTR(t2, &fx_result->t2);
    _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t3, &fx_result->t3);
}
/* cexp list — destructor and cons (elements shared by FX_COPY_PTR). */
static void _fx_free_LN14C_form__cexp_t(struct _fx_LN14C_form__cexp_t_data_t** dst)
{
    FX_FREE_LIST_IMPL(_fx_LN14C_form__cexp_t, _fx_free_N14C_form__cexp_t);
}
static int _fx_cons_LN14C_form__cexp_t(
    struct _fx_N14C_form__cexp_t_data_t* hd,
    struct _fx_LN14C_form__cexp_t_data_t* tl,
    bool addref_tl,
    struct _fx_LN14C_form__cexp_t_data_t** fx_result)
{
    FX_MAKE_LIST_IMPL(_fx_LN14C_form__cexp_t, FX_COPY_PTR);
}
/* (cexp, cexp list, (ctyp, loc)) triple — CExpCall payload: callee + args;
 * the copy function continues on the next chunk line. */
static void _fx_free_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
    _fx_free_N14C_form__cexp_t(&dst->t0);
    _fx_free_LN14C_form__cexp_t(&dst->t1);
    _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t2);
}
static void _fx_copy_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src, struct
/* Tail of the CExpCall triple copy started on the previous chunk line. */
_fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t2, &dst->t2);
}
static void _fx_make_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_N14C_form__cexp_t_data_t* t0,
    struct _fx_LN14C_form__cexp_t_data_t* t1,
    struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t2,
    struct _fx_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t2, &fx_result->t2);
}
/* (cexp list, (ctyp, loc)) pair — CExpInit payload. */
static void _fx_free_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
    _fx_free_LN14C_form__cexp_t(&dst->t0);
    _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&dst->t1);
}
static void _fx_copy_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* src,
    struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(&src->t1, &dst->t1);
}
static void _fx_make_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
    struct _fx_LN14C_form__cexp_t_data_t* t0,
    struct _fx_T2N14C_form__ctyp_tR10Ast__loc_t* t1,
    struct _fx_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    _fx_copy_T2N14C_form__ctyp_tR10Ast__loc_t(t1, &fx_result->t1);
}
/* Destructor for the C-form expression variant (cexp): refcounted; on last
 * release, dispatches on the tag to free the active union payload, frees the
 * cell, and nulls *dst. */
static void _fx_free_N14C_form__cexp_t(struct _fx_N14C_form__cexp_t_data_t** dst)
{
    if (*dst && FX_DECREF((*dst)->rc) == 1) {
        switch ((*dst)->tag) {
        case 1:
            _fx_free_T2R9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpIdent);
            break;
        case 2:
            _fx_free_T2N14K_form__klit_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpLit);
            break;
        case 3:
            _fx_free_T4N17C_form__cbinary_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(
                &(*dst)->u.CExpBinary);
            break;
        case 4:
            _fx_free_T3N16C_form__cunary_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpUnary);
            break;
        case 5:
            _fx_free_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpMem);
            break;
        case 6:
            _fx_free_T3N14C_form__cexp_tR9Ast__id_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpArrow);
            break;
        case 7:
            _fx_free_T3N14C_form__cexp_tN14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpCast);
            break;
        case 8:
            _fx_free_T4N14C_form__cexp_tN14C_form__cexp_tN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpTernary);
            break;
        case 9:
            _fx_free_T3N14C_form__cexp_tLN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpCall);
            break;
        case 10:
            _fx_free_T2LN14C_form__cexp_tT2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpInit);
            break;
        case 11:
            _fx_free_T2N14C_form__ctyp_tR10Ast__loc_t(&(*dst)->u.CExpTyp);
            break;
        case 12:
            _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CExpCCode);
            break;
        default:
            ;  /* tags with no heap-managed payload */
        }
        fx_free(*dst);
    }
    *dst = 0;
}
/* (cexp option, loc) pair — free/copy/make; loc is plain data. */
static void _fx_free_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t(struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* dst)
{
    _fx_free_Nt6option1N14C_form__cexp_t(&dst->t0);
}
static void _fx_copy_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t(
    struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* src,
    struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* dst)
{
    _fx_copy_Nt6option1N14C_form__cexp_t(&src->t0, &dst->t0);
    dst->t1 = src->t1;
}
static void _fx_make_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t(
    struct _fx_Nt6option1N14C_form__cexp_t* t0,
    struct _fx_R10Ast__loc_t* t1,
    struct _fx_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t* fx_result)
{
    _fx_copy_Nt6option1N14C_form__cexp_t(t0, &fx_result->t0);
    fx_result->t1 = *t1;
}
/* (cstmt list, loc) pair — destructor; copy continues on the next chunk line. */
static void _fx_free_T2LN15C_form__cstmt_tR10Ast__loc_t(struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* dst)
{
    _fx_free_LN15C_form__cstmt_t(&dst->t0);
}
static void
/* Copy for the (cstmt list, loc) pair; 'static void' is on the previous line. */
_fx_copy_T2LN15C_form__cstmt_tR10Ast__loc_t(
    struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* src,
    struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    dst->t1 = src->t1;
}
static void _fx_make_T2LN15C_form__cstmt_tR10Ast__loc_t(
    struct _fx_LN15C_form__cstmt_t_data_t* t0,
    struct _fx_R10Ast__loc_t* t1,
    struct _fx_T2LN15C_form__cstmt_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    fx_result->t1 = *t1;
}
/* (id, cstmt) pair — free/copy/make; id is plain data. */
static void _fx_free_T2R9Ast__id_tN15C_form__cstmt_t(struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* dst)
{
    _fx_free_N15C_form__cstmt_t(&dst->t1);
}
static void _fx_copy_T2R9Ast__id_tN15C_form__cstmt_t(
    struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* src,
    struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* dst)
{
    dst->t0 = src->t0;
    FX_COPY_PTR(src->t1, &dst->t1);
}
static void _fx_make_T2R9Ast__id_tN15C_form__cstmt_t(
    struct _fx_R9Ast__id_t* t0,
    struct _fx_N15C_form__cstmt_t_data_t* t1,
    struct _fx_T2R9Ast__id_tN15C_form__cstmt_t* fx_result)
{
    fx_result->t0 = *t0;
    FX_COPY_PTR(t1, &fx_result->t1);
}
/* (cexp, cstmt, cstmt, loc) 4-tuple — presumably an if/then/else statement
 * payload; t3 (loc) is plain data. */
static void _fx_free_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t(
    struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* dst)
{
    _fx_free_N14C_form__cexp_t(&dst->t0);
    _fx_free_N15C_form__cstmt_t(&dst->t1);
    _fx_free_N15C_form__cstmt_t(&dst->t2);
}
static void _fx_copy_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t(
    struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* src,
    struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    FX_COPY_PTR(src->t2, &dst->t2);
    dst->t3 = src->t3;
}
static void _fx_make_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t(
    struct _fx_N14C_form__cexp_t_data_t* t0,
    struct _fx_N15C_form__cstmt_t_data_t* t1,
    struct _fx_N15C_form__cstmt_t_data_t* t2,
    struct _fx_R10Ast__loc_t* t3,
    struct _fx_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    FX_COPY_PTR(t2, &fx_result->t2);
    fx_result->t3 = *t3;
}
/* 6-tuple (ctyp option, cexp list, cexp option, cexp list, cstmt, loc) —
 * presumably a C 'for' statement payload (init type, init exprs, condition,
 * increments, body, location); t5 (loc) is plain data. */
static void _fx_free_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(
    struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst)
{
    _fx_free_Nt6option1N14C_form__ctyp_t(&dst->t0);
    _fx_free_LN14C_form__cexp_t(&dst->t1);
    _fx_free_Nt6option1N14C_form__cexp_t(&dst->t2);
    _fx_free_LN14C_form__cexp_t(&dst->t3);
    _fx_free_N15C_form__cstmt_t(&dst->t4);
}
static void
_fx_copy_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(
    struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* src,
    struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst)
{
    _fx_copy_Nt6option1N14C_form__ctyp_t(&src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    _fx_copy_Nt6option1N14C_form__cexp_t(&src->t2, &dst->t2);
    FX_COPY_PTR(src->t3, &dst->t3);
    FX_COPY_PTR(src->t4, &dst->t4);
    dst->t5 = src->t5;
}
static void
_fx_make_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(
    struct _fx_Nt6option1N14C_form__ctyp_t* t0,
    struct _fx_LN14C_form__cexp_t_data_t* t1,
    struct _fx_Nt6option1N14C_form__cexp_t* t2,
    struct _fx_LN14C_form__cexp_t_data_t* t3,
    struct _fx_N15C_form__cstmt_t_data_t* t4,
    struct _fx_R10Ast__loc_t* t5,
    struct _fx_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* fx_result)
{
    _fx_copy_Nt6option1N14C_form__ctyp_t(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    _fx_copy_Nt6option1N14C_form__cexp_t(t2, &fx_result->t2);
    FX_COPY_PTR(t3, &fx_result->t3);
    FX_COPY_PTR(t4, &fx_result->t4);
    fx_result->t5 = *t5;
}
/* (cexp, cstmt, loc) triple — presumably a while-statement payload. */
static void _fx_free_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(
    struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst)
{
    _fx_free_N14C_form__cexp_t(&dst->t0);
    _fx_free_N15C_form__cstmt_t(&dst->t1);
}
static void _fx_copy_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(
    struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* src,
    struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}
static void _fx_make_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(
    struct _fx_N14C_form__cexp_t_data_t* t0,
    struct _fx_N15C_form__cstmt_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}
/* (cstmt, cexp, loc) triple — presumably a do-while payload (body first). */
static void _fx_free_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t(
    struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* dst)
{
    _fx_free_N15C_form__cstmt_t(&dst->t0);
    _fx_free_N14C_form__cexp_t(&dst->t1);
}
static void _fx_copy_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t(
    struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* src,
    struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* dst)
{
    FX_COPY_PTR(src->t0, &dst->t0);
    FX_COPY_PTR(src->t1, &dst->t1);
    dst->t2 = src->t2;
}
static void _fx_make_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t(
    struct _fx_N15C_form__cstmt_t_data_t* t0,
    struct _fx_N14C_form__cexp_t_data_t* t1,
    struct _fx_R10Ast__loc_t* t2,
    struct _fx_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t* fx_result)
{
    FX_COPY_PTR(t0, &fx_result->t0);
    FX_COPY_PTR(t1, &fx_result->t1);
    fx_result->t2 = *t2;
}
/* Next destructor's signature continues past the end of this chunk. */
static void _fx_free_T2LN14C_form__cexp_tLN15C_form__cstmt_t(struct
_fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* dst) { _fx_free_LN14C_form__cexp_t(&dst->t0); _fx_free_LN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T2LN14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* src, struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2LN14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_LN14C_form__cexp_t_data_t* t0, struct _fx_LN15C_form__cstmt_t_data_t* t1, struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2LN14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t, _fx_free_T2LN14C_form__cexp_tLN15C_form__cstmt_t); } static int _fx_cons_LT2LN14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2LN14C_form__cexp_tLN15C_form__cstmt_t* hd, struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl, bool addref_tl, struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t, _fx_copy_T2LN14C_form__cexp_tLN15C_form__cstmt_t); } static void _fx_free_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_LT2LN14C_form__cexp_tLN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void 
_fx_make_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_LT2LN14C_form__cexp_tLN15C_form__cstmt_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void _fx_free_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* dst) { _fx_free_N14C_form__ctyp_t(&dst->t0); _fx_free_Nt6option1N14C_form__cexp_t(&dst->t2); } static void _fx_copy_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* src, struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; _fx_copy_Nt6option1N14C_form__cexp_t(&src->t2, &dst->t2); dst->t3 = src->t3; } static void _fx_make_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t( struct _fx_N14C_form__ctyp_t_data_t* t0, struct _fx_R9Ast__id_t* t1, struct _fx_Nt6option1N14C_form__cexp_t* t2, struct _fx_R10Ast__loc_t* t3, struct _fx_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = *t1; _fx_copy_Nt6option1N14C_form__cexp_t(t2, &fx_result->t2); fx_result->t3 = *t3; } static void _fx_free_T2N14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* dst) { _fx_free_N14C_form__cexp_t(&dst->t0); _fx_free_LN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T2N14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* src, struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void 
_fx_make_T2N14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_N14C_form__cexp_t_data_t* t0, struct _fx_LN15C_form__cstmt_t_data_t* t1, struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2N14C_form__cexp_tLN15C_form__cstmt_t(struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t, _fx_free_T2N14C_form__cexp_tLN15C_form__cstmt_t); } static int _fx_cons_LT2N14C_form__cexp_tLN15C_form__cstmt_t( struct _fx_T2N14C_form__cexp_tLN15C_form__cstmt_t* hd, struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* tl, bool addref_tl, struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t, _fx_copy_T2N14C_form__cexp_tLN15C_form__cstmt_t); } static void _fx_free_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { _fx_free_LT2N14C_form__cexp_tLN15C_form__cstmt_t(&dst->t0); _fx_free_LN15C_form__cstmt_t(&dst->t1); } static void _fx_copy_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* src, struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); dst->t2 = src->t2; } static void _fx_make_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t( struct _fx_LT2N14C_form__cexp_tLN15C_form__cstmt_t_data_t* t0, struct _fx_LN15C_form__cstmt_t_data_t* t1, struct _fx_R10Ast__loc_t* t2, struct _fx_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); fx_result->t2 = *t2; } static void 
_fx_free_N15C_form__cstmt_t(struct _fx_N15C_form__cstmt_t_data_t** dst) { if (*dst && FX_DECREF((*dst)->rc) == 1) { switch ((*dst)->tag) { case 2: _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CComment); break; case 3: _fx_free_N14C_form__cexp_t(&(*dst)->u.CExp); break; case 6: _fx_free_T2Nt6option1N14C_form__cexp_tR10Ast__loc_t(&(*dst)->u.CStmtReturn); break; case 7: _fx_free_T2LN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtBlock); break; case 8: _fx_free_T2R9Ast__id_tN15C_form__cstmt_t(&(*dst)->u.CStmtSync); break; case 9: _fx_free_T4N14C_form__cexp_tN15C_form__cstmt_tN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtIf); break; case 12: _fx_free_T6Nt6option1N14C_form__ctyp_tLN14C_form__cexp_tNt6option1N14C_form__cexp_tLN14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t( &(*dst)->u.CStmtFor); break; case 13: _fx_free_T3N14C_form__cexp_tN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtWhile); break; case 14: _fx_free_T3N15C_form__cstmt_tN14C_form__cexp_tR10Ast__loc_t(&(*dst)->u.CStmtDoWhile); break; case 15: _fx_free_T3N14C_form__cexp_tLT2LN14C_form__cexp_tLN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CStmtSwitch); break; case 16: _fx_free_T4N14C_form__ctyp_tR9Ast__id_tNt6option1N14C_form__cexp_tR10Ast__loc_t(&(*dst)->u.CDefVal); break; case 17: _fx_free_rR17C_form__cdeffun_t(&(*dst)->u.CDefFun); break; case 18: _fx_free_rR17C_form__cdeftyp_t(&(*dst)->u.CDefTyp); break; case 21: _fx_free_rR18C_form__cdefenum_t(&(*dst)->u.CDefEnum); break; case 22: _fx_free_rR23C_form__cdefinterface_t(&(*dst)->u.CDefInterface); break; case 23: _fx_free_rR19C_form__cdefmacro_t(&(*dst)->u.CMacroDef); break; case 25: _fx_free_T3LT2N14C_form__cexp_tLN15C_form__cstmt_tLN15C_form__cstmt_tR10Ast__loc_t(&(*dst)->u.CMacroIf); break; case 26: _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CMacroInclude); break; case 27: _fx_free_T2SR10Ast__loc_t(&(*dst)->u.CMacroPragma); break; default: ; } fx_free(*dst); } *dst = 0; } static void _fx_free_R17C_form__cmodule_t(struct _fx_R17C_form__cmodule_t* dst) { 
fx_free_str(&dst->cmod_cname); _fx_free_LN15C_form__cstmt_t(&dst->cmod_ccode); _fx_free_R14Ast__pragmas_t(&dst->cmod_pragmas); } static void _fx_copy_R17C_form__cmodule_t(struct _fx_R17C_form__cmodule_t* src, struct _fx_R17C_form__cmodule_t* dst) { dst->cmod_name = src->cmod_name; fx_copy_str(&src->cmod_cname, &dst->cmod_cname); FX_COPY_PTR(src->cmod_ccode, &dst->cmod_ccode); dst->cmod_main = src->cmod_main; dst->cmod_recompile = src->cmod_recompile; dst->cmod_skip = src->cmod_skip; _fx_copy_R14Ast__pragmas_t(&src->cmod_pragmas, &dst->cmod_pragmas); } static void _fx_make_R17C_form__cmodule_t( struct _fx_R9Ast__id_t* r_cmod_name, fx_str_t* r_cmod_cname, struct _fx_LN15C_form__cstmt_t_data_t* r_cmod_ccode, bool r_cmod_main, bool r_cmod_recompile, bool r_cmod_skip, struct _fx_R14Ast__pragmas_t* r_cmod_pragmas, struct _fx_R17C_form__cmodule_t* fx_result) { fx_result->cmod_name = *r_cmod_name; fx_copy_str(r_cmod_cname, &fx_result->cmod_cname); FX_COPY_PTR(r_cmod_ccode, &fx_result->cmod_ccode); fx_result->cmod_main = r_cmod_main; fx_result->cmod_recompile = r_cmod_recompile; fx_result->cmod_skip = r_cmod_skip; _fx_copy_R14Ast__pragmas_t(r_cmod_pragmas, &fx_result->cmod_pragmas); } static void _fx_free_LR17C_form__cmodule_t(struct _fx_LR17C_form__cmodule_t_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LR17C_form__cmodule_t, _fx_free_R17C_form__cmodule_t); } static int _fx_cons_LR17C_form__cmodule_t( struct _fx_R17C_form__cmodule_t* hd, struct _fx_LR17C_form__cmodule_t_data_t* tl, bool addref_tl, struct _fx_LR17C_form__cmodule_t_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LR17C_form__cmodule_t, _fx_copy_R17C_form__cmodule_t); } static void _fx_free_T2LN14Lexer__token_tB(struct _fx_T2LN14Lexer__token_tB* dst) { _fx_free_LN14Lexer__token_t(&dst->t0); } static void _fx_copy_T2LN14Lexer__token_tB(struct _fx_T2LN14Lexer__token_tB* src, struct _fx_T2LN14Lexer__token_tB* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LN14Lexer__token_tB( struct 
_fx_LN14Lexer__token_t_data_t* t0, bool t1, struct _fx_T2LN14Lexer__token_tB* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = t1; } static void _fx_free_T2SB(struct _fx_T2SB* dst) { fx_free_str(&dst->t0); } static void _fx_copy_T2SB(struct _fx_T2SB* src, struct _fx_T2SB* dst) { fx_copy_str(&src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2SB(fx_str_t* t0, bool t1, struct _fx_T2SB* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_result->t1 = t1; } static void _fx_free_LT2SB(struct _fx_LT2SB_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2SB, _fx_free_T2SB); } static int _fx_cons_LT2SB(struct _fx_T2SB* hd, struct _fx_LT2SB_data_t* tl, bool addref_tl, struct _fx_LT2SB_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2SB, _fx_copy_T2SB); } static void _fx_free_T2SLS(struct _fx_T2SLS* dst) { fx_free_str(&dst->t0); _fx_free_LS(&dst->t1); } static void _fx_copy_T2SLS(struct _fx_T2SLS* src, struct _fx_T2SLS* dst) { fx_copy_str(&src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2SLS(fx_str_t* t0, struct _fx_LS_data_t* t1, struct _fx_T2SLS* fx_result) { fx_copy_str(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_Ta2LS(struct _fx_Ta2LS* dst) { _fx_free_LS(&dst->t0); _fx_free_LS(&dst->t1); } static void _fx_copy_Ta2LS(struct _fx_Ta2LS* src, struct _fx_Ta2LS* dst) { FX_COPY_PTR(src->t0, &dst->t0); FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_Ta2LS(struct _fx_LS_data_t* t0, struct _fx_LS_data_t* t1, struct _fx_Ta2LS* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_T2iLi(struct _fx_T2iLi* dst) { fx_free_list_simple(&dst->t1); } static void _fx_copy_T2iLi(struct _fx_T2iLi* src, struct _fx_T2iLi* dst) { dst->t0 = src->t0; FX_COPY_PTR(src->t1, &dst->t1); } static void _fx_make_T2iLi(int_ t0, struct _fx_Li_data_t* t1, struct _fx_T2iLi* fx_result) { fx_result->t0 = t0; FX_COPY_PTR(t1, &fx_result->t1); } static void _fx_free_LT2iLi(struct 
_fx_LT2iLi_data_t** dst) { FX_FREE_LIST_IMPL(_fx_LT2iLi, _fx_free_T2iLi); } static int _fx_cons_LT2iLi( struct _fx_T2iLi* hd, struct _fx_LT2iLi_data_t* tl, bool addref_tl, struct _fx_LT2iLi_data_t** fx_result) { FX_MAKE_LIST_IMPL(_fx_LT2iLi, _fx_copy_T2iLi); } static void _fx_free_rLi(struct _fx_rLi_data_t** dst) { FX_FREE_REF_IMPL(_fx_rLi, fx_free_list_simple); } static int _fx_make_rLi(struct _fx_Li_data_t* arg, struct _fx_rLi_data_t** fx_result) { FX_MAKE_REF_IMPL(_fx_rLi, FX_COPY_PTR); } static void _fx_free_T3BBS(struct _fx_T3BBS* dst) { fx_free_str(&dst->t2); } static void _fx_copy_T3BBS(struct _fx_T3BBS* src, struct _fx_T3BBS* dst) { dst->t0 = src->t0; dst->t1 = src->t1; fx_copy_str(&src->t2, &dst->t2); } static void _fx_make_T3BBS(bool t0, bool t1, fx_str_t* t2, struct _fx_T3BBS* fx_result) { fx_result->t0 = t0; fx_result->t1 = t1; fx_copy_str(t2, &fx_result->t2); } static void _fx_free_T2LR17K_form__kmodule_tB(struct _fx_T2LR17K_form__kmodule_tB* dst) { _fx_free_LR17K_form__kmodule_t(&dst->t0); } static void _fx_copy_T2LR17K_form__kmodule_tB( struct _fx_T2LR17K_form__kmodule_tB* src, struct _fx_T2LR17K_form__kmodule_tB* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LR17K_form__kmodule_tB( struct _fx_LR17K_form__kmodule_t_data_t* t0, bool t1, struct _fx_T2LR17K_form__kmodule_tB* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = t1; } static void _fx_free_Ta9S(struct _fx_Ta9S* dst) { fx_free_str(&dst->t0); fx_free_str(&dst->t1); fx_free_str(&dst->t2); fx_free_str(&dst->t3); fx_free_str(&dst->t4); fx_free_str(&dst->t5); fx_free_str(&dst->t6); fx_free_str(&dst->t7); fx_free_str(&dst->t8); } static void _fx_copy_Ta9S(struct _fx_Ta9S* src, struct _fx_Ta9S* dst) { fx_copy_str(&src->t0, &dst->t0); fx_copy_str(&src->t1, &dst->t1); fx_copy_str(&src->t2, &dst->t2); fx_copy_str(&src->t3, &dst->t3); fx_copy_str(&src->t4, &dst->t4); fx_copy_str(&src->t5, &dst->t5); fx_copy_str(&src->t6, &dst->t6); fx_copy_str(&src->t7, 
&dst->t7); fx_copy_str(&src->t8, &dst->t8); } static void _fx_make_Ta9S( fx_str_t* t0, fx_str_t* t1, fx_str_t* t2, fx_str_t* t3, fx_str_t* t4, fx_str_t* t5, fx_str_t* t6, fx_str_t* t7, fx_str_t* t8, struct _fx_Ta9S* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_copy_str(t1, &fx_result->t1); fx_copy_str(t2, &fx_result->t2); fx_copy_str(t3, &fx_result->t3); fx_copy_str(t4, &fx_result->t4); fx_copy_str(t5, &fx_result->t5); fx_copy_str(t6, &fx_result->t6); fx_copy_str(t7, &fx_result->t7); fx_copy_str(t8, &fx_result->t8); } static void _fx_free_Ta2S(struct _fx_Ta2S* dst) { fx_free_str(&dst->t0); fx_free_str(&dst->t1); } static void _fx_copy_Ta2S(struct _fx_Ta2S* src, struct _fx_Ta2S* dst) { fx_copy_str(&src->t0, &dst->t0); fx_copy_str(&src->t1, &dst->t1); } static void _fx_make_Ta2S(fx_str_t* t0, fx_str_t* t1, struct _fx_Ta2S* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_copy_str(t1, &fx_result->t1); } static void _fx_free_Ta3S(struct _fx_Ta3S* dst) { fx_free_str(&dst->t0); fx_free_str(&dst->t1); fx_free_str(&dst->t2); } static void _fx_copy_Ta3S(struct _fx_Ta3S* src, struct _fx_Ta3S* dst) { fx_copy_str(&src->t0, &dst->t0); fx_copy_str(&src->t1, &dst->t1); fx_copy_str(&src->t2, &dst->t2); } static void _fx_make_Ta3S(fx_str_t* t0, fx_str_t* t1, fx_str_t* t2, struct _fx_Ta3S* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_copy_str(t1, &fx_result->t1); fx_copy_str(t2, &fx_result->t2); } static void _fx_free_Ta4S(struct _fx_Ta4S* dst) { fx_free_str(&dst->t0); fx_free_str(&dst->t1); fx_free_str(&dst->t2); fx_free_str(&dst->t3); } static void _fx_copy_Ta4S(struct _fx_Ta4S* src, struct _fx_Ta4S* dst) { fx_copy_str(&src->t0, &dst->t0); fx_copy_str(&src->t1, &dst->t1); fx_copy_str(&src->t2, &dst->t2); fx_copy_str(&src->t3, &dst->t3); } static void _fx_make_Ta4S(fx_str_t* t0, fx_str_t* t1, fx_str_t* t2, fx_str_t* t3, struct _fx_Ta4S* fx_result) { fx_copy_str(t0, &fx_result->t0); fx_copy_str(t1, &fx_result->t1); fx_copy_str(t2, &fx_result->t2); fx_copy_str(t3, 
&fx_result->t3); } static void _fx_free_T5BBLSBS(struct _fx_T5BBLSBS* dst) { _fx_free_LS(&dst->t2); fx_free_str(&dst->t4); } static void _fx_copy_T5BBLSBS(struct _fx_T5BBLSBS* src, struct _fx_T5BBLSBS* dst) { dst->t0 = src->t0; dst->t1 = src->t1; FX_COPY_PTR(src->t2, &dst->t2); dst->t3 = src->t3; fx_copy_str(&src->t4, &dst->t4); } static void _fx_make_T5BBLSBS(bool t0, bool t1, struct _fx_LS_data_t* t2, bool t3, fx_str_t* t4, struct _fx_T5BBLSBS* fx_result) { fx_result->t0 = t0; fx_result->t1 = t1; FX_COPY_PTR(t2, &fx_result->t2); fx_result->t3 = t3; fx_copy_str(t4, &fx_result->t4); } static void _fx_free_T5BBLSBLS(struct _fx_T5BBLSBLS* dst) { _fx_free_LS(&dst->t2); _fx_free_LS(&dst->t4); } static void _fx_copy_T5BBLSBLS(struct _fx_T5BBLSBLS* src, struct _fx_T5BBLSBLS* dst) { dst->t0 = src->t0; dst->t1 = src->t1; FX_COPY_PTR(src->t2, &dst->t2); dst->t3 = src->t3; FX_COPY_PTR(src->t4, &dst->t4); } static void _fx_make_T5BBLSBLS( bool t0, bool t1, struct _fx_LS_data_t* t2, bool t3, struct _fx_LS_data_t* t4, struct _fx_T5BBLSBLS* fx_result) { fx_result->t0 = t0; fx_result->t1 = t1; FX_COPY_PTR(t2, &fx_result->t2); fx_result->t3 = t3; FX_COPY_PTR(t4, &fx_result->t4); } static void _fx_free_T2LR17C_form__cmodule_tB(struct _fx_T2LR17C_form__cmodule_tB* dst) { _fx_free_LR17C_form__cmodule_t(&dst->t0); } static void _fx_copy_T2LR17C_form__cmodule_tB( struct _fx_T2LR17C_form__cmodule_tB* src, struct _fx_T2LR17C_form__cmodule_tB* dst) { FX_COPY_PTR(src->t0, &dst->t0); dst->t1 = src->t1; } static void _fx_make_T2LR17C_form__cmodule_tB( struct _fx_LR17C_form__cmodule_t_data_t* t0, bool t1, struct _fx_T2LR17C_form__cmodule_tB* fx_result) { FX_COPY_PTR(t0, &fx_result->t0); fx_result->t1 = t1; } _fx_N14Lexer__token_t _fx_g14Compiler__FROM = { 20 }; _fx_N14Lexer__token_t _fx_g19Compiler__SEMICOLON = { 59 }; _fx_N14Lexer__token_t _fx_g19Compiler__PP_DEFINE = { 107 }; int _FX_EXN_E30Compiler__CumulativeParseError = 0; _fx_N20Compiler__msgcolor_t _fx_g16Compiler__MsgRed = { 1 }; 
_fx_N20Compiler__msgcolor_t _fx_g18Compiler__MsgGreen = { 2 }; _fx_N20Compiler__msgcolor_t _fx_g17Compiler__MsgBlue = { 3 }; bool _fx_g21Compiler__iscolorterm; fx_str_t _fx_g15Compiler__error = {0}; FX_EXTERN_C int _fx_F4joinS2SLS(fx_str_t* sep_0, struct _fx_LS_data_t* strs_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int_ _fx_M6StringFM4findi3SSi(fx_str_t* s, fx_str_t* part, int_ from_pos, void* fx_fv); FX_EXTERN_C int _fx_M3SysFM9colortermB0(bool* fx_result, void* fx_fv); FX_EXTERN_C_VAL(struct _fx_R18Options__options_t _fx_g12Options__opt) FX_EXTERN_C int _fx_M8FilenameFM8basenameS1S(fx_str_t* path_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8FilenameFM16remove_extensionS1S(fx_str_t* path_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C bool _fx_F6__eq__B2SS(fx_str_t* a, fx_str_t* b, void* fx_fv); FX_EXTERN_C void _fx_M5LexerFM5IDENTN14Lexer__token_t2BS(bool arg0, fx_str_t* arg1, struct _fx_N14Lexer__token_t* fx_result); FX_EXTERN_C void _fx_M5LexerFM6IMPORTN14Lexer__token_t1B(bool arg0, struct _fx_N14Lexer__token_t* fx_result); FX_EXTERN_C void _fx_M5LexerFM4STARN14Lexer__token_t1B(bool arg0, struct _fx_N14Lexer__token_t* fx_result); FX_EXTERN_C void _fx_M3AstFM7LitBoolN10Ast__lit_t1B(bool arg0, struct _fx_N10Ast__lit_t* fx_result); FX_EXTERN_C void _fx_M3AstFM6LitIntN10Ast__lit_t1l(int64_t arg0, struct _fx_N10Ast__lit_t* fx_result); FX_EXTERN_C void _fx_M3AstFM9LitStringN10Ast__lit_t1S(fx_str_t* arg0, struct _fx_N10Ast__lit_t* fx_result); FX_EXTERN_C void _fx_M5LexerFM7LITERALN14Lexer__token_t1N10Ast__lit_t( struct _fx_N10Ast__lit_t* arg0, struct _fx_N14Lexer__token_t* fx_result); FX_EXTERN_C int _fx_M3SysFM7getpathLS1S(fx_str_t* name_0, struct _fx_LS_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8FilenameFM6getcwdS0(fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C_VAL(struct _fx_LS_data_t* _fx_g9Sys__argv) FX_EXTERN_C int _fx_M8FilenameFM9normalizeS2SS(fx_str_t* dir_0, fx_str_t* fname_0, fx_str_t* fx_result, void* fx_fv); 
FX_EXTERN_C int _fx_M8FilenameFM7dirnameS1S(fx_str_t* path_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C_VAL(int_ _fx_g15__ficus_major__) FX_EXTERN_C int _fx_F6stringS1i(int_ a, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C_VAL(int_ _fx_g15__ficus_minor__) FX_EXTERN_C int _fx_M8FilenameFM6existsB1S(fx_str_t* name, bool* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM6get_idRM4id_t1S(fx_str_t* s_0, struct _fx_R9Ast__id_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM11find_modulei2RM4id_tS( struct _fx_R9Ast__id_t* mname_0, fx_str_t* mfname_0, int_* fx_result, void* fx_fv); FX_EXTERN_C_VAL(fx_arr_t _fx_g16Ast__all_modules) FX_EXTERN_C int _fx_M6ParserFM5parseB3iLN14Lexer__token_tLS( int_ m_idx_0, struct _fx_LN14Lexer__token_t_data_t* preamble_0, struct _fx_LS_data_t* inc_dirs_0, bool* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM10get_moduleN16Ast__defmodule_t1i( int_ m_0, struct _fx_N16Ast__defmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C_VAL(int _FX_EXN_E22LexerUtils__LexerError) FX_EXTERN_C void _fx_F12print_stringv1S(fx_str_t* a, void* fx_fv); FX_EXTERN_C_VAL(int _FX_EXN_E18Parser__ParseError) FX_EXTERN_C int _fx_M3AstFM6stringS1RM5loc_t(struct _fx_R10Ast__loc_t* loc_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_F6stringS1E(fx_exn_t* a, fx_str_t* fx_result, void* fx_fv); static int _fx_M8CompilerFM3dfsv5iLiA1LiA1BrLi( int_ i_0, struct _fx_Li_data_t* visited_0, fx_arr_t* graph_0, fx_arr_t* processed_0, struct _fx_rLi_data_t* result_ref_0, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM15get_module_nameRM4id_t1i(int_ m_0, struct _fx_R9Ast__id_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM2ppS1RM4id_t(struct _fx_R9Ast__id_t* i_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_F9make_FailE1S(fx_str_t* arg0, fx_exn_t* fx_result); FX_EXTERN_C int _fx_M3SysFM5mkdirB2Si(fx_str_t* name, int_ permissions, bool* fx_result, void* fx_fv); FX_EXTERN_C_VAL(bool _fx_g10Sys__win32) FX_EXTERN_C int 
_fx_M8K_mangleFM12mangle_mnameS1S(fx_str_t* m_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M4K_ppFM16pp_top_to_stringS1LN14K_form__kexp_t( struct _fx_LN14K_form__kexp_t_data_t* code_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M4FileFM9read_utf8S1S(fx_str_t* fname, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C_VAL(int FX_EXN_IOError) FX_EXTERN_C_VAL(int FX_EXN_FileOpenError) FX_EXTERN_C int _fx_M4FileFM10write_utf8v2SS(fx_str_t* fname, fx_str_t* text, void* fx_fv); FX_EXTERN_C int _fx_M3SysFM6removev1S(fx_str_t* name, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM10pr_verbosev1S(fx_str_t* str_0, void* fx_fv); FX_EXTERN_C int _fx_M6K_formFM9KExpCCodeN14K_form__kexp_t2ST2N14K_form__ktyp_tR10Ast__loc_t( fx_str_t* arg0, struct _fx_T2N14K_form__ktyp_tR10Ast__loc_t* arg1, struct _fx_N14K_form__kexp_t_data_t** fx_result); FX_EXTERN_C_VAL(struct _fx_LE_data_t* _fx_g21Ast__all_compile_errs) FX_EXTERN_C int _fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, bool initial_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M10K_annotateFM14annotate_typesLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M13K_copy_n_skipFM9copy_someLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M15K_remove_unusedFM21remove_unused_by_mainLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8K_mangleFM10mangle_allLR17K_form__kmodule_t2LR17K_form__kmodule_tB( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, bool final_mode_0, struct _fx_LR17K_form__kmodule_t_data_t** 
fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8K_mangleFM13mangle_localsLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8K_mangleFM12demangle_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M13K_lift_simpleFM4liftLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M9K_tailrecFM17tailrec2loops_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M10K_loop_invFM18move_loop_invs_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M13K_optim_matopFM13optimize_gemmLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8K_inlineFM11inline_someLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M9K_flattenFM11flatten_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M12K_fuse_loopsFM14fuse_loops_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int 
_fx_M10K_fast_idxFM23optimize_idx_checks_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M15K_cfold_dealiasFM13cfold_dealiasLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M18K_nothrow_wrappersFM25make_wrappers_for_nothrowLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M10K_freevarsFM21mutable_freevars2refsLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M11K_declosureFM13declosure_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M6K_liftFM8lift_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M8K_inlineFM24find_recursive_funcs_allLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C_VAL(struct _fx_FPS1B _fx_g11Sys__osname) FX_EXTERN_C_VAL(bool _fx_g9Sys__unix) FX_EXTERN_C int _fx_M3SysFM6getenvS1S(fx_str_t* name, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C_VAL(struct _fx_R9Ast__id_t _fx_g9Ast__noid) FX_EXTERN_C int _fx_M4C_ppFM20pprint_top_to_stringS1LN15C_form__cstmt_t( struct _fx_LN15C_form__cstmt_t_data_t* code_0, fx_str_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M3SysFM7commandi1S(fx_str_t* cmd, int_* fx_result, void* fx_fv); FX_EXTERN_C 
/* ==========================================================================
 * Auto-generated C code emitted by the Ficus compiler ("Compiler" module).
 * NOTE(review): this is build output — prefer regenerating from the .fx
 * sources over hand-editing.  Comments below are annotation only.
 * This span: tail of the forward declarations, definitions of the
 * CumulativeParseError exception globals, and small helpers over the
 * runtime linked-list types (_fx_LS appears to hold fx_str_t elements,
 * _fx_Li int_ elements — inferred from usage; confirm in runtime headers).
 * ========================================================================== */
int _fx_M3AstFM8init_allv0(void* fx_fv); FX_EXTERN_C_VAL(fx_exn_t _fx_E30Compiler__CumulativeParseErrorv) FX_EXTERN_C_VAL(struct _fx_Li_data_t* _fx_g23Ast__all_modules_sorted) FX_EXTERN_C int _fx_M6Ast_ppFM10pprint_modv1N16Ast__defmodule_t(struct _fx_N16Ast__defmodule_t_data_t* dm_0, void* fx_fv); FX_EXTERN_C int _fx_M13Ast_typecheckFM9check_modv1i(int_ m_idx_0, void* fx_fv); FX_EXTERN_C int _fx_M6K_formFM13init_all_idksv0(void* fx_fv); FX_EXTERN_C int _fx_M11K_normalizeFM21normalize_all_modulesLR17K_form__kmodule_t1Li( struct _fx_Li_data_t* modules_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M4K_ppFM8pp_kmodsv1LR17K_form__kmodule_t(struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, void* fx_fv); FX_EXTERN_C int _fx_M6C_formFM13init_all_idcsv0(void* fx_fv); FX_EXTERN_C int _fx_M9C_gen_stdFM14init_std_namesv0(void* fx_fv); FX_EXTERN_C int _fx_M10C_gen_codeFM13gen_ccode_allLR17C_form__cmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17C_form__cmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M20C_post_rename_localsFM13rename_localsLR17C_form__cmodule_t1LR17C_form__cmodule_t( struct _fx_LR17C_form__cmodule_t_data_t* cmods_0, struct _fx_LR17C_form__cmodule_t_data_t** fx_result, void* fx_fv); FX_EXTERN_C int _fx_M19C_post_adjust_declsFM12adjust_declsR17C_form__cmodule_t1R17C_form__cmodule_t( struct _fx_R17C_form__cmodule_t* cmod_0, struct _fx_R17C_form__cmodule_t* fx_result, void* fx_fv); FX_EXTERN_C int _fx_M3AstFM17print_compile_errv1E(fx_exn_t* err_0, void* fx_fv); FX_EXTERN_C_VAL(int _FX_EXN_E4Fail) FX_EXTERN_C_VAL(int _FX_EXN_E17Ast__CompileError)
/* Definitions (not declarations) of the CumulativeParseError exception
 * info/value objects, zero-initialized; filled in by module init code
 * elsewhere (not visible in this chunk). */
fx_exn_info_t _fx_E30Compiler__CumulativeParseError_info = {0}; fx_exn_t _fx_E30Compiler__CumulativeParseErrorv = {0};
/* Compiler.length for exception-lists and string-lists: thin wrappers over
 * the runtime list-length helper. */
FX_EXTERN_C int_ _fx_M8CompilerFM6lengthi1LE(struct _fx_LE_data_t* l, void* fx_fv) { return fx_list_length(l); } FX_EXTERN_C int_ _fx_M8CompilerFM6lengthi1LS(struct _fx_LS_data_t* l, void* fx_fv) {
return fx_list_length(l); }
/* Compiler.link2 (token-list and string-list instances): destructive-style
 * list concatenation via the runtime fx_link_lists. */
FX_EXTERN_C void _fx_M8CompilerFM5link2LN14Lexer__token_t2LN14Lexer__token_tLN14Lexer__token_t( struct _fx_LN14Lexer__token_t_data_t* l1, struct _fx_LN14Lexer__token_t_data_t* l2, struct _fx_LN14Lexer__token_t_data_t** fx_result, void* fx_fv) { fx_link_lists(l1, l2, fx_result); } FX_EXTERN_C void _fx_M8CompilerFM5link2LS2LSLS( struct _fx_LS_data_t* l1, struct _fx_LS_data_t* l2, struct _fx_LS_data_t** fx_result, void* fx_fv) { fx_link_lists(l1, l2, fx_result); }
/* Compiler.__add__ on string lists: if either side is empty, shares the other
 * (refcounted copy of the head pointer); otherwise copies l1 node-by-node
 * and links the fresh copy onto l2 so l2's spine is shared, not copied. */
FX_EXTERN_C int _fx_M8CompilerFM7__add__LS2LSLS( struct _fx_LS_data_t* l1_0, struct _fx_LS_data_t* l2_0, struct _fx_LS_data_t** fx_result, void* fx_fv) { int fx_status = 0; if (l1_0 == 0) { FX_COPY_PTR(l2_0, fx_result); } else if (l2_0 == 0) { FX_COPY_PTR(l1_0, fx_result); } else { _fx_LS v_0 = 0; _fx_LS lstend_0 = 0; _fx_LS lst_0 = l1_0; for (; lst_0; lst_0 = lst_0->tl) { fx_str_t* x_0 = &lst_0->hd; _fx_LS node_0 = 0; FX_CALL(_fx_cons_LS(x_0, 0, false, &node_0), _fx_catch_0); FX_LIST_APPEND(v_0, lstend_0, node_0); _fx_catch_0: ; FX_CHECK_EXN(_fx_catch_1); } _fx_M8CompilerFM5link2LS2LSLS(v_0, l2_0, fx_result, 0); _fx_catch_1: ; if (v_0) { _fx_free_LS(&v_0); } } return fx_status; }
/* Compiler.string on a string: identity copy. */
FX_EXTERN_C int _fx_M8CompilerFM6stringS1S(fx_str_t* a_0, fx_str_t* fx_result, void* fx_fv) { int fx_status = 0; fx_copy_str(a_0, fx_result); return fx_status; }
/* Compiler.array: converts a list of cmodule_t records into a 1-D runtime
 * array, copying each element with the generated per-type copy function. */
FX_EXTERN_C int _fx_M8CompilerFM5arrayA1R17C_form__cmodule_t1LR17C_form__cmodule_t( struct _fx_LR17C_form__cmodule_t_data_t* l_0, fx_arr_t* fx_result, void* fx_fv) { int fx_status = 0; _fx_R17C_form__cmodule_t* dstptr_0 = 0; _fx_LR17C_form__cmodule_t lst_0 = l_0; int_ len_0 = fx_list_length(lst_0); { const int_ shape_0[] = { len_0 }; FX_CALL( fx_make_arr(1, shape_0, sizeof(_fx_R17C_form__cmodule_t), (fx_free_t)_fx_free_R17C_form__cmodule_t, (fx_copy_t)_fx_copy_R17C_form__cmodule_t, 0, fx_result), _fx_cleanup); } dstptr_0 = (_fx_R17C_form__cmodule_t*)fx_result->data; for (; lst_0; lst_0 = lst_0->tl, dstptr_0++) { _fx_R17C_form__cmodule_t* x_0 =
/* (continuation of Compiler.array: copy each list element into the array) */
&lst_0->hd; _fx_copy_R17C_form__cmodule_t(x_0, dstptr_0); } _fx_cleanup: ; return fx_status; }
/* Compiler.rev on string lists: folds the input into a new list by consing
 * each element onto the accumulator, which reverses element order. */
FX_EXTERN_C int _fx_M8CompilerFM3revLS1LS(struct _fx_LS_data_t* l_0, struct _fx_LS_data_t** fx_result, void* fx_fv) { _fx_LS __fold_result___0 = 0; int fx_status = 0; _fx_LS lst_0 = l_0; for (; lst_0; lst_0 = lst_0->tl) { _fx_LS r_0 = 0; fx_str_t* a_0 = &lst_0->hd; FX_COPY_PTR(__fold_result___0, &r_0); FX_CALL(_fx_cons_LS(a_0, r_0, false, &r_0), _fx_catch_0); _fx_free_LS(&__fold_result___0); FX_COPY_PTR(r_0, &__fold_result___0); _fx_catch_0: ; if (r_0) { _fx_free_LS(&r_0); } FX_CHECK_EXN(_fx_cleanup); } FX_COPY_PTR(__fold_result___0, fx_result); _fx_cleanup: ; if (__fold_result___0) { _fx_free_LS(&__fold_result___0); } return fx_status; }
/* Compiler.join: delegates to the generic runtime string-list join. */
FX_EXTERN_C int _fx_M8CompilerFM4joinS2SLS(fx_str_t* sep_0, struct _fx_LS_data_t* strs_0, fx_str_t* fx_result, void* fx_fv) { int fx_status = 0; FX_CALL(_fx_F4joinS2SLS(sep_0, strs_0, fx_result, 0), _fx_cleanup); _fx_cleanup: ; return fx_status; }
/* Compiler.contains: substring test via String.find >= 0. */
FX_EXTERN_C int _fx_M8CompilerFM8containsB2SS(fx_str_t* s_0, fx_str_t* substr_0, bool* fx_result, void* fx_fv) { int fx_status = 0; int_ v_0 = _fx_M6StringFM4findi3SSi(s_0, substr_0, 0, 0); *fx_result = v_0 >= 0; return fx_status; }
/* Compiler.clrmsg: wraps a message in terminal color escapes when the
 * terminal supports color; tag selects the escape per msgcolor variant.
 * NOTE(review): the FX_MAKE_STR("") literals below look empty — the ANSI
 * escape bytes were likely stripped during text extraction of this chunk;
 * verify against the real generated file before relying on them. */
FX_EXTERN_C int _fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS( struct _fx_N20Compiler__msgcolor_t* clr_0, fx_str_t* msg_0, fx_str_t* fx_result, void* fx_fv) { fx_str_t esc_0 = {0}; int fx_status = 0; if (_fx_g21Compiler__iscolorterm) { int tag_0 = clr_0->tag; if (tag_0 == 1) { fx_str_t slit_0 = FX_MAKE_STR(""); fx_copy_str(&slit_0, &esc_0); } else if (tag_0 == 2) { fx_str_t slit_1 = FX_MAKE_STR(""); fx_copy_str(&slit_1, &esc_0); } else if (tag_0 == 3) { fx_str_t slit_2 = FX_MAKE_STR(""); fx_copy_str(&slit_2, &esc_0); } else { fx_str_t slit_3 = FX_MAKE_STR(""); fx_copy_str(&slit_3, &esc_0); } FX_CHECK_EXN(_fx_cleanup); fx_str_t slit_4 = FX_MAKE_STR(""); { const fx_str_t strs_0[] = { esc_0, *msg_0, slit_4 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 3, fx_result),
_fx_cleanup); } } else { fx_copy_str(msg_0, fx_result); } _fx_cleanup: ; FX_FREE_STR(&esc_0); return fx_status; }
/* Compiler.get_preamble: builds the implicit token preamble prepended to a
 * parsed module — "from <M> import *" / "import <M>" tokens for the standard
 * modules (unless the module being compiled IS that module), plus PP_DEFINE
 * tokens for every -D command-line define.  Skipped entirely when
 * Options.opt.use_preamble is false. */
FX_EXTERN_C int _fx_M8CompilerFM12get_preambleLN14Lexer__token_t1S( fx_str_t* mfname_0, struct _fx_LN14Lexer__token_t_data_t** fx_result, void* fx_fv) { _fx_LN14Lexer__token_t preamble_0 = 0; fx_str_t v_0 = {0}; fx_str_t bare_name_0 = {0}; _fx_T2LN14Lexer__token_tB __fold_result___0 = {0}; _fx_T2SB v_1 = {0}; _fx_T2SB v_2 = {0}; _fx_T2SB v_3 = {0}; _fx_T2SB v_4 = {0}; _fx_T2SB v_5 = {0}; _fx_T2SB v_6 = {0}; _fx_T2SB v_7 = {0}; _fx_LT2SB v_8 = 0; _fx_T2LN14Lexer__token_tB v_9 = {0}; _fx_LN14Lexer__token_t __fold_result___1 = 0; _fx_LT2SN17Options__optval_t v_10 = 0; int fx_status = 0; if (_fx_g12Options__opt.use_preamble) { FX_CALL(_fx_M8FilenameFM8basenameS1S(mfname_0, &v_0, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM16remove_extensionS1S(&v_0, &bare_name_0, 0), _fx_cleanup); _fx_make_T2LN14Lexer__token_tB(0, false, &__fold_result___0); fx_str_t slit_0 = FX_MAKE_STR("Builtins"); _fx_make_T2SB(&slit_0, true, &v_1); fx_str_t slit_1 = FX_MAKE_STR("Math"); _fx_make_T2SB(&slit_1, true, &v_2); fx_str_t slit_2 = FX_MAKE_STR("Array"); _fx_make_T2SB(&slit_2, true, &v_3); fx_str_t slit_3 = FX_MAKE_STR("List"); _fx_make_T2SB(&slit_3, false, &v_4); fx_str_t slit_4 = FX_MAKE_STR("Vector"); _fx_make_T2SB(&slit_4, false, &v_5); fx_str_t slit_5 = FX_MAKE_STR("Char"); _fx_make_T2SB(&slit_5, false, &v_6); fx_str_t slit_6 = FX_MAKE_STR("String"); _fx_make_T2SB(&slit_6, false, &v_7); FX_CALL(_fx_cons_LT2SB(&v_7, 0, true, &v_8), _fx_cleanup); FX_CALL(_fx_cons_LT2SB(&v_6, v_8, false, &v_8), _fx_cleanup); FX_CALL(_fx_cons_LT2SB(&v_5, v_8, false, &v_8), _fx_cleanup); FX_CALL(_fx_cons_LT2SB(&v_4, v_8, false, &v_8), _fx_cleanup); FX_CALL(_fx_cons_LT2SB(&v_3, v_8, false, &v_8), _fx_cleanup); FX_CALL(_fx_cons_LT2SB(&v_2, v_8, false, &v_8), _fx_cleanup); FX_CALL(_fx_cons_LT2SB(&v_1, v_8, false, &v_8), _fx_cleanup); _fx_LT2SB lst_0 = v_8; for (; lst_0; lst_0 = lst_0->tl) { fx_str_t
/* (get_preamble, continued) Per standard module: the tuple flag t1 selects
 * "from <M> import *" tokens vs a plain "import <M>"; a module whose bare
 * file name equals the standard module's name gets no self-import. */
mname_0 = {0}; _fx_T2LN14Lexer__token_tB v_11 = {0}; _fx_LN14Lexer__token_t preamble_1 = 0; _fx_T2LN14Lexer__token_tB v_12 = {0}; _fx_N14Lexer__token_t v_13 = {0}; _fx_N14Lexer__token_t v_14 = {0}; _fx_N14Lexer__token_t v_15 = {0}; _fx_LN14Lexer__token_t v_16 = 0; _fx_LN14Lexer__token_t v_17 = 0; _fx_N14Lexer__token_t v_18 = {0}; _fx_N14Lexer__token_t v_19 = {0}; _fx_LN14Lexer__token_t v_20 = 0; _fx_LN14Lexer__token_t v_21 = 0; _fx_T2SB* __pat___0 = &lst_0->hd; fx_copy_str(&__pat___0->t0, &mname_0); _fx_copy_T2LN14Lexer__token_tB(&__fold_result___0, &v_11); FX_COPY_PTR(v_11.t0, &preamble_1); bool found_0 = v_11.t1; if (found_0) { _fx_make_T2LN14Lexer__token_tB(preamble_1, found_0, &v_12); } else { bool v_22 = _fx_F6__eq__B2SS(&bare_name_0, &mname_0, 0); if (v_22) { _fx_make_T2LN14Lexer__token_tB(preamble_1, true, &v_12); } else if (__pat___0->t1) { _fx_M5LexerFM5IDENTN14Lexer__token_t2BS(true, &mname_0, &v_13); _fx_M5LexerFM6IMPORTN14Lexer__token_t1B(false, &v_14); _fx_M5LexerFM4STARN14Lexer__token_t1B(true, &v_15); FX_CALL(_fx_cons_LN14Lexer__token_t(&_fx_g19Compiler__SEMICOLON, 0, true, &v_16), _fx_catch_4); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_15, v_16, false, &v_16), _fx_catch_4); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_14, v_16, false, &v_16), _fx_catch_4); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_13, v_16, false, &v_16), _fx_catch_4); FX_CALL(_fx_cons_LN14Lexer__token_t(&_fx_g14Compiler__FROM, v_16, false, &v_16), _fx_catch_4); if (preamble_1 == 0) { FX_COPY_PTR(v_16, &v_17); } else if (v_16 == 0) { FX_COPY_PTR(preamble_1, &v_17); } else { _fx_LN14Lexer__token_t v_23 = 0; _fx_LN14Lexer__token_t lstend_0 = 0; _fx_LN14Lexer__token_t lst_1 = preamble_1; for (; lst_1; lst_1 = lst_1->tl) { _fx_N14Lexer__token_t* x_0 = &lst_1->hd; _fx_LN14Lexer__token_t node_0 = 0; FX_CALL(_fx_cons_LN14Lexer__token_t(x_0, 0, false, &node_0), _fx_catch_0); FX_LIST_APPEND(v_23, lstend_0, node_0); _fx_catch_0: ; FX_CHECK_EXN(_fx_catch_1); }
_fx_M8CompilerFM5link2LN14Lexer__token_t2LN14Lexer__token_tLN14Lexer__token_t(v_23, v_16, &v_17, 0); _fx_catch_1: ; if (v_23) { _fx_free_LN14Lexer__token_t(&v_23); } } FX_CHECK_EXN(_fx_catch_4); _fx_make_T2LN14Lexer__token_tB(v_17, false, &v_12); } else { _fx_M5LexerFM6IMPORTN14Lexer__token_t1B(true, &v_18); _fx_M5LexerFM5IDENTN14Lexer__token_t2BS(true, &mname_0, &v_19); FX_CALL(_fx_cons_LN14Lexer__token_t(&_fx_g19Compiler__SEMICOLON, 0, true, &v_20), _fx_catch_4); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_19, v_20, false, &v_20), _fx_catch_4); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_18, v_20, false, &v_20), _fx_catch_4); if (preamble_1 == 0) { FX_COPY_PTR(v_20, &v_21); } else if (v_20 == 0) { FX_COPY_PTR(preamble_1, &v_21); } else { _fx_LN14Lexer__token_t v_24 = 0; _fx_LN14Lexer__token_t lstend_1 = 0; _fx_LN14Lexer__token_t lst_2 = preamble_1; for (; lst_2; lst_2 = lst_2->tl) { _fx_N14Lexer__token_t* x_1 = &lst_2->hd; _fx_LN14Lexer__token_t node_1 = 0; FX_CALL(_fx_cons_LN14Lexer__token_t(x_1, 0, false, &node_1), _fx_catch_2); FX_LIST_APPEND(v_24, lstend_1, node_1); _fx_catch_2: ; FX_CHECK_EXN(_fx_catch_3); } _fx_M8CompilerFM5link2LN14Lexer__token_t2LN14Lexer__token_tLN14Lexer__token_t(v_24, v_20, &v_21, 0); _fx_catch_3: ; if (v_24) { _fx_free_LN14Lexer__token_t(&v_24); } } FX_CHECK_EXN(_fx_catch_4); _fx_make_T2LN14Lexer__token_tB(v_21, false, &v_12); } } _fx_free_T2LN14Lexer__token_tB(&__fold_result___0); _fx_copy_T2LN14Lexer__token_tB(&v_12, &__fold_result___0); _fx_catch_4: ; if (v_21) { _fx_free_LN14Lexer__token_t(&v_21); } if (v_20) { _fx_free_LN14Lexer__token_t(&v_20); } _fx_free_N14Lexer__token_t(&v_19); _fx_free_N14Lexer__token_t(&v_18); if (v_17) { _fx_free_LN14Lexer__token_t(&v_17); } if (v_16) { _fx_free_LN14Lexer__token_t(&v_16); } _fx_free_N14Lexer__token_t(&v_15); _fx_free_N14Lexer__token_t(&v_14); _fx_free_N14Lexer__token_t(&v_13); _fx_free_T2LN14Lexer__token_tB(&v_12); if (preamble_1) { _fx_free_LN14Lexer__token_t(&preamble_1); }
_fx_free_T2LN14Lexer__token_tB(&v_11); FX_FREE_STR(&mname_0); FX_CHECK_EXN(_fx_cleanup); } _fx_copy_T2LN14Lexer__token_tB(&__fold_result___0, &v_9); FX_COPY_PTR(v_9.t0, &preamble_0); } FX_COPY_PTR(preamble_0, &__fold_result___1); FX_COPY_PTR(_fx_g12Options__opt.defines, &v_10);
/* (get_preamble, continued) Prepend PP_DEFINE <name> <literal> token triples
 * for every command-line define; bool/int/string option values map to the
 * corresponding AST literal, anything else raises NoMatchError. */
_fx_LT2SN17Options__optval_t lst_3 = v_10; for (; lst_3; lst_3 = lst_3->tl) { fx_str_t n_0 = {0}; _fx_N17Options__optval_t v_25 = {0}; _fx_LN14Lexer__token_t p_0 = 0; _fx_N10Ast__lit_t v_26 = {0}; _fx_N14Lexer__token_t v_27 = {0}; _fx_N14Lexer__token_t v_28 = {0}; _fx_T2SN17Options__optval_t* __pat___1 = &lst_3->hd; fx_copy_str(&__pat___1->t0, &n_0); _fx_copy_N17Options__optval_t(&__pat___1->t1, &v_25); FX_COPY_PTR(__fold_result___1, &p_0); int tag_0 = v_25.tag; if (tag_0 == 1) { _fx_M3AstFM7LitBoolN10Ast__lit_t1B(v_25.u.OptBool, &v_26); } else if (tag_0 == 2) { _fx_M3AstFM6LitIntN10Ast__lit_t1l((int64_t)v_25.u.OptInt, &v_26); } else if (tag_0 == 3) { _fx_M3AstFM9LitStringN10Ast__lit_t1S(&v_25.u.OptString, &v_26); } else { FX_FAST_THROW(FX_EXN_NoMatchError, _fx_catch_5); } FX_CHECK_EXN(_fx_catch_5); _fx_M5LexerFM5IDENTN14Lexer__token_t2BS(true, &n_0, &v_27); _fx_M5LexerFM7LITERALN14Lexer__token_t1N10Ast__lit_t(&v_26, &v_28); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_28, p_0, false, &p_0), _fx_catch_5); FX_CALL(_fx_cons_LN14Lexer__token_t(&v_27, p_0, false, &p_0), _fx_catch_5); FX_CALL(_fx_cons_LN14Lexer__token_t(&_fx_g19Compiler__PP_DEFINE, p_0, false, &p_0), _fx_catch_5); _fx_free_LN14Lexer__token_t(&__fold_result___1); FX_COPY_PTR(p_0, &__fold_result___1); _fx_catch_5: ; _fx_free_N14Lexer__token_t(&v_28); _fx_free_N14Lexer__token_t(&v_27); _fx_free_N10Ast__lit_t(&v_26); if (p_0) { _fx_free_LN14Lexer__token_t(&p_0); } _fx_free_N17Options__optval_t(&v_25); FX_FREE_STR(&n_0); FX_CHECK_EXN(_fx_cleanup); } FX_COPY_PTR(__fold_result___1, fx_result); _fx_cleanup: ; if (preamble_0) { _fx_free_LN14Lexer__token_t(&preamble_0); } FX_FREE_STR(&v_0); FX_FREE_STR(&bare_name_0);
_fx_free_T2LN14Lexer__token_tB(&__fold_result___0); _fx_free_T2SB(&v_1); _fx_free_T2SB(&v_2); _fx_free_T2SB(&v_3); _fx_free_T2SB(&v_4); _fx_free_T2SB(&v_5); _fx_free_T2SB(&v_6); _fx_free_T2SB(&v_7); if (v_8) { _fx_free_LT2SB(&v_8); } _fx_free_T2LN14Lexer__token_tB(&v_9); if (__fold_result___1) { _fx_free_LN14Lexer__token_t(&__fold_result___1); } if (v_10) { _fx_free_LT2SN17Options__optval_t(&v_10); } return fx_status; }
/* Compiler.find_ficus_dirs: computes the standard library search path from
 * FICUS_PATH, the running binary's location, and the installed lib/ficus-
 * <major>.<minor> layout, then scans it for Builtins.fx + the runtime
 * ficus.h to locate the ficus root.  Returns (found_root, final_path). */
FX_EXTERN_C int _fx_M8CompilerFM15find_ficus_dirsT2SLS0(struct _fx_T2SLS* fx_result, void* fx_fv) { _fx_LS ficus_path_0 = 0; fx_str_t v_0 = {0}; fx_str_t v_1 = {0}; fx_str_t v_2 = {0}; fx_str_t ficus_app_path_0 = {0}; fx_str_t v_3 = {0}; fx_str_t ficus_pp_path_0 = {0}; fx_str_t v_4 = {0}; fx_str_t v_5 = {0}; fx_str_t v_6 = {0}; fx_str_t v_7 = {0}; fx_str_t ficus_inst_path_0 = {0}; fx_str_t v_8 = {0}; fx_str_t v_9 = {0}; fx_str_t v_10 = {0}; fx_str_t v_11 = {0}; _fx_LS v_12 = 0; _fx_LS std_ficus_path_0 = 0; _fx_Ta2LS v_13 = {0}; _fx_LS search_path_0 = 0; fx_str_t found_0 = {0}; int fx_status = 0; fx_str_t slit_0 = FX_MAKE_STR("FICUS_PATH"); FX_CALL(_fx_M3SysFM7getpathLS1S(&slit_0, &ficus_path_0, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM6getcwdS0(&v_0, 0), _fx_cleanup); if (_fx_g9Sys__argv != 0) { fx_copy_str(&_fx_g9Sys__argv->hd, &v_1); } else { FX_FAST_THROW(FX_EXN_NullListError, _fx_cleanup); } FX_CHECK_EXN(_fx_cleanup); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&v_0, &v_1, &v_2, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM7dirnameS1S(&v_2, &ficus_app_path_0, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM7dirnameS1S(&ficus_app_path_0, &v_3, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM7dirnameS1S(&v_3, &ficus_pp_path_0, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM7dirnameS1S(&ficus_app_path_0, &v_4, 0), _fx_cleanup); FX_CALL(_fx_F6stringS1i(_fx_g15__ficus_major__, &v_5, 0), _fx_cleanup); FX_CALL(_fx_F6stringS1i(_fx_g15__ficus_minor__, &v_6, 0), _fx_cleanup); fx_str_t slit_1 = FX_MAKE_STR("lib/ficus-"); fx_str_t slit_2 = FX_MAKE_STR("."); { const fx_str_t
/* (find_ficus_dirs, continued) Assemble candidate lib directories:
 * <root>/lib, <binary>/../../../lib and lib/ficus-<ver>/lib, then append
 * FICUS_PATH entries after the standard candidates. */
strs_0[] = { slit_1, v_5, slit_2, v_6 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 4, &v_7), _fx_cleanup); } FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&v_4, &v_7, &ficus_inst_path_0, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM7dirnameS1S(&ficus_app_path_0, &v_8, 0), _fx_cleanup); fx_str_t slit_3 = FX_MAKE_STR("lib"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&v_8, &slit_3, &v_9, 0), _fx_cleanup); fx_str_t slit_4 = FX_MAKE_STR("lib"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&ficus_pp_path_0, &slit_4, &v_10, 0), _fx_cleanup); fx_str_t slit_5 = FX_MAKE_STR("lib"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&ficus_inst_path_0, &slit_5, &v_11, 0), _fx_cleanup); FX_CALL(_fx_cons_LS(&v_11, 0, true, &v_12), _fx_cleanup); FX_CALL(_fx_cons_LS(&v_10, v_12, false, &v_12), _fx_cleanup); FX_CALL(_fx_cons_LS(&v_9, v_12, true, &std_ficus_path_0), _fx_cleanup); int_ std_ficus_path_len_0 = _fx_M8CompilerFM6lengthi1LS(std_ficus_path_0, 0); _fx_make_Ta2LS(std_ficus_path_0, ficus_path_0, &v_13); if (v_13.t0 == 0) { FX_COPY_PTR(ficus_path_0, &search_path_0); } else if (v_13.t1 == 0) { FX_COPY_PTR(std_ficus_path_0, &search_path_0); } else { _fx_LS v_14 = 0; _fx_LS lstend_0 = 0; _fx_LS lst_0 = std_ficus_path_0; for (; lst_0; lst_0 = lst_0->tl) { fx_str_t* x_0 = &lst_0->hd; _fx_LS node_0 = 0; FX_CALL(_fx_cons_LS(x_0, 0, false, &node_0), _fx_catch_0); FX_LIST_APPEND(v_14, lstend_0, node_0); _fx_catch_0: ; FX_CHECK_EXN(_fx_catch_1); } _fx_M8CompilerFM5link2LS2LSLS(v_14, ficus_path_0, &search_path_0, 0); _fx_catch_1: ; if (v_14) { _fx_free_LS(&v_14); } } FX_CHECK_EXN(_fx_cleanup);
/* Scan the search path: a directory qualifies when it holds Builtins.fx and
 * ../runtime/ficus/ficus.h; the first match sets found_0 and, for a standard
 * candidate, is also prepended to ficus_path_0. */
fx_str_t slit_6 = FX_MAKE_STR(""); fx_copy_str(&slit_6, &found_0); int_ i_0 = 0; _fx_LS lst_1 = search_path_0; for (; lst_1; lst_1 = lst_1->tl, i_0 += 1) { fx_str_t builtins_fx_0 = {0}; fx_str_t ficus_h_0 = {0}; fx_str_t v_15 = {0}; _fx_LS v_16 = 0; _fx_Ta2LS v_17 = {0}; _fx_LS v_18 = 0; fx_str_t* d_0 = &lst_1->hd; fx_str_t slit_7 = FX_MAKE_STR("Builtins.fx"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(d_0, &slit_7, &builtins_fx_0, 0),
_fx_catch_4); fx_str_t slit_8 = FX_MAKE_STR("../runtime/ficus/ficus.h"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(d_0, &slit_8, &ficus_h_0, 0), _fx_catch_4); bool v_19; bool res_0; FX_CALL(_fx_M8FilenameFM6existsB1S(&builtins_fx_0, &res_0, 0), _fx_catch_4); if (res_0) { FX_CALL(_fx_M8FilenameFM6existsB1S(&ficus_h_0, &v_19, 0), _fx_catch_4); } else { v_19 = false; } if (v_19) { FX_CALL(_fx_M8FilenameFM7dirnameS1S(d_0, &v_15, 0), _fx_catch_4); FX_FREE_STR(&found_0); fx_copy_str(&v_15, &found_0); if (i_0 < std_ficus_path_len_0) { FX_CALL(_fx_cons_LS(d_0, 0, true, &v_16), _fx_catch_4); _fx_make_Ta2LS(ficus_path_0, v_16, &v_17); if (v_17.t0 == 0) { FX_COPY_PTR(v_16, &v_18); } else if (v_17.t1 == 0) { FX_COPY_PTR(ficus_path_0, &v_18); } else { _fx_LS v_20 = 0; _fx_LS lstend_1 = 0; _fx_LS lst_2 = ficus_path_0; for (; lst_2; lst_2 = lst_2->tl) { fx_str_t* x_1 = &lst_2->hd; _fx_LS node_1 = 0; FX_CALL(_fx_cons_LS(x_1, 0, false, &node_1), _fx_catch_2); FX_LIST_APPEND(v_20, lstend_1, node_1); _fx_catch_2: ; FX_CHECK_EXN(_fx_catch_3); } _fx_M8CompilerFM5link2LS2LSLS(v_20, v_16, &v_18, 0); _fx_catch_3: ; if (v_20) { _fx_free_LS(&v_20); } } FX_CHECK_EXN(_fx_catch_4); _fx_free_LS(&ficus_path_0); FX_COPY_PTR(v_18, &ficus_path_0); } FX_BREAK(_fx_catch_4); } _fx_catch_4: ; if (v_18) { _fx_free_LS(&v_18); } _fx_free_Ta2LS(&v_17); if (v_16) { _fx_free_LS(&v_16); } FX_FREE_STR(&v_15); FX_FREE_STR(&ficus_h_0); FX_FREE_STR(&builtins_fx_0); FX_CHECK_BREAK(); FX_CHECK_EXN(_fx_cleanup); } _fx_make_T2SLS(&found_0, ficus_path_0, fx_result); _fx_cleanup: ; if (ficus_path_0) { _fx_free_LS(&ficus_path_0); } FX_FREE_STR(&v_0); FX_FREE_STR(&v_1); FX_FREE_STR(&v_2); FX_FREE_STR(&ficus_app_path_0); FX_FREE_STR(&v_3); FX_FREE_STR(&ficus_pp_path_0); FX_FREE_STR(&v_4); FX_FREE_STR(&v_5); FX_FREE_STR(&v_6); FX_FREE_STR(&v_7); FX_FREE_STR(&ficus_inst_path_0); FX_FREE_STR(&v_8); FX_FREE_STR(&v_9); FX_FREE_STR(&v_10); FX_FREE_STR(&v_11); if (v_12) { _fx_free_LS(&v_12); } if (std_ficus_path_0) {
_fx_free_LS(&std_ficus_path_0); } _fx_free_Ta2LS(&v_13); if (search_path_0) { _fx_free_LS(&search_path_0); } FX_FREE_STR(&found_0); return fx_status; }
/* Compiler.parse_all: parses the root module and, transitively via a work
 * queue, every imported module; sets *fx_result=false if any module fails.
 * Builds the include-dir list: cwd + root dir + -I options + ficus path,
 * each normalized against cwd. */
FX_EXTERN_C int _fx_M8CompilerFM9parse_allB2SLS( fx_str_t* fname0_0, struct _fx_LS_data_t* ficus_path_0, bool* fx_result, void* fx_fv) { fx_str_t cwd_0 = {0}; fx_str_t fname0_1 = {0}; fx_str_t dir0_0 = {0}; _fx_LS inc_dirs0_0 = 0; _fx_LS v_0 = 0; _fx_LS v_1 = 0; _fx_LS inc_dirs0_1 = 0; _fx_LS inc_dirs0_2 = 0; _fx_LS inc_dirs0_3 = 0; fx_str_t v_2 = {0}; fx_str_t v_3 = {0}; _fx_Li queue_0 = 0; int fx_status = 0; FX_CALL(_fx_M8FilenameFM6getcwdS0(&cwd_0, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&cwd_0, fname0_0, &fname0_1, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM7dirnameS1S(&fname0_1, &dir0_0, 0), _fx_cleanup); bool v_4 = _fx_F6__eq__B2SS(&dir0_0, &cwd_0, 0); if (v_4) { FX_CALL(_fx_cons_LS(&cwd_0, 0, true, &inc_dirs0_0), _fx_cleanup); } else { FX_CALL(_fx_cons_LS(&cwd_0, 0, true, &v_0), _fx_cleanup); FX_CALL(_fx_cons_LS(&dir0_0, v_0, true, &inc_dirs0_0), _fx_cleanup); } FX_COPY_PTR(_fx_g12Options__opt.include_path, &v_1); if (inc_dirs0_0 == 0) { FX_COPY_PTR(v_1, &inc_dirs0_1); } else if (v_1 == 0) { FX_COPY_PTR(inc_dirs0_0, &inc_dirs0_1); } else { _fx_LS v_5 = 0; _fx_LS lstend_0 = 0; _fx_LS lst_0 = inc_dirs0_0; for (; lst_0; lst_0 = lst_0->tl) { fx_str_t* x_0 = &lst_0->hd; _fx_LS node_0 = 0; FX_CALL(_fx_cons_LS(x_0, 0, false, &node_0), _fx_catch_0); FX_LIST_APPEND(v_5, lstend_0, node_0); _fx_catch_0: ; FX_CHECK_EXN(_fx_catch_1); } _fx_M8CompilerFM5link2LS2LSLS(v_5, v_1, &inc_dirs0_1, 0); _fx_catch_1: ; if (v_5) { _fx_free_LS(&v_5); } } FX_CHECK_EXN(_fx_cleanup); if (inc_dirs0_1 == 0) { FX_COPY_PTR(ficus_path_0, &inc_dirs0_2); } else if (ficus_path_0 == 0) { FX_COPY_PTR(inc_dirs0_1, &inc_dirs0_2); } else { _fx_LS v_6 = 0; _fx_LS lstend_1 = 0; _fx_LS lst_1 = inc_dirs0_1; for (; lst_1; lst_1 = lst_1->tl) { fx_str_t* x_1 = &lst_1->hd; _fx_LS node_1 = 0; FX_CALL(_fx_cons_LS(x_1, 0, false, &node_1),
/* (parse_all, continued) finish building inc_dirs, then loop over the module
 * work queue: pop an index, look the module up in Ast.all_modules, and parse
 * it if not yet parsed (the t7 flag appears to mean "already parsed" —
 * inferred from the set-before-parse below; confirm against Ast sources). */
_fx_catch_2); FX_LIST_APPEND(v_6, lstend_1, node_1); _fx_catch_2: ; FX_CHECK_EXN(_fx_catch_3); } _fx_M8CompilerFM5link2LS2LSLS(v_6, ficus_path_0, &inc_dirs0_2, 0); _fx_catch_3: ; if (v_6) { _fx_free_LS(&v_6); } } FX_CHECK_EXN(_fx_cleanup); _fx_LS lstend_2 = 0; _fx_LS lst_2 = inc_dirs0_2; for (; lst_2; lst_2 = lst_2->tl) { fx_str_t res_0 = {0}; fx_str_t* d_0 = &lst_2->hd; FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&cwd_0, d_0, &res_0, 0), _fx_catch_4); _fx_LS node_2 = 0; FX_CALL(_fx_cons_LS(&res_0, 0, false, &node_2), _fx_catch_4); FX_LIST_APPEND(inc_dirs0_3, lstend_2, node_2); _fx_catch_4: ; FX_FREE_STR(&res_0); FX_CHECK_EXN(_fx_cleanup); } FX_CALL(_fx_M8FilenameFM8basenameS1S(&fname0_1, &v_2, 0), _fx_cleanup); FX_CALL(_fx_M8FilenameFM16remove_extensionS1S(&v_2, &v_3, 0), _fx_cleanup); _fx_R9Ast__id_t name0_id_0; FX_CALL(_fx_M3AstFM6get_idRM4id_t1S(&v_3, &name0_id_0, 0), _fx_cleanup); int_ m_idx_0; FX_CALL(_fx_M3AstFM11find_modulei2RM4id_tS(&name0_id_0, &fname0_1, &m_idx_0, 0), _fx_cleanup); FX_CALL(_fx_cons_Li(m_idx_0, 0, true, &queue_0), _fx_cleanup); bool ok_0 = true; while (queue_0 != 0) { _fx_Li v_7 = 0; _fx_N16Ast__defmodule_t minfo_0 = 0; fx_str_t mfname_0 = {0}; fx_exn_t exn_0 = {0}; int_ m_idx_1; if (queue_0 != 0) { m_idx_1 = queue_0->hd; } else { FX_FAST_THROW(FX_EXN_NullListError, _fx_catch_13); } FX_CHECK_EXN(_fx_catch_13); if (queue_0 != 0) { FX_COPY_PTR(queue_0->tl, &v_7); } else { FX_FAST_THROW(FX_EXN_NullListError, _fx_catch_13); } FX_CHECK_EXN(_fx_catch_13); FX_FREE_LIST_SIMPLE(&queue_0); FX_COPY_PTR(v_7, &queue_0); FX_CHKIDX(FX_CHKIDX1(_fx_g16Ast__all_modules, 0, m_idx_1), _fx_catch_13); FX_COPY_PTR(*FX_PTR_1D(_fx_N16Ast__defmodule_t, _fx_g16Ast__all_modules, m_idx_1), &minfo_0); fx_copy_str(&minfo_0->u.defmodule_t.t1, &mfname_0); if (!minfo_0->u.defmodule_t.t7) { fx_str_t dir1_0 = {0}; _fx_LS v_8 = 0; _fx_LS inc_dirs_0 = 0; _fx_LN14Lexer__token_t preamble_0 = 0; _fx_Li v_9 = 0; _fx_Li __fold_result___0 = 0; _fx_Li v_10 = 0;
/* Mark the module as visited, parse it with its preamble + include dirs,
 * then push each not-yet-parsed dependency (t5 list) onto the queue. */
FX_CHKIDX(FX_CHKIDX1(_fx_g16Ast__all_modules, 0, m_idx_1), _fx_catch_9); (*FX_PTR_1D(_fx_N16Ast__defmodule_t, _fx_g16Ast__all_modules, m_idx_1))->u.defmodule_t.t7 = true; FX_CALL(_fx_M8FilenameFM7dirnameS1S(&mfname_0, &dir1_0, 0), _fx_catch_9); bool v_11 = _fx_F6__eq__B2SS(&dir1_0, &dir0_0, 0); if (!v_11) { FX_CALL(_fx_cons_LS(&dir1_0, 0, true, &v_8), _fx_catch_9); } if (v_8 == 0) { FX_COPY_PTR(inc_dirs0_3, &inc_dirs_0); } else if (inc_dirs0_3 == 0) { FX_COPY_PTR(v_8, &inc_dirs_0); } else { _fx_LS v_12 = 0; _fx_LS lstend_3 = 0; _fx_LS lst_3 = v_8; for (; lst_3; lst_3 = lst_3->tl) { fx_str_t* x_2 = &lst_3->hd; _fx_LS node_3 = 0; FX_CALL(_fx_cons_LS(x_2, 0, false, &node_3), _fx_catch_5); FX_LIST_APPEND(v_12, lstend_3, node_3); _fx_catch_5: ; FX_CHECK_EXN(_fx_catch_6); } _fx_M8CompilerFM5link2LS2LSLS(v_12, inc_dirs0_3, &inc_dirs_0, 0); _fx_catch_6: ; if (v_12) { _fx_free_LS(&v_12); } } FX_CHECK_EXN(_fx_catch_9); FX_CALL(_fx_M8CompilerFM12get_preambleLN14Lexer__token_t1S(&mfname_0, &preamble_0, 0), _fx_catch_9); bool v_13; FX_CALL(_fx_M6ParserFM5parseB3iLN14Lexer__token_tLS(m_idx_1, preamble_0, inc_dirs_0, &v_13, 0), _fx_catch_9); ok_0 = ok_0 && v_13; FX_CHKIDX(FX_CHKIDX1(_fx_g16Ast__all_modules, 0, m_idx_1), _fx_catch_9); FX_COPY_PTR((*FX_PTR_1D(_fx_N16Ast__defmodule_t, _fx_g16Ast__all_modules, m_idx_1))->u.defmodule_t.t5, &v_9); _fx_Li lst_4 = v_9; for (; lst_4; lst_4 = lst_4->tl) { _fx_Li r_0 = 0; int_ a_0 = lst_4->hd; FX_COPY_PTR(__fold_result___0, &r_0); FX_CALL(_fx_cons_Li(a_0, r_0, false, &r_0), _fx_catch_7); FX_FREE_LIST_SIMPLE(&__fold_result___0); FX_COPY_PTR(r_0, &__fold_result___0); _fx_catch_7: ; FX_FREE_LIST_SIMPLE(&r_0); FX_CHECK_EXN(_fx_catch_9); } FX_COPY_PTR(__fold_result___0, &v_10); _fx_Li lst_5 = v_10; for (; lst_5; lst_5 = lst_5->tl) { _fx_N16Ast__defmodule_t dep_minfo_0 = 0; _fx_Li v_14 = 0; int_ dep_0 = lst_5->hd; FX_CALL(_fx_M3AstFM10get_moduleN16Ast__defmodule_t1i(dep_0, &dep_minfo_0, 0), _fx_catch_8); if (!dep_minfo_0->u.defmodule_t.t7) {
FX_CALL(_fx_cons_Li(dep_0, queue_0, true, &v_14), _fx_catch_8); FX_FREE_LIST_SIMPLE(&queue_0); FX_COPY_PTR(v_14, &queue_0); } _fx_catch_8: ; FX_FREE_LIST_SIMPLE(&v_14); if (dep_minfo_0) { _fx_free_N16Ast__defmodule_t(&dep_minfo_0); } FX_CHECK_EXN(_fx_catch_9); } _fx_catch_9: ; FX_FREE_STR(&dir1_0); if (v_8) { _fx_free_LS(&v_8); } if (inc_dirs_0) { _fx_free_LS(&inc_dirs_0); } if (preamble_0) { _fx_free_LN14Lexer__token_t(&preamble_0); } FX_FREE_LIST_SIMPLE(&v_9); FX_FREE_LIST_SIMPLE(&__fold_result___0); FX_FREE_LIST_SIMPLE(&v_10);
/* Per-module error reporting: LexerError prints file:line:col, ParseError
 * prints the Ast location, anything else prints a generic "exception X
 * occured" line (sic — typo lives in the Ficus source; a doc-only edit
 * must not alter runtime strings). Parsing of other modules continues. */
if (fx_status < 0) { fx_exn_get_and_reset(fx_status, &exn_0); fx_status = 0; int tag_0 = exn_0.tag; if (tag_0 == _FX_EXN_E22LexerUtils__LexerError) { fx_str_t v_15 = {0}; fx_str_t v_16 = {0}; fx_str_t v_17 = {0}; _fx_T2Ta2iS* vcase_0 = &FX_EXN_DATA(_fx_E22LexerUtils__LexerError_data_t, exn_0.data); _fx_Ta2i* v_18 = &vcase_0->t0; FX_CALL(_fx_F6stringS1i(v_18->t0, &v_15, 0), _fx_catch_10); FX_CALL(_fx_F6stringS1i(v_18->t1, &v_16, 0), _fx_catch_10); fx_str_t slit_0 = FX_MAKE_STR(":"); fx_str_t slit_1 = FX_MAKE_STR(":"); fx_str_t slit_2 = FX_MAKE_STR(": error: "); fx_str_t* msg_0 = &vcase_0->t1; fx_str_t slit_3 = FX_MAKE_STR("\n"); { const fx_str_t strs_0[] = { mfname_0, slit_0, v_15, slit_1, v_16, slit_2, *msg_0, slit_3 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 8, &v_17), _fx_catch_10); } _fx_F12print_stringv1S(&v_17, 0); fx_str_t slit_4 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_4, 0); ok_0 = false; _fx_catch_10: ; FX_FREE_STR(&v_17); FX_FREE_STR(&v_16); FX_FREE_STR(&v_15); } else if (tag_0 == _FX_EXN_E18Parser__ParseError) { fx_str_t v_19 = {0}; fx_str_t v_20 = {0}; _fx_T2R10Ast__loc_tS* vcase_1 = &FX_EXN_DATA(_fx_E18Parser__ParseError_data_t, exn_0.data); FX_CALL(_fx_M3AstFM6stringS1RM5loc_t(&vcase_1->t0, &v_19, 0), _fx_catch_11); fx_str_t slit_5 = FX_MAKE_STR(": error: "); fx_str_t* msg_1 = &vcase_1->t1; fx_str_t slit_6 = FX_MAKE_STR("\n"); { const fx_str_t strs_1[] = { v_19, slit_5, *msg_1, slit_6 }; FX_CALL(fx_strjoin(0, 0, 0,
strs_1, 4, &v_20), _fx_catch_11); } _fx_F12print_stringv1S(&v_20, 0); fx_str_t slit_7 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_7, 0); ok_0 = false; _fx_catch_11: ; FX_FREE_STR(&v_20); FX_FREE_STR(&v_19); } else { fx_str_t v_21 = {0}; fx_str_t v_22 = {0}; FX_CALL(_fx_F6stringS1E(&exn_0, &v_21, 0), _fx_catch_12); fx_str_t slit_8 = FX_MAKE_STR(": exception "); fx_str_t slit_9 = FX_MAKE_STR(" occured"); { const fx_str_t strs_2[] = { mfname_0, slit_8, v_21, slit_9 }; FX_CALL(fx_strjoin(0, 0, 0, strs_2, 4, &v_22), _fx_catch_12); } _fx_F12print_stringv1S(&v_22, 0); fx_str_t slit_10 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_10, 0); ok_0 = false; _fx_catch_12: ; FX_FREE_STR(&v_22); FX_FREE_STR(&v_21); } FX_CHECK_EXN(_fx_catch_13); } } _fx_catch_13: ; fx_free_exn(&exn_0); FX_FREE_STR(&mfname_0); if (minfo_0) { _fx_free_N16Ast__defmodule_t(&minfo_0); } FX_FREE_LIST_SIMPLE(&v_7); FX_CHECK_EXN(_fx_cleanup); } *fx_result = ok_0; _fx_cleanup: ; FX_FREE_STR(&cwd_0); FX_FREE_STR(&fname0_1); FX_FREE_STR(&dir0_0); if (inc_dirs0_0) { _fx_free_LS(&inc_dirs0_0); } if (v_0) { _fx_free_LS(&v_0); } if (v_1) { _fx_free_LS(&v_1); } if (inc_dirs0_1) { _fx_free_LS(&inc_dirs0_1); } if (inc_dirs0_2) { _fx_free_LS(&inc_dirs0_2); } if (inc_dirs0_3) { _fx_free_LS(&inc_dirs0_3); } FX_FREE_STR(&v_2); FX_FREE_STR(&v_3); FX_FREE_LIST_SIMPLE(&queue_0); return fx_status; }
/* Compiler.toposort: topologically sorts the module dependency graph
 * (list of (vertex, deps) pairs) by depth-first search, then reverses the
 * accumulated post-order so dependencies precede dependents. */
FX_EXTERN_C int _fx_M8CompilerFM8toposortLi1LT2iLi( struct _fx_LT2iLi_data_t* graph_0, struct _fx_Li_data_t** fx_result, void* fx_fv) { fx_arr_t graph_1 = {0}; fx_arr_t processed_0 = {0}; _fx_rLi result_ref_0 = 0; _fx_Li __fold_result___0 = 0; _fx_Li result_0 = 0; int fx_status = 0; _fx_Li* dstptr_0 = 0; _fx_LT2iLi lst_0 = graph_0; int_ len_0 = fx_list_length(lst_0); { const int_ shape_0[] = { len_0 }; FX_CALL(fx_make_arr(1, shape_0, sizeof(_fx_Li), (fx_free_t)fx_free_list_simple, (fx_copy_t)fx_copy_ptr, 0, &graph_1), _fx_cleanup); } dstptr_0 = (_fx_Li*)graph_1.data; for (; lst_0; lst_0 = lst_0->tl, dstptr_0++) {
/* (toposort, continued) copy adjacency lists into an array, init the
 * processed[] flags, run dfs from every unprocessed vertex, then reverse
 * the collected post-order into *fx_result. */
_fx_T2iLi* __pat___0 = &lst_0->hd; FX_COPY_PTR(__pat___0->t1, dstptr_0); } int_ nvtx_0 = FX_ARR_SIZE(graph_1, 0); bool* dstptr_1 = 0; { const int_ shape_1[] = { nvtx_0 }; FX_CALL(fx_make_arr(1, shape_1, sizeof(bool), 0, 0, 0, &processed_0), _fx_cleanup); } dstptr_1 = (bool*)processed_0.data; for (int_ i_0 = 0; i_0 < nvtx_0; i_0++, dstptr_1++) { *dstptr_1 = false; } FX_CALL(_fx_make_rLi(0, &result_ref_0), _fx_cleanup); FX_CHKIDX_RANGE(FX_ARR_SIZE(processed_0, 0), 0, nvtx_0, 1, 1, 0, _fx_cleanup); for (int_ i_1 = 0; i_1 < nvtx_0; i_1++) { if (*FX_PTR_1D(bool, processed_0, i_1)) { FX_CONTINUE(_fx_catch_0); } FX_CALL(_fx_M8CompilerFM3dfsv5iLiA1LiA1BrLi(i_1, 0, &graph_1, &processed_0, result_ref_0, 0), _fx_catch_0); _fx_catch_0: ; FX_CHECK_CONTINUE(); FX_CHECK_EXN(_fx_cleanup); } FX_COPY_PTR(result_ref_0->data, &result_0); _fx_Li lst_1 = result_0; for (; lst_1; lst_1 = lst_1->tl) { _fx_Li r_0 = 0; int_ a_0 = lst_1->hd; FX_COPY_PTR(__fold_result___0, &r_0); FX_CALL(_fx_cons_Li(a_0, r_0, false, &r_0), _fx_catch_1); FX_FREE_LIST_SIMPLE(&__fold_result___0); FX_COPY_PTR(r_0, &__fold_result___0); _fx_catch_1: ; FX_FREE_LIST_SIMPLE(&r_0); FX_CHECK_EXN(_fx_cleanup); } FX_COPY_PTR(__fold_result___0, fx_result); _fx_cleanup: ; FX_FREE_ARR(&graph_1); FX_FREE_ARR(&processed_0); if (result_ref_0) { _fx_free_rLi(&result_ref_0); } FX_FREE_LIST_SIMPLE(&__fold_result___0); FX_FREE_LIST_SIMPLE(&result_0); return fx_status; }
/* dfs helper for toposort: recursive DFS with an explicit stack-depth check.
 * visited_0 is the path from the root; revisiting a vertex on the current
 * path means a dependency cycle, reported via a Fail exception.
 * NOTE(review): the error string says "cyclib" — typo for "cyclic"; it is a
 * runtime string, so the fix belongs in the Ficus source (Compiler.fx), not
 * in this generated file. */
static int _fx_M8CompilerFM3dfsv5iLiA1LiA1BrLi( int_ i_0, struct _fx_Li_data_t* visited_0, fx_arr_t* graph_0, fx_arr_t* processed_0, struct _fx_rLi_data_t* result_ref_0, void* fx_fv) { _fx_Li deps_0 = 0; _fx_LS v_0 = 0; fx_str_t vlist_0 = {0}; fx_str_t v_1 = {0}; fx_exn_t v_2 = {0}; _fx_Li visited_1 = 0; _fx_Li v_3 = 0; int fx_status = 0; FX_CALL(fx_check_stack(), _fx_cleanup); _fx_Li* result_0 = &result_ref_0->data; FX_CHKIDX(FX_CHKIDX1(*graph_0, 0, i_0), _fx_cleanup); FX_COPY_PTR(*FX_PTR_1D(_fx_Li, *graph_0, i_0), &deps_0); bool __fold_result___0 = false; _fx_Li
lst_0 = visited_0; for (; lst_0; lst_0 = lst_0->tl) { int_ b_0 = lst_0->hd; if (i_0 == b_0) { __fold_result___0 = true; FX_BREAK(_fx_catch_0); } _fx_catch_0: ; FX_CHECK_BREAK(); FX_CHECK_EXN(_fx_cleanup); } if (__fold_result___0) { _fx_LS lstend_0 = 0; _fx_Li lst_1 = visited_0; for (; lst_1; lst_1 = lst_1->tl) { fx_str_t res_0 = {0}; int_ j_0 = lst_1->hd; _fx_R9Ast__id_t v_4; FX_CALL(_fx_M3AstFM15get_module_nameRM4id_t1i(j_0, &v_4, 0), _fx_catch_1); FX_CALL(_fx_M3AstFM2ppS1RM4id_t(&v_4, &res_0, 0), _fx_catch_1); _fx_LS node_0 = 0; FX_CALL(_fx_cons_LS(&res_0, 0, false, &node_0), _fx_catch_1); FX_LIST_APPEND(v_0, lstend_0, node_0); _fx_catch_1: ; FX_FREE_STR(&res_0); FX_CHECK_EXN(_fx_cleanup); } fx_str_t slit_0 = FX_MAKE_STR(", "); FX_CALL(_fx_F4joinS2SLS(&slit_0, v_0, &vlist_0, 0), _fx_cleanup); fx_str_t slit_1 = FX_MAKE_STR("error: cyclib dependency between the modules: "); { const fx_str_t strs_0[] = { slit_1, vlist_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &v_1), _fx_cleanup); } FX_CALL(_fx_F9make_FailE1S(&v_1, &v_2), _fx_cleanup); FX_THROW(&v_2, true, _fx_cleanup); } FX_CALL(_fx_cons_Li(i_0, visited_0, true, &visited_1), _fx_cleanup); _fx_Li lst_2 = deps_0; for (; lst_2; lst_2 = lst_2->tl) { int_ j_1 = lst_2->hd; FX_CHKIDX(FX_CHKIDX1(*processed_0, 0, j_1), _fx_catch_2); if (*FX_PTR_1D(bool, *processed_0, j_1)) { FX_CONTINUE(_fx_catch_2); } FX_CALL(_fx_M8CompilerFM3dfsv5iLiA1LiA1BrLi(j_1, visited_1, graph_0, processed_0, result_ref_0, 0), _fx_catch_2); _fx_catch_2: ; FX_CHECK_CONTINUE(); FX_CHECK_EXN(_fx_cleanup); } FX_CALL(_fx_cons_Li(i_0, *result_0, true, &v_3), _fx_cleanup); FX_FREE_LIST_SIMPLE(result_0); FX_COPY_PTR(v_3, result_0); FX_CHKIDX(FX_CHKIDX1(*processed_0, 0, i_0), _fx_cleanup); *FX_PTR_1D(bool, *processed_0, i_0) = true; _fx_cleanup: ; FX_FREE_LIST_SIMPLE(&deps_0); if (v_0) { _fx_free_LS(&v_0); } FX_FREE_STR(&vlist_0); FX_FREE_STR(&v_1); fx_free_exn(&v_2); FX_FREE_LIST_SIMPLE(&visited_1); FX_FREE_LIST_SIMPLE(&v_3); return fx_status; } FX_EXTERN_C
int _fx_M8CompilerFM11k_skip_someLR17K_form__kmodule_t1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_LR17K_form__kmodule_t_data_t** fx_result, void* fx_fv) { fx_arr_t skip_flags_0 = {0}; fx_str_t build_root_dir_0 = {0}; fx_str_t build_dir_0 = {0}; fx_str_t obj_ext_0 = {0}; _fx_LR17K_form__kmodule_t kmods_1 = 0; fx_exn_t v_0 = {0}; int fx_status = 0; bool* dstptr_0 = 0; int_ v_1 = FX_ARR_SIZE(_fx_g16Ast__all_modules, 0); { const int_ shape_0[] = { v_1 }; FX_CALL(fx_make_arr(1, shape_0, sizeof(bool), 0, 0, 0, &skip_flags_0), _fx_cleanup); } dstptr_0 = (bool*)skip_flags_0.data; for (int_ i_0 = 0; i_0 < v_1; i_0++, dstptr_0++) { *dstptr_0 = false; } fx_copy_str(&_fx_g12Options__opt.build_rootdir, &build_root_dir_0); bool ok_0; FX_CALL(_fx_M3SysFM5mkdirB2Si(&build_root_dir_0, 493, &ok_0, 0), _fx_cleanup); fx_copy_str(&_fx_g12Options__opt.build_dir, &build_dir_0); bool ok_1; if (ok_0) { FX_CALL(_fx_M3SysFM5mkdirB2Si(&build_dir_0, 493, &ok_1, 0), _fx_cleanup); } else { ok_1 = false; } if (_fx_g10Sys__win32) { fx_str_t slit_0 = FX_MAKE_STR(".obj"); fx_copy_str(&slit_0, &obj_ext_0); } else { fx_str_t slit_1 = FX_MAKE_STR(".o"); fx_copy_str(&slit_1, &obj_ext_0); } _fx_LR17K_form__kmodule_t lstend_0 = 0; _fx_LR17K_form__kmodule_t lst_0 = kmods_0; for (; lst_0; lst_0 = lst_0->tl) { _fx_R14Ast__pragmas_t km_pragmas_0 = {0}; _fx_Li km_deps_0 = 0; _fx_LN14K_form__kexp_t km_top_0 = 0; fx_str_t km_cname_0 = {0}; fx_str_t ext_0 = {0}; fx_str_t mname_0 = {0}; fx_str_t cname_0 = {0}; fx_str_t k_filename_0 = {0}; fx_str_t c_filename_0 = {0}; fx_str_t o_filename_0 = {0}; fx_str_t new_kform_0 = {0}; fx_str_t old_kform_0 = {0}; fx_exn_t exn_0 = {0}; _fx_T3BBS v_2 = {0}; fx_exn_t exn_1 = {0}; fx_str_t v_3 = {0}; fx_str_t status_j_0 = {0}; fx_str_t status_j_1 = {0}; fx_str_t v_4 = {0}; _fx_R17K_form__kmodule_t rec_0 = {0}; _fx_R17K_form__kmodule_t* km_0 = &lst_0->hd; _fx_copy_R14Ast__pragmas_t(&km_0->km_pragmas, &km_pragmas_0); 
FX_COPY_PTR(km_0->km_deps, &km_deps_0); FX_COPY_PTR(km_0->km_top, &km_top_0); fx_copy_str(&km_0->km_cname, &km_cname_0); int_ km_idx_0 = km_0->km_idx; bool is_cpp_0; if (_fx_g12Options__opt.compile_by_cpp) { is_cpp_0 = true; } else { is_cpp_0 = km_pragmas_0.pragma_cpp; } if (is_cpp_0) { fx_str_t slit_2 = FX_MAKE_STR(".cpp"); fx_copy_str(&slit_2, &ext_0); } else { fx_str_t slit_3 = FX_MAKE_STR(".c"); fx_copy_str(&slit_3, &ext_0); } FX_CALL(_fx_M8K_mangleFM12mangle_mnameS1S(&km_cname_0, &mname_0, 0), _fx_catch_5); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&build_dir_0, &mname_0, &cname_0, 0), _fx_catch_5); fx_str_t slit_4 = FX_MAKE_STR(".k"); { const fx_str_t strs_0[] = { cname_0, slit_4 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &k_filename_0), _fx_catch_5); } { const fx_str_t strs_1[] = { cname_0, ext_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 2, &c_filename_0), _fx_catch_5); } { const fx_str_t strs_2[] = { cname_0, obj_ext_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_2, 2, &o_filename_0), _fx_catch_5); } FX_CALL(_fx_M4K_ppFM16pp_top_to_stringS1LN14K_form__kexp_t(km_top_0, &new_kform_0, 0), _fx_catch_5); bool have_k_0; FX_CALL(_fx_M8FilenameFM6existsB1S(&k_filename_0, &have_k_0, 0), _fx_catch_5); bool have_c_0; FX_CALL(_fx_M8FilenameFM6existsB1S(&c_filename_0, &have_c_0, 0), _fx_catch_5); bool have_o_0; FX_CALL(_fx_M8FilenameFM6existsB1S(&o_filename_0, &have_o_0, 0), _fx_catch_5); bool have_all_0 = have_k_0 && have_c_0 && have_o_0; bool t_0; if (_fx_g12Options__opt.force_rebuild) { t_0 = true; } else { t_0 = !have_all_0; } if (t_0) { fx_str_t slit_5 = FX_MAKE_STR(""); fx_copy_str(&slit_5, &old_kform_0); } else { FX_CALL(_fx_M4FileFM9read_utf8S1S(&k_filename_0, &old_kform_0, 0), _fx_catch_0); _fx_catch_0: ; if (fx_status < 0) { fx_exn_get_and_reset(fx_status, &exn_0); fx_status = 0; FX_FREE_STR(&old_kform_0); int tag_0 = exn_0.tag; bool res_0; if (tag_0 == FX_EXN_IOError) { res_0 = true; } else if (tag_0 == FX_EXN_FileOpenError) { res_0 = true; } else { res_0 = false; } 
FX_CHECK_EXN(_fx_catch_5); if (res_0) { fx_str_t slit_6 = FX_MAKE_STR(""); fx_copy_str(&slit_6, &old_kform_0); goto _fx_endmatch_0; } FX_RETHROW(&exn_0, _fx_catch_5); _fx_endmatch_0: ; FX_CHECK_EXN(_fx_catch_5); } } bool v_5 = _fx_F6__eq__B2SS(&new_kform_0, &old_kform_0, 0); if (v_5) { fx_str_t slit_7 = FX_MAKE_STR(""); _fx_make_T3BBS(true, true, &slit_7, &v_2); } else { bool well_written_0; FX_CALL(_fx_M4FileFM10write_utf8v2SS(&k_filename_0, &new_kform_0, 0), _fx_catch_1); well_written_0 = true; _fx_catch_1: ; if (fx_status < 0) { fx_exn_get_and_reset(fx_status, &exn_1); fx_status = 0; int tag_1 = exn_1.tag; bool res_1; if (tag_1 == FX_EXN_IOError) { res_1 = true; } else if (tag_1 == FX_EXN_FileOpenError) { res_1 = true; } else { res_1 = false; } FX_CHECK_EXN(_fx_catch_5); if (res_1) { well_written_0 = false; goto _fx_endmatch_1; } FX_RETHROW(&exn_1, _fx_catch_5); _fx_endmatch_1: ; FX_CHECK_EXN(_fx_catch_5); } if (well_written_0) { fx_str_t slit_8 = FX_MAKE_STR(""); fx_copy_str(&slit_8, &v_3); } else if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_9 = FX_MAKE_STR("failed to write .k"); fx_copy_str(&slit_9, &v_3); } else { fx_str_t slit_10 = FX_MAKE_STR("failed to write .k"); fx_copy_str(&slit_10, &v_3); } _fx_make_T3BBS(well_written_0, false, &v_3, &v_2); } bool ok_j_0 = v_2.t0; bool same_kform_0 = v_2.t1; fx_copy_str(&v_2.t2, &status_j_0); ok_1 = ok_1 && ok_j_0; if (!same_kform_0) { if (have_c_0) { FX_CALL(_fx_M3SysFM6removev1S(&c_filename_0, 0), _fx_catch_5); } if (have_o_0) { FX_CALL(_fx_M3SysFM6removev1S(&o_filename_0, 0), _fx_catch_5); } } bool skip_module_0; if (same_kform_0) { bool __fold_result___0 = true; _fx_Li lst_1 = km_deps_0; for (; lst_1; lst_1 = lst_1->tl) { int_ d_0 = lst_1->hd; FX_CHKIDX(FX_CHKIDX1(skip_flags_0, 0, d_0), _fx_catch_2); if (!*FX_PTR_1D(bool, skip_flags_0, d_0)) { __fold_result___0 = false; FX_BREAK(_fx_catch_2); } _fx_catch_2: ; FX_CHECK_BREAK(); FX_CHECK_EXN(_fx_catch_5); } skip_module_0 = __fold_result___0; } else { 
skip_module_0 = false; } if (FX_STR_LENGTH(status_j_0) != 0) { fx_copy_str(&status_j_0, &status_j_1); } else if (skip_module_0) { fx_str_t slit_11 = FX_MAKE_STR("skip"); fx_copy_str(&slit_11, &status_j_1); } else if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_12 = FX_MAKE_STR("process"); fx_copy_str(&slit_12, &status_j_1); } else { fx_str_t slit_13 = FX_MAKE_STR("process"); fx_copy_str(&slit_13, &status_j_1); } fx_str_t slit_14 = FX_MAKE_STR("K "); fx_str_t slit_15 = FX_MAKE_STR(": "); { const fx_str_t strs_3[] = { slit_14, km_cname_0, slit_15, status_j_1 }; FX_CALL(fx_strjoin(0, 0, 0, strs_3, 4, &v_4), _fx_catch_5); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_4, 0), _fx_catch_5); if (skip_module_0) { _fx_LN14K_form__kexp_t lst_2 = km_top_0; for (; lst_2; lst_2 = lst_2->tl) { _fx_N14K_form__kexp_t e_0 = lst_2->hd; if (FX_REC_VARIANT_TAG(e_0) == 32) { _fx_rR17K_form__kdeffun_t kf_0 = e_0->u.KDefFun; _fx_N17Ast__fun_constr_t v_6 = kf_0->data.kf_flags.fun_flag_ctor; if (v_6.tag == 1) { _fx_N14K_form__ktyp_t kf_rt_0 = 0; _fx_T2N14K_form__ktyp_tR10Ast__loc_t v_7 = {0}; _fx_N14K_form__kexp_t v_8 = 0; _fx_R17K_form__kdeffun_t v_9 = {0}; _fx_R17K_form__kdeffun_t* v_10 = &kf_0->data; _fx_R10Ast__loc_t kf_loc_0 = v_10->kf_loc; FX_COPY_PTR(v_10->kf_rt, &kf_rt_0); _fx_R16Ast__fun_flags_t kf_flags_0 = v_10->kf_flags; _fx_R17K_form__kdeffun_t* v_11 = &kf_0->data; _fx_make_T2N14K_form__ktyp_tR10Ast__loc_t(kf_rt_0, &kf_loc_0, &v_7); fx_str_t slit_16 = FX_MAKE_STR(""); FX_CALL(_fx_M6K_formFM9KExpCCodeN14K_form__kexp_t2ST2N14K_form__ktyp_tR10Ast__loc_t(&slit_16, &v_7, &v_8), _fx_catch_3); _fx_R16Ast__fun_flags_t v_12 = { kf_flags_0.fun_flag_pure, true, kf_flags_0.fun_flag_have_keywords, false, kf_flags_0.fun_flag_nothrow, kf_flags_0.fun_flag_really_nothrow, kf_flags_0.fun_flag_private, kf_flags_0.fun_flag_ctor, kf_flags_0.fun_flag_method_of, kf_flags_0.fun_flag_uses_fv, kf_flags_0.fun_flag_recursive, kf_flags_0.fun_flag_instance }; _fx_make_R17K_form__kdeffun_t(&v_11->kf_name, 
&v_11->kf_cname, v_11->kf_params, v_11->kf_rt, v_8, &v_12, &v_11->kf_closure, v_11->kf_scope, &v_11->kf_loc, &v_9); _fx_R17K_form__kdeffun_t* v_13 = &kf_0->data; _fx_free_R17K_form__kdeffun_t(v_13); _fx_copy_R17K_form__kdeffun_t(&v_9, v_13); _fx_catch_3: ; _fx_free_R17K_form__kdeffun_t(&v_9); if (v_8) { _fx_free_N14K_form__kexp_t(&v_8); } _fx_free_T2N14K_form__ktyp_tR10Ast__loc_t(&v_7); if (kf_rt_0) { _fx_free_N14K_form__ktyp_t(&kf_rt_0); } goto _fx_endmatch_2; } } _fx_endmatch_2: ; FX_CHECK_EXN(_fx_catch_4); _fx_catch_4: ; FX_CHECK_EXN(_fx_catch_5); } } FX_CHKIDX(FX_CHKIDX1(skip_flags_0, 0, km_idx_0), _fx_catch_5); *FX_PTR_1D(bool, skip_flags_0, km_idx_0) = skip_module_0; _fx_make_R17K_form__kmodule_t(&km_0->km_name, km_0->km_idx, km_0->km_toposort_idx, &km_0->km_cname, km_0->km_top, km_0->km_deps, skip_module_0, km_0->km_main, &km_0->km_pragmas, &rec_0); _fx_LR17K_form__kmodule_t node_0 = 0; FX_CALL(_fx_cons_LR17K_form__kmodule_t(&rec_0, 0, false, &node_0), _fx_catch_5); FX_LIST_APPEND(kmods_1, lstend_0, node_0); _fx_catch_5: ; _fx_free_R17K_form__kmodule_t(&rec_0); FX_FREE_STR(&v_4); FX_FREE_STR(&status_j_1); FX_FREE_STR(&status_j_0); FX_FREE_STR(&v_3); fx_free_exn(&exn_1); _fx_free_T3BBS(&v_2); fx_free_exn(&exn_0); FX_FREE_STR(&old_kform_0); FX_FREE_STR(&new_kform_0); FX_FREE_STR(&o_filename_0); FX_FREE_STR(&c_filename_0); FX_FREE_STR(&k_filename_0); FX_FREE_STR(&cname_0); FX_FREE_STR(&mname_0); FX_FREE_STR(&ext_0); FX_FREE_STR(&km_cname_0); if (km_top_0) { _fx_free_LN14K_form__kexp_t(&km_top_0); } FX_FREE_LIST_SIMPLE(&km_deps_0); _fx_free_R14Ast__pragmas_t(&km_pragmas_0); FX_CHECK_EXN(_fx_cleanup); } if (!ok_1) { fx_str_t slit_17 = FX_MAKE_STR("failed to write some k-forms"); FX_CALL(_fx_F9make_FailE1S(&slit_17, &v_0), _fx_cleanup); FX_THROW(&v_0, true, _fx_cleanup); } FX_COPY_PTR(kmods_1, fx_result); _fx_cleanup: ; FX_FREE_ARR(&skip_flags_0); FX_FREE_STR(&build_root_dir_0); FX_FREE_STR(&build_dir_0); FX_FREE_STR(&obj_ext_0); if (kmods_1) { 
_fx_free_LR17K_form__kmodule_t(&kmods_1); } fx_free_exn(&v_0); return fx_status; } FX_EXTERN_C int _fx_M8CompilerFM14k_optimize_allT2LR17K_form__kmodule_tB1LR17K_form__kmodule_t( struct _fx_LR17K_form__kmodule_t_data_t* kmods_0, struct _fx_T2LR17K_form__kmodule_tB* fx_result, void* fx_fv) { _fx_LR17K_form__kmodule_t temp_kmods_0 = 0; _fx_LR17K_form__kmodule_t v_0 = 0; _fx_LR17K_form__kmodule_t v_1 = 0; _fx_LR17K_form__kmodule_t v_2 = 0; _fx_LR17K_form__kmodule_t v_3 = 0; _fx_LR17K_form__kmodule_t v_4 = 0; _fx_LR17K_form__kmodule_t v_5 = 0; _fx_LR17K_form__kmodule_t v_6 = 0; _fx_LR17K_form__kmodule_t v_7 = 0; _fx_LR17K_form__kmodule_t v_8 = 0; _fx_LR17K_form__kmodule_t v_9 = 0; _fx_LR17K_form__kmodule_t v_10 = 0; _fx_LR17K_form__kmodule_t v_11 = 0; _fx_LR17K_form__kmodule_t v_12 = 0; _fx_LR17K_form__kmodule_t v_13 = 0; _fx_LR17K_form__kmodule_t v_14 = 0; _fx_LR17K_form__kmodule_t v_15 = 0; _fx_LR17K_form__kmodule_t v_16 = 0; _fx_LR17K_form__kmodule_t v_17 = 0; int fx_status = 0; _fx_free_LE(&_fx_g21Ast__all_compile_errs); _fx_g21Ast__all_compile_errs = 0; int_ niters_0 = _fx_g12Options__opt.optim_iters; FX_COPY_PTR(kmods_0, &temp_kmods_0); fx_str_t slit_0 = FX_MAKE_STR("\tremove unused"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_0, 0), _fx_cleanup); FX_CALL(_fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, true, &v_0, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_0, &temp_kmods_0); fx_str_t slit_1 = FX_MAKE_STR("\tannotate types"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_1, 0), _fx_cleanup); FX_CALL(_fx_M10K_annotateFM14annotate_typesLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_1, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_1, &temp_kmods_0); fx_str_t slit_2 = FX_MAKE_STR("\tcopy generic/inline functions"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_2, 0), _fx_cleanup); 
FX_CALL(_fx_M13K_copy_n_skipFM9copy_someLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_2, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_2, &temp_kmods_0); fx_str_t slit_3 = FX_MAKE_STR("\tremove unused by main"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_3, 0), _fx_cleanup); FX_CALL(_fx_M15K_remove_unusedFM21remove_unused_by_mainLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_3, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_3, &temp_kmods_0); fx_str_t slit_4 = FX_MAKE_STR("\tmangle & dump intermediate K-forms"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_4, 0), _fx_cleanup); FX_CALL(_fx_M8K_mangleFM10mangle_allLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, false, &v_4, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_4, &temp_kmods_0); FX_CALL(_fx_M8K_mangleFM13mangle_localsLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_5, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_5, &temp_kmods_0); FX_CALL(_fx_M8CompilerFM11k_skip_someLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_6, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_6, &temp_kmods_0); fx_str_t slit_5 = FX_MAKE_STR("\tdemangle"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_5, 0), _fx_cleanup); FX_CALL(_fx_M8K_mangleFM12demangle_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_7, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_7, &temp_kmods_0); int_ v_18 = niters_0 + 1; int_ n_0 = FX_LOOP_COUNT(1, v_18, 1); for (int_ i_0 = 0; i_0 < n_0; i_0++) { fx_str_t v_19 = {0}; fx_str_t v_20 = {0}; _fx_LR17K_form__kmodule_t v_21 = 0; _fx_LR17K_form__kmodule_t v_22 = 0; _fx_LR17K_form__kmodule_t v_23 = 0; _fx_LR17K_form__kmodule_t v_24 = 0; _fx_LR17K_form__kmodule_t v_25 = 0; _fx_LR17K_form__kmodule_t v_26 = 0; _fx_LR17K_form__kmodule_t v_27 = 0; 
_fx_LR17K_form__kmodule_t v_28 = 0; _fx_LR17K_form__kmodule_t v_29 = 0; _fx_LR17K_form__kmodule_t v_30 = 0; int_ i_1 = 1 + i_0 * 1; FX_CALL(_fx_F6stringS1i(i_1, &v_19, 0), _fx_catch_0); fx_str_t slit_6 = FX_MAKE_STR("Optimization pass #"); fx_str_t slit_7 = FX_MAKE_STR(":"); { const fx_str_t strs_0[] = { slit_6, v_19, slit_7 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 3, &v_20), _fx_catch_0); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_20, 0), _fx_catch_0); if (i_1 <= 2) { fx_str_t slit_8 = FX_MAKE_STR("\tsimple lambda lifting"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_8, 0), _fx_catch_0); FX_CALL(_fx_M13K_lift_simpleFM4liftLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_21, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_21, &temp_kmods_0); } fx_str_t slit_9 = FX_MAKE_STR("\ttailrec"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_9, 0), _fx_catch_0); FX_CALL(_fx_M9K_tailrecFM17tailrec2loops_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_22, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_22, &temp_kmods_0); fx_str_t slit_10 = FX_MAKE_STR("\tloop inv"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_10, 0), _fx_catch_0); FX_CALL(_fx_M10K_loop_invFM18move_loop_invs_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_23, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_23, &temp_kmods_0); fx_str_t slit_11 = FX_MAKE_STR("\tgemm implantation"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_11, 0), _fx_catch_0); FX_CALL(_fx_M13K_optim_matopFM13optimize_gemmLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_24, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_24, &temp_kmods_0); fx_str_t slit_12 = FX_MAKE_STR("\tinline"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_12, 0), _fx_catch_0); if (_fx_g12Options__opt.inline_thresh > 0) { FX_CALL(_fx_M8K_inlineFM11inline_someLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, 
&v_25, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_25, &temp_kmods_0); } fx_str_t slit_13 = FX_MAKE_STR("\tflatten"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_13, 0), _fx_catch_0); FX_CALL(_fx_M9K_flattenFM11flatten_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_26, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_26, &temp_kmods_0); fx_str_t slit_14 = FX_MAKE_STR("\tfuse loops"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_14, 0), _fx_catch_0); FX_CALL(_fx_M12K_fuse_loopsFM14fuse_loops_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_27, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_27, &temp_kmods_0); fx_str_t slit_15 = FX_MAKE_STR("\tfast idx"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_15, 0), _fx_catch_0); FX_CALL(_fx_M10K_fast_idxFM23optimize_idx_checks_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_28, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_28, &temp_kmods_0); fx_str_t slit_16 = FX_MAKE_STR("\tconst folding"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_16, 0), _fx_catch_0); FX_CALL(_fx_M15K_cfold_dealiasFM13cfold_dealiasLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_29, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_29, &temp_kmods_0); fx_str_t slit_17 = FX_MAKE_STR("\tremove unused"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_17, 0), _fx_catch_0); FX_CALL( _fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, false, &v_30, 0), _fx_catch_0); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_30, &temp_kmods_0); _fx_catch_0: ; if (v_30) { _fx_free_LR17K_form__kmodule_t(&v_30); } if (v_29) { _fx_free_LR17K_form__kmodule_t(&v_29); } if (v_28) { _fx_free_LR17K_form__kmodule_t(&v_28); } if (v_27) { _fx_free_LR17K_form__kmodule_t(&v_27); } if (v_26) { _fx_free_LR17K_form__kmodule_t(&v_26); 
} if (v_25) { _fx_free_LR17K_form__kmodule_t(&v_25); } if (v_24) { _fx_free_LR17K_form__kmodule_t(&v_24); } if (v_23) { _fx_free_LR17K_form__kmodule_t(&v_23); } if (v_22) { _fx_free_LR17K_form__kmodule_t(&v_22); } if (v_21) { _fx_free_LR17K_form__kmodule_t(&v_21); } FX_FREE_STR(&v_20); FX_FREE_STR(&v_19); FX_CHECK_EXN(_fx_cleanup); } fx_str_t slit_18 = FX_MAKE_STR("Finalizing K-form:"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_18, 0), _fx_cleanup); fx_str_t slit_19 = FX_MAKE_STR("\tmaking wrappers for nothrow functions"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_19, 0), _fx_cleanup); FX_CALL( _fx_M18K_nothrow_wrappersFM25make_wrappers_for_nothrowLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_8, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_8, &temp_kmods_0); fx_str_t slit_20 = FX_MAKE_STR("\tmutable freevars referencing"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_20, 0), _fx_cleanup); FX_CALL(_fx_M10K_freevarsFM21mutable_freevars2refsLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_9, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_9, &temp_kmods_0); fx_str_t slit_21 = FX_MAKE_STR("\tdeclosuring"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_21, 0), _fx_cleanup); FX_CALL(_fx_M11K_declosureFM13declosure_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_10, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_10, &temp_kmods_0); fx_str_t slit_22 = FX_MAKE_STR("\tlambda lifting"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_22, 0), _fx_cleanup); FX_CALL(_fx_M6K_liftFM8lift_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_11, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_11, &temp_kmods_0); fx_str_t slit_23 = FX_MAKE_STR("\tflatten"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_23, 0), _fx_cleanup); FX_CALL(_fx_M9K_flattenFM11flatten_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_12, 
0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_12, &temp_kmods_0); fx_str_t slit_24 = FX_MAKE_STR("\tremove unused"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_24, 0), _fx_cleanup); FX_CALL(_fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, false, &v_13, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_13, &temp_kmods_0); fx_str_t slit_25 = FX_MAKE_STR("\tmangle"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_25, 0), _fx_cleanup); FX_CALL(_fx_M8K_mangleFM10mangle_allLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, true, &v_14, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_14, &temp_kmods_0); fx_str_t slit_26 = FX_MAKE_STR("\tremove unused"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_26, 0), _fx_cleanup); FX_CALL(_fx_M15K_remove_unusedFM13remove_unusedLR17K_form__kmodule_t2LR17K_form__kmodule_tB(temp_kmods_0, false, &v_15, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_15, &temp_kmods_0); fx_str_t slit_27 = FX_MAKE_STR("\tmark recursive"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_27, 0), _fx_cleanup); FX_CALL(_fx_M8K_inlineFM24find_recursive_funcs_allLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_16, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_16, &temp_kmods_0); fx_str_t slit_28 = FX_MAKE_STR("\tannotate types"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_28, 0), _fx_cleanup); FX_CALL(_fx_M10K_annotateFM14annotate_typesLR17K_form__kmodule_t1LR17K_form__kmodule_t(temp_kmods_0, &v_17, 0), _fx_cleanup); _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); FX_COPY_PTR(v_17, &temp_kmods_0); _fx_make_T2LR17K_form__kmodule_tB(temp_kmods_0, _fx_g21Ast__all_compile_errs == 0, fx_result); _fx_cleanup: ; if (temp_kmods_0) { _fx_free_LR17K_form__kmodule_t(&temp_kmods_0); } if (v_0) { _fx_free_LR17K_form__kmodule_t(&v_0); } if (v_1) { 
_fx_free_LR17K_form__kmodule_t(&v_1); } if (v_2) { _fx_free_LR17K_form__kmodule_t(&v_2); } if (v_3) { _fx_free_LR17K_form__kmodule_t(&v_3); } if (v_4) { _fx_free_LR17K_form__kmodule_t(&v_4); } if (v_5) { _fx_free_LR17K_form__kmodule_t(&v_5); } if (v_6) { _fx_free_LR17K_form__kmodule_t(&v_6); } if (v_7) { _fx_free_LR17K_form__kmodule_t(&v_7); } if (v_8) { _fx_free_LR17K_form__kmodule_t(&v_8); } if (v_9) { _fx_free_LR17K_form__kmodule_t(&v_9); } if (v_10) { _fx_free_LR17K_form__kmodule_t(&v_10); } if (v_11) { _fx_free_LR17K_form__kmodule_t(&v_11); } if (v_12) { _fx_free_LR17K_form__kmodule_t(&v_12); } if (v_13) { _fx_free_LR17K_form__kmodule_t(&v_13); } if (v_14) { _fx_free_LR17K_form__kmodule_t(&v_14); } if (v_15) { _fx_free_LR17K_form__kmodule_t(&v_15); } if (v_16) { _fx_free_LR17K_form__kmodule_t(&v_16); } if (v_17) { _fx_free_LR17K_form__kmodule_t(&v_17); } return fx_status; } FX_EXTERN_C int _fx_M8CompilerFM6run_ccB2LR17C_form__cmodule_tS( struct _fx_LR17C_form__cmodule_t_data_t* cmods_0, fx_str_t* ficus_root_0, bool* fx_result, void* fx_fv) { fx_str_t osinfo_0 = {0}; fx_str_t runtime_include_path_0 = {0}; fx_str_t runtime_lib_path_0 = {0}; fx_str_t runtime_impl_0 = {0}; fx_str_t build_root_dir_0 = {0}; fx_str_t build_dir_0 = {0}; _fx_Ta9S v_0 = {0}; fx_str_t opt_flags_0 = {0}; fx_str_t v_1 = {0}; fx_str_t v_2 = {0}; fx_str_t v_3 = {0}; fx_str_t v_4 = {0}; fx_str_t cflags_0 = {0}; _fx_Ta4S v_5 = {0}; _fx_Ta2S v_6 = {0}; fx_str_t omp_cflags_0 = {0}; fx_str_t omp_lib_0 = {0}; _fx_Ta3S v_7 = {0}; fx_str_t v_8 = {0}; fx_str_t v_9 = {0}; fx_str_t libpath_0 = {0}; fx_str_t cflags_1 = {0}; fx_str_t clibs_0 = {0}; fx_str_t omp_flags_0 = {0}; fx_str_t os_0 = {0}; fx_str_t libpath_1 = {0}; fx_str_t cflags_2 = {0}; fx_str_t clibs_1 = {0}; fx_str_t ggdb_opt_0 = {0}; fx_str_t v_10 = {0}; fx_str_t v_11 = {0}; fx_str_t v_12 = {0}; fx_str_t v_13 = {0}; fx_str_t v_14 = {0}; fx_str_t cflags_3 = {0}; fx_str_t v_15 = {0}; fx_str_t v_16 = {0}; fx_str_t v_17 = {0}; fx_str_t v_18 = 
{0}; fx_str_t clibs_2 = {0}; fx_str_t c_comp_0 = {0}; fx_str_t cpp_comp_0 = {0}; fx_str_t obj_ext_0 = {0}; fx_str_t obj_opt_0 = {0}; fx_str_t appname_opt_0 = {0}; fx_str_t link_lib_opt_0 = {0}; fx_str_t cflags_4 = {0}; fx_str_t clibs_3 = {0}; fx_str_t custom_cflags_0 = {0}; fx_str_t v_19 = {0}; fx_str_t custom_cflags_1 = {0}; fx_str_t v_20 = {0}; fx_str_t cflags_5 = {0}; fx_str_t v_21 = {0}; fx_str_t v_22 = {0}; fx_str_t v_23 = {0}; _fx_R14Ast__pragmas_t v_24 = {0}; _fx_R17C_form__cmodule_t runtime_pseudo_cmod_0 = {0}; _fx_LR17C_form__cmodule_t cmods_1 = 0; fx_arr_t v_25 = {0}; fx_arr_t results_0 = {0}; _fx_T5BBLSBLS __fold_result___0 = {0}; _fx_T5BBLSBLS v_26 = {0}; _fx_LS all_clibs_0 = 0; _fx_LS objs_0 = 0; fx_str_t v_27 = {0}; fx_str_t v_28 = {0}; fx_str_t v_29 = {0}; fx_str_t v_30 = {0}; fx_str_t custom_clibs_0 = {0}; fx_str_t v_31 = {0}; fx_str_t custom_clibs_1 = {0}; fx_str_t v_32 = {0}; fx_str_t custom_clibs_2 = {0}; _fx_LS v_33 = 0; _fx_LS v_34 = 0; fx_str_t v_35 = {0}; fx_str_t clibs_4 = {0}; fx_str_t v_36 = {0}; fx_str_t v_37 = {0}; fx_str_t v_38 = {0}; fx_str_t v_39 = {0}; fx_str_t cmd_0 = {0}; fx_str_t v_40 = {0}; fx_str_t cmd_1 = {0}; int fx_status = 0; FX_CALL(_fx_g11Sys__osname.fp(true, &osinfo_0, _fx_g11Sys__osname.fcv), _fx_cleanup); int_ opt_level_0 = _fx_g12Options__opt.optimize_level; bool enable_openmp_0 = _fx_g12Options__opt.enable_openmp; fx_str_t slit_0 = FX_MAKE_STR("runtime"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(ficus_root_0, &slit_0, &runtime_include_path_0, 0), _fx_cleanup); fx_str_t slit_1 = FX_MAKE_STR("runtime/lib"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(ficus_root_0, &slit_1, &runtime_lib_path_0, 0), _fx_cleanup); fx_str_t slit_2 = FX_MAKE_STR("runtime/ficus/impl/libficus"); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(ficus_root_0, &slit_2, &runtime_impl_0, 0), _fx_cleanup); fx_copy_str(&_fx_g12Options__opt.build_rootdir, &build_root_dir_0); bool ok_0; FX_CALL(_fx_M3SysFM5mkdirB2Si(&build_root_dir_0, 493, &ok_0, 0), _fx_cleanup); 
fx_copy_str(&_fx_g12Options__opt.build_dir, &build_dir_0); bool ok_1; if (ok_0) { FX_CALL(_fx_M3SysFM5mkdirB2Si(&build_dir_0, 493, &ok_1, 0), _fx_cleanup); } else { ok_1 = false; } if (_fx_g10Sys__win32) { if (opt_level_0 == 0) { fx_str_t slit_3 = FX_MAKE_STR(" /MTd /Od /GF"); fx_copy_str(&slit_3, &opt_flags_0); } else { if (opt_level_0 == 1) { fx_str_t slit_4 = FX_MAKE_STR("/O1"); fx_copy_str(&slit_4, &v_1); } else { fx_str_t slit_5 = FX_MAKE_STR("/O2"); fx_copy_str(&slit_5, &v_1); } fx_str_t slit_6 = FX_MAKE_STR(" /MT "); { const fx_str_t strs_0[] = { slit_6, v_1 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 2, &opt_flags_0), _fx_cleanup); } } FX_CALL(_fx_M8CompilerFM6stringS1S(&opt_flags_0, &v_2, 0), _fx_cleanup); fx_str_t slit_7 = FX_MAKE_STR(""); FX_CALL(_fx_M8CompilerFM6stringS1S(&slit_7, &v_3, 0), _fx_cleanup); FX_CALL(_fx_M8CompilerFM6stringS1S(&runtime_include_path_0, &v_4, 0), _fx_cleanup); fx_str_t slit_8 = FX_MAKE_STR("/nologo"); fx_str_t slit_9 = FX_MAKE_STR(" /I"); { const fx_str_t strs_1[] = { slit_8, v_2, v_3, slit_9, v_4 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 5, &cflags_0), _fx_cleanup); } fx_str_t slit_10 = FX_MAKE_STR("win"); fx_str_t slit_11 = FX_MAKE_STR("cl"); fx_str_t slit_12 = FX_MAKE_STR("cl"); fx_str_t slit_13 = FX_MAKE_STR(".obj"); fx_str_t slit_14 = FX_MAKE_STR("/c /Fo"); fx_str_t slit_15 = FX_MAKE_STR("/Fe"); fx_str_t slit_16 = FX_MAKE_STR(""); fx_str_t slit_17 = FX_MAKE_STR("/F10485760 kernel32.lib advapi32.lib"); _fx_make_Ta9S(&slit_10, &slit_11, &slit_12, &slit_13, &slit_14, &slit_15, &slit_16, &cflags_0, &slit_17, &v_0); } else { bool v_41; fx_str_t slit_18 = FX_MAKE_STR("Darwin"); FX_CALL(_fx_M8CompilerFM8containsB2SS(&osinfo_0, &slit_18, &v_41, 0), _fx_cleanup); if (v_41) { if (enable_openmp_0) { fx_str_t slit_19 = FX_MAKE_STR("-Xclang -fopenmp"); fx_str_t slit_20 = FX_MAKE_STR(" -lomp"); _fx_make_Ta2S(&slit_19, &slit_20, &v_6); } else { fx_str_t slit_21 = FX_MAKE_STR(""); fx_str_t slit_22 = FX_MAKE_STR(""); _fx_make_Ta2S(&slit_21, 
&slit_22, &v_6); } fx_copy_str(&v_6.t0, &omp_cflags_0); fx_copy_str(&v_6.t1, &omp_lib_0); bool v_42; fx_str_t slit_23 = FX_MAKE_STR("x86_64"); FX_CALL(_fx_M8CompilerFM8containsB2SS(&osinfo_0, &slit_23, &v_42, 0), _fx_cleanup); if (v_42) { fx_str_t slit_24 = FX_MAKE_STR(" "); { const fx_str_t strs_2[] = { slit_24, omp_cflags_0, omp_lib_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_2, 3, &v_8), _fx_cleanup); } fx_str_t slit_25 = FX_MAKE_STR("macos_x64"); _fx_make_Ta3S(&slit_25, &omp_cflags_0, &v_8, &v_7); } else { bool v_43; fx_str_t slit_26 = FX_MAKE_STR("arm64"); FX_CALL(_fx_M8CompilerFM8containsB2SS(&osinfo_0, &slit_26, &v_43, 0), _fx_cleanup); if (v_43) { fx_str_t slit_27 = FX_MAKE_STR(" "); { const fx_str_t strs_3[] = { slit_27, omp_cflags_0, omp_lib_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_3, 3, &v_9), _fx_cleanup); } fx_str_t slit_28 = FX_MAKE_STR("macos_arm64"); _fx_make_Ta3S(&slit_28, &omp_cflags_0, &v_9, &v_7); } else { fx_str_t slit_29 = FX_MAKE_STR(""); fx_str_t slit_30 = FX_MAKE_STR(""); fx_str_t slit_31 = FX_MAKE_STR(""); _fx_make_Ta3S(&slit_29, &slit_30, &slit_31, &v_7); } } fx_copy_str(&v_7.t0, &libpath_0); fx_copy_str(&v_7.t1, &cflags_1); fx_copy_str(&v_7.t2, &clibs_0); fx_str_t slit_32 = FX_MAKE_STR("macos"); _fx_make_Ta4S(&slit_32, &libpath_0, &cflags_1, &clibs_0, &v_5); } else { bool v_44; fx_str_t slit_33 = FX_MAKE_STR("Linux"); FX_CALL(_fx_M8CompilerFM8containsB2SS(&osinfo_0, &slit_33, &v_44, 0), _fx_cleanup); if (v_44) { if (enable_openmp_0) { fx_str_t slit_34 = FX_MAKE_STR(" -fopenmp"); fx_copy_str(&slit_34, &omp_flags_0); } else { fx_str_t slit_35 = FX_MAKE_STR(""); fx_copy_str(&slit_35, &omp_flags_0); } fx_str_t slit_36 = FX_MAKE_STR("linux"); fx_str_t slit_37 = FX_MAKE_STR(""); _fx_make_Ta4S(&slit_36, &slit_37, &omp_flags_0, &omp_flags_0, &v_5); } else if (_fx_g9Sys__unix) { fx_str_t slit_38 = FX_MAKE_STR("unix"); fx_str_t slit_39 = FX_MAKE_STR(""); fx_str_t slit_40 = FX_MAKE_STR(""); fx_str_t slit_41 = FX_MAKE_STR(""); _fx_make_Ta4S(&slit_38, 
&slit_39, &slit_40, &slit_41, &v_5); } else { fx_str_t slit_42 = FX_MAKE_STR(""); fx_str_t slit_43 = FX_MAKE_STR(""); fx_str_t slit_44 = FX_MAKE_STR(""); fx_str_t slit_45 = FX_MAKE_STR(""); _fx_make_Ta4S(&slit_42, &slit_43, &slit_44, &slit_45, &v_5); } } fx_copy_str(&v_5.t0, &os_0); fx_copy_str(&v_5.t1, &libpath_1); fx_copy_str(&v_5.t2, &cflags_2); fx_copy_str(&v_5.t3, &clibs_1); if (opt_level_0 == 0) { fx_str_t slit_46 = FX_MAKE_STR(" -ggdb"); fx_copy_str(&slit_46, &ggdb_opt_0); } else { fx_str_t slit_47 = FX_MAKE_STR(""); fx_copy_str(&slit_47, &ggdb_opt_0); } FX_CALL(_fx_F6stringS1i(opt_level_0, &v_10, 0), _fx_cleanup); FX_CALL(_fx_M8CompilerFM6stringS1S(&ggdb_opt_0, &v_11, 0), _fx_cleanup); FX_CALL(_fx_M8CompilerFM6stringS1S(&cflags_2, &v_12, 0), _fx_cleanup); fx_str_t slit_48 = FX_MAKE_STR("-Wno-unknown-warning-option -Wno-dangling-else -Wno-static-in-inline"); FX_CALL(_fx_M8CompilerFM6stringS1S(&slit_48, &v_13, 0), _fx_cleanup); FX_CALL(_fx_M8CompilerFM6stringS1S(&runtime_include_path_0, &v_14, 0), _fx_cleanup); fx_str_t slit_49 = FX_MAKE_STR("-O"); fx_str_t slit_50 = FX_MAKE_STR(" "); fx_str_t slit_51 = FX_MAKE_STR(" "); fx_str_t slit_52 = FX_MAKE_STR(" -I"); { const fx_str_t strs_4[] = { slit_49, v_10, v_11, slit_50, v_12, slit_51, v_13, slit_52, v_14 }; FX_CALL(fx_strjoin(0, 0, 0, strs_4, 9, &cflags_3), _fx_cleanup); } if (FX_STR_LENGTH(libpath_1) != 0) { FX_CALL(_fx_M8CompilerFM6stringS1S(&runtime_lib_path_0, &v_16, 0), _fx_cleanup); FX_CALL(_fx_M8CompilerFM6stringS1S(&libpath_1, &v_17, 0), _fx_cleanup); fx_str_t slit_53 = FX_MAKE_STR("-L"); fx_str_t slit_54 = FX_MAKE_STR("/"); fx_str_t slit_55 = FX_MAKE_STR(" "); { const fx_str_t strs_5[] = { slit_53, v_16, slit_54, v_17, slit_55 }; FX_CALL(fx_strjoin(0, 0, 0, strs_5, 5, &v_15), _fx_cleanup); } } else { fx_str_t slit_56 = FX_MAKE_STR(""); fx_copy_str(&slit_56, &v_15); } FX_CALL(_fx_M8CompilerFM6stringS1S(&clibs_1, &v_18, 0), _fx_cleanup); fx_str_t slit_57 = FX_MAKE_STR("-lm "); { const fx_str_t strs_6[] = 
{ v_15, slit_57, v_18 }; FX_CALL(fx_strjoin(0, 0, 0, strs_6, 3, &clibs_2), _fx_cleanup); } fx_str_t slit_58 = FX_MAKE_STR("cc"); fx_str_t slit_59 = FX_MAKE_STR("c++ -std=c++11"); fx_str_t slit_60 = FX_MAKE_STR(".o"); fx_str_t slit_61 = FX_MAKE_STR("-c -o "); fx_str_t slit_62 = FX_MAKE_STR("-o "); fx_str_t slit_63 = FX_MAKE_STR("-l"); _fx_make_Ta9S(&os_0, &slit_58, &slit_59, &slit_60, &slit_61, &slit_62, &slit_63, &cflags_3, &clibs_2, &v_0); } fx_copy_str(&v_0.t1, &c_comp_0); fx_copy_str(&v_0.t2, &cpp_comp_0); fx_copy_str(&v_0.t3, &obj_ext_0); fx_copy_str(&v_0.t4, &obj_opt_0); fx_copy_str(&v_0.t5, &appname_opt_0); fx_copy_str(&v_0.t6, &link_lib_opt_0); fx_copy_str(&v_0.t7, &cflags_4); fx_copy_str(&v_0.t8, &clibs_3); fx_str_t slit_64 = FX_MAKE_STR("FICUS_CFLAGS"); FX_CALL(_fx_M3SysFM6getenvS1S(&slit_64, &custom_cflags_0, 0), _fx_cleanup); fx_copy_str(&_fx_g12Options__opt.cflags, &v_19); if (FX_STR_LENGTH(v_19) == 0) { fx_copy_str(&custom_cflags_0, &custom_cflags_1); } else { fx_copy_str(&_fx_g12Options__opt.cflags, &v_20); fx_str_t slit_65 = FX_MAKE_STR(" "); { const fx_str_t strs_7[] = { v_20, slit_65, custom_cflags_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_7, 3, &custom_cflags_1), _fx_cleanup); } } fx_str_t slit_66 = FX_MAKE_STR(" "); { const fx_str_t strs_8[] = { cflags_4, slit_66, custom_cflags_1 }; FX_CALL(fx_strjoin(0, 0, 0, strs_8, 3, &cflags_5), _fx_cleanup); } FX_CALL(_fx_M8CompilerFM6stringS1S(&cflags_5, &v_21, 0), _fx_cleanup); fx_str_t slit_67 = FX_MAKE_STR("Compiling .c/.cpp files with cflags="); { const fx_str_t strs_9[] = { slit_67, v_21 }; FX_CALL(fx_strjoin(0, 0, 0, strs_9, 2, &v_22), _fx_cleanup); } FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g17Compiler__MsgBlue, &v_22, &v_23, 0), _fx_cleanup); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_23, 0), _fx_cleanup); _fx_make_R14Ast__pragmas_t(false, 0, &v_24); _fx_make_R17C_form__cmodule_t(&_fx_g9Ast__noid, &runtime_impl_0, 0, false, true, false, &v_24, &runtime_pseudo_cmod_0); 
FX_CALL(_fx_cons_LR17C_form__cmodule_t(&runtime_pseudo_cmod_0, cmods_0, true, &cmods_1), _fx_cleanup); FX_CALL(_fx_M8CompilerFM5arrayA1R17C_form__cmodule_t1LR17C_form__cmodule_t(cmods_1, &v_25, 0), _fx_cleanup); int par_status_0 = 0; int_ ni_0 = FX_ARR_SIZE(v_25, 0); _fx_R17C_form__cmodule_t* ptr_v_0 = FX_PTR_1D(_fx_R17C_form__cmodule_t, v_25, 0); { const int_ shape_0[] = { ni_0 }; FX_CALL( fx_make_arr(1, shape_0, sizeof(_fx_T5BBLSBS), (fx_free_t)_fx_free_T5BBLSBS, (fx_copy_t)_fx_copy_T5BBLSBS, 0, &results_0), _fx_cleanup); } #pragma omp parallel for for (int_ i_0 = 0; i_0 < ni_0; i_0++) { int fx_status = 0; _fx_R17C_form__cmodule_t __pat___0 = {0}; _fx_LT2SR10Ast__loc_t pragma_clibs_0 = 0; _fx_LN15C_form__cstmt_t cmod_ccode_0 = 0; fx_str_t cmod_cname_0 = {0}; fx_str_t output_fname_0 = {0}; _fx_Ta2S v_45 = {0}; fx_str_t comp_0 = {0}; fx_str_t ext_0 = {0}; fx_str_t output_fname_1 = {0}; fx_str_t output_fname_c_0 = {0}; _fx_T3BBS v_46 = {0}; fx_str_t str_new_0 = {0}; fx_str_t str_old_0 = {0}; fx_exn_t exn_0 = {0}; fx_exn_t exn_1 = {0}; fx_str_t v_47 = {0}; fx_str_t v_48 = {0}; fx_str_t v_49 = {0}; fx_str_t status_j_0 = {0}; fx_str_t c_filename_0 = {0}; fx_str_t obj_filename_0 = {0}; _fx_T3BBS v_50 = {0}; fx_str_t v_51 = {0}; fx_str_t v_52 = {0}; fx_str_t v_53 = {0}; fx_str_t v_54 = {0}; fx_str_t v_55 = {0}; fx_str_t cmd_2 = {0}; fx_str_t status_0 = {0}; fx_str_t status_j_1 = {0}; fx_str_t v_56 = {0}; fx_str_t v_57 = {0}; fx_str_t v_58 = {0}; _fx_LS v_59 = 0; _fx_LS clibs_5 = 0; _fx_T5BBLSBS tup_0 = {0}; _fx_copy_R17C_form__cmodule_t(ptr_v_0 + i_0, &__pat___0); _fx_T5BBLSBS* dstptr_0 = FX_PTR_1D(_fx_T5BBLSBS, results_0, i_0); _fx_R14Ast__pragmas_t* i_1 = &__pat___0.cmod_pragmas; FX_COPY_PTR(i_1->pragma_clibs, &pragma_clibs_0); FX_COPY_PTR(__pat___0.cmod_ccode, &cmod_ccode_0); fx_copy_str(&__pat___0.cmod_cname, &cmod_cname_0); FX_CALL(_fx_M8FilenameFM8basenameS1S(&cmod_cname_0, &output_fname_0, 0), _fx_catch_3); bool is_runtime_0 = _fx_F6__eq__B2SS(&cmod_cname_0, 
&runtime_impl_0, 0); bool is_cpp_0; if (!is_runtime_0) { if (_fx_g12Options__opt.compile_by_cpp) { is_cpp_0 = true; } else { is_cpp_0 = i_1->pragma_cpp; } } else { is_cpp_0 = false; } if (is_cpp_0) { fx_str_t slit_68 = FX_MAKE_STR(".cpp"); _fx_make_Ta2S(&cpp_comp_0, &slit_68, &v_45); } else { fx_str_t slit_69 = FX_MAKE_STR(".c"); _fx_make_Ta2S(&c_comp_0, &slit_69, &v_45); } fx_copy_str(&v_45.t0, &comp_0); fx_copy_str(&v_45.t1, &ext_0); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&build_dir_0, &output_fname_0, &output_fname_1, 0), _fx_catch_3); { const fx_str_t strs_10[] = { output_fname_1, ext_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_10, 2, &output_fname_c_0), _fx_catch_3); } if (__pat___0.cmod_skip) { fx_str_t slit_70 = FX_MAKE_STR("skipped"); _fx_make_T3BBS(true, false, &slit_70, &v_46); } else if (is_runtime_0) { fx_str_t slit_71 = FX_MAKE_STR(""); _fx_make_T3BBS(true, true, &slit_71, &v_46); } else { FX_CALL(_fx_M4C_ppFM20pprint_top_to_stringS1LN15C_form__cstmt_t(cmod_ccode_0, &str_new_0, 0), _fx_catch_3); if (_fx_g12Options__opt.force_rebuild) { fx_str_t slit_72 = FX_MAKE_STR(""); fx_copy_str(&slit_72, &str_old_0); } else { FX_CALL(_fx_M4FileFM9read_utf8S1S(&output_fname_c_0, &str_old_0, 0), _fx_catch_0); _fx_catch_0: ; if (fx_status < 0) { fx_exn_get_and_reset(fx_status, &exn_0); fx_status = 0; FX_FREE_STR(&str_old_0); int tag_0 = exn_0.tag; bool res_0; if (tag_0 == FX_EXN_IOError) { res_0 = true; } else if (tag_0 == FX_EXN_FileOpenError) { res_0 = true; } else { res_0 = false; } FX_CHECK_EXN(_fx_catch_3); if (res_0) { fx_str_t slit_73 = FX_MAKE_STR(""); fx_copy_str(&slit_73, &str_old_0); goto _fx_endmatch_0; } FX_RETHROW(&exn_0, _fx_catch_3); _fx_endmatch_0: ; FX_CHECK_EXN(_fx_catch_3); } } bool v_60 = _fx_F6__eq__B2SS(&str_new_0, &str_old_0, 0); if (v_60) { fx_str_t slit_74 = FX_MAKE_STR("skipped"); _fx_make_T3BBS(ok_1, false, &slit_74, &v_46); } else { bool well_written_0; FX_CALL(_fx_M4FileFM10write_utf8v2SS(&output_fname_c_0, &str_new_0, 0), _fx_catch_1); 
well_written_0 = true; _fx_catch_1: ; if (fx_status < 0) { fx_exn_get_and_reset(fx_status, &exn_1); fx_status = 0; int tag_1 = exn_1.tag; bool res_1; if (tag_1 == FX_EXN_IOError) { res_1 = true; } else if (tag_1 == FX_EXN_FileOpenError) { res_1 = true; } else { res_1 = false; } FX_CHECK_EXN(_fx_catch_3); if (res_1) { well_written_0 = false; goto _fx_endmatch_1; } FX_RETHROW(&exn_1, _fx_catch_3); _fx_endmatch_1: ; FX_CHECK_EXN(_fx_catch_3); } if (well_written_0) { fx_str_t slit_75 = FX_MAKE_STR(""); fx_copy_str(&slit_75, &v_47); } else { FX_CALL(_fx_M8CompilerFM6stringS1S(&output_fname_c_0, &v_48, 0), _fx_catch_3); fx_str_t slit_76 = FX_MAKE_STR("failed to write "); { const fx_str_t strs_11[] = { slit_76, v_48 }; FX_CALL(fx_strjoin(0, 0, 0, strs_11, 2, &v_49), _fx_catch_3); } FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g16Compiler__MsgRed, &v_49, &v_47, 0), _fx_catch_3); } _fx_make_T3BBS(well_written_0, well_written_0, &v_47, &v_46); } } bool ok_j_0 = v_46.t0; bool reprocess_0 = v_46.t1; fx_copy_str(&v_46.t2, &status_j_0); if (is_runtime_0) { fx_str_t slit_77 = FX_MAKE_STR(".c"); { const fx_str_t strs_12[] = { runtime_impl_0, slit_77 }; FX_CALL(fx_strjoin(0, 0, 0, strs_12, 2, &c_filename_0), _fx_catch_3); } } else { fx_copy_str(&output_fname_c_0, &c_filename_0); } { const fx_str_t strs_13[] = { output_fname_1, obj_ext_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_13, 2, &obj_filename_0), _fx_catch_3); } bool v_61; if (ok_j_0) { if (reprocess_0) { v_61 = true; } else { bool v_62; FX_CALL(_fx_M8FilenameFM6existsB1S(&obj_filename_0, &v_62, 0), _fx_catch_3); v_61 = !v_62; } } else { v_61 = false; } if (v_61) { FX_CALL(_fx_M8CompilerFM6stringS1S(&comp_0, &v_51, 0), _fx_catch_3); FX_CALL(_fx_M8CompilerFM6stringS1S(&cflags_5, &v_52, 0), _fx_catch_3); FX_CALL(_fx_M8CompilerFM6stringS1S(&obj_opt_0, &v_53, 0), _fx_catch_3); FX_CALL(_fx_M8CompilerFM6stringS1S(&obj_filename_0, &v_54, 0), _fx_catch_3); FX_CALL(_fx_M8CompilerFM6stringS1S(&c_filename_0, &v_55, 0), 
_fx_catch_3); fx_str_t slit_78 = FX_MAKE_STR(" "); fx_str_t slit_79 = FX_MAKE_STR(" "); fx_str_t slit_80 = FX_MAKE_STR(" "); { const fx_str_t strs_14[] = { v_51, slit_78, v_52, slit_79, v_53, v_54, slit_80, v_55 }; FX_CALL(fx_strjoin(0, 0, 0, strs_14, 8, &cmd_2), _fx_catch_3); } int_ v_63; FX_CALL(_fx_M3SysFM7commandi1S(&cmd_2, &v_63, 0), _fx_catch_3); bool result_0 = v_63 == 0; if (result_0) { fx_str_t slit_81 = FX_MAKE_STR("ok"); FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g18Compiler__MsgGreen, &slit_81, &status_0, 0), _fx_catch_3); } else { fx_str_t slit_82 = FX_MAKE_STR("fail"); FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g16Compiler__MsgRed, &slit_82, &status_0, 0), _fx_catch_3); } _fx_make_T3BBS(result_0, true, &status_0, &v_50); } else { _fx_make_T3BBS(ok_j_0, false, &status_j_0, &v_50); } bool ok_j_1 = v_50.t0; bool recompiled_0 = v_50.t1; fx_copy_str(&v_50.t2, &status_j_1); FX_CALL(_fx_M8CompilerFM6stringS1S(&c_filename_0, &v_56, 0), _fx_catch_3); FX_CALL(_fx_M8CompilerFM6stringS1S(&status_j_1, &v_57, 0), _fx_catch_3); fx_str_t slit_83 = FX_MAKE_STR("CC "); fx_str_t slit_84 = FX_MAKE_STR(": "); { const fx_str_t strs_15[] = { slit_83, v_56, slit_84, v_57 }; FX_CALL(fx_strjoin(0, 0, 0, strs_15, 4, &v_58), _fx_catch_3); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_58, 0), _fx_catch_3); _fx_LS lstend_0 = 0; _fx_LT2SR10Ast__loc_t lst_0 = pragma_clibs_0; for (; lst_0; lst_0 = lst_0->tl) { _fx_T2SR10Ast__loc_t* __pat___1 = &lst_0->hd; _fx_LS node_0 = 0; FX_CALL(_fx_cons_LS(&__pat___1->t0, 0, false, &node_0), _fx_catch_2); FX_LIST_APPEND(v_59, lstend_0, node_0); _fx_catch_2: ; FX_CHECK_EXN(_fx_catch_3); } FX_CALL(_fx_M8CompilerFM3revLS1LS(v_59, &clibs_5, 0), _fx_catch_3); _fx_make_T5BBLSBS(is_cpp_0, recompiled_0, clibs_5, ok_j_1, &obj_filename_0, &tup_0); _fx_copy_T5BBLSBS(&tup_0, dstptr_0); _fx_catch_3: ; _fx_free_T5BBLSBS(&tup_0); if (clibs_5) { _fx_free_LS(&clibs_5); } if (v_59) { _fx_free_LS(&v_59); } FX_FREE_STR(&v_58); 
FX_FREE_STR(&v_57); FX_FREE_STR(&v_56); FX_FREE_STR(&status_j_1); FX_FREE_STR(&status_0); FX_FREE_STR(&cmd_2); FX_FREE_STR(&v_55); FX_FREE_STR(&v_54); FX_FREE_STR(&v_53); FX_FREE_STR(&v_52); FX_FREE_STR(&v_51); _fx_free_T3BBS(&v_50); FX_FREE_STR(&obj_filename_0); FX_FREE_STR(&c_filename_0); FX_FREE_STR(&status_j_0); FX_FREE_STR(&v_49); FX_FREE_STR(&v_48); FX_FREE_STR(&v_47); fx_free_exn(&exn_1); fx_free_exn(&exn_0); FX_FREE_STR(&str_old_0); FX_FREE_STR(&str_new_0); _fx_free_T3BBS(&v_46); FX_FREE_STR(&output_fname_c_0); FX_FREE_STR(&output_fname_1); FX_FREE_STR(&ext_0); FX_FREE_STR(&comp_0); _fx_free_Ta2S(&v_45); FX_FREE_STR(&output_fname_0); FX_FREE_STR(&cmod_cname_0); if (cmod_ccode_0) { _fx_free_LN15C_form__cstmt_t(&cmod_ccode_0); } if (pragma_clibs_0) { _fx_free_LT2SR10Ast__loc_t(&pragma_clibs_0); } _fx_free_R17C_form__cmodule_t(&__pat___0); FX_CHECK_EXN_PARALLEL(fx_status, par_status_0); } FX_UPDATE_EXN_PARALLEL(par_status_0, _fx_cleanup); _fx_make_T5BBLSBLS(false, false, 0, ok_1, 0, &__fold_result___0); int_ ni_1 = FX_ARR_SIZE(results_0, 0); _fx_T5BBLSBS* ptr_results_0 = FX_PTR_1D(_fx_T5BBLSBS, results_0, 0); for (int_ i_2 = 0; i_2 < ni_1; i_2++) { _fx_T5BBLSBS __pat___2 = {0}; _fx_LS clibs_j_0 = 0; fx_str_t obj_0 = {0}; _fx_T5BBLSBLS v_64 = {0}; _fx_LS all_clibs_1 = 0; _fx_LS objs_1 = 0; _fx_LS v_65 = 0; _fx_T5BBLSBLS v_66 = {0}; _fx_copy_T5BBLSBS(ptr_results_0 + i_2, &__pat___2); FX_COPY_PTR(__pat___2.t2, &clibs_j_0); fx_copy_str(&__pat___2.t4, &obj_0); _fx_copy_T5BBLSBLS(&__fold_result___0, &v_64); FX_COPY_PTR(v_64.t2, &all_clibs_1); FX_COPY_PTR(v_64.t4, &objs_1); FX_CALL(_fx_M8CompilerFM7__add__LS2LSLS(clibs_j_0, all_clibs_1, &v_65, 0), _fx_catch_4); FX_CALL(_fx_cons_LS(&obj_0, objs_1, false, &objs_1), _fx_catch_4); _fx_make_T5BBLSBLS(v_64.t0 || __pat___2.t0, v_64.t1 || __pat___2.t1, v_65, v_64.t3 && __pat___2.t3, objs_1, &v_66); _fx_free_T5BBLSBLS(&__fold_result___0); _fx_copy_T5BBLSBLS(&v_66, &__fold_result___0); _fx_catch_4: ; _fx_free_T5BBLSBLS(&v_66); 
if (v_65) { _fx_free_LS(&v_65); } if (objs_1) { _fx_free_LS(&objs_1); } if (all_clibs_1) { _fx_free_LS(&all_clibs_1); } _fx_free_T5BBLSBLS(&v_64); FX_FREE_STR(&obj_0); if (clibs_j_0) { _fx_free_LS(&clibs_j_0); } _fx_free_T5BBLSBS(&__pat___2); FX_CHECK_EXN(_fx_cleanup); } _fx_copy_T5BBLSBLS(&__fold_result___0, &v_26); bool any_cpp_0 = v_26.t0; bool any_recompiled_0 = v_26.t1; FX_COPY_PTR(v_26.t2, &all_clibs_0); bool ok_2 = v_26.t3; FX_COPY_PTR(v_26.t4, &objs_0); bool v_67; bool t_0; if (ok_2) { t_0 = !any_recompiled_0; } else { t_0 = false; } if (t_0) { fx_copy_str(&_fx_g12Options__opt.app_filename, &v_27); FX_CALL(_fx_M8FilenameFM6existsB1S(&v_27, &v_67, 0), _fx_cleanup); } else { v_67 = false; } if (v_67) { fx_copy_str(&_fx_g12Options__opt.app_filename, &v_28); FX_CALL(_fx_M8CompilerFM6stringS1S(&v_28, &v_29, 0), _fx_cleanup); fx_str_t slit_85 = FX_MAKE_STR(" is up-to-date\n"); { const fx_str_t strs_16[] = { v_29, slit_85 }; FX_CALL(fx_strjoin(0, 0, 0, strs_16, 2, &v_30), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_30, 0), _fx_cleanup); *fx_result = ok_2; } else if (!ok_2) { *fx_result = ok_2; } else { fx_str_t slit_86 = FX_MAKE_STR("FICUS_LINK_LIBRARIES"); FX_CALL(_fx_M3SysFM6getenvS1S(&slit_86, &custom_clibs_0, 0), _fx_cleanup); fx_copy_str(&_fx_g12Options__opt.clibs, &v_31); if (FX_STR_LENGTH(v_31) == 0) { fx_copy_str(&custom_clibs_0, &custom_clibs_1); } else { fx_copy_str(&_fx_g12Options__opt.clibs, &v_32); fx_str_t slit_87 = FX_MAKE_STR(" "); { const fx_str_t strs_17[] = { custom_clibs_0, slit_87, v_32 }; FX_CALL(fx_strjoin(0, 0, 0, strs_17, 3, &custom_clibs_1), _fx_cleanup); } } if (all_clibs_0 == 0) { fx_copy_str(&custom_clibs_1, &custom_clibs_2); } else { FX_CALL(_fx_M8CompilerFM3revLS1LS(all_clibs_0, &v_33, 0), _fx_cleanup); _fx_LS lstend_1 = 0; _fx_LS lst_1 = v_33; for (; lst_1; lst_1 = lst_1->tl) { fx_str_t concat_str_0 = {0}; fx_str_t* l_0 = &lst_1->hd; { const fx_str_t strs_18[] = { link_lib_opt_0, *l_0 }; FX_CALL(fx_strjoin(0, 0, 0, 
strs_18, 2, &concat_str_0), _fx_catch_5); } _fx_LS node_1 = 0; FX_CALL(_fx_cons_LS(&concat_str_0, 0, false, &node_1), _fx_catch_5); FX_LIST_APPEND(v_34, lstend_1, node_1); _fx_catch_5: ; FX_FREE_STR(&concat_str_0); FX_CHECK_EXN(_fx_cleanup); } fx_str_t slit_88 = FX_MAKE_STR(" "); FX_CALL(_fx_M8CompilerFM4joinS2SLS(&slit_88, v_34, &v_35, 0), _fx_cleanup); fx_str_t slit_89 = FX_MAKE_STR(" "); { const fx_str_t strs_19[] = { custom_clibs_1, slit_89, v_35 }; FX_CALL(fx_strjoin(0, 0, 0, strs_19, 3, &custom_clibs_2), _fx_cleanup); } } fx_str_t slit_90 = FX_MAKE_STR(" "); { const fx_str_t strs_20[] = { clibs_3, slit_90, custom_clibs_2 }; FX_CALL(fx_strjoin(0, 0, 0, strs_20, 3, &clibs_4), _fx_cleanup); } FX_CALL(_fx_M8CompilerFM6stringS1S(&clibs_4, &v_36, 0), _fx_cleanup); fx_str_t slit_91 = FX_MAKE_STR("Linking the app with flags="); { const fx_str_t strs_21[] = { slit_91, v_36 }; FX_CALL(fx_strjoin(0, 0, 0, strs_21, 2, &v_37), _fx_cleanup); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_37, 0), _fx_cleanup); if (any_cpp_0) { fx_copy_str(&cpp_comp_0, &v_38); } else { fx_copy_str(&c_comp_0, &v_38); } fx_copy_str(&_fx_g12Options__opt.app_filename, &v_39); fx_str_t slit_92 = FX_MAKE_STR(" "); { const fx_str_t strs_22[] = { v_38, slit_92, appname_opt_0, v_39 }; FX_CALL(fx_strjoin(0, 0, 0, strs_22, 4, &cmd_0), _fx_cleanup); } fx_str_t slit_93 = FX_MAKE_STR(" "); FX_CALL(_fx_M8CompilerFM4joinS2SLS(&slit_93, objs_0, &v_40, 0), _fx_cleanup); fx_str_t slit_94 = FX_MAKE_STR(" "); fx_str_t slit_95 = FX_MAKE_STR(" "); { const fx_str_t strs_23[] = { cmd_0, slit_94, v_40, slit_95, clibs_4 }; FX_CALL(fx_strjoin(0, 0, 0, strs_23, 5, &cmd_1), _fx_cleanup); } int_ v_68; FX_CALL(_fx_M3SysFM7commandi1S(&cmd_1, &v_68, 0), _fx_cleanup); *fx_result = v_68 == 0; } _fx_cleanup: ; FX_FREE_STR(&osinfo_0); FX_FREE_STR(&runtime_include_path_0); FX_FREE_STR(&runtime_lib_path_0); FX_FREE_STR(&runtime_impl_0); FX_FREE_STR(&build_root_dir_0); FX_FREE_STR(&build_dir_0); _fx_free_Ta9S(&v_0); 
FX_FREE_STR(&opt_flags_0); FX_FREE_STR(&v_1); FX_FREE_STR(&v_2); FX_FREE_STR(&v_3); FX_FREE_STR(&v_4); FX_FREE_STR(&cflags_0); _fx_free_Ta4S(&v_5); _fx_free_Ta2S(&v_6); FX_FREE_STR(&omp_cflags_0); FX_FREE_STR(&omp_lib_0); _fx_free_Ta3S(&v_7); FX_FREE_STR(&v_8); FX_FREE_STR(&v_9); FX_FREE_STR(&libpath_0); FX_FREE_STR(&cflags_1); FX_FREE_STR(&clibs_0); FX_FREE_STR(&omp_flags_0); FX_FREE_STR(&os_0); FX_FREE_STR(&libpath_1); FX_FREE_STR(&cflags_2); FX_FREE_STR(&clibs_1); FX_FREE_STR(&ggdb_opt_0); FX_FREE_STR(&v_10); FX_FREE_STR(&v_11); FX_FREE_STR(&v_12); FX_FREE_STR(&v_13); FX_FREE_STR(&v_14); FX_FREE_STR(&cflags_3); FX_FREE_STR(&v_15); FX_FREE_STR(&v_16); FX_FREE_STR(&v_17); FX_FREE_STR(&v_18); FX_FREE_STR(&clibs_2); FX_FREE_STR(&c_comp_0); FX_FREE_STR(&cpp_comp_0); FX_FREE_STR(&obj_ext_0); FX_FREE_STR(&obj_opt_0); FX_FREE_STR(&appname_opt_0); FX_FREE_STR(&link_lib_opt_0); FX_FREE_STR(&cflags_4); FX_FREE_STR(&clibs_3); FX_FREE_STR(&custom_cflags_0); FX_FREE_STR(&v_19); FX_FREE_STR(&custom_cflags_1); FX_FREE_STR(&v_20); FX_FREE_STR(&cflags_5); FX_FREE_STR(&v_21); FX_FREE_STR(&v_22); FX_FREE_STR(&v_23); _fx_free_R14Ast__pragmas_t(&v_24); _fx_free_R17C_form__cmodule_t(&runtime_pseudo_cmod_0); if (cmods_1) { _fx_free_LR17C_form__cmodule_t(&cmods_1); } FX_FREE_ARR(&v_25); FX_FREE_ARR(&results_0); _fx_free_T5BBLSBLS(&__fold_result___0); _fx_free_T5BBLSBLS(&v_26); if (all_clibs_0) { _fx_free_LS(&all_clibs_0); } if (objs_0) { _fx_free_LS(&objs_0); } FX_FREE_STR(&v_27); FX_FREE_STR(&v_28); FX_FREE_STR(&v_29); FX_FREE_STR(&v_30); FX_FREE_STR(&custom_clibs_0); FX_FREE_STR(&v_31); FX_FREE_STR(&custom_clibs_1); FX_FREE_STR(&v_32); FX_FREE_STR(&custom_clibs_2); if (v_33) { _fx_free_LS(&v_33); } if (v_34) { _fx_free_LS(&v_34); } FX_FREE_STR(&v_35); FX_FREE_STR(&clibs_4); FX_FREE_STR(&v_36); FX_FREE_STR(&v_37); FX_FREE_STR(&v_38); FX_FREE_STR(&v_39); FX_FREE_STR(&cmd_0); FX_FREE_STR(&v_40); FX_FREE_STR(&cmd_1); return fx_status; } FX_EXTERN_C int 
/*
 * NOTE(review): machine-generated C — apparently emitted by the Ficus
 * compiler's C backend (note the `_fx_` name mangling and the FX_* runtime
 * macros). Do not hand-edit; regenerate from the .fx sources instead.
 *
 * _fx_M8CompilerFM11process_allB1S ("Compiler.process_all"): drives the whole
 * compilation pipeline for the root module `fname0_0`:
 *   1) locate the Ficus root dir / module search path (find_ficus_dirs);
 *      throw Fail with a detailed message if the root is not found;
 *   2) parse all modules (parse_all); on failure throw CumulativeParseError;
 *      build the module dependency graph and topologically sort it
 *      (toposort) into the global Ast__all_modules_sorted;
 *   3) type-check every module (Ast_typecheck.check_mod), optionally
 *      pretty-printing ASTs when the print_ast0/print_ast options are set;
 *   4) K-normalize (K_normalize.normalize_all_modules) and optimize
 *      (k_optimize_all) the K-form, with optional K-form dumps;
 *   5) when gen_c is set: generate C code (C_gen_code.gen_ccode_all), rename
 *      locals, adjust decls for C++-compiled modules, then compile/link via
 *      run_cc; with run_app set, execute the produced binary (Sys.command);
 *   6) on failure, pretty-print the accumulated compile errors and/or the
 *      caught exception (Fail / Ast.CompileError / anything else).
 * `*fx_result` receives the overall success flag; the returned int is the FX
 * runtime status (negative = pending exception, unwound via _fx_cleanup).
 * The long declaration block below zero-initializes every temporary so the
 * cleanup section can unconditionally free them all.
 */
_fx_M8CompilerFM11process_allB1S(fx_str_t* fname0_0, bool* fx_result, void* fx_fv) { fx_exn_t exn_0 = {0}; _fx_LE __fold_result___0 = 0; _fx_LE v_0 = 0; fx_str_t v_1 = {0}; fx_str_t v_2 = {0}; int fx_status = 0; FX_CALL(_fx_M3AstFM8init_allv0(0), _fx_cleanup); _fx_T2SLS v_3 = {0}; fx_str_t ficus_root_0 = {0}; _fx_LS ficus_path_0 = 0; fx_str_t v_4 = {0}; fx_str_t v_5 = {0}; fx_str_t v_6 = {0}; fx_exn_t v_7 = {0}; _fx_LT2iLi graph_0 = 0; _fx_Li v_8 = 0; _fx_Li v_9 = 0; _fx_Li v_10 = 0; _fx_LS v_11 = 0; fx_str_t modules_used_0 = {0}; fx_str_t parsing_complete_0 = {0}; fx_str_t v_12 = {0}; fx_str_t v_13 = {0}; _fx_T2LR17K_form__kmodule_tB v_14 = {0}; _fx_LR17K_form__kmodule_t kmods_0 = 0; _fx_LR17K_form__kmodule_t kmods_1 = 0; fx_str_t v_15 = {0}; _fx_T2LR17K_form__kmodule_tB v_16 = {0}; fx_str_t v_17 = {0}; _fx_LR17K_form__kmodule_t kmods_2 = 0; fx_str_t v_18 = {0}; _fx_T2LR17C_form__cmodule_tB v_19 = {0}; fx_str_t v_20 = {0}; _fx_LR17C_form__cmodule_t cmods_0 = 0; fx_str_t v_21 = {0}; _fx_LR17C_form__cmodule_t cmods_1 = 0; _fx_LR17C_form__cmodule_t cmods_2 = 0; _fx_LR17C_form__cmodule_t cmods_3 = 0; fx_str_t appname_0 = {0}; fx_str_t v_22 = {0}; fx_str_t appname_1 = {0}; _fx_LS v_23 = 0; fx_str_t cmd_0 = {0}; _fx_LE __fold_result___1 = 0; _fx_LE v_24 = 0; fx_str_t v_25 = {0}; fx_str_t v_26 = {0}; FX_CALL(_fx_M8CompilerFM15find_ficus_dirsT2SLS0(&v_3, 0), _fx_catch_8); fx_copy_str(&v_3.t0, &ficus_root_0); FX_COPY_PTR(v_3.t1, &ficus_path_0); if (FX_STR_LENGTH(ficus_root_0) == 0) { FX_CALL(_fx_F6stringS1i(_fx_g15__ficus_major__, &v_4, 0), _fx_catch_8); FX_CALL(_fx_F6stringS1i(_fx_g15__ficus_minor__, &v_5, 0), _fx_catch_8); fx_str_t slit_0 = FX_MAKE_STR("Ficus root directory is not found.\n" U"Please, add the directory \'lib\' containing Builtins.fx to\n" U"\'FICUS_PATH\' environment variable or make sure that either\n" U"1. \'ficus\' executable is put in a directory <ficus_root>/bin\n" U"and there are <ficus_root>/runtime and <ficus_root>/lib.\n" U"2. 
or \'ficus\' executable is in (/usr|/usr/local|/opt|...)/bin and\n" U" there are (/usr|...)/lib/ficus-"); fx_str_t slit_1 = FX_MAKE_STR("."); fx_str_t slit_2 = FX_MAKE_STR("/{runtime, lib}"); { const fx_str_t strs_0[] = { slit_0, v_4, slit_1, v_5, slit_2 }; FX_CALL(fx_strjoin(0, 0, 0, strs_0, 5, &v_6), _fx_catch_8); } FX_CALL(_fx_F9make_FailE1S(&v_6, &v_7), _fx_catch_8); FX_THROW(&v_7, true, _fx_catch_8); } bool ok_0; FX_CALL(_fx_M8CompilerFM9parse_allB2SLS(fname0_0, ficus_path_0, &ok_0, 0), _fx_catch_8); if (!ok_0) { FX_THROW(&_fx_E30Compiler__CumulativeParseErrorv, false, _fx_catch_8); } _fx_LT2iLi lstend_0 = 0; int_ ni_0 = FX_ARR_SIZE(_fx_g16Ast__all_modules, 0); _fx_N16Ast__defmodule_t* ptr_all_modules_0 = FX_PTR_1D(_fx_N16Ast__defmodule_t, _fx_g16Ast__all_modules, 0); for (int_ i_0 = 0; i_0 < ni_0; i_0++) { _fx_N16Ast__defmodule_t minfo_0 = 0; _fx_Li v_27 = 0; _fx_T2iLi tup_0 = {0}; FX_COPY_PTR(ptr_all_modules_0[i_0], &minfo_0); FX_COPY_PTR(minfo_0->u.defmodule_t.t5, &v_27); _fx_make_T2iLi(minfo_0->u.defmodule_t.t2, v_27, &tup_0); _fx_LT2iLi node_0 = 0; FX_CALL(_fx_cons_LT2iLi(&tup_0, 0, false, &node_0), _fx_catch_0); FX_LIST_APPEND(graph_0, lstend_0, node_0); _fx_catch_0: ; _fx_free_T2iLi(&tup_0); FX_FREE_LIST_SIMPLE(&v_27); if (minfo_0) { _fx_free_N16Ast__defmodule_t(&minfo_0); } FX_CHECK_EXN(_fx_catch_8); } FX_CALL(_fx_M8CompilerFM8toposortLi1LT2iLi(graph_0, &v_8, 0), _fx_catch_8); if (v_8 != 0) { FX_COPY_PTR(v_8->tl, &v_9); } else { FX_FAST_THROW(FX_EXN_NullListError, _fx_catch_8); } FX_CHECK_EXN(_fx_catch_8); if (v_9 != 0) { FX_COPY_PTR(v_9->tl, &v_10); } else { FX_FAST_THROW(FX_EXN_NullListError, _fx_catch_8); } FX_CHECK_EXN(_fx_catch_8); FX_FREE_LIST_SIMPLE(&_fx_g23Ast__all_modules_sorted); FX_COPY_PTR(v_10, &_fx_g23Ast__all_modules_sorted); if (_fx_g12Options__opt.print_ast0) { _fx_Li lst_0 = _fx_g23Ast__all_modules_sorted; for (; lst_0; lst_0 = lst_0->tl) { _fx_N16Ast__defmodule_t minfo_1 = 0; int_ m_0 = lst_0->hd; 
FX_CALL(_fx_M3AstFM10get_moduleN16Ast__defmodule_t1i(m_0, &minfo_1, 0), _fx_catch_1); FX_CALL(_fx_M6Ast_ppFM10pprint_modv1N16Ast__defmodule_t(minfo_1, 0), _fx_catch_1); _fx_catch_1: ; if (minfo_1) { _fx_free_N16Ast__defmodule_t(&minfo_1); } FX_CHECK_EXN(_fx_catch_8); } } _fx_LS lstend_1 = 0; _fx_Li lst_1 = _fx_g23Ast__all_modules_sorted; for (; lst_1; lst_1 = lst_1->tl) { fx_str_t res_0 = {0}; int_ m_idx_0 = lst_1->hd; _fx_R9Ast__id_t v_28; FX_CALL(_fx_M3AstFM15get_module_nameRM4id_t1i(m_idx_0, &v_28, 0), _fx_catch_2); FX_CALL(_fx_M3AstFM2ppS1RM4id_t(&v_28, &res_0, 0), _fx_catch_2); _fx_LS node_1 = 0; FX_CALL(_fx_cons_LS(&res_0, 0, false, &node_1), _fx_catch_2); FX_LIST_APPEND(v_11, lstend_1, node_1); _fx_catch_2: ; FX_FREE_STR(&res_0); FX_CHECK_EXN(_fx_catch_8); } fx_str_t slit_3 = FX_MAKE_STR(", "); FX_CALL(_fx_F4joinS2SLS(&slit_3, v_11, &modules_used_0, 0), _fx_catch_8); if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_4 = FX_MAKE_STR("Parsing complete"); fx_copy_str(&slit_4, &parsing_complete_0); } else { fx_str_t slit_5 = FX_MAKE_STR("Parsing complete"); fx_copy_str(&slit_5, &parsing_complete_0); } fx_str_t slit_6 = FX_MAKE_STR(". 
Modules used: "); { const fx_str_t strs_1[] = { parsing_complete_0, slit_6, modules_used_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_1, 3, &v_12), _fx_catch_8); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_12, 0), _fx_catch_8); _fx_free_LE(&_fx_g21Ast__all_compile_errs); _fx_g21Ast__all_compile_errs = 0; _fx_Li lst_2 = _fx_g23Ast__all_modules_sorted; for (; lst_2; lst_2 = lst_2->tl) { int_ m_1 = lst_2->hd; FX_CALL(_fx_M13Ast_typecheckFM9check_modv1i(m_1, 0), _fx_catch_3); _fx_catch_3: ; FX_CHECK_EXN(_fx_catch_8); } bool ok_1 = _fx_g21Ast__all_compile_errs == 0; if (ok_1) { if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_7 = FX_MAKE_STR("Type checking complete"); fx_copy_str(&slit_7, &v_13); } else { fx_str_t slit_8 = FX_MAKE_STR("Type checking complete"); fx_copy_str(&slit_8, &v_13); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_13, 0), _fx_catch_8); if (_fx_g12Options__opt.print_ast) { _fx_Li lst_3 = _fx_g23Ast__all_modules_sorted; for (; lst_3; lst_3 = lst_3->tl) { _fx_N16Ast__defmodule_t minfo_2 = 0; int_ m_2 = lst_3->hd; FX_CALL(_fx_M3AstFM10get_moduleN16Ast__defmodule_t1i(m_2, &minfo_2, 0), _fx_catch_4); FX_CALL(_fx_M6Ast_ppFM10pprint_modv1N16Ast__defmodule_t(minfo_2, 0), _fx_catch_4); _fx_catch_4: ; if (minfo_2) { _fx_free_N16Ast__defmodule_t(&minfo_2); } FX_CHECK_EXN(_fx_catch_8); } } } if (ok_1) { _fx_free_LE(&_fx_g21Ast__all_compile_errs); _fx_g21Ast__all_compile_errs = 0; FX_CALL(_fx_M6K_formFM13init_all_idksv0(0), _fx_catch_8); FX_CALL(_fx_M11K_normalizeFM21normalize_all_modulesLR17K_form__kmodule_t1Li(_fx_g23Ast__all_modules_sorted, &kmods_0, 0), _fx_catch_8); _fx_make_T2LR17K_form__kmodule_tB(kmods_0, _fx_g21Ast__all_compile_errs == 0, &v_14); } else { _fx_make_T2LR17K_form__kmodule_tB(0, false, &v_14); } FX_COPY_PTR(v_14.t0, &kmods_1); bool ok_2 = v_14.t1; if (ok_2) { if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_9 = FX_MAKE_STR("K-normalization complete"); fx_copy_str(&slit_9, &v_15); } else { fx_str_t slit_10 = FX_MAKE_STR("K-normalization complete"); 
fx_copy_str(&slit_10, &v_15); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_15, 0), _fx_catch_8); if (_fx_g12Options__opt.print_k0) { FX_CALL(_fx_M4K_ppFM8pp_kmodsv1LR17K_form__kmodule_t(kmods_1, 0), _fx_catch_8); } } if (ok_2) { if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_11 = FX_MAKE_STR("K-form optimization started"); fx_copy_str(&slit_11, &v_17); } else { fx_str_t slit_12 = FX_MAKE_STR("K-form optimization started"); fx_copy_str(&slit_12, &v_17); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_17, 0), _fx_catch_8); FX_CALL(_fx_M8CompilerFM14k_optimize_allT2LR17K_form__kmodule_tB1LR17K_form__kmodule_t(kmods_1, &v_16, 0), _fx_catch_8); } else { _fx_make_T2LR17K_form__kmodule_tB(0, false, &v_16); } FX_COPY_PTR(v_16.t0, &kmods_2); bool ok_3 = v_16.t1; if (ok_3) { if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_13 = FX_MAKE_STR("K-form optimization complete"); fx_copy_str(&slit_13, &v_18); } else { fx_str_t slit_14 = FX_MAKE_STR("K-form optimization complete"); fx_copy_str(&slit_14, &v_18); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_18, 0), _fx_catch_8); if (_fx_g12Options__opt.print_k) { FX_CALL(_fx_M4K_ppFM8pp_kmodsv1LR17K_form__kmodule_t(kmods_2, 0), _fx_catch_8); } } bool ok_4; if (!_fx_g12Options__opt.gen_c) { ok_4 = ok_3; } else { if (ok_3) { if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_15 = FX_MAKE_STR("Generating C code"); fx_copy_str(&slit_15, &v_20); } else { fx_str_t slit_16 = FX_MAKE_STR("Generating C code"); fx_copy_str(&slit_16, &v_20); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_20, 0), _fx_catch_8); _fx_free_LE(&_fx_g21Ast__all_compile_errs); _fx_g21Ast__all_compile_errs = 0; FX_CALL(_fx_M6C_formFM13init_all_idcsv0(0), _fx_catch_8); FX_CALL(_fx_M9C_gen_stdFM14init_std_namesv0(0), _fx_catch_8); FX_CALL(_fx_M10C_gen_codeFM13gen_ccode_allLR17C_form__cmodule_t1LR17K_form__kmodule_t(kmods_2, &cmods_0, 0), _fx_catch_8); if (_fx_g21Compiler__iscolorterm) { fx_str_t slit_17 = FX_MAKE_STR("C code generated"); fx_copy_str(&slit_17, &v_21); } else { fx_str_t slit_18 = 
FX_MAKE_STR("C code generated"); fx_copy_str(&slit_18, &v_21); } FX_CALL(_fx_M3AstFM10pr_verbosev1S(&v_21, 0), _fx_catch_8); FX_CALL(_fx_M20C_post_rename_localsFM13rename_localsLR17C_form__cmodule_t1LR17C_form__cmodule_t(cmods_0, &cmods_1, 0), _fx_catch_8); _fx_LR17C_form__cmodule_t lstend_2 = 0; _fx_LR17C_form__cmodule_t lst_4 = cmods_1; for (; lst_4; lst_4 = lst_4->tl) { _fx_R17C_form__cmodule_t t_0 = {0}; _fx_R17C_form__cmodule_t* cmod_0 = &lst_4->hd; bool is_cpp_0; if (_fx_g12Options__opt.compile_by_cpp) { is_cpp_0 = true; } else { is_cpp_0 = cmod_0->cmod_pragmas.pragma_cpp; } if (is_cpp_0) { FX_CALL(_fx_M19C_post_adjust_declsFM12adjust_declsR17C_form__cmodule_t1R17C_form__cmodule_t(cmod_0, &t_0, 0), _fx_catch_5); } else { _fx_copy_R17C_form__cmodule_t(cmod_0, &t_0); } _fx_LR17C_form__cmodule_t node_2 = 0; FX_CALL(_fx_cons_LR17C_form__cmodule_t(&t_0, 0, false, &node_2), _fx_catch_5); FX_LIST_APPEND(cmods_2, lstend_2, node_2); _fx_catch_5: ; _fx_free_R17C_form__cmodule_t(&t_0); FX_CHECK_EXN(_fx_catch_8); } fx_str_t slit_19 = FX_MAKE_STR("\tConversion to C-form complete"); FX_CALL(_fx_M3AstFM10pr_verbosev1S(&slit_19, 0), _fx_catch_8); _fx_make_T2LR17C_form__cmodule_tB(cmods_2, _fx_g21Ast__all_compile_errs == 0, &v_19); } else { _fx_make_T2LR17C_form__cmodule_tB(0, false, &v_19); } FX_COPY_PTR(v_19.t0, &cmods_3); bool ok_5 = v_19.t1; bool t_1; if (ok_5) { if (_fx_g12Options__opt.make_app) { t_1 = true; } else { t_1 = _fx_g12Options__opt.run_app; } } else { t_1 = false; } bool ok_6; if (t_1) { FX_CALL(_fx_M8CompilerFM6run_ccB2LR17C_form__cmodule_tS(cmods_3, &ficus_root_0, &ok_6, 0), _fx_catch_8); } else { ok_6 = ok_5; } bool t_2; if (ok_6) { t_2 = _fx_g12Options__opt.run_app; } else { t_2 = false; } if (t_2) { fx_copy_str(&_fx_g12Options__opt.app_filename, &appname_0); FX_CALL(_fx_M8FilenameFM6getcwdS0(&v_22, 0), _fx_catch_8); FX_CALL(_fx_M8FilenameFM9normalizeS2SS(&v_22, &appname_0, &appname_1, 0), _fx_catch_8); FX_COPY_PTR(_fx_g12Options__opt.app_args, &v_23); 
/* -run mode: prepend the normalized app path to its argument list, join the
   list with spaces and execute the resulting command line via Sys.command. */
FX_CALL(_fx_cons_LS(&appname_1, v_23, false, &v_23), _fx_catch_8); fx_str_t slit_20 = FX_MAKE_STR(" "); FX_CALL(_fx_F4joinS2SLS(&slit_20, v_23, &cmd_0, 0), _fx_catch_8); int_ v_29; FX_CALL(_fx_M3SysFM7commandi1S(&cmd_0, &v_29, 0), _fx_catch_8); ok_4 = v_29 == 0; } else { ok_4 = ok_6; } } if (!ok_4) { int_ nerrs_0 = _fx_M8CompilerFM6lengthi1LE(_fx_g21Ast__all_compile_errs, 0); if (nerrs_0 != 0) { _fx_LE lst_5 = _fx_g21Ast__all_compile_errs; for (; lst_5; lst_5 = lst_5->tl) { _fx_LE r_0 = 0; fx_exn_t* a_0 = &lst_5->hd; FX_COPY_PTR(__fold_result___1, &r_0); FX_CALL(_fx_cons_LE(a_0, r_0, false, &r_0), _fx_catch_6); _fx_free_LE(&__fold_result___1); FX_COPY_PTR(r_0, &__fold_result___1); _fx_catch_6: ; if (r_0) { _fx_free_LE(&r_0); } FX_CHECK_EXN(_fx_catch_8); } FX_COPY_PTR(__fold_result___1, &v_24); _fx_LE lst_6 = v_24; for (; lst_6; lst_6 = lst_6->tl) { fx_exn_t* x_0 = &lst_6->hd; FX_CALL(_fx_M3AstFM17print_compile_errv1E(x_0, 0), _fx_catch_7); _fx_catch_7: ; FX_CHECK_EXN(_fx_catch_8); } FX_CALL(_fx_F6stringS1i(nerrs_0, &v_25, 0), _fx_catch_8); fx_str_t slit_21 = FX_MAKE_STR("\n"); fx_str_t slit_22 = FX_MAKE_STR(" errors occured during type checking."); { const fx_str_t strs_2[] = { slit_21, v_25, slit_22 }; FX_CALL(fx_strjoin(0, 0, 0, strs_2, 3, &v_26), _fx_catch_8); } _fx_F12print_stringv1S(&v_26, 0); fx_str_t slit_23 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_23, 0); } } *fx_result = ok_4; _fx_catch_8: ; _fx_free_T2SLS(&v_3); FX_FREE_STR(&ficus_root_0); if (ficus_path_0) { _fx_free_LS(&ficus_path_0); } FX_FREE_STR(&v_4); FX_FREE_STR(&v_5); FX_FREE_STR(&v_6); fx_free_exn(&v_7); if (graph_0) { _fx_free_LT2iLi(&graph_0); } FX_FREE_LIST_SIMPLE(&v_8); FX_FREE_LIST_SIMPLE(&v_9); FX_FREE_LIST_SIMPLE(&v_10); if (v_11) { _fx_free_LS(&v_11); } FX_FREE_STR(&modules_used_0); FX_FREE_STR(&parsing_complete_0); FX_FREE_STR(&v_12); FX_FREE_STR(&v_13); _fx_free_T2LR17K_form__kmodule_tB(&v_14); if (kmods_0) { _fx_free_LR17K_form__kmodule_t(&kmods_0); } if (kmods_1) { 
_fx_free_LR17K_form__kmodule_t(&kmods_1); } FX_FREE_STR(&v_15); _fx_free_T2LR17K_form__kmodule_tB(&v_16); FX_FREE_STR(&v_17); if (kmods_2) { _fx_free_LR17K_form__kmodule_t(&kmods_2); } FX_FREE_STR(&v_18); _fx_free_T2LR17C_form__cmodule_tB(&v_19); FX_FREE_STR(&v_20); if (cmods_0) { _fx_free_LR17C_form__cmodule_t(&cmods_0); } FX_FREE_STR(&v_21); if (cmods_1) { _fx_free_LR17C_form__cmodule_t(&cmods_1); } if (cmods_2) { _fx_free_LR17C_form__cmodule_t(&cmods_2); } if (cmods_3) { _fx_free_LR17C_form__cmodule_t(&cmods_3); } FX_FREE_STR(&appname_0); FX_FREE_STR(&v_22); FX_FREE_STR(&appname_1); if (v_23) { _fx_free_LS(&v_23); } FX_FREE_STR(&cmd_0); if (__fold_result___1) { _fx_free_LE(&__fold_result___1); } if (v_24) { _fx_free_LE(&v_24); } FX_FREE_STR(&v_25); FX_FREE_STR(&v_26); if (fx_status < 0) { fx_exn_get_and_reset(fx_status, &exn_0); fx_status = 0; int_ nerrs_1 = _fx_M8CompilerFM6lengthi1LE(_fx_g21Ast__all_compile_errs, 0); if (nerrs_1 != 0) { _fx_LE lst_7 = _fx_g21Ast__all_compile_errs; for (; lst_7; lst_7 = lst_7->tl) { _fx_LE r_1 = 0; fx_exn_t* a_1 = &lst_7->hd; FX_COPY_PTR(__fold_result___0, &r_1); FX_CALL(_fx_cons_LE(a_1, r_1, false, &r_1), _fx_catch_9); _fx_free_LE(&__fold_result___0); FX_COPY_PTR(r_1, &__fold_result___0); _fx_catch_9: ; if (r_1) { _fx_free_LE(&r_1); } FX_CHECK_EXN(_fx_cleanup); } FX_COPY_PTR(__fold_result___0, &v_0); _fx_LE lst_8 = v_0; for (; lst_8; lst_8 = lst_8->tl) { fx_exn_t* x_1 = &lst_8->hd; FX_CALL(_fx_M3AstFM17print_compile_errv1E(x_1, 0), _fx_catch_10); _fx_catch_10: ; FX_CHECK_EXN(_fx_cleanup); } FX_CALL(_fx_F6stringS1i(nerrs_1, &v_1, 0), _fx_cleanup); fx_str_t slit_24 = FX_MAKE_STR("\n"); fx_str_t slit_25 = FX_MAKE_STR(" errors occured during type checking."); { const fx_str_t strs_3[] = { slit_24, v_1, slit_25 }; FX_CALL(fx_strjoin(0, 0, 0, strs_3, 3, &v_2), _fx_cleanup); } _fx_F12print_stringv1S(&v_2, 0); fx_str_t slit_26 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_26, 0); } int tag_0 = exn_0.tag; if (tag_0 == 
_FX_EXN_E4Fail) { fx_str_t v_30 = {0}; fx_str_t slit_27 = FX_MAKE_STR(": "); fx_str_t* msg_0 = &FX_EXN_DATA(_fx_E4Fail_data_t, exn_0.data); { const fx_str_t strs_4[] = { _fx_g15Compiler__error, slit_27, *msg_0 }; FX_CALL(fx_strjoin(0, 0, 0, strs_4, 3, &v_30), _fx_catch_11); } _fx_F12print_stringv1S(&v_30, 0); fx_str_t slit_28 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_28, 0); _fx_catch_11: ; FX_FREE_STR(&v_30); } else if (tag_0 == _FX_EXN_E17Ast__CompileError) { FX_CALL(_fx_M3AstFM17print_compile_errv1E(&exn_0, 0), _fx_catch_12); _fx_catch_12: ; } else if (tag_0 != _FX_EXN_E30Compiler__CumulativeParseError) { fx_str_t v_31 = {0}; fx_str_t v_32 = {0}; FX_CALL(_fx_F6stringS1E(&exn_0, &v_31, 0), _fx_catch_13); fx_str_t slit_29 = FX_MAKE_STR("\n" U"\n"); fx_str_t slit_30 = FX_MAKE_STR(": Exception "); fx_str_t slit_31 = FX_MAKE_STR(" occured"); { const fx_str_t strs_5[] = { slit_29, _fx_g15Compiler__error, slit_30, v_31, slit_31 }; FX_CALL(fx_strjoin(0, 0, 0, strs_5, 5, &v_32), _fx_catch_13); } _fx_F12print_stringv1S(&v_32, 0); fx_str_t slit_32 = FX_MAKE_STR("\n"); _fx_F12print_stringv1S(&slit_32, 0); _fx_catch_13: ; FX_FREE_STR(&v_32); FX_FREE_STR(&v_31); } FX_CHECK_EXN(_fx_cleanup); *fx_result = false; } _fx_cleanup: ; fx_free_exn(&exn_0); if (__fold_result___0) { _fx_free_LE(&__fold_result___0); } if (v_0) { _fx_free_LE(&v_0); } FX_FREE_STR(&v_1); FX_FREE_STR(&v_2); return fx_status; } FX_EXTERN_C int fx_init_Compiler(void) { FX_REG_SIMPLE_EXN("Compiler.CumulativeParseError", _FX_EXN_E30Compiler__CumulativeParseError, _fx_E30Compiler__CumulativeParseError_info, _fx_E30Compiler__CumulativeParseErrorv); int fx_status = 0; FX_CALL(_fx_M3SysFM9colortermB0(&_fx_g21Compiler__iscolorterm, 0), _fx_cleanup); fx_str_t slit_0 = FX_MAKE_STR("error"); FX_CALL(_fx_M8CompilerFM6clrmsgS2N20Compiler__msgcolor_tS(&_fx_g16Compiler__MsgRed, &slit_0, &_fx_g15Compiler__error, 0), _fx_cleanup); _fx_cleanup: ; return fx_status; } FX_EXTERN_C void fx_deinit_Compiler(void) { 
/* Module deinit: releases the global colorized "error" prefix string that
   fx_init_Compiler() built via clrmsg(MsgRed, "error"). */
FX_FREE_STR(&_fx_g15Compiler__error); }
api_test.c
/* Unit tests for the public SPLATT C API (option allocation + CSF loading). */
#include "ctest/ctest.h"
#include "splatt_test.h"
#include "../src/sptensor.h"

/* API includes */
#include "../include/splatt.h"

#include <omp.h>

/* Shared fixture: every dataset listed in splatt_test.h is loaded once per
   test case and released in teardown. */
CTEST_DATA(api)
{
  splatt_idx_t ntensors;
  sptensor_t * tensors[MAX_DSETS];
};

CTEST_SETUP(api)
{
  /* 'datasets' is a fixed-size array of file paths, so sizeof/sizeof gives
     its element count. */
  data->ntensors = sizeof(datasets) / sizeof(datasets[0]);
  for(idx_t i=0; i < data->ntensors; ++i) {
    data->tensors[i] = tt_read(datasets[i]);
  }
}

CTEST_TEARDOWN(api)
{
  for(idx_t i=0; i < data->ntensors; ++i) {
    tt_free(data->tensors[i]);
  }
}

/* splatt_default_opts() must allocate and default NTHREADS to the number of
   processors visible to OpenMP. */
CTEST2(api, opts_alloc)
{
  double * opts = splatt_default_opts();
  ASSERT_NOT_NULL(opts);
  /* test defaults */
  ASSERT_EQUAL(omp_get_num_procs(), (int) opts[SPLATT_OPTION_NTHREADS]);
  splatt_free_opts(opts);
}

/* Inside an active parallel region the default NTHREADS is expected to be 1
   -- presumably because nested parallelism is disabled; TODO confirm against
   the splatt_default_opts() implementation. */
CTEST2(api, par_opts_alloc)
{
  #pragma omp parallel num_threads(5)
  {
    double * opts = splatt_default_opts();
    ASSERT_EQUAL(1, (int) opts[SPLATT_OPTION_NTHREADS]);
    splatt_free_opts(opts);
  }
}

/* Placeholder: CSF loading is not exercised yet (call disabled via #if 0). */
CTEST2(api, csf_load)
{
  splatt_csf loaded;
  for(idx_t i=0; i < data->ntensors; ++i) {
#if 0
    int ret = splatt_csf_load(datasets[i], &nmodes, &loaded, opts);
#endif
  }
}
a7.c
#include "omp.h"

/* axpy: in-place scaled vector addition, Y[i] += a * X[i] for i in [0, N). */
void axpy(int N, float *Y, float *X, float a) {
#pragma omp declare target to(X)
#pragma omp parallel for
  for (int idx = 0; idx < N; ++idx) {
    Y[idx] += a * X[idx];
  }
}
image-view.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % V V IIIII EEEEE W W % % V V I E W W % % V V I EEE W W W % % V V I E WW WW % % V IIIII EEEEE W W % % % % % % MagickCore Image View Methods % % % % Software Design % % Cristy % % March 2003 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/MagickCore.h" #include "MagickCore/exception-private.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor-private.h" #include "MagickCore/thread-private.h" /* Typedef declarations. */ struct _ImageView { char *description; RectangleInfo extent; Image *image; CacheView *view; ExceptionInfo *exception; MagickBooleanType debug; size_t signature; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageView() makes a copy of the specified image view. 
% % The format of the CloneImageView method is: % % ImageView *CloneImageView(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport ImageView *CloneImageView(const ImageView *image_view) { ImageView *clone_view; assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); clone_view=(ImageView *) AcquireCriticalMemory(sizeof(*clone_view)); (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view)); clone_view->description=ConstantString(image_view->description); clone_view->extent=image_view->extent; clone_view->view=CloneCacheView(image_view->view); clone_view->exception=AcquireExceptionInfo(); InheritException(clone_view->exception,image_view->exception); clone_view->debug=image_view->debug; clone_view->signature=MagickCoreSignature; return(clone_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageView() deallocates memory associated with a image view. % % The format of the DestroyImageView method is: % % ImageView *DestroyImageView(ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. 
% */ MagickExport ImageView *DestroyImageView(ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); if (image_view->description != (char *) NULL) image_view->description=DestroyString(image_view->description); image_view->view=DestroyCacheView(image_view->view); image_view->exception=DestroyExceptionInfo(image_view->exception); image_view->signature=(~MagickCoreSignature); image_view=(ImageView *) RelinquishMagickMemory(image_view); return(image_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D u p l e x T r a n s f e r I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DuplexTransferImageViewIterator() iterates over three image views in % parallel and calls your transfer method for each scanline of the view. The % source and duplex pixel extent is not confined to the image canvas-- that is % you can include negative offsets or widths or heights that exceed the image % dimension. However, the destination image view is confined to the image % canvas-- that is no negative offsets or widths or heights that exceed the % image dimension are permitted. % % The callback signature is: % % MagickBooleanType DuplexTransferImageViewMethod(const ImageView *source, % const ImageView *duplex,ImageView *destination,const ssize_t y, % const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the DuplexTransferImageViewIterator method is: % % MagickBooleanType DuplexTransferImageViewIterator(ImageView *source, % ImageView *duplex,ImageView *destination, % DuplexTransferImageViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source image view. 
% % o duplex: the duplex image view. % % o destination: the destination image view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ MagickExport MagickBooleanType DuplexTransferImageViewIterator( ImageView *source,ImageView *duplex,ImageView *destination, DuplexTransferImageViewMethod transfer,void *context) { Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (ImageView *) NULL); assert(source->signature == MagickCoreSignature); if (transfer == (DuplexTransferImageViewMethod) NULL) return(MagickFalse); source_image=source->image; destination_image=destination->image; status=SetImageStorageClass(destination_image,DirectClass, destination->exception); if (status == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=source->extent.height-source->extent.y; #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const Quantum *magick_restrict duplex_pixels, *magick_restrict pixels; register Quantum *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y, duplex->extent.width,1,duplex->exception); if (duplex_pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1, destination->exception); if (destination_pixels == (Quantum *) 
NULL) { status=MagickFalse; continue; } if (transfer(source,duplex,destination,y,id,context) == MagickFalse) status=MagickFalse; sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception); if (sync == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_DuplexTransferImageViewIterator) #endif proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w A u t h e n t i c M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewAuthenticMetacontent() returns the image view authentic % meta-content. % % The format of the GetImageViewAuthenticPixels method is: % % void *GetImageViewAuthenticMetacontent( % const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport void *GetImageViewAuthenticMetacontent( const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(GetCacheViewAuthenticMetacontent(image_view->view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewAuthenticPixels() returns the image view authentic pixels. % % The format of the GetImageViewAuthenticPixels method is: % % Quantum *GetImageViewAuthenticPixels(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. 
% */ MagickExport Quantum *GetImageViewAuthenticPixels( const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(GetCacheViewAuthenticPixelQueue(image_view->view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w E x c e p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewException() returns the severity, reason, and description of any % error that occurs when utilizing a image view. % % The format of the GetImageViewException method is: % % char *GetImageViewException(const PixelImage *image_view, % ExceptionType *severity) % % A description of each parameter follows: % % o image_view: the pixel image_view. % % o severity: the severity of the error is returned here. % */ MagickExport char *GetImageViewException(const ImageView *image_view, ExceptionType *severity) { char *description; assert(image_view != (const ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); assert(severity != (ExceptionType *) NULL); *severity=image_view->exception->severity; description=(char *) AcquireQuantumMemory(2UL*MagickPathExtent, sizeof(*description)); if (description == (char *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); *description='\0'; if (image_view->exception->reason != (char *) NULL) (void) CopyMagickString(description,GetLocaleExceptionMessage( image_view->exception->severity,image_view->exception->reason), MagickPathExtent); if (image_view->exception->description != (char *) NULL) { (void) ConcatenateMagickString(description," (",MagickPathExtent); (void) ConcatenateMagickString(description,GetLocaleExceptionMessage( image_view->exception->severity,image_view->exception->description), MagickPathExtent); (void) ConcatenateMagickString(description,")",MagickPathExtent); } return(description); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewExtent() returns the image view extent. % % The format of the GetImageViewExtent method is: % % RectangleInfo GetImageViewExtent(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport RectangleInfo GetImageViewExtent(const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(image_view->extent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewImage() returns the image associated with the image view. % % The format of the GetImageViewImage method is: % % MagickCore *GetImageViewImage(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport Image *GetImageViewImage(const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(image_view->image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewIterator() iterates over the image view in parallel and calls % your get method for each scanline of the view. The pixel extent is % not confined to the image canvas-- that is you can include negative offsets % or widths or heights that exceed the image dimension. Any updates to % the pixels in your callback are ignored. 
% % The callback signature is: % % MagickBooleanType GetImageViewMethod(const ImageView *source, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback get method that must be % executed by a single thread at a time. % % The format of the GetImageViewIterator method is: % % MagickBooleanType GetImageViewIterator(ImageView *source, % GetImageViewMethod get,void *context) % % A description of each parameter follows: % % o source: the source image view. % % o get: the get callback method. % % o context: the user defined context. % */ MagickExport MagickBooleanType GetImageViewIterator(ImageView *source, GetImageViewMethod get,void *context) { Image *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (ImageView *) NULL); assert(source->signature == MagickCoreSignature); if (get == (GetImageViewMethod) NULL) return(MagickFalse); source_image=source->image; status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=source->extent.height-source->extent.y; #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(source_image,source_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); register const Quantum *pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } if (get(source,y,id,context) == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_GetImageViewIterator) #endif 
proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w V i r t u a l M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewVirtualMetacontent() returns the image view virtual % meta-content. % % The format of the GetImageViewVirtualMetacontent method is: % % const void *GetImageViewVirtualMetacontent( % const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport const void *GetImageViewVirtualMetacontent( const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(GetCacheViewVirtualMetacontent(image_view->view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i e w V i r t u a l P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageViewVirtualPixels() returns the image view virtual pixels. % % The format of the GetImageViewVirtualPixels method is: % % const Quantum *GetImageViewVirtualPixels(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. 
% */ MagickExport const Quantum *GetImageViewVirtualPixels( const ImageView *image_view) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); return(GetCacheViewVirtualPixelQueue(image_view->view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageView() returns MagickTrue if the the parameter is verified as a image % view object. % % The format of the IsImageView method is: % % MagickBooleanType IsImageView(const ImageView *image_view) % % A description of each parameter follows: % % o image_view: the image view. % */ MagickExport MagickBooleanType IsImageView(const ImageView *image_view) { if (image_view == (const ImageView *) NULL) return(MagickFalse); if (image_view->signature != MagickCoreSignature) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w I m a g e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewImageView() returns a image view required for all other methods in the % Image View API. % % The format of the NewImageView method is: % % ImageView *NewImageView(MagickCore *wand,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ImageView *NewImageView(Image *image,ExceptionInfo *exception) { ImageView *image_view; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); image_view=(ImageView *) AcquireCriticalMemory(sizeof(*image_view)); (void) ResetMagickMemory(image_view,0,sizeof(*image_view)); image_view->description=ConstantString("ImageView"); image_view->image=image; image_view->view=AcquireVirtualCacheView(image_view->image,exception); image_view->extent.width=image->columns; image_view->extent.height=image->rows; image_view->extent.x=0; image_view->extent.y=0; image_view->exception=AcquireExceptionInfo(); image_view->debug=IsEventLogging(); image_view->signature=MagickCoreSignature; return(image_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w I m a g e V i e w R e g i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewImageViewRegion() returns a image view required for all other methods % in the Image View API. % % The format of the NewImageViewRegion method is: % % ImageView *NewImageViewRegion(MagickCore *wand,const ssize_t x, % const ssize_t y,const size_t width,const size_t height, % ExceptionInfo *exception) % % A description of each parameter follows: % % o wand: the magick wand. % % o x,y,columns,rows: These values define the perimeter of a extent of % pixel_wands view. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport ImageView *NewImageViewRegion(Image *image,const ssize_t x, const ssize_t y,const size_t width,const size_t height, ExceptionInfo *exception) { ImageView *image_view; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); image_view=(ImageView *) AcquireCriticalMemory(sizeof(*image_view)); (void) ResetMagickMemory(image_view,0,sizeof(*image_view)); image_view->description=ConstantString("ImageView"); image_view->view=AcquireVirtualCacheView(image_view->image,exception); image_view->image=image; image_view->extent.width=width; image_view->extent.height=height; image_view->extent.x=x; image_view->extent.y=y; image_view->exception=AcquireExceptionInfo(); image_view->debug=IsEventLogging(); image_view->signature=MagickCoreSignature; return(image_view); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i e w D e s c r i p t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageViewDescription() associates a description with an image view. % % The format of the SetImageViewDescription method is: % % void SetImageViewDescription(ImageView *image_view, % const char *description) % % A description of each parameter follows: % % o image_view: the image view. % % o description: the image view description. % */ MagickExport void SetImageViewDescription(ImageView *image_view, const char *description) { assert(image_view != (ImageView *) NULL); assert(image_view->signature == MagickCoreSignature); image_view->description=ConstantString(description); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageViewIterator() iterates over the image view in parallel and calls % your set method for each scanline of the view. 
The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension. The pixels are initiallly % undefined and any settings you make in the callback method are automagically % synced back to your image. % % The callback signature is: % % MagickBooleanType SetImageViewMethod(ImageView *destination, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback set method that must be % executed by a single thread at a time. % % The format of the SetImageViewIterator method is: % % MagickBooleanType SetImageViewIterator(ImageView *destination, % SetImageViewMethod set,void *context) % % A description of each parameter follows: % % o destination: the image view. % % o set: the set callback method. % % o context: the user defined context. % */ MagickExport MagickBooleanType SetImageViewIterator(ImageView *destination, SetImageViewMethod set,void *context) { Image *destination_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(destination != (ImageView *) NULL); assert(destination->signature == MagickCoreSignature); if (set == (SetImageViewMethod) NULL) return(MagickFalse); destination_image=destination->image; status=SetImageStorageClass(destination_image,DirectClass, destination->exception); if (status == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=destination->extent.height-destination->extent.y; #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(destination_image,destination_image,height,1) #endif for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register Quantum *magick_restrict pixels; if (status == 
MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x, y,destination->extent.width,1,destination->exception); if (pixels == (Quantum *) NULL) { status=MagickFalse; continue; } if (set(destination,y,id,context) == MagickFalse) status=MagickFalse; sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception); if (sync == MagickFalse) status=MagickFalse; if (destination_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetImageViewIterator) #endif proceed=SetImageProgress(destination_image,destination->description, progress++,destination->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f e r I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransferImageViewIterator() iterates over two image views in parallel and % calls your transfer method for each scanline of the view. The source pixel % extent is not confined to the image canvas-- that is you can include % negative offsets or widths or heights that exceed the image dimension. % However, the destination image view is confined to the image canvas-- that % is no negative offsets or widths or heights that exceed the image dimension % are permitted. % % The callback signature is: % % MagickBooleanType TransferImageViewMethod(const ImageView *source, % ImageView *destination,const ssize_t y,const int thread_id, % void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. 
% % The format of the TransferImageViewIterator method is: % % MagickBooleanType TransferImageViewIterator(ImageView *source, % ImageView *destination,TransferImageViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source image view. % % o destination: the destination image view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ MagickExport MagickBooleanType TransferImageViewIterator(ImageView *source, ImageView *destination,TransferImageViewMethod transfer,void *context) { Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (ImageView *) NULL); assert(source->signature == MagickCoreSignature); if (transfer == (TransferImageViewMethod) NULL) return(MagickFalse); source_image=source->image; destination_image=destination->image; status=SetImageStorageClass(destination_image,DirectClass, destination->exception); if (status == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=source->extent.height-source->extent.y; #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_number_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register const Quantum *magick_restrict pixels; register Quantum *magick_restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const Quantum *) NULL) { status=MagickFalse; continue; } destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1, destination->exception); if (destination_pixels == (Quantum *) NULL) { 
status=MagickFalse; continue; } if (transfer(source,destination,y,id,context) == MagickFalse) status=MagickFalse; sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception); if (sync == MagickFalse) status=MagickFalse; if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransferImageViewIterator) #endif proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U p d a t e I m a g e V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UpdateImageViewIterator() iterates over the image view in parallel and calls % your update method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension are permitted. Updates to pixels % in your callback are automagically synced back to the image. % % The callback signature is: % % MagickBooleanType UpdateImageViewMethod(ImageView *source, % const ssize_t y,const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback update method that must be % executed by a single thread at a time. % % The format of the UpdateImageViewIterator method is: % % MagickBooleanType UpdateImageViewIterator(ImageView *source, % UpdateImageViewMethod update,void *context) % % A description of each parameter follows: % % o source: the source image view. % % o update: the update callback method. % % o context: the user defined context. 
% */
MagickExport MagickBooleanType UpdateImageViewIterator(ImageView *source,
  UpdateImageViewMethod update,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (ImageView *) NULL);
  assert(source->signature == MagickCoreSignature);
  if (update == (UpdateImageViewMethod) NULL)
    return(MagickFalse);
  source_image=source->image;
  status=SetImageStorageClass(source_image,DirectClass,source->exception);
  if (status == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=source->extent.height-source->extent.y;
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_number_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register Quantum
      *magick_restrict pixels;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /*
      Fold the sync result into 'status' through a separate variable (as the
      other view iterators do) so a failure reported by the update callback
      above is not overwritten when the sync itself succeeds.
    */
    sync=SyncCacheViewAuthenticPixels(source->view,source->exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_UpdateImageViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status);
}
symm_x_dia_n_hi_row.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Dense-times-sparse update y := alpha * A * x + beta * y for a symmetric
 * matrix A stored in DIA format.  Only diagonals with distance >= 0 are
 * visited, i.e. the stored part is the upper triangle (the lower triangle
 * is reconstructed via the symmetric scatter below).  x and y are dense
 * row-major blocks with `columns` columns and leading dimensions ldx/ldy.
 *
 * Parallelization: the second phase partitions the *columns* of x/y across
 * threads, so the two symmetric writes per entry always land inside the
 * calling thread's own column range and never race.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_DIA *mat,
                           const ALPHA_Number *x,
                           const ALPHA_INT columns,
                           const ALPHA_INT ldx,
                           const ALPHA_Number beta,
                           ALPHA_Number *y,
                           const ALPHA_INT ldy)
{
    const ALPHA_INT thread_count = alpha_get_thread_num();

    /* Phase 1: scale the whole output block by beta. */
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_count)
#endif
    for (ALPHA_INT row = 0; row < mat->rows; row++)
    {
        for (ALPHA_INT col = 0; col < columns; col++)
        {
            alpha_mul(y[index2(row, col, ldy)], y[index2(row, col, ldy)], beta);
        }
    }

    /* Phase 2: accumulate alpha * A * x, one column range per thread. */
#ifdef _OPENMP
#pragma omp parallel num_threads(thread_count)
#endif
    {
        const ALPHA_INT tid       = alpha_get_thread_id();
        const ALPHA_INT col_begin = cross_block_low(tid, thread_count, columns);
        const ALPHA_INT col_end   = cross_block_high(tid, thread_count, columns);

        for (ALPHA_INT diag = 0; diag < mat->ndiag; ++diag)
        {
            const ALPHA_INT dist = mat->distance[diag];

            if (dist > 0)
            {
                /* Strictly-upper diagonal: scatter each stored entry to both
                   (ar, ac) and its mirrored position (ac, ar). */
                const ALPHA_INT row_start = alpha_max(0, -dist);
                const ALPHA_INT col_start = alpha_max(0, dist);
                const ALPHA_INT length    = alpha_min(mat->rows - row_start,
                                                      mat->cols - col_start);

                for (ALPHA_INT k = 0; k < length; ++k)
                {
                    const ALPHA_INT ar = row_start + k;
                    const ALPHA_INT ac = col_start + k;
                    ALPHA_Number scaled;
                    alpha_mul(scaled, mat->values[index2(diag, ar, mat->lval)], alpha);
                    for (ALPHA_INT bc = col_begin; bc < col_end; ++bc)
                    {
                        alpha_madde(y[index2(ar, bc, ldy)], scaled, x[index2(ac, bc, ldx)]);
                        alpha_madde(y[index2(ac, bc, ldy)], scaled, x[index2(ar, bc, ldx)]);
                    }
                }
            }
            else if (dist == 0)
            {
                /* Main diagonal: contributes exactly once per row. */
                for (ALPHA_INT r = 0; r < mat->rows; ++r)
                {
                    ALPHA_Number scaled;
                    alpha_mul(scaled, mat->values[index2(diag, r, mat->lval)], alpha);
                    for (ALPHA_INT bc = col_begin; bc < col_end; ++bc)
                    {
                        alpha_madde(y[index2(r, bc, ldy)], scaled, x[index2(r, bc, ldx)]);
                    }
                }
            }
            /* dist < 0 entries are intentionally ignored: the lower triangle
               is implied by symmetry. */
        }
    }

    return ALPHA_SPARSE_STATUS_SUCCESS;
}
isogeometric_post_utility.h
// // Project Name: Kratos // Last Modified by: $Author: hbui $ // Date: $Date: 2013-10-12 $ // Revision: $Revision: 1.0 $ // // #if !defined(KRATOS_ISOGEOMETRIC_POST_UTILITY_H_INCLUDED ) #define KRATOS_ISOGEOMETRIC_POST_UTILITY_H_INCLUDED // System includes #include <string> #include <vector> #include <tuple> #include <iostream> // External includes #include <omp.h> #include "boost/progress.hpp" // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" #include "includes/element.h" #include "includes/properties.h" #include "utilities/openmp_utils.h" #include "custom_utilities/iga_define.h" #include "custom_utilities/isogeometric_utility.h" #define USE_TRIANGULATION_UTILS_FOR_TRIANGULATION #if defined(USE_TRIANGULATION_UTILS_FOR_TRIANGULATION) #include "custom_utilities/triangulation_utils.h" #elif defined(USE_CGAL_FOR_TRIANGULATION) && defined(ISOGEOMETRIC_APPLICATION_USE_CGAL) #include <CGAL/Exact_predicates_inexact_constructions_kernel.h> #include <CGAL/Exact_predicates_exact_constructions_kernel.h> #include <CGAL/Delaunay_triangulation_2.h> #include <CGAL/Triangulation_vertex_base_with_info_2.h> #endif namespace Kratos { ///@addtogroup IsogeometricApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// Short class definition. /** * Abstract class for all utility to export mesh from NURBS. Also to provide basic utility functions. 
*/ class IsogeometricPostUtility : public IsogeometricUtility { public: ///@name Type Definitions ///@{ typedef typename ModelPart::NodesContainerType NodesArrayType; typedef typename ModelPart::ElementsContainerType ElementsArrayType; typedef typename ModelPart::ConditionsContainerType ConditionsArrayType; typedef typename Element::GeometryType GeometryType; typedef typename GeometryType::PointType NodeType; typedef typename NodeType::PointType PointType; typedef typename GeometryType::IntegrationPointsArrayType IntegrationPointsArrayType; typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType; typedef typename NodeType::DofsContainerType DofsContainerType; typedef std::size_t IndexType; /// Pointer definition of IsogeometricPostUtility KRATOS_CLASS_POINTER_DEFINITION(IsogeometricPostUtility); ///@} ///@name Life Cycle ///@{ /// Default constructor. IsogeometricPostUtility() { } /// Destructor. virtual ~IsogeometricPostUtility() { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /// Create a node for a model_part with a specific Id and transfer the values template<class TPatchType, typename TCoordinatesType, typename TIndexType> static typename NodeType::Pointer CreateNodeAndTransferValues(const TCoordinatesType& p_ref, const TPatchType& rPatch, ModelPart& r_model_part, const TIndexType& NodeCounter) { typename NodeType::Pointer pNewNode = CreateNode(p_ref, rPatch, r_model_part, NodeCounter); TransferValuesToNodes(*pNewNode, p_ref, rPatch); return pNewNode; } /// Create a node for a model_part with a specific Id template<class TPatchType, typename TCoordinatesType, typename TIndexType> static typename NodeType::Pointer CreateNode(const TCoordinatesType& p_ref, const TPatchType& rPatch, ModelPart& r_model_part, const TIndexType& NodeCounter) { typename TPatchType::ControlPointType p = rPatch.pControlPointGridFunction()->GetValue(p_ref); typename NodeType::Pointer pNewNode = r_model_part.CreateNewNode(NodeCounter, p.X(), p.Y(), 
p.Z()); return pNewNode; } /// Transfer the control values from patch to node /// The node has to be inside the patch template<class TPatchType> static void TransferValuesToNodes(NodeType& rNode, const TPatchType& rPatch) { typename TPatchType::Array1DGridFunctionType::ConstPointer pControlPointCoordinatesGridFunction = rPatch.pGetGridFunction(CONTROL_POINT_COORDINATES); typename TPatchType::Array1DGridFunctionType::DataType p_ref; pControlPointCoordinatesGridFunction->LocalCoordinates(rNode, p_ref); TransferValuesToNodes(rNode, p_ref, rPatch); } /// Transfer the control values from patch to node /// p_ref is the local coordinates of the node in patch template<class TPatchType, typename TCoordinatesType> static void TransferValuesToNodes(NodeType& rNode, const TCoordinatesType& p_ref, const TPatchType& rPatch) { typedef typename TPatchType::DoubleGridFunctionContainerType DoubleGridFunctionContainerType; typedef typename TPatchType::Array1DGridFunctionContainerType Array1DGridFunctionContainerType; typedef typename TPatchType::VectorGridFunctionContainerType VectorGridFunctionContainerType; // transfer the control values DoubleGridFunctionContainerType DoubleGridFunctions_ = rPatch.DoubleGridFunctions(); for (typename DoubleGridFunctionContainerType::const_iterator it_gf = DoubleGridFunctions_.begin(); it_gf != DoubleGridFunctions_.end(); ++it_gf) { typedef double DataType; typedef Variable<DataType> VariableType; const std::string& var_name = (*it_gf)->pControlGrid()->Name(); if (KratosComponents<VariableData>::Has(var_name)) { VariableType* pVariable = dynamic_cast<VariableType*>(&KratosComponents<VariableData>::Get(var_name)); DataType value = (*it_gf)->GetValue(p_ref); if (rNode.SolutionStepsDataHas(*pVariable)) rNode.GetSolutionStepValue(*pVariable) = value; } } Array1DGridFunctionContainerType Array1DGridFunctions_ = rPatch.Array1DGridFunctions(); for (typename Array1DGridFunctionContainerType::const_iterator it_gf = Array1DGridFunctions_.begin(); it_gf != 
Array1DGridFunctions_.end(); ++it_gf) { typedef array_1d<double, 3> DataType; typedef Variable<DataType> VariableType; const std::string& var_name = (*it_gf)->pControlGrid()->Name(); if (var_name == "CONTROL_POINT_COORDINATES") continue; if (KratosComponents<VariableData>::Has(var_name)) { VariableType* pVariable = dynamic_cast<VariableType*>(&KratosComponents<VariableData>::Get(var_name)); DataType value = (*it_gf)->GetValue(p_ref); if (rNode.SolutionStepsDataHas(*pVariable)) rNode.GetSolutionStepValue(*pVariable) = value; } } VectorGridFunctionContainerType VectorGridFunctions_ = rPatch.VectorGridFunctions(); for (typename VectorGridFunctionContainerType::const_iterator it_gf = VectorGridFunctions_.begin(); it_gf != VectorGridFunctions_.end(); ++it_gf) { typedef Vector DataType; typedef Variable<DataType> VariableType; const std::string& var_name = (*it_gf)->pControlGrid()->Name(); if (KratosComponents<VariableData>::Has(var_name)) { VariableType* pVariable = dynamic_cast<VariableType*>(&KratosComponents<VariableData>::Get(var_name)); DataType value = (*it_gf)->GetValue(p_ref); if (rNode.SolutionStepsDataHas(*pVariable)) rNode.GetSolutionStepValue(*pVariable) = value; } } } /// Transfer the control values from patch to Gauss points template<class TEntityType, typename TVariableType, class TPatchType> static void TransferValuesToGaussPoints(TEntityType& rElement, const TVariableType& rVariable, const TPatchType& rPatch, const ProcessInfo& rProcessInfo) { GeometryData::IntegrationMethod ThisIntegrationMethod = rElement.GetIntegrationMethod(); GeometryType& rGeometry = rElement.GetGeometry(); typename TPatchType::Array1DGridFunctionType::ConstPointer pControlPointCoordinatesGridFunction = rPatch.pGetGridFunction(CONTROL_POINT_COORDINATES); typename GridFunction<TPatchType::FESpaceType::Dim(), typename TVariableType::Type>::ConstPointer pGridFunc = rPatch.pGetGridFunction(rVariable); #ifdef ENABLE_BEZIER_GEOMETRY //initialize the geometry 
rGeometry.Initialize(ThisIntegrationMethod); #endif const IntegrationPointsArrayType& integration_points = rGeometry.IntegrationPoints(ThisIntegrationMethod); std::vector<typename TVariableType::Type> ValuesOnIntPoint(integration_points.size()); CoordinatesArrayType GlobalCoords; typename TPatchType::Array1DGridFunctionType::DataType p_ref; for (unsigned int PointNumber = 0; PointNumber < integration_points.size(); ++PointNumber) { rGeometry.GlobalCoordinates(GlobalCoords, integration_points[PointNumber]); typename TPatchType::Array1DGridFunctionType::ConstPointer pControlPointCoordinatesGridFunction = rPatch.pGetGridFunction(CONTROL_POINT_COORDINATES); pControlPointCoordinatesGridFunction->LocalCoordinates(GlobalCoords, p_ref); ValuesOnIntPoint[PointNumber] = pGridFunc->GetValue(p_ref); } #ifdef ENABLE_BEZIER_GEOMETRY // clean the geometry rGeometry.Clean(); #endif rElement.SetValuesOnIntegrationPoints( rVariable, ValuesOnIntPoint, rProcessInfo); } /// Generate corner points for regular geometry template<int TDim, typename TCoordinatesType, typename TValueType> static void GenerateRegular(std::vector<TCoordinatesType>& points, const std::vector<TCoordinatesType>& cmin, const std::vector<TCoordinatesType>& cmax) { if (TDim == 2) { GenerateRectangle(points, cmin[0], cmax[0], cmin[1], cmax[1]); } else if (TDim == 3) { GenerateBox(points, cmin[0], cmax[0], cmin[1], cmax[1], cmin[2], cmax[2]); } else KRATOS_THROW_ERROR(std::logic_error, "Invalid dimension", TDim) } /// Generate a single rectangle. 
The 4 corner points are denoted as /// 4---3 /// | | /// 1---2 template<typename TCoordinatesType, typename TValueType> static void GenerateRectangle(std::vector<TCoordinatesType>& points, const TValueType& xmin, const TValueType& xmax, const TValueType& ymin, const TValueType& ymax) { points[0][0] = xmin; points[0][1] = ymin; // points[0][2] = 0.0; points[1][0] = xmax; points[1][1] = ymin; // points[1][2] = 0.0; points[2][0] = xmax; points[2][1] = ymax; // points[2][2] = 0.0; points[3][0] = xmin; points[3][1] = ymax; // points[3][2] = 0.0; } /// Generate the triangulation for a list of points in 3D /// The triangulation will be performed on the physical points with the information {cemter, normal, t1, t2} template<typename TCoordinatesType, typename TVectorType, typename TIndexType> static std::vector<std::vector<TIndexType> > GenerateTriangleGrid(const std::vector<TCoordinatesType>& points, const TVectorType& rCenter, const TVectorType& rNormal, const TVectorType& rTangent1, const TVectorType& rTangent2) { // create the 2D coordinates for points, in order to triangulate std::vector<double> XY; TCoordinatesType Projection; for (std::size_t i = 0; i < points.size(); ++i) { noalias(Projection) = points[i] - inner_prod(points[i] - rCenter, rNormal) * rNormal; XY.push_back(inner_prod(Projection - rCenter, rTangent1)); XY.push_back(inner_prod(Projection - rCenter, rTangent2)); } // std::cout << "XY:" << std::endl; // for (std::size_t i = 0; i < XY.size()/2; ++i) // std::cout << " " << XY[2*i] << " " << XY[2*i+1] << std::endl; // compute the triangulation typedef std::vector<std::vector<TIndexType> > connectivity_t; connectivity_t Connectivities; #if defined(USE_CGAL_FOR_TRIANGULATION) typedef CGAL::Exact_predicates_inexact_constructions_kernel Kernel; typedef CGAL::Triangulation_vertex_base_with_info_2<unsigned int, Kernel> Vb; typedef CGAL::Triangulation_data_structure_2<Vb> Tds; typedef CGAL::Delaunay_triangulation_2<Kernel, Tds> Delaunay; typedef Kernel::Point_2 
Point2; std::vector< std::pair<Point2, unsigned int> > clipped_points; for(std::size_t i = 0; i < XY.size() / 2; ++i) { clipped_points.push_back( std::make_pair( Point2(XY[2*i], XY[2*i+1]), i ) ); } Delaunay triangulation; triangulation.insert(clipped_points.begin(), clipped_points.end()); for(Delaunay::Finite_faces_iterator fit = triangulation.finite_faces_begin(); fit != triangulation.finite_faces_end(); ++fit) { Delaunay::Face_handle face = fit; std::vector<unsigned int> con(3); con[0] = face->vertex(0)->info(); con[1] = face->vertex(1)->info(); con[2] = face->vertex(2)->info(); Connectivities.push_back(con); } #elif defined(USE_TRIANGULATION_UTILS_FOR_TRIANGULATION) TriangulationUtils tri_util; tri_util.ComputeDelaunayTriangulation(XY, Connectivities); #else // REMARK: a tool to perform triangulation is not defined. You must define it. KRATOS_THROW_ERROR(std::logic_error, "A triangulation method must be specialized", "") #endif return Connectivities; } /// Generate the triangulation for a list of points in 3D /// The triangulation will be performed on the physical points with the information {cemter, normal, t1, t2} /// The refinement is performed on the local points instead. 
template<typename TCoordinatesType, typename TVectorType, typename TIndexType> static std::pair<std::vector<TCoordinatesType>, std::vector<std::vector<TIndexType> > > GenerateTriangleGrid(const std::vector<TCoordinatesType>& physical_points, const TVectorType& rCenter, const TVectorType& rNormal, const TVectorType& rTangent1, const TVectorType& rTangent2, const std::vector<TCoordinatesType>& local_points, const TIndexType& offset, const std::size_t& nrefine) { // compute the triangulation typedef std::vector<std::vector<TIndexType> > connectivity_t; connectivity_t Connectivities = GenerateTriangleGrid<TCoordinatesType, TVectorType, TIndexType>(physical_points, rCenter, rNormal, rTangent1, rTangent2); // refine if needed std::vector<TCoordinatesType> new_points = local_points; for (std::size_t i = 0; i < nrefine; ++i) RefineTriangleGrid<TIndexType, TCoordinatesType>(new_points, Connectivities); // offset the connectivity for (std::size_t i = 0; i < Connectivities.size(); ++i) for (std::size_t j = 0; j < Connectivities[i].size(); ++j) Connectivities[i][j] += offset; return std::make_pair(new_points, Connectivities); } /// Generate the quadrilateral grid. 
The 4 corner points are denoted as /// 4---3 /// | | /// 1---2 template<typename TCoordinatesType, typename TIndexType> static std::pair<std::vector<TCoordinatesType>, std::vector<std::vector<TIndexType> > > GenerateQuadGrid(const TCoordinatesType& p1, const TCoordinatesType& p2, const TCoordinatesType& p3, const TCoordinatesType& p4, const TIndexType& starting_node_id, const std::size_t& num_div_1, const std::size_t& num_div_2) { TCoordinatesType p, pm, pn; std::vector<TCoordinatesType> points; std::vector<std::vector<TIndexType> > connectivities; double xi, eta; std::size_t i, j; for (i = 0; i <= num_div_1; ++i) { xi = ((double) i) / num_div_1; pm = p1 + xi*(p2 - p1); pn = p4 + xi*(p3 - p4); for (j = 0; j <= num_div_2; ++j) { eta = ((double) j) / num_div_2; p = pm + eta*(pn - pm); points.push_back(p); } } TIndexType n1, n2, n3, n4; for (i = 0; i < num_div_1; ++i) { for(j = 0; j < num_div_2; ++j) { n1 = starting_node_id + i * (num_div_2 + 1) + j; n2 = starting_node_id + i * (num_div_2 + 1) + j + 1; n3 = starting_node_id + (i + 1) * (num_div_2 + 1) + j; n4 = starting_node_id + (i + 1) * (num_div_2 + 1) + j + 1; connectivities.push_back(std::vector<std::size_t>{n1, n2, n4, n3}); } } return std::make_pair(points, connectivities); } /// Generate a single box. 
The 8 corner points are denoted as /// 4---3 8---7 /// | | --> | | /// 1---2 5---6 template<typename TCoordinatesType, typename TValueType> static void GenerateBox(std::vector<TCoordinatesType>& points, const TValueType& xmin, const TValueType& xmax, const TValueType& ymin, const TValueType& ymax, const TValueType& zmin, const TValueType& zmax) { points[0][0] = xmin; points[0][1] = ymin; points[0][2] = zmin; points[1][0] = xmax; points[1][1] = ymin; points[1][2] = zmin; points[2][0] = xmax; points[2][1] = ymax; points[2][2] = zmin; points[3][0] = xmin; points[3][1] = ymax; points[3][2] = zmin; points[4][0] = xmin; points[4][1] = ymin; points[4][2] = zmax; points[5][0] = xmax; points[5][1] = ymin; points[5][2] = zmax; points[6][0] = xmax; points[6][1] = ymax; points[6][2] = zmax; points[7][0] = xmin; points[7][1] = ymax; points[7][2] = zmax; } /// Generate the hexahedral grid. The 8 corner points are denoted as /// 4---3 8---7 /// | | --> | | /// 1---2 5---6 template<typename TCoordinatesType, typename TIndexType> static std::pair<std::vector<TCoordinatesType>, std::vector<std::vector<TIndexType> > > GenerateHexGrid(const TCoordinatesType& p1, const TCoordinatesType& p2, const TCoordinatesType& p3, const TCoordinatesType& p4, const TCoordinatesType& p5, const TCoordinatesType& p6, const TCoordinatesType& p7, const TCoordinatesType& p8, const TIndexType& starting_node_id, const std::size_t& num_div_1, const std::size_t& num_div_2, const std::size_t& num_div_3) { TCoordinatesType p, pm1, pn1, pm2, pn2, pq1, pq2; std::vector<TCoordinatesType> points; std::vector<std::vector<TIndexType> > connectivities; double xi, eta, zeta; std::size_t i, j, k; for (i = 0; i <= num_div_1; ++i) { xi = ((double) i) / num_div_1; pm1 = p1 + xi*(p2 - p1); pn1 = p4 + xi*(p3 - p4); pm2 = p5 + xi*(p6 - p5); pn2 = p8 + xi*(p7 - p8); for (j = 0; j <= num_div_2; ++j) { eta = ((double) j) / num_div_2; pq1 = pm1 + eta*(pn1 - pm1); pq2 = pm2 + eta*(pn2 - pm2); for (k = 0; k <= num_div_3; ++k) { 
zeta = ((double) k) / num_div_3; p = pq1 + zeta*(pq2-pq1); points.push_back(p); } } } // std::cout << "points:" << std::endl; // for (std::size_t i = 0; i < points.size(); ++i) // std::cout << " " << points[i] << std::endl; // std::cout << std::endl; TIndexType n1, n2, n3, n4, n5, n6, n7, n8; for (i = 0; i < num_div_1; ++i) { for (j = 0; j < num_div_2; ++j) { for (k = 0; k < num_div_3; ++k) { IndexType n1 = starting_node_id + (i * (num_div_2 + 1) + j) * (num_div_3 + 1) + k; IndexType n2 = starting_node_id + (i * (num_div_2 + 1) + j + 1) * (num_div_3 + 1) + k; IndexType n3 = starting_node_id + ((i + 1) * (num_div_2 + 1) + j) * (num_div_3 + 1) + k; IndexType n4 = starting_node_id + ((i + 1) * (num_div_2 + 1) + j + 1) * (num_div_3 + 1) + k; IndexType n5 = n1 + 1; IndexType n6 = n2 + 1; IndexType n7 = n3 + 1; IndexType n8 = n4 + 1; connectivities.push_back(std::vector<std::size_t>{n1, n2, n4, n3, n5, n6, n8, n7}); } } } // std::cout << "connectivities:" << std::endl; // for (std::size_t i = 0; i < connectivities.size(); ++i) // { // std::cout << " "; // for (std::size_t j = 0; j < connectivities[i].size(); ++j) // std::cout << " " << connectivities[i][j]; // std::cout << std::endl; // } // std::cout << std::endl; return std::make_pair(points, connectivities); } /// Refine a triangle grid by sub-divide a triangle into 4 sub-triangles. 
template<typename TIndexType = std::size_t, typename TCoordinatesType = std::vector<double>, typename TCoordinatesListType = std::vector<TCoordinatesType>, typename TConnectivityType = std::vector<TIndexType>, typename TConnectivityListType = std::vector<TConnectivityType> > static void RefineTriangleGrid(TCoordinatesListType& Points, TConnectivityListType& Connectivities) { std::size_t npoints = Points.size(); TIndexType last_id = static_cast<TIndexType>(npoints-1); // generate the new middle points typedef std::pair<TIndexType, TIndexType> key_t; std::map<key_t, TIndexType> map_corner_to_middle; key_t key1, key2; TIndexType n1, n2, n3; for (typename TConnectivityListType::iterator it = Connectivities.begin(); it != Connectivities.end(); ++it) { n1 = (*it)[0]; n2 = (*it)[1]; n3 = (*it)[2]; key1 = std::make_pair(n1, n2); key2 = std::make_pair(n2, n1); if (map_corner_to_middle.find(key1) == map_corner_to_middle.end()) { Points.push_back(0.5*(Points[n1] + Points[n2])); map_corner_to_middle[key1] = ++last_id; map_corner_to_middle[key2] = last_id; } key1 = std::make_pair(n2, n3); key2 = std::make_pair(n3, n2); if (map_corner_to_middle.find(key1) == map_corner_to_middle.end()) { Points.push_back(0.5*(Points[n2] + Points[n3])); map_corner_to_middle[key1] = ++last_id; map_corner_to_middle[key2] = last_id; } key1 = std::make_pair(n3, n1); key2 = std::make_pair(n1, n3); if (map_corner_to_middle.find(key1) == map_corner_to_middle.end()) { Points.push_back(0.5*(Points[n3] + Points[n1])); map_corner_to_middle[key1] = ++last_id; map_corner_to_middle[key2] = last_id; } } // generate new triangles TIndexType m1, m2, m3; TConnectivityListType Connectivities_old = Connectivities; Connectivities.clear(); for (typename TConnectivityListType::iterator it = Connectivities_old.begin(); it != Connectivities_old.end(); ++it) { n1 = (*it)[0]; n2 = (*it)[1]; n3 = (*it)[2]; m1 = map_corner_to_middle[std::make_pair(n1, n2)]; m2 = map_corner_to_middle[std::make_pair(n2, n3)]; m3 = 
map_corner_to_middle[std::make_pair(n3, n1)]; Connectivities.push_back(TConnectivityType{n1, m1, m3}); Connectivities.push_back(TConnectivityType{m1, n2, m2}); Connectivities.push_back(TConnectivityType{m1, m2, m3}); Connectivities.push_back(TConnectivityType{m2, n3, m3}); } } /// Find the entity of the same type in the list of entities template<class TEntityType, class TEntitiesContainerType> static TEntitiesContainerType FindEntities(TEntitiesContainerType& pEntities, TEntityType const& r_sample_entity) { TEntitiesContainerType pFoundEntities; for (typename TEntitiesContainerType::ptr_iterator it = pEntities.ptr_begin(); it != pEntities.ptr_end(); ++it) { if (typeid(*(*it)) == typeid(r_sample_entity)) if (typeid((*it)->GetGeometry()) == typeid(r_sample_entity.GetGeometry())) pFoundEntities.push_back(*it); } return pFoundEntities; } /// Create the entities based on the connectivities /// It is noted that the newly created entities are not added to the other model_part. User must do it manually. 
template<typename TConnectivityType, typename TEntityType, typename TEntitiesContainerType> static TEntitiesContainerType CreateEntities( const TConnectivityType& r_connectivities, ModelPart& r_model_part, TEntityType const& r_sample_entity, std::size_t& last_entity_id, Properties::Pointer pProperties, const std::string& NodeKey) { TEntitiesContainerType pNewEntities; typename TEntityType::NodesArrayType temp_entity_nodes; for (typename TConnectivityType::const_iterator it = r_connectivities.begin(); it != r_connectivities.end(); ++it) { temp_entity_nodes.clear(); for (typename TConnectivityType::value_type::const_iterator it2 = it->begin(); it2 != it->end(); ++it2) temp_entity_nodes.push_back(*(FindKey(r_model_part.Nodes(), *it2, NodeKey).base())); typename TEntityType::Pointer pNewEntity = r_sample_entity.Create(++last_entity_id, temp_entity_nodes, pProperties); pNewEntities.push_back(pNewEntity); } return pNewEntities; } /// Create a list of entities (element/condition) from a model_part to another model_part /// It is noted that the newly created entities are not added to the other model_part. User must do it manually. 
template<class TEntityType, class TEntitiesContainerType> static TEntitiesContainerType CreateEntities( TEntitiesContainerType& pEntities, ModelPart& r_other_model_part, TEntityType const& r_sample_entity, std::size_t& last_entity_id, Properties::Pointer pProperties, const bool& retain_prop_id = false) { // first collect all the nodes from the elements std::map<std::size_t, NodeType::Pointer> pNodes; for (typename TEntitiesContainerType::ptr_iterator it = pEntities.ptr_begin(); it != pEntities.ptr_end(); ++it) { for (std::size_t i = 0; i < (*it)->GetGeometry().size(); ++i) { pNodes[(*it)->GetGeometry()[i].Id()] = (*it)->GetGeometry().pGetPoint(i); } } // create the new nodes in the other model_part std::size_t last_node_id = GetLastNodeId(r_other_model_part); std::map<std::size_t, std::size_t> MapOldToNew; for (std::map<std::size_t, NodeType::Pointer>::iterator it = pNodes.begin(); it != pNodes.end(); ++it) { const PointType& rPoint = it->second->GetInitialPosition(); NodeType::Pointer pNewNode = r_other_model_part.CreateNewNode(++last_node_id, rPoint[0], rPoint[1], rPoint[2]); MapOldToNew[it->second->Id()] = last_node_id; } // create new elements in the other model_part const std::string NodeKey = std::string("Node"); typename TEntityType::NodesArrayType temp_entity_nodes; TEntitiesContainerType pNewEntities; for (typename TEntitiesContainerType::ptr_iterator it = pEntities.ptr_begin(); it != pEntities.ptr_end(); ++it) { temp_entity_nodes.clear(); for (std::size_t i = 0; i < (*it)->GetGeometry().size(); ++i) { std::size_t node_id = MapOldToNew[(*it)->GetGeometry()[i].Id()]; temp_entity_nodes.push_back(*(FindKey(r_other_model_part.Nodes(), node_id, NodeKey).base())); } if (!retain_prop_id) { pNewEntities.push_back(r_sample_entity.Create(++last_entity_id, temp_entity_nodes, pProperties)); } else { Properties::Pointer pNewProperties = r_other_model_part.pGetProperties((*it)->GetProperties().Id()); pNewEntities.push_back(r_sample_entity.Create(++last_entity_id, 
temp_entity_nodes, pNewProperties)); } } return pNewEntities; } // /// Create conditions/elements from the list of points. // /// The triangulation will be performed on the physical points with the information {cemter, normal, t1, t2} // /// The refinement is performed on the local points instead. // /// The point list will be triangulated before the conditions are created. // /// It is noted that the newly created entities are not added to the other model_part. User must do it manually. Nevertheless, the nodes are added to the model_part. // /// The new node will be created from last_node_id+1 // template<typename TPointType, typename TVectorType, class TEntityType, class TPointsContainerType, class TNodesContainerType, class TEntitiesContainerType> // static std::tuple<TPointsContainerType, TNodesContainerType, TEntitiesContainerType> CreateEntities( // const TPointsContainerType& physical_points, // const TVectorType& rCenter, // const TVectorType& rNormal, // const TVectorType& rTangent1, // const TVectorType& rTangent2, // const TPointsContainerType& local_points, // const std::size_t& nrefine, // ModelPart& r_model_part, // TEntityType const& r_sample_entity, // std::size_t& last_node_id, // std::size_t& last_entity_id, // Properties::Pointer pProperties) // { // // compute the triangulation // typedef unsigned int IndexType; // typedef std::vector<std::vector<IndexType> > connectivity_t; // connectivity_t Connectivities = GenerateTriangleGrid<TPointType, TVectorType, IndexType>(physical_points, rCenter, rNormal, rTangent1, rTangent2); // // refine if needed // TPointsContainerType new_points = local_points; // for (std::size_t i = 0; i < nrefine; ++i) // RefineTriangleGrid<unsigned int, TPointType>(new_points, Connectivities); // // offset the connectivity // for (std::size_t i = 0; i < Connectivities.size(); ++i) // for (std::size_t j = 0; j < Connectivities[i].size(); ++j) // Connectivities[i][j] += last_node_id+1; // // std::cout << "Connectivities:" << 
std::endl; // // for (std::size_t i = 0; i < Connectivities.size(); ++i) // // { // // std::cout << " " << i << ":"; // // for (std::size_t j = 0; j < Connectivities[i].size(); ++j) // // std::cout << " " << Connectivities[i][j]; // // std::cout << std::endl; // // } // // create the nodes // std::vector<std::size_t> map_con_to_mp(new_points.size()); // TNodesContainerType pNewNodes; // for (std::size_t i = 0; i < new_points.size(); ++i) // { // NodeType::Pointer pNewNode = r_model_part.CreateNewNode(++last_node_id, new_points[i][0], new_points[i][1], new_points[i][2]); // map_con_to_mp[i] = pNewNode->Id(); // pNewNodes.push_back(pNewNode); // } // // create the entities based on connectivity // const std::string NodeKey = std::string("Node"); // TEntitiesContainerType pNewEntities = CreateEntities<connectivity_t, TEntityType, TEntitiesContainerType>(Connectivities, r_model_part, // r_sample_entity, last_entity_id, pProperties, NodeKey); // return std::make_tuple(new_points, pNewNodes, pNewEntities); // } //**********AUXILIARY FUNCTION************************************************************** // Construct the matrix structure for high performance assembling // This subroutine shall only be used to construct the matrix structure for L2 projection // using in post-processing //****************************************************************************************** template<typename TElementType, typename TCompressedMatrixType, typename TElementsArrayType> static void ConstructL2MatrixStructure ( TCompressedMatrixType& A, TElementsArrayType& rElements, std::map<std::size_t, std::size_t> MapNodeIdToVec) { std::size_t equation_size = A.size1(); std::vector<std::vector<std::size_t> > indices(equation_size); typename TElementType::EquationIdVectorType ids; for(typename TElementsArrayType::iterator i_element = rElements.begin() ; i_element != rElements.end() ; ++i_element) { ids.resize((i_element)->GetGeometry().size()); for(unsigned int i = 0; i < 
(i_element)->GetGeometry().size(); ++i) ids[i] = MapNodeIdToVec[(i_element)->GetGeometry()[i].Id()]; for(std::size_t i = 0 ; i < ids.size() ; ++i) { if(ids[i] < equation_size) { std::vector<std::size_t>& row_indices = indices[ids[i]]; for(std::size_t j = 0 ; j < ids.size() ; ++j) { if(ids[j] < equation_size) AddUnique(row_indices, ids[j]); } } } } //allocating the memory needed int data_size = 0; for(std::size_t i = 0 ; i < indices.size() ; ++i) { data_size += indices[i].size(); } A.reserve(data_size, false); //filling with zero the matrix (creating the structure) #ifndef _OPENMP for(std::size_t i = 0 ; i < indices.size() ; ++i) { std::vector<std::size_t>& row_indices = indices[i]; std::sort(row_indices.begin(), row_indices.end()); for(std::vector<std::size_t>::iterator it = row_indices.begin(); it != row_indices.end() ; ++it) { A.push_back(i, *it, 0.00); } row_indices.clear(); } #else int number_of_threads = omp_get_max_threads(); std::vector<unsigned int> matrix_partition; OpenMPUtils::CreatePartition(number_of_threads, indices.size(), matrix_partition); for( int k=0; k < number_of_threads; ++k ) { #pragma omp parallel if( omp_get_thread_num() == k ) { for( std::size_t i = matrix_partition[k]; i < matrix_partition[k+1]; i++ ) { std::vector<std::size_t>& row_indices = indices[i]; std::sort(row_indices.begin(), row_indices.end()); for(std::vector<std::size_t>::iterator it = row_indices.begin(); it != row_indices.end() ; ++it) { A.push_back(i, *it, 0.00); } row_indices.clear(); } } } #endif } //**********AUXILIARY FUNCTION************************************************************** // Construct the matrix structure for high performance assembling // This subroutine shall only be used to construct the matrix structure for L2 projection // using in post-processing //****************************************************************************************** template<typename TElementType, typename TCompressedMatrixType, typename TElementsArrayType> static void 
ConstructL2MatrixStructure ( TCompressedMatrixType& A, TElementsArrayType& rElements) { std::size_t equation_size = A.size1(); std::vector<std::vector<std::size_t> > indices(equation_size); typename TElementType::EquationIdVectorType ids; for(typename TElementsArrayType::iterator i_element = rElements.begin() ; i_element != rElements.end() ; ++i_element) { ids.resize((i_element)->GetGeometry().size()); for(unsigned int i = 0; i < (i_element)->GetGeometry().size(); ++i) ids[i] = (i_element)->GetGeometry()[i].Id() - 1; for(std::size_t i = 0 ; i < ids.size() ; ++i) { if(ids[i] < equation_size) { std::vector<std::size_t>& row_indices = indices[ids[i]]; for(std::size_t j = 0 ; j < ids.size() ; ++j) { if(ids[j] < equation_size) AddUnique(row_indices, ids[j]); } } } } //allocating the memory needed int data_size = 0; for(std::size_t i = 0 ; i < indices.size() ; ++i) { data_size += indices[i].size(); } A.reserve(data_size, false); //filling with zero the matrix (creating the structure) #ifndef _OPENMP for(std::size_t i = 0 ; i < indices.size() ; i++) { std::vector<std::size_t>& row_indices = indices[i]; std::sort(row_indices.begin(), row_indices.end()); for(std::vector<std::size_t>::iterator it= row_indices.begin(); it != row_indices.end() ; it++) { A.push_back(i, *it, 0.00); } row_indices.clear(); } #else int number_of_threads = omp_get_max_threads(); std::vector<unsigned int> matrix_partition; OpenMPUtils::CreatePartition(number_of_threads, indices.size(), matrix_partition); for( int k=0; k < number_of_threads; ++k ) { #pragma omp parallel if( omp_get_thread_num() == k ) { for( std::size_t i = matrix_partition[k]; i < matrix_partition[k+1]; i++ ) { std::vector<std::size_t>& row_indices = indices[i]; std::sort(row_indices.begin(), row_indices.end()); for(std::vector<std::size_t>::iterator it= row_indices.begin(); it != row_indices.end() ; it++) { A.push_back(i, *it, 0.00); } row_indices.clear(); } } } #endif } //**********AUXILIARY 
FUNCTION************************************************************** // Support function for ConstructMatrixStructure //****************************************************************************************** static inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate) { std::vector<std::size_t>::iterator i = v.begin(); std::vector<std::size_t>::iterator endit = v.end(); while ( i != endit && (*i) != candidate) { ++i; } if( i == endit ) { v.push_back(candidate); } } //**********AUXILIARY FUNCTION************************************************************** //****************************************************************************************** static inline double CoordinateScaling(const double& x, const int& Type) { if(Type == _NURBS_) { return x; } else if(Type == _BEZIER_) { return 2 * x - 1; } else return 0.0; } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. virtual std::string Info() const { std::stringstream buffer; buffer << "IsogeometricPostUtility"; return buffer.str(); } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const { rOStream << "IsogeometricPostUtility"; } /// Print object's data. virtual void PrintData(std::ostream& rOStream) const {} ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. 
// Declared in the private "Un accessible methods" section: copying this utility is
// deliberately disabled for external code. NOTE(review): the assignment operator is
// defined as a self-return no-op rather than left undefined — presumably legacy
// pre-C++11 style; `= delete` would express the intent, but behaviour is unchanged.
IsogeometricPostUtility& operator=(IsogeometricPostUtility const& rOther)
{
    return *this;
}

/// Copy constructor. Private and empty: the class holds no state to copy.
IsogeometricPostUtility(IsogeometricPostUtility const& rOther)
{
}

///@}

}; // Class IsogeometricPostUtility

///@}

///@name Type Definitions
///@{

///@}

///@name Input and output
///@{

/// input stream function (no-op: nothing to read into a stateless utility)
inline std::istream& operator >>(std::istream& rIStream, IsogeometricPostUtility& rThis)
{
    return rIStream;
}

/// output stream function: prints the class name followed by its (empty) data.
inline std::ostream& operator <<(std::ostream& rOStream, const IsogeometricPostUtility& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);
    return rOStream;
}
///@}

///@} addtogroup block

}// namespace Kratos.

#endif // KRATOS_ISOGEOMETRIC_POST_UTILITY_H_INCLUDED
valid.mob8.src.h
#pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_512_7_7_512_3_3.h"
#include "gen_ukr_A1B2gemm_1_512_7_7_512_3_3.h"

/*
 * Auto-generated convolution driver (1x512x7x7 input, 512 filters, 3x3 kernel).
 * Phase 1: every thread repacks its slice of the weights oriB into B via 8x8 AVX
 *          transposes (16-wide filter panels, two 8-row transposes per panel).
 * Phase 2: a generator-emitted loop nest ("push button generated block") walks the
 *          tiled iteration space and dispatches SIMD micro-kernels.
 * Assumes this is called from inside an OpenMP parallel region (uses
 * omp_get_thread_num() and a bare `#pragma omp barrier`).
 * NOTE(review): the `tid%1` / `tid/1` factors and `*1` strides are degenerate
 * single-rank partitioning left in by the generator; Nx/Ny/Nh are set but unused
 * (the 7s and 3x3 geometry are hard-coded below) — presumably generator residue.
 */
void testrun(float* A ,float*B, float*C, float*oriB ){
    int tid = omp_get_thread_num();
    int Nx = 7;
    int Ny = 7;
    int Nh = 3;
    // Per-call scratch: row strides handed to the micro-kernel; temporarily bumped
    // (and restored) in the wrap-around branch below.
    long long Astrides[6] = {0,2,4,6,8,10};
    int b1 = 0;  // batch index; batch size is 1 here
    for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
        for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
            transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
            transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
        }
    }
    // All threads must finish repacking B before any thread consumes it.
#pragma omp barrier
// begin push button generated block
    for(int c5=0;c5<512+0;c5+=512) {
    for(int xy5=0;xy5<49+0;xy5+=49) {
    for(int f5=0;f5<512+0;f5+=512) {
    for(int c4=c5;c4<min(512, 512+c5);c4+=512) {
    for(int xy4=xy5;xy4<min(49, 49+xy5);xy4+=49) {
    for(int f4=f5;f4<min(512, 512+f5);f4+=512) {
    for(int c3=c4;c3<min(512, 512+c4);c3+=Tc1) {
    for(int f3=f4;f3<min(512, 512+f4);f3+=Tf2) {
    for(int xy3=xy4;xy3<min(49, 49+xy4);xy3+=Txy3) {
    for(int xy2=xy3;xy2<min(49, Txy3+xy3);xy2+=6) {
    for(int f2=f3;f2<min(512, Tf2+f3);f2+=16) {
    for(int c2=c3;c2<min(512, Tc1+c3);c2+=Tc1) {
    for(int c1=c2;c1<min(512, Tc1+c2);c1+=Tc1) {
    for(int xy1=xy2;xy1<min(49, 6+xy2);xy1+=6) {
    for(int f1=f2;f1<min(512, 16+f2);f1+=16) {
        int ctile=min(Tc1, 512-c1);               // channels left in this tile
        int x1=xy1/7;                             // output row (7x7 spatial)
        int y1=xy1%7/1;                           // output column
        int c1_1=c1/1;
        int c1_2=c1%1/1;
        int kf1_1=f1/16;                          // 16-wide filter panel index in packed B
        int kf1_2=f1%16/1;
        int of1_1=f1/1;
        int of1_2=f1%1/1;
        int offsetA=0+b1*131072+c1_1*256+2*x1*16+2*y1*1+c1_2*1;
        int offsetB=0+kf1_1*73728+c1*144+0*48+0*16+kf1_2*1;
        int offsetC=0+b1*25088+of1_1*49+x1*7+y1*1+of1_2*1;
        if(7-y1>=6){
            // All 6 outputs of this micro-tile fit in the current row: plain strides.
            cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
        }
        else if(7*7-xy1>=6){
            // The 6-output tile wraps to the next output row: shift the strides of
            // the wrapped lanes by 18, run the kernel, then restore them.
            for(int sti=7-y1;sti<6;sti+=1) {
                Astrides[sti]+=18;
            }
            cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
            for(int sti=7-y1;sti<6;sti+=1) {
                Astrides[sti]-=18;
            }
        }
        else{
            // Fewer than 6 outputs remain (tail of the 49-element space): 1-wide kernel.
            cnn_ukr_float_scatter_1x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
        }
    } } } } } } } } } } } } } } }
// end push button generated block
}
mpm_search_element_utility.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ \.
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Bodhinanda Chandra
//

#ifndef KRATOS_MPM_SEARCH_ELEMENT_UTILITY
#define KRATOS_MPM_SEARCH_ELEMENT_UTILITY

// System includes

// External includes

// Project includes
#include "includes/define.h"
#include "utilities/binbased_fast_point_locator.h"
#include "utilities/quadrature_points_utility.h"
#include "particle_mechanics_application_variables.h"
#include "geometries/geometry.h"
#include "includes/model_part.h"
#include "pqmpm_partition_utilities.h"

namespace Kratos
{
namespace MPMSearchElementUtility
{
    // Standard types
    typedef std::size_t IndexType;
    typedef std::size_t SizeType;
    typedef Node<3> NodeType;
    typedef typename ModelPart::GeometryType GeometryType;

    /// z-component of the cross product of two (x,y,0) vectors — the 2D determinant.
    /// NOTE(review): arguments are taken by value; const references would avoid copies.
    inline double CrossProductDet2D(array_1d<double, 3> VectorA, array_1d<double, 3> VectorB)
    {
        return (VectorA[0] * VectorB[1] - VectorB[0] * VectorA[1]);
    }

    /// Thin wrapper over Geometry::IsInside; also writes the local coordinates of
    /// Coords into LocalCoords. NOTE(review): IsCalcLocalCoords is currently unused.
    inline bool CheckIsInside(const GeometryType& rGeom, array_1d<double, 3>& LocalCoords, const array_1d<double, 3>& Coords, const double Tolerance, const bool IsCalcLocalCoords = true)
    {
        // TODO some optimisation for simple 2D shapes.
        return rGeom.IsInside(Coords, LocalCoords, Tolerance);
    }

    /// Builds and caches (in GEOMETRY_NEIGHBOURS) the list of grid geometries that
    /// share at least one node with rGeom. Brute-force O(elements * nodes^2) scan;
    /// runs once per geometry because FindGridGeom only calls it when the cache is
    /// missing. Only the final SetValue is serialised with `omp critical`; the scan
    /// itself works on a local vector.
    inline void ConstructNeighbourRelations(GeometryType& rGeom, const ModelPart& rBackgroundGridModelPart)
    {
        std::vector<typename Geometry<Node<3>>::Pointer> geometry_neighbours;
        for (IndexType j = 0; j < rBackgroundGridModelPart.NumberOfElements(); j++)
        {
            auto p_geometry_neighbour = (rBackgroundGridModelPart.ElementsBegin() + j)->pGetGeometry();
            if (p_geometry_neighbour->Id() != rGeom.Id()) // dont add the parent as its own neighbour
            {
                for (IndexType n = 0; n < p_geometry_neighbour->size(); n++)
                {
                    for (IndexType k = 0; k < rGeom.size(); k++)
                    {
                        if (rGeom[k].Id() == (*p_geometry_neighbour)[n].Id())
                        {
                            // Prevent duplicate additions
                            bool add_entry = true;
                            for (size_t i = 0; i < geometry_neighbours.size(); i++)
                            {
                                if (geometry_neighbours[i]->Id() == p_geometry_neighbour->Id())
                                {
                                    add_entry = false;
                                    break;
                                }
                            }
                            if (add_entry)
                            {
                                geometry_neighbours.push_back(p_geometry_neighbour);
                            }
                            break;
                        }
                    }
                }
            }
        }
        #pragma omp critical
        rGeom.SetValue(GEOMETRY_NEIGHBOURS, geometry_neighbours);
    }

    /// True when the explicit "fix MP on grid edge" option is active, the quadrature
    /// point has a single integration point, and at least one shape-function value is
    /// (numerically) zero — i.e. the material point sits exactly on an element edge.
    inline bool IsExplicitAndNeedsCorrection(GeometryType::Pointer pQuadraturePoint, const ProcessInfo& rProcessInfo)
    {
        if (rProcessInfo.Has(IS_FIX_EXPLICIT_MP_ON_GRID_EDGE)) {
            if (rProcessInfo.GetValue(IS_FIX_EXPLICIT_MP_ON_GRID_EDGE)) {
                if (pQuadraturePoint->IntegrationPointsNumber() == 1) {
                    for (size_t i = 0; i < pQuadraturePoint->ShapeFunctionsValues().size2(); ++i) {
                        if (pQuadraturePoint->ShapeFunctionsValues()(0, i) < std::numeric_limits<double>::epsilon())
                            return true;
                    }
                }
            }
        }
        return false;
    }

    /// Locates the grid geometry containing point xg: first checks the previous
    /// parent rParentGeom, then its node-sharing neighbours (built lazily above).
    /// On success IsFound is set and rLocalCoords holds xg in local coordinates;
    /// on failure the (wrong) parent is returned and IsFound stays false — callers
    /// must check IsFound before using the result.
    inline GeometryType& FindGridGeom(GeometryType& rParentGeom,
        const ModelPart& rBackgroundGridModelPart,
        const double Tolerance,
        const array_1d<double, 3>& xg,
        array_1d<double, 3>& rLocalCoords,
        const ProcessInfo& rProcessInfo,
        bool& IsFound)
    {
        IsFound = false;
        if (CheckIsInside(rParentGeom, rLocalCoords, xg, Tolerance))
        {
            IsFound = true;
            return rParentGeom;
        }
        else
        {
            if (!rParentGeom.Has(GEOMETRY_NEIGHBOURS))
                ConstructNeighbourRelations(rParentGeom, rBackgroundGridModelPart);
            auto& geometry_neighbours = rParentGeom.GetValue(GEOMETRY_NEIGHBOURS);
            for (IndexType k = 0; k < geometry_neighbours.size(); ++k)
            {
                if (CheckIsInside(*geometry_neighbours[k], rLocalCoords, xg, Tolerance))
                {
                    IsFound = true;
                    return *(geometry_neighbours[k].get());
                }
            }
        }
        return rParentGeom;
    }

    /// Recomputes the local coordinates of rCoordinates inside the quadrature-point
    /// geometry and delegates to the PQMPM partitioner to split the master material
    /// point into sub-points.
    inline void UpdatePartitionedQuadraturePoint(const ModelPart& rBackgroundGridModelPart,
        const array_1d<double, 3>& rCoordinates,
        Element& rMasterMaterialPoint,
        typename GeometryType::Pointer pQuadraturePointGeometry,
        const double Tolerance)
    {
        KRATOS_TRY;
        array_1d<double, 3> local_coords;
        pQuadraturePointGeometry->IsInside(rCoordinates, local_coords, Tolerance);
        PQMPMPartitionUtilities::PartitionMasterMaterialPointsIntoSubPoints(rBackgroundGridModelPart, rCoordinates, local_coords, rMasterMaterialPoint, pQuadraturePointGeometry, Tolerance);
        KRATOS_CATCH("");
    }

    /// Neighbour-based (cheap) search pass for material-point elements: tries the
    /// previous parent geometry and its neighbours via FindGridGeom. Elements that
    /// cannot be placed are appended to rMissingElements for the bin-based fallback.
    inline void NeighbourSearchElements(const ModelPart& rMPMModelPart,
        const ModelPart& rBackgroundGridModelPart,
        std::vector<typename Element::Pointer>& rMissingElements,
        const double Tolerance)
    {
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(rMPMModelPart.Elements().size()); ++i) {
            auto element_itr = (rMPMModelPart.ElementsBegin() + i);
            array_1d<double, 3> local_coordinates;
            bool is_found = false;
            std::vector<array_1d<double, 3>> xg;
            element_itr->CalculateOnIntegrationPoints(MP_COORD, xg, rBackgroundGridModelPart.GetProcessInfo());
            GeometryType& r_found_geom = FindGridGeom(element_itr->GetGeometry().GetGeometryParent(0),
                rBackgroundGridModelPart, Tolerance, xg[0], local_coordinates,
                rMPMModelPart.GetProcessInfo(), is_found);
            if (is_found) {
                const bool is_pqmpm = (rBackgroundGridModelPart.GetProcessInfo().Has(IS_PQMPM))
                    ? rBackgroundGridModelPart.GetProcessInfo().GetValue(IS_PQMPM) : false;
                if (is_pqmpm) {
                    // Updates the quadrature point geometry.
                    (*element_itr).GetGeometry().SetGeometryParent(&r_found_geom);
                    PQMPMPartitionUtilities::PartitionMasterMaterialPointsIntoSubPoints(rBackgroundGridModelPart, xg[0],
                        local_coordinates, *element_itr, element_itr->pGetGeometry(), Tolerance);
                }
                else {
                    CreateQuadraturePointsUtility<Node<3>>::UpdateFromLocalCoordinates(
                        element_itr->pGetGeometry(), local_coordinates,
                        element_itr->GetGeometry().IntegrationPoints()[0].Weight(), r_found_geom);
                }
                if (IsExplicitAndNeedsCorrection(element_itr->pGetGeometry(), rBackgroundGridModelPart.GetProcessInfo()))
                    is_found = false; // force the bin-based search to re-place (nudge) this MP
                else {
                    // NOTE(review): nodes can be shared between elements handled by different
                    // threads, so Set(ACTIVE) here is concurrent on shared nodes — confirm
                    // Flags::Set is safe for same-value concurrent writes.
                    for (IndexType j = 0; j < r_found_geom.PointsNumber(); ++j)
                        r_found_geom.Points()[j].Set(ACTIVE);
                }
            }
            if (!is_found) {
                #pragma omp critical
                rMissingElements.push_back(&*element_itr);
            }
        }
    }

    /// Neighbour-based search pass for particle-based boundary conditions; mirrors
    /// NeighbourSearchElements but only processes conditions flagged BOUNDARY that
    /// carry MPC_COORD data.
    inline void NeighbourSearchConditions(const ModelPart& rMPMModelPart,
        const ModelPart& rBackgroundGridModelPart,
        std::vector<typename Condition::Pointer>& rMissingConditions,
        const double Tolerance)
    {
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(rMPMModelPart.Conditions().size()); ++i) {
            auto condition_itr = rMPMModelPart.Conditions().begin() + i;
            std::vector<array_1d<double, 3>> xg;
            condition_itr->CalculateOnIntegrationPoints(MPC_COORD, xg, rMPMModelPart.GetProcessInfo());
            if (xg.size() > 0 && condition_itr->Is(BOUNDARY)) {
                array_1d<double, 3> local_coordinates;
                bool is_found = false;
                GeometryType& r_found_geom = FindGridGeom(condition_itr->GetGeometry().GetGeometryParent(0),
                    rBackgroundGridModelPart, Tolerance, xg[0], local_coordinates,
                    rMPMModelPart.GetProcessInfo(), is_found);
                if (is_found) {
                    CreateQuadraturePointsUtility<Node<3>>::UpdateFromLocalCoordinates(
                        condition_itr->pGetGeometry(), local_coordinates,
                        condition_itr->GetGeometry().IntegrationPoints()[0].Weight(), r_found_geom);
                    for (IndexType j = 0; j < r_found_geom.PointsNumber(); ++j)
                        r_found_geom[j].Set(ACTIVE);
                }
                else {
                    #pragma omp critical
                    rMissingConditions.push_back(&*condition_itr);
                }
            }
        }
    }

    inline bool
IsFixExplicitAndOnElementEdge(const Vector& N, const ProcessInfo& rProcessInfo)
    {
        // Shape-function-vector variant of IsExplicitAndNeedsCorrection, used by the
        // bin-based search where only N (not a quadrature-point geometry) is at hand.
        if (rProcessInfo.Has(IS_FIX_EXPLICIT_MP_ON_GRID_EDGE)) {
            if (rProcessInfo.GetValue(IS_FIX_EXPLICIT_MP_ON_GRID_EDGE)) {
                // check if MP is exactly on the edge of the element, this gives spurious strains in explicit
                for (SizeType i = 0; i < N.size(); ++i) {
                    if (std::abs(N[i]) < std::numeric_limits<double>::epsilon()) {
                        return true;
                    }
                }
            }
        }
        return false;
    }

    /// Fallback (expensive) search: places every element/condition that the
    /// neighbour pass could not, using a bin-based point locator built per thread.
    /// Items that still cannot be placed are deactivated and flagged TO_ERASE.
    template <std::size_t TDimension>
    void BinBasedSearchElementsAndConditions(ModelPart& rMPMModelPart,
        ModelPart& rBackgroundGridModelPart,
        std::vector<typename Element::Pointer>& rMissingElements,
        std::vector<typename Condition::Pointer>& rMissingConditions,
        const std::size_t MaxNumberOfResults, const double Tolerance)
    {
        const ProcessInfo& r_process_info = rBackgroundGridModelPart.GetProcessInfo();
        bool is_pqmpm = (r_process_info.Has(IS_PQMPM))
            ? r_process_info.GetValue(IS_PQMPM) : false;
        // Search background grid and make element active
        // NOTE(review): N is declared OUTSIDE the parallel region, so it is shared and
        // written by every thread's FindPointOnMesh call — this looks like a data race;
        // confirm, and consider declaring it inside the region (private) instead.
        Vector N;
        // NOTE(review): max_result (result-container capacity) is a hard-coded 1000,
        // independent of the MaxNumberOfResults parameter — verify this is intended.
        const int max_result = 1000;

        #pragma omp parallel
        {
            // Each thread owns its locator, search database and result buffer.
            BinBasedFastPointLocator<TDimension> SearchStructure(rBackgroundGridModelPart);
            SearchStructure.UpdateSearchDatabase();
            typename BinBasedFastPointLocator<TDimension>::ResultContainerType results(max_result);

            // Element search and assign background grid
            #pragma omp for
            for (int i = 0; i < static_cast<int>(rMissingElements.size()); ++i) {
                auto element_itr = *(rMissingElements.begin() + i);
                std::vector<array_1d<double, 3>> xg;
                element_itr->CalculateOnIntegrationPoints(MP_COORD, xg, rMPMModelPart.GetProcessInfo());
                typename BinBasedFastPointLocator<TDimension>::ResultIteratorType result_begin = results.begin();
                Element::Pointer pelem;
                // FindPointOnMesh find the background element in which a given point falls and the relative shape functions
                bool is_found = SearchStructure.FindPointOnMesh(xg[0], N, pelem, result_begin, MaxNumberOfResults, Tolerance);
                if (is_found == true) {
                    if (IsFixExplicitAndOnElementEdge(N, r_process_info) && !is_pqmpm) {
                        // MP is exactly on the edge. Now we give it a little 'nudge'
                        // along its velocity (DELTA_TIME/1000 step) and re-search.
                        array_1d<double, 3> xg_nudged = array_1d<double, 3>(xg[0]);
                        std::vector<array_1d<double, 3>> mp_vel;
                        element_itr->CalculateOnIntegrationPoints(MP_VELOCITY, mp_vel, rMPMModelPart.GetProcessInfo());
                        xg_nudged += r_process_info[DELTA_TIME] / 1000.0 * mp_vel[0];
                        if (SearchStructure.FindPointOnMesh(xg_nudged, N, pelem, result_begin, MaxNumberOfResults, Tolerance)) {
                            // Nudge worked: commit the moved coordinate to the MP.
                            element_itr->SetValuesOnIntegrationPoints(MP_COORD, { xg_nudged }, rMPMModelPart.GetProcessInfo());
                            KRATOS_INFO("MPMSearchElementUtility") << "WARNING: To prevent spurious explicit stresses, Material Point " << element_itr->Id() << " was nudged." << std::endl;
                        }
                        else {
                            // Nudge moved the MP outside the grid: fall back to the
                            // original position and accept possible edge artefacts.
                            is_found = SearchStructure.FindPointOnMesh(xg[0], N, pelem, result_begin, MaxNumberOfResults, Tolerance);
                            KRATOS_INFO("MPMSearchElementUtility") << "WARNING: Material Point " << element_itr->Id() << " lies exactly on an element edge and may give spurious results." << std::endl;
                        }
                    }
                    pelem->Set(ACTIVE);
                    // NOTE(review): this inner is_pqmpm shadows the function-scope one
                    // computed from the same flag — redundant but harmless.
                    const bool is_pqmpm = (rBackgroundGridModelPart.GetProcessInfo().Has(IS_PQMPM))
                        ? rBackgroundGridModelPart.GetProcessInfo().GetValue(IS_PQMPM) : false;
                    if (is_pqmpm) {
                        // Updates the quadrature point geometry.
                        (*element_itr).GetGeometry().SetGeometryParent((pelem->pGetGeometry().get()));
                        UpdatePartitionedQuadraturePoint(rBackgroundGridModelPart, xg[0], *element_itr, pelem->pGetGeometry(), Tolerance);
                    }
                    else {
                        auto p_quadrature_point_geometry = element_itr->pGetGeometry();
                        array_1d<double, 3> local_coordinates;
                        p_quadrature_point_geometry->PointLocalCoordinates(local_coordinates, xg[0]);
                        CreateQuadraturePointsUtility<Node<3>>::UpdateFromLocalCoordinates(
                            p_quadrature_point_geometry, local_coordinates,
                            p_quadrature_point_geometry->IntegrationPoints()[0].Weight(), pelem->GetGeometry());
                    }
                    auto& r_geometry = element_itr->GetGeometry();
                    for (IndexType j = 0; j < r_geometry.PointsNumber(); ++j)
                        r_geometry[j].Set(ACTIVE);
                }
                else {
                    KRATOS_INFO("MPMSearchElementUtility") << "WARNING: Search Element for Material Point: " << element_itr->Id()
                        << " is failed. Geometry is cleared." << std::endl;
                    element_itr->GetGeometry().clear();
                    element_itr->Reset(ACTIVE);
                    element_itr->Set(TO_ERASE);
                }
            }

            // Condition search and assign background grid
            #pragma omp for
            for (int i = 0; i < static_cast<int>(rMissingConditions.size()); ++i) {
                auto condition_itr = *(rMissingConditions.begin() + i);
                std::vector<array_1d<double, 3>> xg;
                condition_itr->CalculateOnIntegrationPoints(MPC_COORD, xg, rMPMModelPart.GetProcessInfo());
                if (xg.size() > 0) {
                    // Only search for particle based BCs!
                    // Grid BCs are still applied on MP_model_part but we don't want to search for them.
                    typename BinBasedFastPointLocator<TDimension>::ResultIteratorType result_begin = results.begin();
                    Element::Pointer pelem;
                    // FindPointOnMesh find the background element in which a given point falls and the relative shape functions
                    bool is_found = SearchStructure.FindPointOnMesh(xg[0], N, pelem, result_begin, MaxNumberOfResults, Tolerance);
                    if (is_found == true) {
                        auto p_quadrature_point_geometry = condition_itr->pGetGeometry();
                        array_1d<double, 3> local_coordinates;
                        p_quadrature_point_geometry->PointLocalCoordinates(local_coordinates, xg[0]);
                        CreateQuadraturePointsUtility<Node<3>>::UpdateFromLocalCoordinates(
                            p_quadrature_point_geometry, local_coordinates,
                            p_quadrature_point_geometry->IntegrationPoints()[0].Weight(), pelem->GetGeometry());
                        auto& r_geometry = condition_itr->GetGeometry();
                        for (IndexType j = 0; j < r_geometry.PointsNumber(); ++j)
                            r_geometry[j].Set(ACTIVE);
                    }
                    else {
                        KRATOS_INFO("MPMSearchElementUtility") << "WARNING: Search Element for Material Point Condition: " << condition_itr->Id()
                            << " is failed. Geometry is cleared." << std::endl;
                        condition_itr->GetGeometry().clear();
                        condition_itr->Reset(ACTIVE);
                        condition_itr->Set(TO_ERASE);
                    }
                }
            }
        }
    }

    /// Deactivates every background-grid element and all of its nodes, so the search
    /// passes can re-activate only those that actually host material points.
    inline void ResetElementsAndNodes(ModelPart& rBackgroundGridModelPart)
    {
        #pragma omp parallel for
        for (int i = 0; i < static_cast<int>(rBackgroundGridModelPart.Elements().size()); ++i) {
            auto element_itr = rBackgroundGridModelPart.Elements().begin() + i;
            auto& r_geometry = element_itr->GetGeometry();
            element_itr->Reset(ACTIVE);
            for (IndexType j = 0; j < r_geometry.PointsNumber(); ++j)
                r_geometry[j].Reset(ACTIVE);
        }
    }

    /**
     * @brief Search element connectivity for each particle
     * @details A search is performed to know in which grid element the material point falls.
     * If one or more material points fall in the grid element, the grid element is
     * set to be active and its connectivity is associated to the material point
     * element.
     * STEPS:
     * 1) All the elements are set to be INACTIVE
     * 2) A neighbour-based search is performed; grid elements which contain at least a MP are set to be ACTIVE
     * 3) MPs/conditions still unplaced go through the bin-based fallback search
     */
    template<std::size_t TDimension>
    void SearchElement(ModelPart& rBackgroundGridModelPart, ModelPart& rMPMModelPart,
        const std::size_t MaxNumberOfResults, const double Tolerance)
    {
        ResetElementsAndNodes(rBackgroundGridModelPart);

        std::vector<typename Element::Pointer> missing_elements;
        std::vector<typename Condition::Pointer> missing_conditions;

        NeighbourSearchElements(rMPMModelPart, rBackgroundGridModelPart, missing_elements, Tolerance);
        NeighbourSearchConditions(rMPMModelPart, rBackgroundGridModelPart, missing_conditions, Tolerance);

        if (missing_conditions.size() > 0 || missing_elements.size() > 0)
            BinBasedSearchElementsAndConditions<TDimension>(rMPMModelPart,
                rBackgroundGridModelPart, missing_elements, missing_conditions,
                MaxNumberOfResults, Tolerance);
    }
} // end namespace MPMSearchElementUtility
} // end namespace Kratos
#endif // KRATOS_MPM_SEARCH_ELEMENT_UTILITY
softplus_ref.c
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2021, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "module/module.h"
#include "operator/op.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include "utility/float.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include <math.h>

/* Reference softplus: dst[i] = log(exp(src[i]) + 1), element-wise.
 * Parallelised over the channel dimension with OpenMP.
 * Assumes dims[] is {N, C, H, W} based on the indices used — TODO confirm against
 * the tensor docs; note dims[0] (batch) is not iterated, so only batch 0 of each
 * channel block is covered per channel index.
 * Returns 0 on success. */
int ref_softplus_fp32(struct tensor* input_tensor, struct tensor* output_tensor, int num_thread)
{
    /* FIX(consistency): h previously came from output_tensor->dims[2] while w and
     * channels came from the input; reshape() below copies the input dims to the
     * output, so the values are identical — read them all from the input tensor. */
    int w = input_tensor->dims[3];
    int h = input_tensor->dims[2];
    int channels = input_tensor->dims[1];
    int size = h * w;
    int c_step = h * w; /* per-channel element stride (same as size; kept for clarity) */

    float* input_data = input_tensor->data;
    float* out_data = output_tensor->data;

#pragma omp parallel for num_threads(num_thread)
    for (int q = 0; q < channels; q++)
    {
        float* src = input_data + c_step * q;
        float* dst = out_data + c_step * q;
        for (int i = 0; i < size; i++)
        {
            /* double-precision log/exp kept deliberately: this is the reference
             * (accuracy-first) path. NOTE(review): exp() overflows to inf for
             * large inputs where softplus(x) ~ x — acceptable for a reference? */
            dst[i] = log(exp(src[i]) + 1.0f);
        }
    }

    return 0;
}

/* node_ops lifecycle hook: nothing to allocate for this stateless op. */
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* node_ops lifecycle hook: nothing to release. */
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    return 0;
}

/* Dispatch: fetch input/output tensors and run the fp32 kernel.
 * Returns the kernel result, or -1 for unsupported data types. */
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* ir_node = exec_node->ir_node;
    struct graph* ir_graph = ir_node->graph;
    struct tensor* input_tensor;
    struct tensor* output_tensor;

    input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
    output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);

    int ret = -1;
    if (input_tensor->data_type == TENGINE_DT_FP32)
        ret = ref_softplus_fp32(input_tensor, output_tensor, exec_graph->num_thread);
    else
        printf("Input data type %d not to be supported.\n", input_tensor->data_type);

    return ret;
}

/* Shape inference: softplus is element-wise, so output shape == input shape. */
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
    struct node* node = exec_node->ir_node;
    struct graph* ir_graph = node->graph;
    struct tensor* input = get_ir_graph_tensor(ir_graph, node->input_tensors[0]);
    struct tensor* output = get_ir_graph_tensor(ir_graph, node->output_tensors[0]);

    int ret = set_ir_tensor_shape(output, input->dims, input->dim_num);
    return ret;
}

/* Selection score: this reference implementation can always run. */
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
    return OPS_SCORE_CANDO;
}

static struct node_ops hcl_node_ops = {
    .prerun = NULL,
    .run = run,
    .reshape = reshape,
    .postrun = NULL,
    .init_node = init_node,
    .release_node = release_node,
    .score = score
};

int register_softplus_ref_op(void* arg)
{
    return register_builtin_node_ops(OP_SOFTPLUS, &hcl_node_ops);
}

int unregister_softplus_ref_op(void* arg)
{
    return unregister_builtin_node_ops(OP_SOFTPLUS, &hcl_node_ops);
}
distribute_parallel_for_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized void xxx(int argc) { int x; // expected-note {{initialize the variable 'x' to silence this warning}} #pragma omp distribute parallel for simd for (int i = 0; i < 10; ++i) argc = x; // expected-warning {{variable 'x' is uninitialized when used here}} } // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}} #pragma omp distribute parallel for simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute parallel for simd'}} #pragma omp distribute parallel for simd foo void test_no_clause() { int i; #pragma omp distribute parallel for simd for (i = 0; i < 16; ++i) ; // expected-error@+2 {{statement after '#pragma omp distribute parallel for simd' must be a for loop}} #pragma omp distribute parallel for simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd; for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra 
tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd firstprivate(x); for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} #pragma omp distribute parallel for simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_safelen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd safelen for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd safelen() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd safelen 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 
{{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4 for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd safelen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd safelen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd safelen(4, 8) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute parallel for simd safelen(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute parallel for simd safelen(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd safelen(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // 
expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd safelen(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd simdlen for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd simdlen() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd simdlen 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4 for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match 
this '('}} #pragma omp distribute parallel for simd simdlen(4, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd simdlen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd simdlen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute parallel for simd simdlen(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute parallel for simd simdlen(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd simdlen(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd simdlen(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument 
to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_safelen_simdlen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp distribute parallel for simd simdlen(6) safelen(5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp distribute parallel for simd safelen(5) simdlen(6) for (i = 0; i < 16; ++i) ; } void test_collapse() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd collapse for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd collapse(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute parallel for simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute parallel for simd collapse 4) for (i = 0; i < 16; 
++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; 
// expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute parallel for simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute parallel for simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute parallel for simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute parallel for simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd collapse(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute parallel for simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd collapse(2) for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp distribute parallel for 
simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd linear( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd linear(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd linear(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd linear() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd linear(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd linear(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute parallel for simd linear(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute parallel for simd linear(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute parallel for simd linear(x, y, z) for (i = 0; i < 16; ++i) ; } 
void test_aligned() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned( for (i = 0; i < 16; ++i) ; // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd aligned(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd aligned() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd aligned(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd aligned(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp target #pragma omp teams 
#pragma omp distribute parallel for simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd aligned(z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd aligned(x :) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute parallel for simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute parallel for simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp distribute parallel for simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+3 {{defined as 
aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute parallel for simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute parallel for simd private( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd private(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd private() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_lastprivate() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 
{{expected expression}} #pragma omp distribute parallel for simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_firstprivate() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd firstprivate( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute parallel for simd firstprivate(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected 
expression}} #pragma omp distribute parallel for simd firstprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd firstprivate() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute parallel for simd firstprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute parallel for simd firstprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; // expected-error@+3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 {{defined as lastprivate}} #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x) firstprivate(x) for (i = 0; i < 16; ++i) ; // expected-error@+3 2 {{lastprivate variable cannot be firstprivate}} expected-note@+3 2 {{defined as lastprivate}} #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x, y) firstprivate(x, y) for (i = 0; i < 16; ++i) ; // expected-error@+3 3 {{lastprivate variable cannot be firstprivate}} expected-note@+3 3 {{defined as lastprivate}} #pragma omp target #pragma omp teams #pragma omp distribute parallel for simd lastprivate(x, y, z) firstprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp distribute parallel for simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp distribute parallel for simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } }
GB_unop__sin_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): hand edits here will be lost on regeneration; change the
// Generator/ template instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop_apply__sin_fc64_fc64
// op(A') function: GB_unop_tran__sin_fc64_fc64

// C type:   GxB_FC64_t      (double complex)
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij      (no typecast needed)
// unaryop:  cij = csin (aij)          (complex sine, C99 <complex.h>)

// type of the A matrix entries
#define GB_ATYPE \
    GxB_FC64_t

// type of the C matrix entries
#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = csin (x) ;

// casting (A and C have the same type, so this is a plain copy)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GxB_FC64_t aij = Ax [pA] ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ;   \
    Cx [pC] = csin (z) ;   \
}

// true if operator is the identity op with no typecasting
// (0 here: csin is not the identity, so the memcpy fast path is disabled)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SIN || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies csin element-wise over anz entries, in parallel.  Handles both the
// dense/full case (Ab == NULL) and the bitmap case (Ab marks valid entries).

GrB_Info GB_unop_apply__sin_fc64_fc64
(
    GxB_FC64_t *Cx,               // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
    int64_t anz,                  // number of entries to process
    int nthreads                  // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every position p holds a valid entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // dead branch for this operator (macro is 0); kept by the generator
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = csin (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions with no entry
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = csin (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c, which is textually
// included here and specialized by the GB_* macros defined above.

GrB_Info GB_unop_tran__sin_fc64_fc64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
time_omp_task_spawn.c
#ifdef HAVE_CONFIG_H # include "config.h" /* for _GNU_SOURCE */ #endif #include <assert.h> #include <stdio.h> #include <omp.h> #include <qthread/qthread.h> #include <qthread/qtimer.h> #include "argparsing.h" static aligned_t null_task(void *args_) { return 0; } int main(int argc, char *argv[]) { uint64_t count = 1048576; int par_fork = 0; unsigned long threads = 1; qtimer_t timer; double total_time = 0.0; CHECK_VERBOSE(); NUMARG(count, "MT_COUNT"); NUMARG(par_fork, "MT_PAR_FORK"); assert(0 != count); #pragma omp parallel #pragma omp single { timer = qtimer_create(); threads = omp_get_num_threads(); if (par_fork) { qtimer_start(timer); #pragma omp parallel for for (uint64_t i = 0; i < count; i++) { #pragma omp task untied null_task(NULL); } } else { qtimer_start(timer); #pragma omp task untied for (uint64_t i = 0; i < count; i++) { #pragma omp task untied null_task(NULL); } } #pragma omp taskwait qtimer_stop(timer); } total_time = qtimer_secs(timer); qtimer_destroy(timer); printf("%lu %lu %f\n", threads, (unsigned long)count, total_time); return 0; } /* vim:set expandtab */
GB_unop__identity_uint64_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
// NOTE(review): hand edits here will be lost on regeneration; change the
// Generator/ template instead.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB (_unop_apply__identity_uint64_fp32)
// op(A') function: GB (_unop_tran__identity_uint64_fp32)

// C type:   uint64_t
// A type:   float
// cast:     uint64_t cij = GB_cast_to_uint64_t ((double) (aij))
// unaryop:  cij = aij     (identity; the work is all in the typecast)

// type of the A matrix entries
#define GB_ATYPE \
    float

// type of the C matrix entries
#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity)
#define GB_OP(z, x) \
    z = x ;

// casting: float -> uint64_t, widened through double before conversion
// (GB_cast_to_uint64_t handles out-of-range/NaN values -- see GB_casting)
#define GB_CAST(z, aij) \
    uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    float aij = Ax [pA] ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;  \
    Cx [pC] = z ;          \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Typecasts anz float entries to uint64_t, in parallel.  Handles both the
// dense/full case (Ab == NULL) and the bitmap case (Ab marks valid entries).

GrB_Info GB (_unop_apply__identity_uint64_fp32)
(
    uint64_t *Cx,              // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab, // A->b if A is bitmap
    int64_t anz,               // number of entries to process
    int nthreads               // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: every position p holds a valid entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions with no entry
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c, which is textually
// included here and specialized by the GB_* macros defined above.

GrB_Info GB (_unop_tran__identity_uint64_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Contexts.h
/* Copyright (c) 2005-2016, University of Oxford. All rights reserved. University of Oxford means the Chancellor, Masters and Scholars of the University of Oxford, having an administrative office at Wellington Square, Oxford OX1 2JD, UK. This file is part of Aboria. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the University of Oxford nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef CONTEXTS_DETAIL_H_ #define CONTEXTS_DETAIL_H_ #include "detail/Symbolic.h" namespace Aboria { namespace detail { //////////////// /// Contexts /// //////////////// // Here is an evaluation context that indexes into a lazy vector // expression, and combines the result. 
template <typename labels_type, typename dx_type> struct EvalCtx { typedef typename fusion::result_of::size<labels_type>::type size_type; typedef typename fusion::result_of::size<dx_type>::type dx_size_type; static constexpr int dx_size = size_type::value * (size_type::value - 1) / 2; // BOOST_MPL_ASSERT_MSG(dx_size_type::value==dx_size,DX_SIZE_NOT_CONSISTENT_WITH_LABELS_SIZE,(dx_size,dx_size_type)); static_assert(dx_size_type::value == dx_size, "dx size not consitent with labels_size"); EvalCtx(labels_type labels = fusion::nil(), dx_type dx = fusion::nil()) : m_labels(labels), m_dx(dx) {} template <typename Expr // defaulted template parameters, so we can // specialize on the expressions that need // special handling. , typename Tag = typename proto::tag_of<Expr>::type, typename Enable = void> struct eval : proto::default_eval<Expr, EvalCtx const> { // terminals with variables, labels, uniform or normal should not be // evaluated... static_assert( mpl::not_<mpl::or_< proto::matches<Expr, proto::terminal<uniform>>, proto::matches<Expr, proto::terminal<normal>>, proto::matches<Expr, proto::terminal<label<_, _>>>, proto::matches<Expr, proto::terminal<symbolic<_>>>>>::value, "\nError: labels, symbols, uniform or normals can not be evaluated on " "their own.\n\tAlways subscript a symbol with a label, e.g. p[a] or " "symbol[label]"); }; // Handle normal and uniform subscripts here... 
template <typename Expr> struct eval< Expr, proto::tag::subscript, typename boost::enable_if<mpl::and_< proto::matches<typename proto::result_of::child_c<Expr, 1>::type, proto::terminal<label<_, _>>>, mpl::or_< proto::matches<typename proto::result_of::child_c<Expr, 0>::type, proto::terminal<normal>>, proto::matches<typename proto::result_of::child_c<Expr, 0>::type, proto::terminal<uniform>>>, mpl::greater<size_type, mpl::int_<0>>>>::type> { typedef typename proto::result_of::child_c<Expr, 1>::type child1_type; typedef typename proto::result_of::value<child1_type>::type label_type; static_assert(fusion::result_of::has_key<labels_type, label_type>::value, "label not in evaluation context"); typedef double result_type; result_type operator()(Expr &expr, EvalCtx const &ctx) const { // Normal and uniform terminal types have a operator() that takes a // generator. Pass the random generator for the labeled particle to this // operator() return proto::value(proto::child_c<0>(expr))( // need to const_cast this cause everything is // normally held as a const &. Could cause problems??? const_cast<generator_type &>( get<generator>(fusion::at_key<label_type>(ctx.m_labels)))); } }; // Handle other subscripts here... 
template <typename Expr> struct eval< Expr, proto::tag::subscript, typename boost::enable_if<mpl::and_< proto::matches<typename proto::result_of::child_c<Expr, 1>::type, proto::terminal<label<_, _>>>, mpl::not_<mpl::or_< proto::matches<typename proto::result_of::child_c<Expr, 0>::type, proto::terminal<normal>>, proto::matches<typename proto::result_of::child_c<Expr, 0>::type, proto::terminal<uniform>>>>, mpl::greater<size_type, mpl::int_<0>>>>::type> { typedef typename proto::result_of::child_c<Expr, 1>::type child1_type; typedef typename proto::result_of::value<child1_type>::type label_type; /* typedef typename label_type::particles_type particles_type; typedef typename particles_type::const_reference particle_ref; typedef typename fusion::pair<label_type,particle_ref> search_type; */ static_assert(fusion::result_of::has_key<labels_type, label_type>::value, "label not in evaluation context"); typedef typename proto::result_of::child_c<Expr, 0>::type child0_type; typedef typename proto::result_of::value<child0_type>::type symbolic_type; typedef typename symbolic_type::variable_type variable_type; typedef const typename variable_type::value_type &result_type; result_type operator()(Expr &expr, EvalCtx const &ctx) const { return get<variable_type>(fusion::at_key<label_type>(ctx.m_labels)); } }; // Handle dx terminals here... 
// Evaluate a `dx` terminal (particle-pair separation vector).  Only valid in
// a two-label context (size_type == 2); returns the first element of the
// context's dx list.
template <typename Expr>
struct eval<Expr, proto::tag::terminal,
            typename boost::enable_if<
                mpl::and_<proto::matches<Expr, proto::terminal<dx<_, _>>>,
                          mpl::equal<size_type, mpl::int_<2>>>>::type> {
  typedef typename fusion::result_of::front<const dx_type>::type result_type;
  typedef typename proto::result_of::value<Expr>::type expr_dx;
  typedef typename expr_dx::label_a_type expr_label_a_type;
  typedef typename expr_dx::label_b_type expr_label_b_type;
  /*
  BOOST_MPL_ASSERT_MSG((fusion::result_of::has_key<labels_type,expr_label_a_type>::value),ASDFASDFASDF,(expr_dx,labels_type,expr_label_a_type));
  BOOST_MPL_ASSERT_MSG((fusion::result_of::has_key<labels_type,expr_label_b_type>::value),ASDFASDFASDF,(expr_label_b_type));
  */
  // both labels referenced by the dx terminal must be in this context
  static_assert(
      fusion::result_of::has_key<labels_type, expr_label_a_type>::value,
      "dx label a not in evaluation context");
  static_assert(
      fusion::result_of::has_key<labels_type, expr_label_b_type>::value,
      "dx label b not in evaluation context");

  result_type operator()(Expr &expr, EvalCtx const &ctx) const {
    return fusion::front(ctx.m_dx);
  }
};

// Accumulate `expr` over every particle of `label` when the current context
// holds no labels (tag mpl::int_<0>): each particle gets a fresh one-label
// context and the results are folded with accum.functor starting from
// accum.init.
template <typename result_type, typename label_type, typename expr_type,
          typename accumulate_type>
static result_type dense_sum_impl(
    const label_type &label, expr_type &expr, accumulate_type &accum,
    const EvalCtx &ctx, mpl::int_<0>) {
  // note: using tag dispatching here cause I couldn't
  // figure out how to do this via enable_if....
  result_type sum = accum.init;
  auto particles = label.get_particles();
  // NOTE(review): when HAVE_OPENMP is defined, `sum` is read-modify-written
  // by every thread with no reduction or synchronization — this looks like a
  // data race; confirm intent (a reduction clause or serial loop may be
  // needed).
#ifdef HAVE_OPENMP
#pragma omp parallel for
#endif
  for (size_t i = 0; i < particles.size(); ++i) {
    const auto &p = particles[i];
    auto new_labels = fusion::make_map<label_type>(p);
    EvalCtx<decltype(new_labels), decltype(ctx.m_dx)> const new_ctx(
        new_labels, ctx.m_dx);
    sum = accum.functor(sum, proto::eval(expr, new_ctx));
  }
  return sum;
}

// Accumulate `expr` over every particle of label b when the context already
// holds one label (tag mpl::int_<1>): builds a two-label context (a fixed
// from the outer context, b iterated) with the pair separation dx, and folds
// with accum.functor.  Skips all work if the expression is statically zero.
template <typename result_type, typename label_b_type, typename expr_type,
          typename accumulate_type>
static result_type dense_sum_impl(const label_b_type &label, expr_type &expr,
                                  accumulate_type &accum, const EvalCtx &ctx,
                                  mpl::int_<1>) {
  typedef typename label_b_type::particles_type particles_b_type;
  typedef typename particles_b_type::position position;
  typedef typename position::value_type double_d;
  // the outer (a) label/reference come from the single entry of labels_type
  typedef typename std::remove_reference<typename fusion::result_of::at_c<
      labels_type, 0>::type>::type::first_type label_a_type;
  typedef typename std::remove_reference<typename fusion::result_of::at_c<
      labels_type, 0>::type>::type::second_type const_a_reference;
  typedef typename particles_b_type::const_reference const_b_reference;
  typedef typename fusion::map<fusion::pair<label_a_type, const_a_reference>,
                               fusion::pair<label_b_type, const_b_reference>>
      map_type;
  typedef fusion::list<const double_d &> list_type;

  const particles_b_type &particlesb = label.get_particles();
  // dense accumulation does not account for periodic images
  ASSERT(!particlesb.get_periodic().any(), "periodic does not work with dense");
  const_a_reference ai = fusion::front(ctx.m_labels).second;
  const size_t nb = particlesb.size();
  result_type sum = accum.init;
  if (is_trivially_zero(expr)) {
    // statically-zero expression: nothing to accumulate
    return sum;
  } else {
    for (size_t i = 0; i < nb; ++i) {
      const_b_reference bi = particlesb[i];
      EvalCtx<map_type, list_type> const new_ctx(
          fusion::make_map<label_a_type, label_b_type>(ai, bi),
          fusion::make_list(get<position>(bi) - get<position>(ai)));
      sum = accum.functor(sum, proto::eval(expr, new_ctx));
    }
  }
  return sum;
}

// Neighbour-limited version of the accumulation above: only particles of
// label b within accum.max_distance of particle a (found via
// distance_search with the accumulator's norm) contribute to the sum.
template <typename result_type, typename label_b_type, typename expr_type,
          typename accumulate_type, typename dummy = size_type>
static result_type sparse_sum_impl(const label_b_type &label, expr_type &expr,
                                   accumulate_type &accum, const EvalCtx &ctx,
                                   mpl::int_<1>) {
  typedef typename label_b_type::particles_type particles_b_type;
  const particles_b_type &particlesb = label.get_particles();
  typedef typename std::remove_reference<typename fusion::result_of::at_c<
      labels_type, 0>::type>::type::first_type label_a_type;
  typedef typename std::remove_reference<typename fusion::result_of::at_c<
      labels_type, 0>::type>::type::second_type const_a_reference;
  typedef typename particles_b_type::position position;
  typedef typename position::value_type double_d;
  typedef typename particles_b_type::const_reference const_b_reference;
  const_a_reference ai = fusion::front(ctx.m_labels).second;
  typedef typename fusion::map<fusion::pair<label_a_type, const_a_reference>,
                               fusion::pair<label_b_type, const_b_reference>>
      map_type;
  typedef fusion::list<const double_d &> list_type;
  const int LNormNumber = accumulate_type::norm_number_type::value;
  result_type sum = accum.init;
  // TODO: get query range and put it in box search
  for (auto b = distance_search<LNormNumber>(
           particlesb.get_query(), get<position>(ai), accum.max_distance);
       b != false; ++b) {
    // dx comes from the search iterator, so periodic images are handled here
    EvalCtx<map_type, list_type> const new_ctx(
        // fusion::make_map<label_a_type, label_b_type>(ai, *b),
        map_type(ai, *b), list_type(b.dx()));
    // fusion::make_list(b.dx()));
    sum = accum.functor(sum, proto::eval(expr, new_ctx));
  }
  return sum;
}

// Evaluate accumulate-style function expressions, e.g. sum(b, expr):
// child 0 is the accumulator functor terminal, child 1 the label, child 2
// the expression to accumulate.  Dispatches to dense_sum_impl tagged by the
// number of labels already in the context.
template <typename Expr>
struct eval<Expr, proto::tag::function,
            typename boost::enable_if<
                proto::matches<Expr, AccumulateGrammar>>::type> {
  typedef typename proto::result_of::child_c<Expr, 0>::type child0_type;
  typedef typename proto::result_of::value<child0_type>::type
      functor_terminal_type;
  typedef typename functor_terminal_type::functor_type functor_type;
  typedef typename functor_type::result_type result_type;

  result_type operator()(Expr &expr, EvalCtx const &ctx) const {
    return dense_sum_impl<result_type>(
        proto::value(proto::child_c<1>(expr)), proto::child_c<2>(expr),
        proto::value(proto::child_c<0>(expr)), ctx, size_type());
  }
};

// Evaluate accumulate-within-distance expressions: same child layout as
// above but restricted to neighbours, so it dispatches to sparse_sum_impl.
// Only enabled in a one-label context.
template <typename Expr>
struct eval<Expr, proto::tag::function,
            typename boost::enable_if<mpl::and_<
                proto::matches<Expr, AccumulateWithinDistanceGrammar>,
                mpl::equal<size_type, mpl::int_<1>>>>::type> {
  typedef typename proto::result_of::child_c<Expr, 0>::type child0_type;
  typedef typename proto::result_of::value<child0_type>::type
      functor_terminal_type;
  typedef typename functor_terminal_type::functor_type functor_type;
  typedef typename functor_type::result_type result_type;

  result_type operator()(Expr &expr, EvalCtx const &ctx) const {
    return sparse_sum_impl<result_type>(
        proto::value(proto::child_c<1>(expr)), proto::child_c<2>(expr),
        proto::value(proto::child_c<0>(expr)), ctx, size_type());
  }
};

// context state: the label->particle bindings and the pair-separation list
labels_type m_labels;
dx_type m_dx;
};

} // namespace detail
} // namespace Aboria

#endif
host_function.c
#include <stdio.h>
#include <stdlib.h> /* EXIT_SUCCESS / EXIT_FAILURE (was missing) */
#include <omp.h>

/* Device-callable entry point that performs a synchronous host RPC,
 * invoking the given host function pointer on the host. Defined in the
 * hostrpc runtime library. */
#pragma omp declare target
void hostrpc_fptr0(void* fun_ptr);
#pragma omp end declare target

// A host function will synchronously call from a device as a function pointer
void myfun() {
  fprintf(stderr, " This is myfun writing to stderr \n");
}

/*
 * Test driver:
 *  1. calls myfun directly through a function pointer on the host,
 *  2. launches a target region that copies b into a and asks the host
 *     (via hostrpc_fptr0) to run myfun once per iteration,
 *  3. exercises the host-fallback path of hostrpc_fptr0,
 *  4. verifies a[i] == b[i] for all i.
 * Returns EXIT_SUCCESS when the copy is correct, EXIT_FAILURE otherwise.
 */
int main() {
  int N = 10;
  int a[N];
  int b[N];
  int i;
  for (i=0; i<N; i++){
    a[i]=0;
    b[i]=i;
  }
  //void (*fun_ptr)(int) = &myfun;
  void (*fun_ptr)() = &myfun;
  printf("Testing myfun execution as a function pointer \n");
  (*fun_ptr)();
  printf("Testing myfun execution from device using hostrpc_fptr0\n");
#pragma omp target parallel for map(from: a[0:N]) map(to: b[0:N]) is_device_ptr(fun_ptr)
  for (int j = 0; j< N; j++) {
    /* BUG FIX: was a[j+1]=b[j], which wrote one element past the end of a
     * (a[N]) at j==N-1 and made the a[i]==b[i] check below always fail. */
    a[j]=b[j];
    hostrpc_fptr0(fun_ptr);
  }
  printf("Testing the host fallback of hostrpc_fptr0 \n");
  hostrpc_fptr0(fun_ptr);
  int rc = 0;
  for (i=0; i<N; i++)
    if (a[i] != b[i] ) {
      rc++;
      printf ("Wrong value: a[%d]=%d\n", i, a[i]);
    }
  if (!rc){
    printf("Success\n");
    return EXIT_SUCCESS;
  } else{
    printf("Failure\n");
    return EXIT_FAILURE;
  }
}
test.c
/* /gsa/yktgsa/home/e/i/eichen/lnew/obj/bin/clang -v -I/usr/local/cuda/include -I/gsa/yktgsa/home/e/i/eichen/new-tlomp/lomp/source/lib64/ -I/gsa/yktgsa/home/e/i/eichen/new-tlomp/lomp/source/ -L/gsa/yktgsa/home/e/i/eichen/new-tlomp/lomp/source/lib64/ -L/gsa/yktgsa/home/e/i/eichen/new-tlomp/lomp/source/lib64/ -fopenmp=libomp -O3 -target powerpc64le-ibm-linux-gnu -mcpu=pwr8 -fopenmp-targets=nvptx64-nvidia-cuda test-pinned.c -L /usr/local/cuda/lib64/ -lcudart */ #include <stdio.h> #include <stdlib.h> #include <omp.h> #include <assert.h> #ifndef USE_PINNED // shoudl be set in the makefile, add here if tested directly #define USE_PINNED 1 #endif #if USE_PINNED #include <cuda.h> #include <cuda_runtime.h> void *AllocMem(size_t memSize) { void *hostAddr; cudaError_t error; if (omp_get_num_devices() > 0) { fprintf(stderr, "used pinned mem\n"); error = cudaMallocHost(&hostAddr, memSize); assert(error == cudaSuccess); return hostAddr; } fprintf(stderr, "used host mem\n"); hostAddr = malloc(memSize); assert(hostAddr); return hostAddr; } #else void *AllocMem(size_t memSize) { void *hostAddr; fprintf(stderr, "used malloc mem\n"); hostAddr = malloc(memSize); assert(hostAddr); return hostAddr; } #endif #define N 1024 int *a, *b, *c; int main() { int i, errors; // alloc a = (int *) AllocMem(N*sizeof(int)); b = (int *) AllocMem(N*sizeof(int)); c = (int *) AllocMem(N*sizeof(int)); // init for (i=0; i<N; i++) { a[i] = i; b[i] = 2*i; c[i] = -1; } // test #pragma omp target map(to: a[0:N]) map(tofrom: b[0:N]) map(from: c[0:N]) { for(int j=0; j<N; j++) { c[j] = a[j] + b[j]; b[j]++; } } errors = 0; for(i=0; i<N; i++) { int bb = 2*i+1; if (bb != b[i]) printf("%d: b expected %d, got %d, error %d\n", i, bb, b[i], ++errors); if (errors>20) break; } for(i=0; i<N; i++) { int cc = 3*i; if (cc != c[i]) printf("%d: c expected %d, got %d, error %d\n", i, cc, c[i], ++errors); if (errors>20) break; } printf("got %d errors\n", errors); return 1; }
GB_unop__identity_fc32_uint8.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_fc32_uint8)
// op(A') function:  GB (_unop_tran__identity_fc32_uint8)

// C type:   GxB_FC32_t
// A type:   uint8_t
// cast:     GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop:  cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Casts each uint8_t entry of Ax to single-precision complex (imaginary
// part zero) and stores it in Cx, either densely or honoring the bitmap Ab.
GrB_Info GB (_unop_apply__identity_fc32_uint8)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every entry 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transposing variant; the shared loop body lives in GB_unop_transpose.c and
// uses the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_fc32_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif