source
stringlengths
3
92
c
stringlengths
26
2.25M
ex_static.c
#include <stdio.h>
#include <omp.h>
#include <unistd.h>

#define THREADS 8

/*
 * Demonstrates OpenMP loop scheduling with a team of THREADS threads.
 *
 * Part one runs a cheap loop under the default schedule.  Part two runs a
 * deliberately imbalanced loop (iteration i sleeps i seconds) under
 * schedule(static), so the thread that draws the high iteration numbers
 * finishes last.  Output order shows which thread ran which iteration.
 */
int main()
{
  const int N = 10;  /* iteration count for both loops */

  printf("First part....\n");
  #pragma omp parallel for num_threads(THREADS)
  for (int i = 0; i < N; i++)
    printf("Thread %d is doing iteration %d.\n", omp_get_thread_num(), i);

  printf("\n\nSecond part....\n");
  #pragma omp parallel for schedule(static) num_threads(THREADS)
  for (int i = 0; i < N; i++)
  {
    sleep(i); /* wait for i seconds */
    printf("Thread %d has completed iteration %d.\n", omp_get_thread_num(), i);
  }

  return 0;
}
nestedFunc.c
// OpenMP Nested Functions Example
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Reports the thread id, current team size, and maximum threads at three
 * nesting levels: the serial master region, an outer parallel region of
 * 8 threads, and an inner parallel region of 4 threads per outer thread.
 *
 * Fixes over the original:
 *  - The report variables were file-of-main locals shared across the
 *    parallel regions; every thread wrote them concurrently, which is a
 *    data race under the OpenMP data-sharing rules and could garble the
 *    printed reports.  Each region now uses its own locals, which are
 *    private per thread because they are declared inside the region.
 *  - Comments claimed "2 Threads" while the code requests 8 and 4; the
 *    comments now match the code.
 */
int main( int argc, char** argv )
{
  omp_set_nested( 1 );  // Enable Nested Parallelism
  omp_set_dynamic( 0 ); // Disable Dynamic Threads

  // Master Report In -- serial region, so plain locals are safe here
  int num = omp_get_thread_num( );      // Get Thread Number
  int threads = omp_get_num_threads( ); // Get Current Number of Threads
  int max = omp_get_max_threads( );     // Get Maximum Number of Threads
  printf( "Master : Thread %d of %d (%d Max)\n\n", num, threads, max );

  // Outer Level Parallel Region - 8 Threads
  #pragma omp parallel num_threads( 8 )
  {
    // Declared inside the region => private to each thread (no race)
    int outer_num = omp_get_thread_num( );      // Get Thread Number
    int outer_threads = omp_get_num_threads( ); // Get Current Number of Threads
    int outer_max = omp_get_max_threads( );     // Get Maximum Number of Threads
    printf( "Outer : Thread %d of %d (%d Max)\n\n", outer_num, outer_threads,
      outer_max );

    // Inner Level Parallel Region - 4 Threads Each
    #pragma omp parallel num_threads( 4 )
    {
      int inner_num = omp_get_thread_num( );      // Get Thread Number
      int inner_threads = omp_get_num_threads( ); // Get Current Number of Threads
      int inner_max = omp_get_max_threads( );     // Get Maximum Number of Threads
      printf( "Inner : Thread %d of %d (%d Max)\n", inner_num, inner_threads,
        inner_max );
    }
  }
  return 0;
} // End nestedFunc.c - EWG SDG
draw.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD RRRR AAA W W % % D D R R A A W W % % D D RRRR AAAAA W W W % % D D R RN A A WW WW % % DDDD R R A A W W % % % % % % MagickCore Image Drawing Methods % % % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon % rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion", % Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent % (www.appligent.com) contributed the dash pattern, linecap stroking % algorithm, and minor rendering improvements. % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/annotate.h" #include "magick/artifact.h" #include "magick/blob.h" #include "magick/cache.h" #include "magick/cache-private.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory-private.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/paint.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/resample.h" #include "magick/resample-private.h" #include "magick/resource_.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/string-private.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/transform.h" #include "magick/utility.h" /* Define declarations. */ #define BezierQuantum 200 #define PrimitiveExtentPad 128 #define MaxBezierCoordinates 4194304 #define ThrowPointExpectedException(image,token) \ { \ (void) ThrowMagickException(&(image)->exception,GetMagickModule(),DrawError, \ "NonconformingDrawingPrimitiveDefinition","`%s'",token); \ status=MagickFalse; \ break; \ } /* Typedef declarations. 
*/

/*
  One monotonic run of a polygon outline.  ConvertPathToPolygon() splits a
  path into edges whose points move in a single y direction so the
  rasterizer can scan them top to bottom.
*/
typedef struct _EdgeInfo
{
  SegmentInfo
    bounds;         /* x1/x2: min/max x of the points; y1/y2: y of the first
                       and last point (points are y-ordered) */

  double
    scanline;       /* rasterizer cursor; initialized to -1.0 by
                       ConvertPathToPolygon() -- presumably the last scanline
                       processed; confirm against the render loop */

  PointInfo
    *points;        /* heap array of the edge's vertices */

  size_t
    number_points;  /* count of entries in points */

  ssize_t
    direction;      /* nonzero = "down", zero = "up" (see LogPolygonInfo) */

  MagickBooleanType
    ghostline;      /* MagickTrue = edge closes an open subpath; logged as
                       "transparent" vs "opaque" */

  size_t
    highwater;      /* initialized to 0 here; used by later rendering code
                       outside this view -- TODO confirm semantics */
} EdgeInfo;

/*
  Ellipse/circle element parameters.  Not referenced in this chunk;
  presumably center (cx,cy), axes (major,minor) and rotation angle.
*/
typedef struct _ElementInfo
{
  double
    cx,
    cy,
    major,
    minor,
    angle;
} ElementInfo;

/*
  Mutable state threaded through the MVG/path tracing helpers (TraceArc,
  TracePath, ...): the growable primitive array, its extent, the current
  write offset, a current point, and the exception sink.
*/
typedef struct _MVGInfo
{
  PrimitiveInfo
    **primitive_info;

  size_t
    *extent;

  ssize_t
    offset;

  PointInfo
    point;

  ExceptionInfo
    *exception;
} MVGInfo;

/*
  A polygon in sorted-edge rendering form (see ConvertPathToPolygon()).
*/
typedef struct _PolygonInfo
{
  EdgeInfo
    *edges;

  size_t
    number_edges;
} PolygonInfo;

/*
  Path element codes emitted by ConvertPrimitiveToPath(); EndCode terminates
  a PathInfo array.
*/
typedef enum
{
  MoveToCode,
  OpenCode,
  GhostlineCode,
  LineToCode,
  EndCode
} PathInfoCode;

typedef struct _PathInfo
{
  PointInfo
    point;

  PathInfoCode
    code;
} PathInfo;

/*
  Forward declarations.
*/
static Image
  *DrawClippingMask(Image *,const DrawInfo *,const char *,const char *,
    ExceptionInfo *);

static MagickBooleanType
  DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *),
  RenderMVGContent(Image *,const DrawInfo *,const size_t),
  TraceArc(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceArcPath(MVGInfo *,const PointInfo,const PointInfo,const PointInfo,
    const double,const MagickBooleanType,const MagickBooleanType),
  TraceBezier(MVGInfo *,const size_t),
  TraceCircle(MVGInfo *,const PointInfo,const PointInfo),
  TraceEllipse(MVGInfo *,const PointInfo,const PointInfo,const PointInfo),
  TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo),
  TraceRoundRectangle(MVGInfo *,const PointInfo,const PointInfo,PointInfo),
  TraceSquareLinecap(PrimitiveInfo *,const size_t,const double);

static PrimitiveInfo
  *TraceStrokePolygon(const Image *,const DrawInfo *,const PrimitiveInfo *);

static size_t
  TracePath(Image *,MVGInfo *,const char *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e D r a w I n f o                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireDrawInfo() returns a DrawInfo structure
properly initialized. % % The format of the AcquireDrawInfo method is: % % DrawInfo *AcquireDrawInfo(void) % */ MagickExport DrawInfo *AcquireDrawInfo(void) { DrawInfo *draw_info; draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info)); GetDrawInfo((ImageInfo *) NULL,draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneDrawInfo() makes a copy of the given draw_info structure. If NULL % is specified, a new DrawInfo structure is created initialized to default % values. % % The format of the CloneDrawInfo method is: % % DrawInfo *CloneDrawInfo(const ImageInfo *image_info, % const DrawInfo *draw_info) % % A description of each parameter follows: % % o image_info: the image info. % % o draw_info: the draw info. % */ MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info, const DrawInfo *draw_info) { DrawInfo *clone_info; clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info)); GetDrawInfo(image_info,clone_info); if (draw_info == (DrawInfo *) NULL) return(clone_info); if (draw_info->primitive != (char *) NULL) (void) CloneString(&clone_info->primitive,draw_info->primitive); if (draw_info->geometry != (char *) NULL) (void) CloneString(&clone_info->geometry,draw_info->geometry); clone_info->compliance=draw_info->compliance; clone_info->viewbox=draw_info->viewbox; clone_info->affine=draw_info->affine; clone_info->gravity=draw_info->gravity; clone_info->fill=draw_info->fill; clone_info->stroke=draw_info->stroke; clone_info->stroke_width=draw_info->stroke_width; if (draw_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue, &draw_info->fill_pattern->exception); else if (draw_info->tile != (Image *) NULL) clone_info->fill_pattern=CloneImage(draw_info->tile,0,0,MagickTrue, 
&draw_info->tile->exception); clone_info->tile=NewImageList(); /* tile is deprecated */ if (draw_info->stroke_pattern != (Image *) NULL) clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0, MagickTrue,&draw_info->stroke_pattern->exception); clone_info->stroke_antialias=draw_info->stroke_antialias; clone_info->text_antialias=draw_info->text_antialias; clone_info->fill_rule=draw_info->fill_rule; clone_info->linecap=draw_info->linecap; clone_info->linejoin=draw_info->linejoin; clone_info->miterlimit=draw_info->miterlimit; clone_info->dash_offset=draw_info->dash_offset; clone_info->decorate=draw_info->decorate; clone_info->compose=draw_info->compose; if (draw_info->text != (char *) NULL) (void) CloneString(&clone_info->text,draw_info->text); if (draw_info->font != (char *) NULL) (void) CloneString(&clone_info->font,draw_info->font); if (draw_info->metrics != (char *) NULL) (void) CloneString(&clone_info->metrics,draw_info->metrics); if (draw_info->family != (char *) NULL) (void) CloneString(&clone_info->family,draw_info->family); clone_info->style=draw_info->style; clone_info->stretch=draw_info->stretch; clone_info->weight=draw_info->weight; if (draw_info->encoding != (char *) NULL) (void) CloneString(&clone_info->encoding,draw_info->encoding); clone_info->pointsize=draw_info->pointsize; clone_info->kerning=draw_info->kerning; clone_info->interline_spacing=draw_info->interline_spacing; clone_info->interword_spacing=draw_info->interword_spacing; clone_info->direction=draw_info->direction; if (draw_info->density != (char *) NULL) (void) CloneString(&clone_info->density,draw_info->density); clone_info->align=draw_info->align; clone_info->undercolor=draw_info->undercolor; clone_info->border_color=draw_info->border_color; if (draw_info->server_name != (char *) NULL) (void) CloneString(&clone_info->server_name,draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) { register ssize_t x; for (x=0; fabs(draw_info->dash_pattern[x]) >= 
MagickEpsilon; x++) ; clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) (x+4), sizeof(*clone_info->dash_pattern)); if (clone_info->dash_pattern == (double *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) memcpy(clone_info->dash_pattern,draw_info->dash_pattern,(size_t) (x+4)*sizeof(*clone_info->dash_pattern)); } clone_info->gradient=draw_info->gradient; if (draw_info->gradient.stops != (StopInfo *) NULL) { size_t number_stops; number_stops=clone_info->gradient.number_stops; clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t) number_stops,sizeof(*clone_info->gradient.stops)); if (clone_info->gradient.stops == (StopInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) memcpy(clone_info->gradient.stops,draw_info->gradient.stops, (size_t) number_stops*sizeof(*clone_info->gradient.stops)); } clone_info->bounds=draw_info->bounds; clone_info->fill_opacity=draw_info->fill_opacity; clone_info->stroke_opacity=draw_info->stroke_opacity; clone_info->element_reference=draw_info->element_reference; clone_info->clip_path=draw_info->clip_path; clone_info->clip_units=draw_info->clip_units; if (draw_info->clip_mask != (char *) NULL) (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask); if (draw_info->clipping_mask != (Image *) NULL) clone_info->clipping_mask=CloneImage(draw_info->clipping_mask,0,0, MagickTrue,&draw_info->clipping_mask->exception); if (draw_info->composite_mask != (Image *) NULL) clone_info->composite_mask=CloneImage(draw_info->composite_mask,0,0, MagickTrue,&draw_info->composite_mask->exception); clone_info->render=draw_info->render; clone_info->debug=IsEventLogging(); return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n v e r t P a t h T o P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % 
ConvertPathToPolygon() converts a path to the more efficient sorted % rendering form. % % The format of the ConvertPathToPolygon method is: % % PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info) % % A description of each parameter follows: % % o Method ConvertPathToPolygon returns the path in a more efficient sorted % rendering form of type PolygonInfo. % % o draw_info: Specifies a pointer to an DrawInfo structure. % % o path_info: Specifies a pointer to an PathInfo structure. % % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int DrawCompareEdges(const void *p_edge,const void *q_edge) { #define DrawCompareEdge(p,q) \ { \ if (((p)-(q)) < 0.0) \ return(-1); \ if (((p)-(q)) > 0.0) \ return(1); \ } register const PointInfo *p, *q; /* Edge sorting for right-handed coordinate system. */ p=((const EdgeInfo *) p_edge)->points; q=((const EdgeInfo *) q_edge)->points; DrawCompareEdge(p[0].y,q[0].y); DrawCompareEdge(p[0].x,q[0].x); DrawCompareEdge((p[1].x-p[0].x)*(q[1].y-q[0].y),(p[1].y-p[0].y)* (q[1].x-q[0].x)); DrawCompareEdge(p[1].y,q[1].y); DrawCompareEdge(p[1].x,q[1].x); return(0); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static void LogPolygonInfo(const PolygonInfo *polygon_info) { register EdgeInfo *p; register ssize_t i, j; (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge"); p=polygon_info->edges; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) { (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:", (double) i); (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s", p->direction != MagickFalse ? "down" : "up"); (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s", p->ghostline != MagickFalse ? 
"transparent" : "opaque"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1, p->bounds.x2,p->bounds.y2); for (j=0; j < (ssize_t) p->number_points; j++) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g", p->points[j].x,p->points[j].y); p++; } (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge"); } static void ReversePoints(PointInfo *points,const size_t number_points) { PointInfo point; register ssize_t i; for (i=0; i < (ssize_t) (number_points >> 1); i++) { point=points[i]; points[i]=points[number_points-(i+1)]; points[number_points-(i+1)]=point; } } static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info) { long direction, next_direction; PointInfo point, *points; PolygonInfo *polygon_info; SegmentInfo bounds; register ssize_t i, n; MagickBooleanType ghostline; size_t edge, number_edges, number_points; /* Convert a path to the more efficient sorted rendering form. */ polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info)); if (polygon_info == (PolygonInfo *) NULL) return((PolygonInfo *) NULL); number_edges=16; polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); (void) memset(polygon_info->edges,0,number_edges* sizeof(*polygon_info->edges)); direction=0; edge=0; ghostline=MagickFalse; n=0; number_points=0; points=(PointInfo *) NULL; (void) memset(&point,0,sizeof(point)); (void) memset(&bounds,0,sizeof(bounds)); polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=0.0; polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) direction; polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->number_edges=0; for (i=0; path_info[i].code != EndCode; i++) { if ((path_info[i].code == MoveToCode) || 
(path_info[i].code == OpenCode) || (path_info[i].code == GhostlineCode)) { /* Move to. */ if ((points != (PointInfo *) NULL) && (n >= 2)) { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; points=(PointInfo *) NULL; ghostline=MagickFalse; edge++; } if (points == (PointInfo *) NULL) { number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); } ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse; point=path_info[i].point; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; direction=0; n=1; continue; } /* Line to. */ next_direction=((path_info[i].point.y > point.y) || ((fabs(path_info[i].point.y-point.y) < MagickEpsilon) && (path_info[i].point.x > point.x))) ? 1 : -1; if ((points != (PointInfo *) NULL) && (direction != 0) && (direction != next_direction)) { /* New edge. 
*/ point=points[n-1]; if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); n=1; ghostline=MagickFalse; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; edge++; } direction=next_direction; if (points == (PointInfo *) NULL) continue; if (n == (ssize_t) number_points) { number_points<<=1; points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); } point=path_info[i].point; points[n]=point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.x > bounds.x2) bounds.x2=point.x; n++; } if (points != (PointInfo *) NULL) { if (n < 2) points=(PointInfo *) RelinquishMagickMemory(points); else { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; 
polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; ghostline=MagickFalse; edge++; } } polygon_info->number_edges=edge; qsort(polygon_info->edges,(size_t) polygon_info->number_edges, sizeof(*polygon_info->edges),DrawCompareEdges); if (IsEventLogging() != MagickFalse) LogPolygonInfo(polygon_info); return(polygon_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n v e r t P r i m i t i v e T o P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector % path structure. % % The format of the ConvertPrimitiveToPath method is: % % PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info, % const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o Method ConvertPrimitiveToPath returns a vector path structure of type % PathInfo. % % o draw_info: a structure of type DrawInfo. % % o primitive_info: Specifies a pointer to an PrimitiveInfo structure. % % */ static void LogPathInfo(const PathInfo *path_info) { register const PathInfo *p; (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path"); for (p=path_info; p->code != EndCode; p++) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ? "moveto ghostline" : p->code == OpenCode ? "moveto open" : p->code == MoveToCode ? "moveto" : p->code == LineToCode ? 
"lineto" : "?"); (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path"); } static PathInfo *ConvertPrimitiveToPath( const DrawInfo *magick_unused(draw_info),const PrimitiveInfo *primitive_info) { MagickBooleanType closed_subpath; PathInfo *path_info; PathInfoCode code; PointInfo p, q; register ssize_t i, n; ssize_t coordinates, start; magick_unreferenced(draw_info); /* Converts a PrimitiveInfo structure into a vector path structure. */ switch (primitive_info->primitive) { case PointPrimitive: case ColorPrimitive: case MattePrimitive: case TextPrimitive: case ImagePrimitive: return((PathInfo *) NULL); default: break; } for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ; path_info=(PathInfo *) AcquireQuantumMemory((size_t) (3UL*i+1UL), sizeof(*path_info)); if (path_info == (PathInfo *) NULL) return((PathInfo *) NULL); coordinates=0; closed_subpath=MagickFalse; n=0; p.x=(-1.0); p.y=(-1.0); q.x=(-1.0); q.y=(-1.0); start=0; for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { code=LineToCode; if (coordinates <= 0) { /* New subpath. */ coordinates=(ssize_t) primitive_info[i].coordinates; p=primitive_info[i].point; start=n; code=MoveToCode; closed_subpath=primitive_info[i].closed_subpath; } coordinates--; if ((code == MoveToCode) || (coordinates <= 0) || (fabs(q.x-primitive_info[i].point.x) >= MagickEpsilon) || (fabs(q.y-primitive_info[i].point.y) >= MagickEpsilon)) { /* Eliminate duplicate points. */ path_info[n].code=code; path_info[n].point=primitive_info[i].point; q=primitive_info[i].point; n++; } if (coordinates > 0) continue; /* next point in current subpath */ if (closed_subpath != MagickFalse) { closed_subpath=MagickFalse; continue; } /* Mark the p point as open if the subpath is not closed. 
*/ path_info[start].code=OpenCode; path_info[n].code=GhostlineCode; path_info[n].point=primitive_info[i].point; n++; path_info[n].code=LineToCode; path_info[n].point=p; n++; } path_info[n].code=EndCode; path_info[n].point.x=0.0; path_info[n].point.y=0.0; if (IsEventLogging() != MagickFalse) LogPathInfo(path_info); path_info=(PathInfo *) ResizeQuantumMemory(path_info,(size_t) (n+1), sizeof(*path_info)); return(path_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyDrawInfo() deallocates memory associated with an DrawInfo structure. % % The format of the DestroyDrawInfo method is: % % DrawInfo *DestroyDrawInfo(DrawInfo *draw_info) % % A description of each parameter follows: % % o draw_info: the draw info. % */ MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info) { assert(draw_info != (DrawInfo *) NULL); if (draw_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(draw_info->signature == MagickCoreSignature); if (draw_info->primitive != (char *) NULL) draw_info->primitive=DestroyString(draw_info->primitive); if (draw_info->text != (char *) NULL) draw_info->text=DestroyString(draw_info->text); if (draw_info->geometry != (char *) NULL) draw_info->geometry=DestroyString(draw_info->geometry); if (draw_info->tile != (Image *) NULL) draw_info->tile=DestroyImage(draw_info->tile); if (draw_info->fill_pattern != (Image *) NULL) draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern); if (draw_info->stroke_pattern != (Image *) NULL) draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern); if (draw_info->font != (char *) NULL) draw_info->font=DestroyString(draw_info->font); if (draw_info->metrics != (char *) NULL) draw_info->metrics=DestroyString(draw_info->metrics); if (draw_info->family != (char *) NULL) 
draw_info->family=DestroyString(draw_info->family); if (draw_info->encoding != (char *) NULL) draw_info->encoding=DestroyString(draw_info->encoding); if (draw_info->density != (char *) NULL) draw_info->density=DestroyString(draw_info->density); if (draw_info->server_name != (char *) NULL) draw_info->server_name=(char *) RelinquishMagickMemory(draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) draw_info->dash_pattern=(double *) RelinquishMagickMemory( draw_info->dash_pattern); if (draw_info->gradient.stops != (StopInfo *) NULL) draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory( draw_info->gradient.stops); if (draw_info->clip_mask != (char *) NULL) draw_info->clip_mask=DestroyString(draw_info->clip_mask); if (draw_info->clipping_mask != (Image *) NULL) draw_info->clipping_mask=DestroyImage(draw_info->clipping_mask); if (draw_info->composite_mask != (Image *) NULL) draw_info->composite_mask=DestroyImage(draw_info->composite_mask); draw_info->signature=(~MagickCoreSignature); draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y E d g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyEdge() destroys the specified polygon edge. % % The format of the DestroyEdge method is: % % ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge) % % A description of each parameter follows: % % o polygon_info: Specifies a pointer to an PolygonInfo structure. % % o edge: the polygon edge number to destroy. 
%
*/
/*
  DestroyEdge() frees the point list of the given edge, then compacts the
  edge array over the vacated slot.  Returns the new number of edges.
  Caller must pass edge < number_edges (asserted).
*/
static size_t DestroyEdge(PolygonInfo *polygon_info, const size_t edge)
{
  assert(edge < polygon_info->number_edges);
  polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory(
    polygon_info->edges[edge].points);
  polygon_info->number_edges--;
  /* close the gap: shift the trailing edges down one slot */
  if (edge < polygon_info->number_edges)
    (void) memmove(polygon_info->edges+edge,polygon_info->edges+edge+1,
      (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges));
  return(polygon_info->number_edges);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y P o l y g o n I n f o                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyPolygonInfo() destroys the PolygonInfo data structure.
%
%  The format of the DestroyPolygonInfo method is:
%
%      PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
%
%  A description of each parameter follows:
%
%    o polygon_info: Specifies a pointer to an PolygonInfo structure.
%
*/
/*
  Frees every edge's point list, the edge array itself, and finally the
  PolygonInfo struct.  Always returns NULL, for the idiomatic
  "p=DestroyPolygonInfo(p);" reset pattern.
*/
static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info)
{
  register ssize_t
    i;

  for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
    polygon_info->edges[i].points=(PointInfo *)
      RelinquishMagickMemory(polygon_info->edges[i].points);
  polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges);
  return((PolygonInfo *) RelinquishMagickMemory(polygon_info));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w A f f i n e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawAffineImage() composites the source over the destination image as
%  dictated by the affine transform.
%
%  The format of the DrawAffineImage method is:
%
%      MagickBooleanType DrawAffineImage(Image *image,const Image *source,
%        const AffineMatrix *affine)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o source: the source image.
% % o affine: the affine transform. % */ static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine, const double y,const SegmentInfo *edge) { double intercept, z; register double x; SegmentInfo inverse_edge; /* Determine left and right edges. */ inverse_edge.x1=edge->x1; inverse_edge.y1=edge->y1; inverse_edge.x2=edge->x2; inverse_edge.y2=edge->y2; z=affine->ry*y+affine->tx; if (affine->sx >= MagickEpsilon) { intercept=(-z/affine->sx); x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z+(double) image->columns)/affine->sx; x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if (affine->sx < -MagickEpsilon) { intercept=(-z+(double) image->columns)/affine->sx; x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z/affine->sx); x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns)) { inverse_edge.x2=edge->x1; return(inverse_edge); } /* Determine top and bottom edges. 
*/ z=affine->sy*y+affine->ty; if (affine->rx >= MagickEpsilon) { intercept=(-z/affine->rx); x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z+(double) image->rows)/affine->rx; x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if (affine->rx < -MagickEpsilon) { intercept=(-z+(double) image->rows)/affine->rx; x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z/affine->rx); x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows)) { inverse_edge.x2=edge->x2; return(inverse_edge); } return(inverse_edge); } static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine) { AffineMatrix inverse_affine; double determinant; determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx* affine->ry); inverse_affine.sx=determinant*affine->sy; inverse_affine.rx=determinant*(-affine->rx); inverse_affine.ry=determinant*(-affine->ry); inverse_affine.sy=determinant*affine->sx; inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty* inverse_affine.ry; inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty* inverse_affine.sy; return(inverse_affine); } MagickExport MagickBooleanType DrawAffineImage(Image *image, const Image *source,const AffineMatrix *affine) { AffineMatrix inverse_affine; CacheView *image_view, *source_view; ExceptionInfo *exception; MagickBooleanType status; MagickPixelPacket zero; PointInfo extent[4], min, max, point; register ssize_t i; SegmentInfo edge; ssize_t start, stop, y; /* Determine bounding box. 
*/
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(source != (const Image *) NULL);
  assert(source->signature == MagickCoreSignature);
  assert(affine != (AffineMatrix *) NULL);
  /* the four corners of the source, forward-mapped below */
  extent[0].x=0.0;
  extent[0].y=0.0;
  extent[1].x=(double) source->columns-1.0;
  extent[1].y=0.0;
  extent[2].x=(double) source->columns-1.0;
  extent[2].y=(double) source->rows-1.0;
  extent[3].x=0.0;
  extent[3].y=(double) source->rows-1.0;
  for (i=0; i < 4; i++)
  {
    point=extent[i];
    extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx;
    extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty;
  }
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  /*
    Affine transform image: iterate destination scanlines inside the clipped
    bounding box and sample the source through the inverse transform.
  */
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  edge.x1=MagickMax(min.x,0.0);
  edge.y1=MagickMax(min.y,0.0);
  edge.x2=MagickMin(max.x,(double) image->columns-1.0);
  edge.y2=MagickMin(max.y,(double) image->rows-1.0);
  inverse_affine=InverseAffineMatrix(affine);
  GetMagickPixelPacket(image,&zero);
  exception=(&image->exception);
  start=(ssize_t) ceil(edge.y1-0.5);
  stop=(ssize_t) floor(edge.y2+0.5);
  source_view=AcquireVirtualCacheView(source,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,image,stop-start,1)
#endif
  for (y=start; y <= stop; y++)
  {
    MagickPixelPacket
      composite,
      pixel;

    PointInfo
      point;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    SegmentInfo
      inverse_edge;

    ssize_t
      x_offset;

    /* clip this scanline's span to the inverse-mapped source extent */
    inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge);
    if (inverse_edge.x2 < inverse_edge.x1)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1-
      0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1),
      1,exception);
    if (q == (PixelPacket *) NULL)
      continue;
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    composite=zero;
    x_offset=0;
    for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t)
         floor(inverse_edge.x2+0.5); x++)
    {
      /* inverse-map the destination pixel into source coordinates */
      point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+
        inverse_affine.tx;
      point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+
        inverse_affine.ty;
      status=InterpolateMagickPixelPacket(source,source_view,
        UndefinedInterpolatePixel,point.x,point.y,&pixel,exception);
      if (status == MagickFalse)
        break;
      /* composite the interpolated source pixel over the destination */
      SetMagickPixelPacket(image,q,indexes+x_offset,&composite);
      MagickPixelCompositeOver(&pixel,pixel.opacity,&composite,
        composite.opacity,&composite);
      SetPixelPacket(image,&composite,q,indexes+x_offset);
      x_offset++;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w   B o u n d i n g   R e c t a n g l e s                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawBoundingRectangles() draws the bounding rectangles on the image.  This
%  is only useful for developers debugging the rendering algorithm.
%
%  The format of the DrawBoundingRectangles method is:
%
%      MagickBooleanType DrawBoundingRectangles(Image *image,
%        const DrawInfo *draw_info,PolygonInfo *polygon_info)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
*/

/*
  SaneStrokeWidth() clamps the stroke width to at most ~2*sqrt(2) times the
  larger image dimension so pathological widths cannot blow up rendering.
*/
static inline double SaneStrokeWidth(const Image *image,
  const DrawInfo *draw_info)
{
  return(MagickMin((double) draw_info->stroke_width,
    (2.0*sqrt(2.0)+MagickEpsilon)*MagickMax(image->columns,image->rows)));
}

static MagickBooleanType DrawBoundingRectangles(Image *image,
  const DrawInfo *draw_info,const PolygonInfo *polygon_info)
{
  double
    mid;

  DrawInfo
    *clone_info;

  MagickBooleanType
    status;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  (void) memset(primitive_info,0,sizeof(primitive_info));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  /* fully transparent fill: only the rectangle outlines are drawn */
  status=QueryColorDatabase("#0000",&clone_info->fill,&image->exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(status);
    }
  /* default to 96 DPI unless the draw info carries a density */
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /* mid is half the effective (scaled) stroke width, used as padding */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    SaneStrokeWidth(image,clone_info)/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* union of all edge bounding boxes */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      /* pad by mid, then clamp each coordinate to the image extent */
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double)
        image->columns ? (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double)
        image->rows ? (double) image->rows-1 : bounds.y2;
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        /* red for one edge direction, green for the other */
        if (polygon_info->edges[i].direction != 0)
          status=QueryColorDatabase("#f00",&clone_info->stroke,
            &image->exception);
        else
          status=QueryColorDatabase("#0f0",&clone_info->stroke,
            &image->exception);
        if (status == MagickFalse)
          break;
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        status&=TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        status=DrawPrimitive(image,clone_info,primitive_info);
        if (status == MagickFalse)
          break;
      }
      if (i < (ssize_t) polygon_info->number_edges)
        {
          /* early loop exit means a draw/query failed above */
          clone_info=DestroyDrawInfo(clone_info);
          return(status);
        }
    }
  /* blue rectangle for the overall bounding box */
  status=QueryColorDatabase("#00f",&clone_info->stroke,&image->exception);
  if (status == MagickFalse)
    {
      clone_info=DestroyDrawInfo(clone_info);
      return(status);
    }
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  status&=TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  status=DrawPrimitive(image,clone_info,primitive_info);
clone_info=DestroyDrawInfo(clone_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   C l i p   P a t h                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClipPath() draws the clip path on the image mask.
%
%  The format of the DrawClipPath method is:
%
%      MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
%        const char *id)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *id)
{
  const char
    *clip_path;

  Image
    *clipping_mask;

  MagickBooleanType
    status;

  /* the clip path MVG is stored as an image artifact keyed by id */
  clip_path=GetImageArtifact(image,id);
  if (clip_path == (const char *) NULL)
    return(MagickFalse);
  clipping_mask=DrawClippingMask(image,draw_info,draw_info->clip_mask,
    clip_path,&image->exception);
  if (clipping_mask == (Image *) NULL)
    return(MagickFalse);
  /* SetImageClipMask() clones the mask, so destroy our copy afterwards */
  status=SetImageClipMask(image,clipping_mask);
  clipping_mask=DestroyImage(clipping_mask);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   C l i p p i n g   M a s k                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClippingMask() draws the clip path and returns it as an image clipping
%  mask.
%
%  The format of the DrawClippingMask method is:
%
%      Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *clip_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the clip path id.
%
%    o clip_path: the clip path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawClippingMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *clip_path,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  Image
    *clip_mask;

  MagickStatusType
    status;

  /*
    Draw a clip path: render the MVG onto a transparent canvas with white
    fill, then turn its alpha channel into a grayscale mask.  Returns NULL
    on failure; otherwise the caller owns the returned image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  clip_mask=AcquireImage((const ImageInfo *) NULL);
  status=SetImageExtent(clip_mask,image->columns,image->rows);
  if (status == MagickFalse)
    return(DestroyImage(clip_mask));
  /* detach any existing clip mask so the render below is unclipped */
  status=SetImageClipMask(image,(Image *) NULL);
  status=QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.opacity=(Quantum) TransparentOpacity;
  status=SetImageBackgroundColor(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,clip_path);
  /* opaque white fill, transparent stroke: shape coverage becomes alpha */
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  if (clone_info->clip_mask != (char *) NULL)
    clone_info->clip_mask=DestroyString(clone_info->clip_mask);
  (void) QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->opacity=OpaqueOpacity;
  clone_info->clip_path=MagickTrue;
  status=RenderMVGContent(clip_mask,clone_info,1);
  clone_info=DestroyDrawInfo(clone_info);
  /* extract alpha as grayscale and invert it into mask convention */
  status&=SeparateImageChannel(clip_mask,TrueAlphaChannel);
  status&=NegateImage(clip_mask,MagickFalse);
  if (status == MagickFalse)
    clip_mask=DestroyImage(clip_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(clip_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   C o m p o s i t e   M a s k                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawCompositeMask() draws the mask path and returns it as an image mask.
%
%  The format of the DrawCompositeMask method is:
%
%      Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
%        const char *id,const char *mask_path,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o id: the mask path id.
%
%    o mask_path: the mask path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Image *DrawCompositeMask(Image *image,const DrawInfo *draw_info,
  const char *id,const char *mask_path,ExceptionInfo *exception)
{
  Image
    *composite_mask;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  /*
    Draw a mask path: same recipe as DrawClippingMask() — render the MVG
    with white fill on a transparent canvas, then convert coverage (alpha)
    into a grayscale mask.  Returns NULL on failure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  composite_mask=AcquireImage((const ImageInfo *) NULL);
  status=SetImageExtent(composite_mask,image->columns,image->rows);
  if (status == MagickFalse)
    return(DestroyImage(composite_mask));
  /* detach any existing mask before rendering */
  status=SetImageMask(image,(Image *) NULL);
  status=QueryColorCompliance("#0000",AllCompliance,
    &composite_mask->background_color,exception);
  composite_mask->background_color.opacity=(Quantum) TransparentOpacity;
  (void) SetImageBackgroundColor(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin mask-path %s",
      id);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,mask_path);
  status=QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  status=QueryColorCompliance("#00000000",AllCompliance,&clone_info->stroke,
    exception);
  clone_info->stroke_width=0.0;
  clone_info->opacity=OpaqueOpacity;
  status=RenderMVGContent(composite_mask,clone_info,1);
  clone_info=DestroyDrawInfo(clone_info);
  status&=SeparateImageChannel(composite_mask,TrueAlphaChannel);
  status&=NegateImage(composite_mask,MagickFalse);
  if (status == MagickFalse)
    composite_mask=DestroyImage(composite_mask);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end mask-path");
  return(composite_mask);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w   D a s h   P o l y g o n                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
%  image while respecting the dash offset and dash pattern attributes.
%
%  The format of the DrawDashPolygon method is:
%
%      MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,Image *image)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o image: the image.
%
%
*/
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image)
{
  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  register double
    dx,
    dy;

  register ssize_t
    i;

  size_t
    number_vertices;

  ssize_t
    j,
    n;

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /* count the vertices of the path to dash */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  /*
    Scratch polygon for one dash segment at a time.  NOTE(review): writes at
    dash_polygon[j] are bounds-checked only in the even-n branch below —
    confirm 2*number_vertices+32 always suffices for the other writes.
  */
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+32UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  (void) memset(dash_polygon,0,(2UL*number_vertices+32UL)*
    sizeof(*dash_polygon));
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  /* scale pattern lengths by the affine's overall expansion factor */
  scale=ExpandAffine(&draw_info->affine);
  length=scale*draw_info->dash_pattern[0];
  offset=fabs(draw_info->dash_offset) >= MagickEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset: advance through the pattern (n) until the
    offset is exhausted, leaving `length` as the remainder of the current
    pattern entry.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*draw_info->dash_pattern[n];
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Walk each path segment, carving it into alternating on/off pattern
    entries; the parity of n selects between starting a new sub-stroke and
    closing/stroking the current one.
  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot(dx,dy);
    if (maximum_length > MaxBezierCoordinates)
      break;
    if (fabs(length) < MagickEpsilon)
      {
        /* current pattern entry used up: advance (wrap at a zero entry) */
        n++;
        if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
          n=0;
        length=scale*draw_info->dash_pattern[n];
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >=
         (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* off-entry ends here: record the start of the next dash */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          j=1;
        }
      else
        {
          /* on-entry ends here: close the dash and stroke it */
          if ((j+1) > (ssize_t) number_vertices)
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length*PerceptibleReciprocal(maximum_length));
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon);
        }
      n++;
      if (fabs(draw_info->dash_pattern[n]) < MagickEpsilon)
        n=0;
      length=scale*draw_info->dash_pattern[n];
    }
    /* carry the unconsumed remainder of this segment into the next */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  if ((total_length < maximum_length) && ((n & 0x01) == 0) && (j > 1))
    {
      /* flush the trailing partial dash */
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=MagickEpsilon;
      dash_polygon[j].point.y+=MagickEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   G r a d i e n t   I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawGradientImage() draws a linear gradient on the image.
%
%  The format of the DrawGradientImage method is:
%
%      MagickBooleanType DrawGradientImage(Image *image,
%        const DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
*/

/*
  GetStopColorOffset() returns the (unnormalized for linear, normalized for
  radial) gradient offset of pixel (x,y): for linear gradients the scalar
  projection of the pixel onto the gradient vector; for radial gradients the
  radii-scaled (and angle-rotated) distance from the gradient center.
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        p,
        q;

      const SegmentInfo
        *gradient_vector;

      gradient_vector=(&gradient->gradient_vector);
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      /* offset reduces to (p.q)/|p| — the projection length of q onto p */
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          /* repeat spread uses the raw Euclidean distance from center */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /* rotate by the gradient angle and normalize by the ellipse radii */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.x);
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))*PerceptibleReciprocal(gradient->radii.y);
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}

MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
*/
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  /* length of the gradient vector, used to normalize linear offsets */
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  exception=(&image->exception);
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  /*
    NOTE(review): bounding_box.height/width are used here as exclusive end
    coordinates, not as extents — verify against how gradient->bounding_box
    is populated by the caller.
  */
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    double
      alpha,
      offset;

    MagickPixelPacket
      composite,
      pixel;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      i,
      x;

    register PixelPacket
      *magick_restrict q;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    pixel=zero;
    composite=zero;
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset*=PerceptibleReciprocal(length);
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      SetMagickPixelPacket(image,q,indexes+x,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /* pad: clamp offsets outside [0,1] to the first/last stop */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /* blend between the two bracketing stops */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /* reflect: fold the offset so successive periods mirror */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset*=PerceptibleReciprocal(length);
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          double
            repeat;

          MagickBooleanType
            antialias;

          /* repeat: wrap the offset each period; antialias at the seam */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=PerceptibleReciprocal(length)*repeat;
                }
              else
                {
                  repeat=fmod(offset,(double) gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,
                      (double) gradient->radius);
                  else
                    repeat=fmod(offset,(double) gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /* at the period seam, blend last stop into first */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                MagickPixelCompositeBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      MagickPixelCompositeOver(&composite,composite.opacity,&pixel,
        pixel.opacity,&pixel);
      SetPixelPacket(image,&pixel,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawImage() draws a graphic primitive on your image.  The primitive
%  may be represented as a string or filename.  Precede the filename with an
%  "at" sign (@) and the contents of the file are drawn on the image.  You
%  can affect how text is drawn by setting one or more members of the draw
%  info structure.
%
%  The format of the DrawImage method is:
%
%      MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
*/

/*
  CheckPrimitiveExtent() guarantees the primitive-info buffer can hold
  mvg_info->offset+pad+PrimitiveExtentPad entries, growing it if necessary.
  On allocation failure the buffer is replaced with a minimal allocation so
  callers can unwind safely, and MagickFalse is returned.
*/
static MagickBooleanType CheckPrimitiveExtent(MVGInfo *mvg_info,
  const size_t pad)
{
  double
    extent;

  size_t
    quantum;

  /*
    Check if there is enough storage for drawing primitives.  The request is
    computed in double precision to detect overflow before casting.
  */
  extent=(double) mvg_info->offset+pad+PrimitiveExtentPad;
  quantum=sizeof(**mvg_info->primitive_info);
  if (((extent*quantum) < (double) SSIZE_MAX) &&
      ((extent*quantum) < (double) GetMaxMemoryRequest()))
    {
      if (extent <= (double) *mvg_info->extent)
        return(MagickTrue);
      /* relies on C's implicit void* conversion (no cast as in C++) */
      *mvg_info->primitive_info=ResizeQuantumMemory(*mvg_info->primitive_info,
        (size_t) extent,quantum);
      if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
        {
          *mvg_info->extent=(size_t) extent;
          return(MagickTrue);
        }
    }
  /*
    Reallocation failed, allocate a primitive to facilitate unwinding.
  */
  if (*mvg_info->primitive_info != (PrimitiveInfo *) NULL)
    *mvg_info->primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(
      *mvg_info->primitive_info);
  (void) ThrowMagickException(mvg_info->exception,GetMagickModule(),
    ResourceLimitError,"MemoryAllocationFailed","`%s'","");
  *mvg_info->primitive_info=AcquireCriticalMemory(PrimitiveExtentPad*quantum);
  (void) memset(*mvg_info->primitive_info,0,PrimitiveExtentPad*quantum);
  *mvg_info->extent=1;
  return(MagickFalse);
}

/*
  GetMVGMacros() returns a splay tree mapping macro/class names to the MVG
  text between the matching named push/pop pair, or NULL if primitive is
  NULL.  The caller owns the returned tree.
*/
static SplayTreeInfo *GetMVGMacros(const char *primitive)
{
  char
    *token;

  const char
    *q;

  size_t
    extent;

  SplayTreeInfo
    *macros;

  /*
    Scan graphic primitives for definitions and classes.
*/
  if (primitive == (const char *) NULL)
    return((SplayTreeInfo *) NULL);
  macros=NewSplayTree(CompareSplayTreeString,RelinquishMagickMemory,
    RelinquishMagickMemory);
  token=AcquireString(primitive);
  extent=strlen(token)+MagickPathExtent;
  for (q=primitive; *q != '\0'; )
  {
    GetNextToken(q,&q,extent,token);
    if (*token == '\0')
      break;
    if (LocaleCompare("push",token) == 0)
      {
        register const char
          *end,
          *start;

        GetNextToken(q,&q,extent,token);
        if (*q == '"')
          {
            char
              name[MagickPathExtent];

            const char
              *p;

            ssize_t
              n;

            /*
              Named macro (e.g. push graphic-context "wheel").
            */
            GetNextToken(q,&q,extent,token);
            start=q;
            end=q;
            (void) CopyMagickString(name,token,MagickPathExtent);
            /* n tracks push/pop nesting; the match is found when n == 0 */
            n=1;
            for (p=q; *p != '\0'; )
            {
              GetNextToken(p,&p,extent,token);
              if (*token == '\0')
                break;
              if (LocaleCompare(token,"pop") == 0)
                {
                  end=p-strlen(token)-1;
                  n--;
                }
              if (LocaleCompare(token,"push") == 0)
                n++;
              if ((n == 0) && (end > start))
                {
                  char
                    *macro;

                  /*
                    Extract macro: the text between the named push and its
                    matching pop.
                  */
                  GetNextToken(p,&p,extent,token);
                  macro=AcquireString(start);
                  macro[end-start]='\0';
                  (void) AddValueToSplayTree(macros,ConstantString(name),
                    ConstantString(macro));
                  macro=DestroyString(macro);
                  break;
                }
            }
          }
      }
  }
  token=DestroyString(token);
  return(macros);
}

/*
  IsPoint() returns MagickTrue if the string parses as a (non-trivial)
  numeric coordinate: either StringToDouble() consumed characters, or the
  parsed value is non-zero.
*/
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *p;

  double
    value;

  value=StringToDouble(point,&p);
  /* false only when nothing was consumed AND the value is ~0 */
  return((fabs(value) < MagickEpsilon) && (p == point) ?
MagickFalse : MagickTrue); } static inline MagickBooleanType TracePoint(PrimitiveInfo *primitive_info, const PointInfo point) { primitive_info->coordinates=1; primitive_info->closed_subpath=MagickFalse; primitive_info->point=point; return(MagickTrue); } static MagickBooleanType RenderMVGContent(Image *image, const DrawInfo *draw_info,const size_t depth) { #define RenderImageTag "Render/Image" AffineMatrix affine, current; char key[2*MaxTextExtent], keyword[MaxTextExtent], geometry[MaxTextExtent], name[MaxTextExtent], *next_token, pattern[MaxTextExtent], *primitive, *token; const char *q; double angle, coordinates, cursor, factor, primitive_extent; DrawInfo *clone_info, **graphic_context; MagickBooleanType proceed; MagickStatusType status; MVGInfo mvg_info; PointInfo point; PixelPacket start_color; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; register const char *p; register ssize_t i, x; SegmentInfo bounds; size_t extent, number_points; SplayTreeInfo *macros; ssize_t defsDepth, j, k, n, symbolDepth; TypeMetric metrics; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); if (depth > MagickMaxRecursionDepth) ThrowBinaryImageException(DrawError,"VectorGraphicsNestedTooDeeply", image->filename); if ((draw_info->primitive == (char *) NULL) || (*draw_info->primitive == '\0')) return(MagickFalse); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image"); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (image->matte == MagickFalse) { status=SetImageAlphaChannel(image,OpaqueAlphaChannel); if (status == MagickFalse) return(status); } primitive=(char *) NULL; if 
(*draw_info->primitive != '@') primitive=AcquireString(draw_info->primitive); else if ((strlen(draw_info->primitive) > 1) && (*(draw_info->primitive+1) != '-')) primitive=FileToString(draw_info->primitive+1,~0UL,&image->exception); if (primitive == (char *) NULL) return(MagickFalse); primitive_extent=(double) strlen(primitive); (void) SetImageArtifact(image,"MVG",primitive); n=0; /* Allocate primitive info memory. */ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=PrimitiveExtentPad; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryImageException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(primitive_info,0,(size_t) number_points* sizeof(*primitive_info)); mvg_info.primitive_info=(&primitive_info); mvg_info.extent=(&number_points); mvg_info.offset=0; mvg_info.exception=(&image->exception); graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MaxTextExtent; (void) QueryColorDatabase("#000000",&start_color,&image->exception); cursor=0.0; defsDepth=0; symbolDepth=0; macros=GetMVGMacros(primitive); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. 
*/ GetNextToken(q,&q,MaxTextExtent,keyword); if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. */ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.rx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ry=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { GetNextToken(q,&q,extent,token); status&=QueryColorDatabase(token,&graphic_context[n]->border_color, &image->exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if 
(LocaleCompare("class",keyword) == 0) { const char *mvg_class; GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } mvg_class=(const char *) GetValueFromSplayTree(macros,token); if (mvg_class != (const char *) NULL) { char *elements; ssize_t offset; /* Inject class elements in stream. */ offset=(ssize_t) (p-primitive); elements=AcquireString(primitive); elements[offset]='\0'; (void) ConcatenateString(&elements,mvg_class); (void) ConcatenateString(&elements,"\n"); (void) ConcatenateString(&elements,q); primitive=DestroyString(primitive); primitive=elements; q=primitive+offset; } break; } if (LocaleCompare("clip-path",keyword) == 0) { const char *clip_path; /* Take a node from within the MVG document, and duplicate it here. */ GetNextToken(q,&q,extent,token); if (*token == '\0') { status=MagickFalse; break; } (void) CloneString(&graphic_context[n]->clip_mask,token); clip_path=(const char *) GetValueFromSplayTree(macros,token); if (clip_path != (const char *) NULL) { if (graphic_context[n]->clipping_mask != (Image *) NULL) graphic_context[n]->clipping_mask= DestroyImage(graphic_context[n]->clipping_mask); graphic_context[n]->clipping_mask=DrawClippingMask(image, graphic_context[n],token,clip_path,&image->exception); if (draw_info->compliance != SVGCompliance) status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask); } break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == 
ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } if (LocaleCompare("compliance",keyword) == 0) { /* MVG compliance associates a clipping mask with an image; SVG compliance associates a clipping mask with a graphics context. */ GetNextToken(q,&q,extent,token); graphic_context[n]->compliance=(ComplianceType) ParseCommandOption( MagickComplianceOptions,MagickFalse,token); break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) { status=MagickFalse; break; } graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MaxTextExtent,"%s",token); if 
(GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern); else { status&=QueryColorDatabase(token,&graphic_context[n]->fill, &image->exception); if (graphic_context[n]->fill_opacity != OpaqueOpacity) graphic_context[n]->fill.opacity=ClampToQuantum( graphic_context[n]->fill_opacity); } break; } if (LocaleCompare("fill-opacity",keyword) == 0) { double opacity; GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(image,token); graphic_context[n]->fill_opacity=(QuantumRange- graphic_context[n]->fill_opacity)*(1.0-opacity); if (graphic_context[n]->fill.opacity != TransparentOpacity) graphic_context[n]->fill.opacity=graphic_context[n]->fill_opacity; else graphic_context[n]->fill.opacity=ClampToQuantum(QuantumRange* (1.0-opacity)); break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) { status=MagickFalse; break; } graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if 
(LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) { status=MagickFalse; break; } graphic_context[n]->stretch=(StretchType) stretch; break; } if (LocaleCompare("font-style",keyword) == 0) { ssize_t style; GetNextToken(q,&q,extent,token); style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) { status=MagickFalse; break; } graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if (LocaleCompare("gradient-units",keyword) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) { status=MagickFalse; break; } graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) { status=MagickFalse; break; } graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("interword-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if 
(token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->kerning=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("letter-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); clone_info->text=AcquireString(" "); status&=GetTypeMetrics(image,clone_info,&metrics); graphic_context[n]->kerning=metrics.width* StringToDouble(token,&next_token); clone_info=DestroyDrawInfo(clone_info); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("line",keyword) == 0) { primitive_type=LinePrimitive; break; } status=MagickFalse; break; } case 'm': case 'M': { if (LocaleCompare("mask",keyword) == 0) { const char *mask_path; /* Take a node from within the MVG document, and duplicate it here. 
*/ GetNextToken(q,&q,extent,token); mask_path=(const char *) GetValueFromSplayTree(macros,token); if (mask_path != (const char *) NULL) { if (graphic_context[n]->composite_mask != (Image *) NULL) graphic_context[n]->composite_mask= DestroyImage(graphic_context[n]->composite_mask); graphic_context[n]->composite_mask=DrawCompositeMask(image, graphic_context[n],token,mask_path,&image->exception); if (draw_info->compliance != SVGCompliance) status=SetImageMask(image,graphic_context[n]->composite_mask); } break; } if (LocaleCompare("matte",keyword) == 0) { primitive_type=MattePrimitive; break; } status=MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { double opacity; GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(image,token); graphic_context[n]->fill_opacity=(QuantumRange- graphic_context[n]->fill_opacity)*(1.0-opacity); if (graphic_context[n]->fill_opacity != OpaqueOpacity) graphic_context[n]->fill.opacity=ClampToQuantum( graphic_context[n]->fill_opacity); graphic_context[n]->stroke_opacity=(QuantumRange- graphic_context[n]->stroke_opacity)*(1.0-opacity); if (graphic_context[n]->stroke_opacity != OpaqueOpacity) graphic_context[n]->stroke.opacity=ClampToQuantum( graphic_context[n]->stroke_opacity); break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if 
(LocaleCompare("pop",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("class",token) == 0) break; if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) { defsDepth--; graphic_context[n]->render=defsDepth > 0 ? MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(&image->exception, GetMagickModule(),DrawError, "UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if ((graphic_context[n]->clip_mask != (char *) NULL) && (draw_info->compliance != SVGCompliance)) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) status=SetImageClipMask(image,(Image *) NULL); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("mask",token) == 0) break; if (LocaleCompare("pattern",token) == 0) break; if (LocaleCompare("symbol",token) == 0) { symbolDepth--; graphic_context[n]->render=symbolDepth > 0 ? MagickFalse : MagickTrue; break; } status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("class",token) == 0) { /* Class context. */ for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"class") != 0) continue; break; } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("clip-path",token) == 0) { char name[MaxTextExtent]; const char *clip_path; GetNextToken(q,&q,extent,token); (void) FormatLocaleString(name,MaxTextExtent,"%s",token); clip_path=(const char *) GetValueFromSplayTree(macros,name); if (clip_path != (const char *) NULL) (void) SetImageArtifact(image,name,clip_path); break; } if (LocaleCompare("defs",token) == 0) { defsDepth++; graphic_context[n]->render=defsDepth > 0 ? 
MagickFalse : MagickTrue; break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MaxTextExtent], name[MaxTextExtent], type[MaxTextExtent]; SegmentInfo segment; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MaxTextExtent); GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MaxTextExtent); GetNextToken(q,&q,extent,token); segment.x1=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y1=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.x2=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y2=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); if (LocaleCompare(type,"radial") == 0) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; bounds.x2=graphic_context[n]->affine.sx*segment.x2+ graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ 
graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MaxTextExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MaxTextExtent,"%s-type",name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name); (void) FormatLocaleString(geometry,MaxTextExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(&image->exception, GetMagickModule(),ResourceLimitError, "MemoryAllocationFailed","`%s'",image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); if (*q == '"') GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("mask",token) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { RectangleInfo bounds; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MaxTextExtent); GetNextToken(q,&q,extent,token); bounds.x=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); bounds.y=(ssize_t) ceil(StringToDouble(token,&next_token)-0.5); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); bounds.width=(size_t) floor(StringToDouble(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') 
GetNextToken(q,&q,extent,token); bounds.height=(size_t) floor(StringToDouble(token,&next_token)+ 0.5); if (token == next_token) ThrowPointExpectedException(image,token); for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; break; } if ((q == (char *) NULL) || (p == (char *) NULL) || ((q-4) < p)) { status=MagickFalse; break; } (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MaxTextExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MaxTextExtent,"%s-geometry",name); (void) FormatLocaleString(geometry,MaxTextExtent, "%.20gx%.20g%+.20g%+.20g",(double) bounds.width,(double) bounds.height,(double) bounds.x,(double) bounds.y); (void) SetImageArtifact(image,key,geometry); GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("symbol",token) == 0) { symbolDepth++; graphic_context[n]->render=symbolDepth > 0 ? 
MagickFalse : MagickTrue; break; } status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("skewX",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { GradientType type; PixelPacket stop_color; GetNextToken(q,&q,extent,token); status&=QueryColorDatabase(token,&stop_color,&image->exception); type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) 
GradientImage(image,type,PadSpread,&start_color,&stop_color); start_color=stop_color; GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke",keyword) == 0) { GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; (void) FormatLocaleString(pattern,MaxTextExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern); else { status&=QueryColorDatabase(token,&graphic_context[n]->stroke, &image->exception); if (graphic_context[n]->stroke_opacity != OpaqueOpacity) graphic_context[n]->stroke.opacity=ClampToQuantum( graphic_context[n]->stroke_opacity); } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias=StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *p; p=q; GetNextToken(p,&p,extent,token); if (*token == ',') GetNextToken(p,&p,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { GetNextToken(p,&p,extent,token); if (*token == ',') GetNextToken(p,&p,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2*x+4), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(&image->exception, GetMagickModule(),ResourceLimitError, "MemoryAllocationFailed","`%s'",image->filename); status=MagickFalse; break; } (void) memset(graphic_context[n]->dash_pattern,0,(size_t) (2*x+4)*sizeof(*graphic_context[n]->dash_pattern)); for (j=0; j < x; j++) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); 
graphic_context[n]->dash_pattern[j]=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(image,token); if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; GetNextToken(q,&q,extent,token); linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) { status=MagickFalse; break; } graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) { status=MagickFalse; break; } graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { double opacity; GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; opacity=MagickMin(MagickMax(factor* StringToDouble(token,&next_token),0.0),1.0); if (token == next_token) ThrowPointExpectedException(image,token); graphic_context[n]->stroke_opacity=(QuantumRange- graphic_context[n]->stroke_opacity)*(1.0-opacity); if (graphic_context[n]->stroke.opacity != TransparentOpacity) graphic_context[n]->stroke.opacity= graphic_context[n]->stroke_opacity; else graphic_context[n]->stroke.opacity=ClampToQuantum(QuantumRange* (1.0-opacity)); break; } if (LocaleCompare("stroke-width",keyword) == 0) { GetNextToken(q,&q,extent,token); if (graphic_context[n]->clip_path != MagickFalse) break; graphic_context[n]->stroke_width=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; cursor=0.0; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) { status=MagickFalse; break; } graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { GetNextToken(q,&q,extent,token); status&=QueryColorDatabase(token,&graphic_context[n]->undercolor, &image->exception); break; } if (LocaleCompare("translate",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 'u': case 'U': { if (LocaleCompare("use",keyword) == 0) { const char *use; /* Get a macro from the MVG document, and "use" it here. */ GetNextToken(q,&q,extent,token); use=(const char *) GetValueFromSplayTree(macros,token); if (use != (const char *) NULL) { clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); (void) CloneString(&clone_info->primitive,use); status=RenderMVGContent(image,clone_info,depth+1); clone_info=DestroyDrawInfo(clone_info); } break; } break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) 
floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } case 'w': case 'W': { if (LocaleCompare("word-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if (token == next_token) ThrowPointExpectedException(image,token); break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= MagickEpsilon) || (fabs(affine.rx) >= MagickEpsilon) || (fabs(affine.ry) >= MagickEpsilon) || (fabs(affine.sy-1.0) >= MagickEpsilon) || (fabs(affine.tx) >= MagickEpsilon) || (fabs(affine.ty) >= MagickEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1),p); continue; } /* Parse the primitive attributes. */ i=0; mvg_info.offset=i; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; primitive_info[0].coordinates=0; primitive_info[0].method=FloodfillMethod; primitive_info[0].closed_subpath=MagickFalse; for (x=0; *q != '\0'; x++) { /* Define points. 
*/ if (IsPoint(q) == MagickFalse) break; GetNextToken(q,&q,extent,token); point.x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); point.y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; primitive_info[i].closed_subpath=MagickFalse; i++; mvg_info.offset=i; if (i < (ssize_t) number_points) continue; status&=CheckPrimitiveExtent(&mvg_info,number_points); } if (status == MagickFalse) break; primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; primitive_info[j].method=FloodfillMethod; primitive_info[j].closed_subpath=MagickFalse; primitive_info[j].text=(char *) NULL; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. 
*/ coordinates=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { coordinates*=5.0; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); coordinates*=5.0; coordinates+=2.0*((size_t) ceil((double) MagickPI*radius))+6.0* BezierQuantum+360.0; break; } case BezierPrimitive: { coordinates=(double) (BezierQuantum*primitive_info[j].coordinates); if (primitive_info[j].coordinates > (107*BezierQuantum)) { (void) ThrowMagickException(&image->exception,GetMagickModule(), DrawError,"TooManyBezierCoordinates","`%s'",token); status=MagickFalse; break; } break; } case PathPrimitive: { char *s, *t; GetNextToken(q,&q,extent,token); coordinates=1.0; t=token; for (s=token; *s != '\0'; s=t) { double value; value=StringToDouble(s,&t); (void) value; if (s == t) { t++; continue; } coordinates++; } for (s=token; *s != '\0'; s++) if (strspn(s,"AaCcQqSsTt") != 0) coordinates+=(20.0*BezierQuantum)+360.0; break; } case CirclePrimitive: case ArcPrimitive: case EllipsePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot(alpha,beta); coordinates=2.0*(ceil(MagickPI*radius))+6.0*BezierQuantum+360.0; if (coordinates > (MaxBezierCoordinates/4)) { (void) ThrowMagickException(&image->exception,GetMagickModule(), DrawError,"TooManyBezierCoordinates","`%s'",token); status=MagickFalse; } break; } default: break; } if (coordinates > MaxBezierCoordinates) { (void) ThrowMagickException(&image->exception,GetMagickModule(), DrawError,"TooManyBezierCoordinates","`%s'",token); status=MagickFalse; } if (status == MagickFalse) break; if (((size_t) (i+coordinates)) >= number_points) { /* Resize based on speculative points required by primitive. 
*/ number_points+=coordinates+1; if (number_points < (size_t) coordinates) { (void) ThrowMagickException(&image->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } mvg_info.offset=i; status&=CheckPrimitiveExtent(&mvg_info,number_points); } status&=CheckPrimitiveExtent(&mvg_info,PrimitiveExtentPad); if (status == MagickFalse) break; mvg_info.offset=j; switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } status&=TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+2].point.x < 0.0) || (primitive_info[j+2].point.y < 0.0)) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x-primitive_info[j].point.x) < 0.0) { status=MagickFalse; break; } if ((primitive_info[j+1].point.y-primitive_info[j].point.y) < 0.0) { status=MagickFalse; break; } status&=TraceRoundRectangle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { primitive_type=UndefinedPrimitive; break; } status&=TraceArc(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case 
EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } if ((primitive_info[j+1].point.x < 0.0) || (primitive_info[j+1].point.y < 0.0)) { status=MagickFalse; break; } status&=TraceEllipse(&mvg_info,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } status&=TraceCircle(&mvg_info,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: { if (primitive_info[j].coordinates < 1) { status=MagickFalse; break; } break; } case PolygonPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; primitive_info[j].closed_subpath=MagickTrue; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } status&=TraceBezier(&mvg_info,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { coordinates=(double) TracePath(image,&mvg_info,token); if (coordinates == 0) { status=MagickFalse; break; } i=(ssize_t) (j+coordinates); break; } case ColorPrimitive: case MattePrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) { status=MagickFalse; break; } primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { char geometry[MagickPathExtent]; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); /* Compute text cursor offset. 
*/ clone_info=CloneDrawInfo((ImageInfo *) NULL,graphic_context[n]); if ((fabs(mvg_info.point.x-primitive_info->point.x) < MagickEpsilon) && (fabs(mvg_info.point.y-primitive_info->point.y) < MagickEpsilon)) { mvg_info.point=primitive_info->point; primitive_info->point.x+=cursor; } else { mvg_info.point=primitive_info->point; cursor=0.0; } (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); clone_info->render=MagickFalse; clone_info->text=AcquireString(token); status&=GetTypeMetrics(image,clone_info,&metrics); clone_info=DestroyDrawInfo(clone_info); cursor+=metrics.width; break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); (void) CloneString(&primitive_info[j].text,token); break; } } mvg_info.offset=i; if (primitive_info == (PrimitiveInfo *) NULL) break; if ((image->debug != MagickFalse) && (q > p)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p-1), p); if (status == MagickFalse) break; primitive_info[i].primitive=UndefinedPrimitive; if (i == 0) continue; /* Transform points. 
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (draw_info->compliance != SVGCompliance) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask); status&=DrawPrimitive(image,graphic_context[n],primitive_info); } proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. 
  */
  /*
    Relinquish the MVG parser state: macro table, token buffer, primitive
    list (including per-primitive text arguments), the primitive string, and
    the graphic-context stack.
  */
  macros=DestroySplayTree(macros);
  token=DestroyString(token);
  if (primitive_info != (PrimitiveInfo *) NULL)
    {
      /*
        TextPrimitive and ImagePrimitive store their argument in the text
        field; free those strings before releasing the array itself.
      */
      for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
        if ((primitive_info[i].primitive == TextPrimitive) ||
            (primitive_info[i].primitive == ImagePrimitive))
          if (primitive_info[i].text != (char *) NULL)
            primitive_info[i].text=DestroyString(primitive_info[i].text);
      primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info);
    }
  primitive=DestroyString(primitive);
  /*
    Destroy every graphic context still on the stack (covers unbalanced
    push/pop pairs in the MVG stream).
  */
  for ( ; n >= 0; n--)
    graphic_context[n]=DestroyDrawInfo(graphic_context[n]);
  graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context);
  if (status == MagickFalse)
    ThrowBinaryImageException(DrawError,
      "NonconformingDrawingPrimitiveDefinition",keyword);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
  DrawImage() renders the MVG drawing primitives of draw_info->primitive on
  image.  Thin wrapper over RenderMVGContent() with an initial recursion
  depth of 1.
*/
MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info)
{
  return(RenderMVGContent(image,draw_info,1));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P a t t e r n P a t h                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPatternPath() draws a pattern.
%
%  The format of the DrawPatternPath method is:
%
%      MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
%        const char *name,Image **pattern)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o name: the pattern name.
%
%    o pattern: on return, the rendered pattern image.
%
*/
MagickExport MagickBooleanType DrawPatternPath(Image *image,
  const DrawInfo *draw_info,const char *name,Image **pattern)
{
  char
    property[MaxTextExtent];

  const char
    *geometry,
    *path,
    *type;

  DrawInfo
    *clone_info;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  assert(name != (const char *) NULL);
  /*
    The pattern's MVG path and its geometry are stored as image artifacts
    keyed "<name>" and "<name>-geometry"; bail out if either is missing.
  */
  (void) FormatLocaleString(property,MaxTextExtent,"%s",name);
  path=GetImageArtifact(image,property);
  if (path == (const char *) NULL)
    return(MagickFalse);
  (void) FormatLocaleString(property,MaxTextExtent,"%s-geometry",name);
  geometry=GetImageArtifact(image,property);
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  /*
    Replace any prior pattern image with a fresh canvas of the requested
    geometry, filled with fully transparent black.
  */
  if ((*pattern) != (Image *) NULL)
    *pattern=DestroyImage(*pattern);
  image_info=AcquireImageInfo();
  image_info->size=AcquireString(geometry);
  *pattern=AcquireImage(image_info);
  image_info=DestroyImageInfo(image_info);
  (void) QueryColorDatabase("#00000000",&(*pattern)->background_color,
    &image->exception);
  (void) SetImageBackgroundColor(*pattern);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      "begin pattern-path %s %s",name,geometry);
  /*
    Render the pattern's MVG into the canvas with a cloned DrawInfo; clear
    the inherited fill/stroke patterns so the pattern does not recursively
    reference itself.  An optional "<name>-type" artifact selects a gradient
    type.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill_pattern=NewImageList();
  clone_info->stroke_pattern=NewImageList();
  (void) FormatLocaleString(property,MaxTextExtent,"%s-type",name);
  type=GetImageArtifact(image,property);
  if (type != (const char *) NULL)
    clone_info->gradient.type=(GradientType) ParseCommandOption(
      MagickGradientOptions,MagickFalse,type);
  (void) CloneString(&clone_info->primitive,path);
  status=RenderMVGContent(*pattern,clone_info,1);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w P o l y g o n P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPolygonPrimitive() draws a polygon on the image. % % The format of the DrawPolygonPrimitive method is: % % MagickBooleanType DrawPolygonPrimitive(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % */ static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info) { register ssize_t i; assert(polygon_info != (PolygonInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (polygon_info[i] != (PolygonInfo *) NULL) polygon_info[i]=DestroyPolygonInfo(polygon_info[i]); polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info); return(polygon_info); } static PolygonInfo **AcquirePolygonThreadSet(const DrawInfo *draw_info, const PrimitiveInfo *primitive_info) { PathInfo *magick_restrict path_info; PolygonInfo **polygon_info; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads, sizeof(*polygon_info)); if (polygon_info == (PolygonInfo **) NULL) return((PolygonInfo **) NULL); (void) memset(polygon_info,0,number_threads*sizeof(*polygon_info)); path_info=ConvertPrimitiveToPath(draw_info,primitive_info); if (path_info == (PathInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); for (i=0; i < (ssize_t) number_threads; i++) { polygon_info[i]=ConvertPathToPolygon(path_info); if (polygon_info[i] == (PolygonInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); } path_info=(PathInfo *) RelinquishMagickMemory(path_info); return(polygon_info); } static double 
/*
  GetOpacityPixel() returns the fill opacity contribution of point (x,y)
  against the polygon's edge list and stores the stroke opacity contribution
  in *stroke_opacity.  mid is half the (affine-scaled) stroke width; edges
  are pruned as the scanline y advances (DestroyEdge / highwater).
*/
GetOpacityPixel(PolygonInfo *polygon_info,const double mid,
  const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x,
  const ssize_t y,double *stroke_opacity)
{
  double
    alpha,
    beta,
    distance,
    subpath_opacity;

  PointInfo
    delta;

  register EdgeInfo
    *p;

  register const PointInfo
    *q;

  register ssize_t
    i;

  ssize_t
    j,
    winding_number;

  /*
    Compute fill & stroke opacity for this (x,y) point.
  */
  *stroke_opacity=0.0;
  subpath_opacity=0.0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    /*
      Edges are ordered; once an edge starts below the scanline (beyond the
      stroke margin) no later edge can intersect it.
    */
    if ((double) y <= (p->bounds.y1-mid-0.5))
      break;
    if ((double) y > (p->bounds.y2+mid+0.5))
      {
        /* This edge is entirely above the scanline; drop it for good. */
        (void) DestroyEdge(polygon_info,(size_t) j);
        continue;
      }
    if (((double) x <= (p->bounds.x1-mid-0.5)) ||
        ((double) x > (p->bounds.x2+mid+0.5)))
      continue;
    /* Resume at the highwater mark recorded for this scanline. */
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
    {
      if ((double) y <= (p->points[i-1].y-mid-0.5))
        break;
      if ((double) y > (p->points[i].y+mid+0.5))
        continue;
      if (p->scanline != (double) y)
        {
          p->scanline=(double) y;
          p->highwater=(size_t) i;
        }
      /*
        Compute distance between a point and an edge.
      */
      q=p->points+i-1;
      delta.x=(q+1)->x-q->x;
      delta.y=(q+1)->y-q->y;
      /* beta is the projection of (x,y)-q onto the segment direction. */
      beta=delta.x*(x-q->x)+delta.y*(y-q->y);
      if (beta <= 0.0)
        {
          /* Before the segment start: squared distance to endpoint q. */
          delta.x=(double) x-q->x;
          delta.y=(double) y-q->y;
          distance=delta.x*delta.x+delta.y*delta.y;
        }
      else
        {
          alpha=delta.x*delta.x+delta.y*delta.y;
          if (beta >= alpha)
            {
              /* Past the segment end: squared distance to endpoint q+1. */
              delta.x=(double) x-(q+1)->x;
              delta.y=(double) y-(q+1)->y;
              distance=delta.x*delta.x+delta.y*delta.y;
            }
          else
            {
              /*
                Interior: squared perpendicular distance via the cross
                product, normalized by the squared segment length.
              */
              alpha=PerceptibleReciprocal(alpha);
              beta=delta.x*(y-q->y)-delta.y*(x-q->x);
              distance=alpha*beta*beta;
            }
        }
      /*
        Compute stroke & subpath opacity.
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          /*
            Stroke coverage: full opacity within (mid-0.5) of the edge,
            tapering off toward (mid+0.5) for antialiasing.
          */
          alpha=mid+0.5;
          if ((*stroke_opacity < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_opacity=1.0;
              else
                {
                  beta=1.0;
                  if (fabs(distance-1.0) >= MagickEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_opacity < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_opacity=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) ||
          (subpath_opacity >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_opacity=1.0;
          continue;
        }
      /* NOTE(review): redundant -- distance > 1.0 was filtered above. */
      if (distance > 1.0)
        continue;
      if (fabs(beta) < MagickEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= MagickEpsilon)
            beta=sqrt(distance);
        }
      /* Edge-proximity antialiasing contribution for the fill. */
      alpha=beta-1.0;
      if (subpath_opacity < (alpha*alpha))
        subpath_opacity=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_opacity >= 1.0)
    return(1.0);
  /*
    Determine winding number.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        /* Point is right of the whole edge: the edge crosses the ray. */
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) (p->number_points-1); i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /* Side-of-segment test decides whether the ray crosses this edge. */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ?
1 : -1; } if (fill_rule != NonZeroRule) { if ((MagickAbsoluteValue(winding_number) & 0x01) != 0) return(1.0); } else if (MagickAbsoluteValue(winding_number) != 0) return(1.0); return(subpath_opacity); } static MagickBooleanType DrawPolygonPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) { CacheView *image_view; double mid; ExceptionInfo *exception; MagickBooleanType fill, status; PolygonInfo **magick_restrict polygon_info; register EdgeInfo *p; register ssize_t i; SegmentInfo bounds; ssize_t start_y, stop_y, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); assert(primitive_info != (PrimitiveInfo *) NULL); if (primitive_info->coordinates <= 1) return(MagickTrue); /* Compute bounding box. */ polygon_info=AcquirePolygonThreadSet(draw_info,primitive_info); if (polygon_info == (PolygonInfo **) NULL) return(MagickFalse); DisableMSCWarning(4127) if (0) { status=DrawBoundingRectangles(image,draw_info,polygon_info[0]); if (status == MagickFalse) { polygon_info=DestroyPolygonThreadSet(polygon_info); return(status); } } RestoreMSCWarning if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-polygon"); fill=(primitive_info->method == FillToBorderMethod) || (primitive_info->method == FloodfillMethod) ? 
MagickTrue : MagickFalse; mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0; bounds=polygon_info[0]->edges[0].bounds; for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++) { p=polygon_info[0]->edges+i; if (p->bounds.x1 < bounds.x1) bounds.x1=p->bounds.x1; if (p->bounds.y1 < bounds.y1) bounds.y1=p->bounds.y1; if (p->bounds.x2 > bounds.x2) bounds.x2=p->bounds.x2; if (p->bounds.y2 > bounds.y2) bounds.y2=p->bounds.y2; } bounds.x1-=(mid+1.0); bounds.y1-=(mid+1.0); bounds.x2+=(mid+1.0); bounds.y2+=(mid+1.0); if ((bounds.x1 >= (double) image->columns) || (bounds.y1 >= (double) image->rows) || (bounds.x2 <= 0.0) || (bounds.y2 <= 0.0)) { polygon_info=DestroyPolygonThreadSet(polygon_info); return(MagickTrue); /* virtual polygon */ } bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x1; bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y1; bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns-1.0 ? (double) image->columns-1.0 : bounds.x2; bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows-1.0 ? (double) image->rows-1.0 : bounds.y2; status=MagickTrue; exception=(&image->exception); image_view=AcquireAuthenticCacheView(image,exception); if ((primitive_info->coordinates == 1) || (polygon_info[0]->number_edges == 0)) { /* Draw point. 
*/ start_y=(ssize_t) ceil(bounds.y1-0.5); stop_y=(ssize_t) floor(bounds.y2+0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y=start_y; y <= stop_y; y++) { MagickBooleanType sync; register PixelPacket *magick_restrict q; register ssize_t x; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=(ssize_t) ceil(bounds.x1-0.5); stop_x=(ssize_t) floor(bounds.x2+0.5); x=start_x; q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for ( ; x <= stop_x; x++) { if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) && (y == (ssize_t) ceil(primitive_info->point.y-0.5))) (void) GetFillColor(draw_info,x-start_x,y-start_y,q); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-polygon"); return(status); } /* Draw polygon or line. 
*/ start_y=(ssize_t) ceil(bounds.y1-0.5); stop_y=(ssize_t) floor(bounds.y2+0.5); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,stop_y-start_y+1,1) #endif for (y=start_y; y <= stop_y; y++) { const int id = GetOpenMPThreadId(); double fill_opacity, stroke_opacity; PixelPacket fill_color, stroke_color; register PixelPacket *magick_restrict q; register ssize_t x; ssize_t start_x, stop_x; if (status == MagickFalse) continue; start_x=(ssize_t) ceil(bounds.x1-0.5); stop_x=(ssize_t) floor(bounds.x2+0.5); q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+ 1),1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } for (x=start_x; x <= stop_x; x++) { /* Fill and/or stroke. */ fill_opacity=GetOpacityPixel(polygon_info[id],mid,fill, draw_info->fill_rule,x,y,&stroke_opacity); if (draw_info->stroke_antialias == MagickFalse) { fill_opacity=fill_opacity > 0.25 ? 1.0 : 0.0; stroke_opacity=stroke_opacity > 0.25 ? 
1.0 : 0.0; } (void) GetFillColor(draw_info,x-start_x,y-start_y,&fill_color); fill_opacity=(double) (QuantumRange-fill_opacity*(QuantumRange- fill_color.opacity)); MagickCompositeOver(&fill_color,(MagickRealType) fill_opacity,q, (MagickRealType) q->opacity,q); (void) GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color); stroke_opacity=(double) (QuantumRange-stroke_opacity*(QuantumRange- stroke_color.opacity)); MagickCompositeOver(&stroke_color,(MagickRealType) stroke_opacity,q, (MagickRealType) q->opacity,q); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); polygon_info=DestroyPolygonThreadSet(polygon_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-polygon"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image. % % The format of the DrawPrimitive method is: % % MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info, % PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % */ static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info) { const char *methods[] = { "point", "replace", "floodfill", "filltoborder", "reset", "?" 
}; PointInfo p, q, point; register ssize_t i, x; ssize_t coordinates, y; x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); switch (primitive_info->primitive) { case PointPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "PointPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case ColorPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case MattePrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "MattePrimitive %.20g,%.20g %s",(double) x,(double) y, methods[primitive_info->method]); return; } case TextPrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "TextPrimitive %.20g,%.20g",(double) x,(double) y); return; } case ImagePrimitive: { (void) LogMagickEvent(DrawEvent,GetMagickModule(), "ImagePrimitive %.20g,%.20g",(double) x,(double) y); return; } default: break; } coordinates=0; p=primitive_info[0].point; q.x=(-1.0); q.y=(-1.0); for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; if (coordinates <= 0) { coordinates=(ssize_t) primitive_info[i].coordinates; (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin open (%.20g)",(double) coordinates); p=point; } point=primitive_info[i].point; if ((fabs(q.x-point.x) >= MagickEpsilon) || (fabs(q.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y); else (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y); q=point; coordinates--; if (coordinates > 0) continue; if ((fabs(p.x-point.x) >= MagickEpsilon) || (fabs(p.y-point.y) >= MagickEpsilon)) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)", (double) coordinates); else (void) LogMagickEvent(DrawEvent,GetMagickModule()," 
end open (%.20g)", (double) coordinates); } } MagickExport MagickBooleanType DrawPrimitive(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) { CacheView *image_view; ExceptionInfo *exception; MagickStatusType status; register ssize_t i, x; ssize_t y; if (image->debug != MagickFalse) { (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-primitive"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx, draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy, draw_info->affine.tx,draw_info->affine.ty); } exception=(&image->exception); status=MagickTrue; if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsPixelGray(&draw_info->fill) == MagickFalse) || (IsPixelGray(&draw_info->stroke) == MagickFalse))) status=SetImageColorspace(image,sRGBColorspace); if (draw_info->compliance == SVGCompliance) { status&=SetImageClipMask(image,draw_info->clipping_mask); status&=SetImageMask(image,draw_info->composite_mask); } x=(ssize_t) ceil(primitive_info->point.x-0.5); y=(ssize_t) ceil(primitive_info->point.y-0.5); image_view=AcquireAuthenticCacheView(image,exception); switch (primitive_info->primitive) { case ColorPrimitive: { switch (primitive_info->method) { case PointMethod: default: { PixelPacket *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (PixelPacket *) NULL) break; (void) GetFillColor(draw_info,x,y,q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelPacket target; status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsColorSimilar(image,q,&target) == MagickFalse) { q++; continue; } (void) 
GetFillColor(draw_info,x,y,q); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { MagickPixelPacket target; (void) GetOneVirtualMagickPixel(image,x,y,&target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(MagickRealType) draw_info->border_color.red; target.green=(MagickRealType) draw_info->border_color.green; target.blue=(MagickRealType) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,DefaultChannels,draw_info,&target,x, y,primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue); break; } case ResetMethod: { MagickBooleanType sync; for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) GetFillColor(draw_info,x,y,q); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } } break; } case MattePrimitive: { if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); switch (primitive_info->method) { case PointMethod: default: { PixelPacket pixel; PixelPacket *q; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (PixelPacket *) NULL) break; (void) GetFillColor(draw_info,x,y,&pixel); SetPixelOpacity(q,pixel.opacity); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case ReplaceMethod: { MagickBooleanType sync; PixelPacket pixel, target; status&=GetOneCacheViewVirtualPixel(image_view,x,y,&target,exception); for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if 
(IsColorSimilar(image,q,&target) == MagickFalse) { q++; continue; } (void) GetFillColor(draw_info,x,y,&pixel); SetPixelOpacity(q,pixel.opacity); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } case FloodfillMethod: case FillToBorderMethod: { MagickPixelPacket target; (void) GetOneVirtualMagickPixel(image,x,y,&target,exception); if (primitive_info->method == FillToBorderMethod) { target.red=(MagickRealType) draw_info->border_color.red; target.green=(MagickRealType) draw_info->border_color.green; target.blue=(MagickRealType) draw_info->border_color.blue; } status&=FloodfillPaintImage(image,OpacityChannel,draw_info,&target,x, y,primitive_info->method == FloodfillMethod ? MagickFalse : MagickTrue); break; } case ResetMethod: { MagickBooleanType sync; PixelPacket pixel; for (y=0; y < (ssize_t) image->rows; y++) { register PixelPacket *magick_restrict q; register ssize_t x; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) GetFillColor(draw_info,x,y,&pixel); SetPixelOpacity(q,pixel.opacity); q++; } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) break; } break; } } break; } case ImagePrimitive: { AffineMatrix affine; char composite_geometry[MaxTextExtent]; Image *composite_image, *composite_images; ImageInfo *clone_info; RectangleInfo geometry; ssize_t x1, y1; if (primitive_info->text == (char *) NULL) break; clone_info=AcquireImageInfo(); if (LocaleNCompare(primitive_info->text,"data:",5) == 0) composite_images=ReadInlineImage(clone_info,primitive_info->text, &image->exception); else { (void) CopyMagickString(clone_info->filename,primitive_info->text, MaxTextExtent); composite_images=ReadImage(clone_info,&image->exception); } clone_info=DestroyImageInfo(clone_info); if (composite_images == (Image *) NULL) { status=0; break; } 
composite_image=RemoveFirstImageFromList(&composite_images); composite_images=DestroyImageList(composite_images); (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor) NULL,(void *) NULL); x1=(ssize_t) ceil(primitive_info[1].point.x-0.5); y1=(ssize_t) ceil(primitive_info[1].point.y-0.5); if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) || ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows))) { char geometry[MaxTextExtent]; /* Resize image. */ (void) FormatLocaleString(geometry,MaxTextExtent,"%gx%g!", primitive_info[1].point.x,primitive_info[1].point.y); composite_image->filter=image->filter; (void) TransformImage(&composite_image,(char *) NULL,geometry); } if (composite_image->matte == MagickFalse) (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel); if (draw_info->opacity != OpaqueOpacity) (void) SetImageOpacity(composite_image,draw_info->opacity); SetGeometry(image,&geometry); image->gravity=draw_info->gravity; geometry.x=x; geometry.y=y; (void) FormatLocaleString(composite_geometry,MaxTextExtent, "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double) composite_image->rows,(double) geometry.x,(double) geometry.y); (void) ParseGravityGeometry(image,composite_geometry,&geometry, &image->exception); affine=draw_info->affine; affine.tx=(double) geometry.x; affine.ty=(double) geometry.y; composite_image->interpolate=image->interpolate; if ((draw_info->compose == OverCompositeOp) || (draw_info->compose == SrcOverCompositeOp)) (void) DrawAffineImage(image,composite_image,&affine); else (void) CompositeImage(image,draw_info->compose,composite_image, geometry.x,geometry.y); composite_image=DestroyImage(composite_image); break; } case PointPrimitive: { PixelPacket fill_color; PixelPacket *q; if ((y < 0) || (y >= (ssize_t) image->rows)) break; if ((x < 0) || (x >= (ssize_t) image->columns)) break; q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception); if (q == (PixelPacket *) NULL) break; (void) 
GetFillColor(draw_info,x,y,&fill_color); MagickCompositeOver(&fill_color,(MagickRealType) fill_color.opacity,q, (MagickRealType) q->opacity,q); status&=SyncCacheViewAuthenticPixels(image_view,exception); break; } case TextPrimitive: { char geometry[MaxTextExtent]; DrawInfo *clone_info; if (primitive_info->text == (char *) NULL) break; clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); (void) CloneString(&clone_info->text,primitive_info->text); (void) FormatLocaleString(geometry,MaxTextExtent,"%+f%+f", primitive_info->point.x,primitive_info->point.y); (void) CloneString(&clone_info->geometry,geometry); status&=AnnotateImage(image,clone_info); clone_info=DestroyDrawInfo(clone_info); break; } default: { double mid, scale; DrawInfo *clone_info; if (IsEventLogging() != MagickFalse) LogPrimitiveInfo(primitive_info); scale=ExpandAffine(&draw_info->affine); if ((draw_info->dash_pattern != (double *) NULL) && (fabs(draw_info->dash_pattern[0]) >= MagickEpsilon) && (fabs(scale*draw_info->stroke_width) >= MagickEpsilon) && (draw_info->stroke.opacity != (Quantum) TransparentOpacity)) { /* Draw dash polygon. */ clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.opacity=(Quantum) TransparentOpacity; status&=DrawPolygonPrimitive(image,clone_info,primitive_info); clone_info=DestroyDrawInfo(clone_info); (void) DrawDashPolygon(draw_info,primitive_info,image); break; } mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0; if ((mid > 1.0) && ((draw_info->stroke.opacity != (Quantum) TransparentOpacity) || (draw_info->stroke_pattern != (Image *) NULL))) { double x, y; MagickBooleanType closed_path; /* Draw strokes while respecting line cap/join attributes. 
*/ closed_path=primitive_info[0].closed_subpath; i=(ssize_t) primitive_info[0].coordinates; x=fabs(primitive_info[i-1].point.x-primitive_info[0].point.x); y=fabs(primitive_info[i-1].point.y-primitive_info[0].point.y); if ((x < MagickEpsilon) && (y < MagickEpsilon)) closed_path=MagickTrue; if ((((draw_info->linecap == RoundCap) || (closed_path != MagickFalse)) && (draw_info->linejoin == RoundJoin)) || (primitive_info[i].primitive != UndefinedPrimitive)) { (void) DrawPolygonPrimitive(image,draw_info,primitive_info); break; } clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->stroke_width=0.0; clone_info->stroke.opacity=(Quantum) TransparentOpacity; status&=DrawPolygonPrimitive(image,clone_info,primitive_info); clone_info=DestroyDrawInfo(clone_info); status&=DrawStrokePolygon(image,draw_info,primitive_info); break; } status&=DrawPolygonPrimitive(image,draw_info,primitive_info); break; } } image_view=DestroyCacheView(image_view); if (draw_info->compliance == SVGCompliance) { status&=SetImageClipMask(image,(Image *) NULL); status&=SetImageMask(image,(Image *) NULL); } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w S t r o k e P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on % the image while respecting the line cap and join attributes. % % The format of the DrawStrokePolygon method is: % % MagickBooleanType DrawStrokePolygon(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. 
%
%
*/

/*
  DrawRoundLinecap() approximates a round line cap at a stroke endpoint by
  filling a degenerate 4-vertex polygon (2*MagickEpsilon on a side) centered
  on the endpoint described by primitive_info.
*/
static MagickBooleanType DrawRoundLinecap(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  PrimitiveInfo
    linecap[5];

  register ssize_t
    i;

  /*
    Clone the endpoint into four vertices and nudge them apart so the
    polygon has a non-zero extent; linecap[4] terminates the vertex list.
  */
  for (i=0; i < 4; i++)
    linecap[i]=(*primitive_info);
  linecap[0].coordinates=4;
  linecap[1].point.x+=2.0*MagickEpsilon;
  linecap[2].point.x+=2.0*MagickEpsilon;
  linecap[2].point.y+=2.0*MagickEpsilon;
  linecap[3].point.y+=2.0*MagickEpsilon;
  linecap[4].primitive=UndefinedPrimitive;
  return(DrawPolygonPrimitive(image,draw_info,linecap));
}

/*
  DrawStrokePolygon() renders the stroke of each subpath by converting it
  into a filled polygon (TraceStrokePolygon) painted with the stroke color
  and pattern; round line caps are added for open subpaths when requested.
*/
static MagickBooleanType DrawStrokePolygon(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
  DrawInfo
    *clone_info;

  MagickBooleanType
    closed_path;

  MagickStatusType
    status;

  PrimitiveInfo
    *stroke_polygon;

  register const PrimitiveInfo
    *p,
    *q;

  /*
    Draw stroked polygon: the clone fills with the stroke color/pattern and
    disables its own stroke so each traced outline is painted exactly once.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " begin draw-stroke-polygon");
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->fill=draw_info->stroke;
  if (clone_info->fill_pattern != (Image *) NULL)
    clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern);
  if (clone_info->stroke_pattern != (Image *) NULL)
    clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0,
      MagickTrue,&clone_info->stroke_pattern->exception);
  clone_info->stroke.opacity=(Quantum) TransparentOpacity;
  clone_info->stroke_width=0.0;
  clone_info->fill_rule=NonZeroRule;
  status=MagickTrue;
  for (p=primitive_info; p->primitive != UndefinedPrimitive;
       p+=p->coordinates)
  {
    if (p->coordinates == 1)
      continue;  /* a lone point has no stroke outline */
    stroke_polygon=TraceStrokePolygon(image,draw_info,p);
    if (stroke_polygon == (PrimitiveInfo *) NULL)
      {
        status=0;
        stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(
          stroke_polygon);
        break;
      }
    status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon);
    stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon);
    if (status == 0)
      break;
    /* q is the last vertex of this subpath (used for the trailing cap) */
    q=p+p->coordinates-1;
    closed_path=p->closed_subpath;
    if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse))
      {
        status&=DrawRoundLinecap(image,draw_info,p);
        status&=DrawRoundLinecap(image,draw_info,q);
      }
  }
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),
      " end draw-stroke-polygon");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t A f f i n e M a t r i x                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetAffineMatrix() returns an AffineMatrix initialized to the identity
%  matrix.
%
%  The format of the GetAffineMatrix method is:
%
%      void GetAffineMatrix(AffineMatrix *affine_matrix)
%
%  A description of each parameter follows:
%
%    o affine_matrix: the affine matrix.
%
*/
MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix)
{
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(affine_matrix != (AffineMatrix *) NULL);
  /* zero everything, then set the diagonal scale terms to 1 (identity) */
  (void) memset(affine_matrix,0,sizeof(*affine_matrix));
  affine_matrix->sx=1.0;
  affine_matrix->sy=1.0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t D r a w I n f o                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetDrawInfo() initializes draw_info to default values from image_info.
%
%  The format of the GetDrawInfo method is:
%
%      void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o draw_info: the draw info.
%
*/
MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info)
{
  char
    *next_token;

  const char
    *option;

  ExceptionInfo
    *exception;

  ImageInfo
    *clone_info;

  /*
    Initialize draw attributes: defaults first, then overrides read from
    the image_info option table (e.g. "fill", "stroke", "gravity").
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) memset(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* default fill "#000F" and stroke "#FFF0" (per QueryColorDatabase) */
  (void) QueryColorDatabase("#000F",&draw_info->fill,exception);
  (void) QueryColorDatabase("#FFF0",&draw_info->stroke,exception);
  draw_info->stroke_antialias=clone_info->antialias;
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->opacity=OpaqueOpacity;
  draw_info->fill_opacity=OpaqueOpacity;
  draw_info->stroke_opacity=OpaqueOpacity;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  /* pointsize defaults to 12.0 unless image_info carries a non-zero size */
  draw_info->pointsize=12.0;
  if (fabs(clone_info->pointsize) >= MagickEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->undercolor.opacity=(Quantum) TransparentOpacity;
  draw_info->border_color=clone_info->border_color;
  draw_info->compose=OverCompositeOp;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  draw_info->render=MagickTrue;
  draw_info->clip_path=MagickFalse;
  draw_info->debug=IsEventLogging();
  /* apply per-image option overrides, when present */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&draw_info->fill,exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&draw_info->stroke,exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorDatabase(option,&draw_info->undercolor,exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* named weight (e.g. "bold") first; fall back to a numeric value */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r m u t a t e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the permutation of the (n,k).
% % The format of the Permutate method is: % % void Permutate(ssize_t n,ssize_t k) % % A description of each parameter follows: % % o n: % % o k: % % */ static inline double Permutate(const ssize_t n,const ssize_t k) { double r; register ssize_t i; r=1.0; for (i=k+1; i <= n; i++) r*=i; for (i=1; i <= (n-k); i++) r/=i; return(r); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + T r a c e P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TracePrimitive is a collection of methods for generating graphic % primitives such as arcs, ellipses, paths, etc. % */ static MagickBooleanType TraceArc(MVGInfo *mvg_info,const PointInfo start, const PointInfo end,const PointInfo degrees) { PointInfo center, radius; center.x=0.5*(end.x+start.x); center.y=0.5*(end.y+start.y); radius.x=fabs(center.x-start.x); radius.y=fabs(center.y-start.y); return(TraceEllipse(mvg_info,center,radius,degrees)); } static MagickBooleanType TraceArcPath(MVGInfo *mvg_info,const PointInfo start, const PointInfo end,const PointInfo arc,const double angle, const MagickBooleanType large_arc,const MagickBooleanType sweep) { double alpha, beta, delta, factor, gamma, theta; MagickBooleanType status; PointInfo center, points[3], radii; register double cosine, sine; PrimitiveInfo *primitive_info; register PrimitiveInfo *p; register ssize_t i; size_t arc_segments; ssize_t offset; offset=mvg_info->offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=0; if ((fabs(start.x-end.x) < MagickEpsilon) && (fabs(start.y-end.y) < MagickEpsilon)) return(TracePoint(primitive_info,end)); radii.x=fabs(arc.x); radii.y=fabs(arc.y); if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon)) return(TraceLine(primitive_info,start,end)); cosine=cos(DegreesToRadians(fmod((double) angle,360.0))); sine=sin(DegreesToRadians(fmod((double) angle,360.0))); center.x=(double) 
(cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2); center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2); delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/ (radii.y*radii.y); if (delta < MagickEpsilon) return(TraceLine(primitive_info,start,end)); if (delta > 1.0) { radii.x*=sqrt((double) delta); radii.y*=sqrt((double) delta); } points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x); points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y); points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x); points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y); alpha=points[1].x-points[0].x; beta=points[1].y-points[0].y; factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25; if (factor <= 0.0) factor=0.0; else { factor=sqrt((double) factor); if (sweep == large_arc) factor=(-factor); } center.x=(double) ((points[0].x+points[1].x)/2-factor*beta); center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha); alpha=atan2(points[0].y-center.y,points[0].x-center.x); theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha; if ((theta < 0.0) && (sweep != MagickFalse)) theta+=2.0*MagickPI; else if ((theta > 0.0) && (sweep == MagickFalse)) theta-=2.0*MagickPI; arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+ MagickEpsilon)))); p=primitive_info; status=MagickTrue; for (i=0; i < (ssize_t) arc_segments; i++) { beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments)); gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))* sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/ sin(fmod((double) beta,DegreesToRadians(360.0))); points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+ (double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/ arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+ 
(double) i*theta/arc_segments),DegreesToRadians(360.0)))); points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)* theta/arc_segments),DegreesToRadians(360.0)))); points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double) (i+1)*theta/arc_segments),DegreesToRadians(360.0)))); p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x; p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y; (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y* points[0].y); (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y* points[0].y); (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y* points[1].y); (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y* points[1].y); (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y* points[2].y); (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y* points[2].y); if (i == (ssize_t) (arc_segments-1)) (p+3)->point=end; status&=TraceBezier(mvg_info,4); if (status == MagickFalse) break; p=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=p->coordinates; p+=p->coordinates; } mvg_info->offset=offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(status); } static MagickBooleanType TraceBezier(MVGInfo *mvg_info, const size_t number_coordinates) { double alpha, *coefficients, weight; PointInfo end, point, *points; PrimitiveInfo *primitive_info; register PrimitiveInfo *p; register ssize_t i, j; size_t control_points, quantum; /* Allocate coefficients. 
*/ primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; quantum=number_coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { for (j=i+1; j < (ssize_t) number_coordinates; j++) { alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x); if (alpha > (double) quantum) quantum=(size_t) alpha; alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y); if (alpha > (double) quantum) quantum=(size_t) alpha; } } quantum=(size_t) MagickMin((double) quantum/number_coordinates, (double) BezierQuantum); control_points=quantum*number_coordinates; if (CheckPrimitiveExtent(mvg_info,control_points+1) == MagickFalse) return(MagickFalse); primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; coefficients=(double *) AcquireQuantumMemory((size_t) number_coordinates,sizeof(*coefficients)); points=(PointInfo *) AcquireQuantumMemory((size_t) control_points, sizeof(*points)); if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL)) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); /* Compute bezier points. */ end=primitive_info[number_coordinates-1].point; for (i=0; i < (ssize_t) number_coordinates; i++) coefficients[i]=Permutate((ssize_t) number_coordinates-1,i); weight=0.0; for (i=0; i < (ssize_t) control_points; i++) { p=primitive_info; point.x=0.0; point.y=0.0; alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0); for (j=0; j < (ssize_t) number_coordinates; j++) { point.x+=alpha*coefficients[j]*p->point.x; point.y+=alpha*coefficients[j]*p->point.y; alpha*=weight/(1.0-weight); p++; } points[i]=point; weight+=1.0/control_points; } /* Bezier curves are just short segmented polys. 
*/ p=primitive_info; for (i=0; i < (ssize_t) control_points; i++) { if (TracePoint(p,points[i]) == MagickFalse) return(MagickFalse); p+=p->coordinates; } if (TracePoint(p,end) == MagickFalse) return(MagickFalse); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } points=(PointInfo *) RelinquishMagickMemory(points); coefficients=(double *) RelinquishMagickMemory(coefficients); return(MagickTrue); } static MagickBooleanType TraceCircle(MVGInfo *mvg_info,const PointInfo start, const PointInfo end) { double alpha, beta, radius; PointInfo offset, degrees; alpha=end.x-start.x; beta=end.y-start.y; radius=hypot((double) alpha,(double) beta); offset.x=(double) radius; offset.y=(double) radius; degrees.x=0.0; degrees.y=360.0; return(TraceEllipse(mvg_info,start,offset,degrees)); } static MagickBooleanType TraceEllipse(MVGInfo *mvg_info,const PointInfo center, const PointInfo radii,const PointInfo arc) { double coordinates, delta, step, x, y; PointInfo angle, point; PrimitiveInfo *primitive_info; register PrimitiveInfo *p; register ssize_t i; /* Ellipses are just short segmented polys. 
*/ primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; primitive_info->coordinates=0; if ((fabs(radii.x) < MagickEpsilon) || (fabs(radii.y) < MagickEpsilon)) return(MagickTrue); delta=2.0*PerceptibleReciprocal(MagickMax(radii.x,radii.y)); step=MagickPI/8.0; if ((delta >= 0.0) && (delta < (MagickPI/8.0))) step=MagickPI/4.0/(MagickPI*PerceptibleReciprocal(delta)/2.0); angle.x=DegreesToRadians(arc.x); y=arc.y; while (y < arc.x) y+=360.0; angle.y=DegreesToRadians(y); coordinates=ceil((angle.y-angle.x)/step+1.0); if ((coordinates > (double) SSIZE_MAX) || (coordinates > (double) GetMaxMemoryRequest())) { (void) ThrowMagickException(mvg_info->exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",""); return(MagickFalse); } if (CheckPrimitiveExtent(mvg_info,(size_t) coordinates) == MagickFalse) return(MagickFalse); primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; for (p=primitive_info; angle.x < angle.y; angle.x+=step) { point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*radii.x+center.x; point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*radii.y+center.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; } point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*radii.x+center.x; point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*radii.y+center.y; if (TracePoint(p,point) == MagickFalse) return(MagickFalse); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); primitive_info->closed_subpath=MagickFalse; x=fabs(primitive_info[0].point.x- primitive_info[primitive_info->coordinates-1].point.x); y=fabs(primitive_info[0].point.y- primitive_info[primitive_info->coordinates-1].point.y); if ((x < MagickEpsilon) && (y < MagickEpsilon)) primitive_info->closed_subpath=MagickTrue; for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } return(MagickTrue); } static MagickBooleanType TraceLine(PrimitiveInfo *primitive_info, const PointInfo 
start,const PointInfo end) { if (TracePoint(primitive_info,start) == MagickFalse) return(MagickFalse); if ((fabs(start.x-end.x) < MagickEpsilon) && (fabs(start.y-end.y) < MagickEpsilon)) { primitive_info->primitive=PointPrimitive; primitive_info->coordinates=1; return(MagickTrue); } if (TracePoint(primitive_info+1,end) == MagickFalse) return(MagickFalse); (primitive_info+1)->primitive=primitive_info->primitive; primitive_info->coordinates=2; primitive_info->closed_subpath=MagickFalse; return(MagickTrue); } static size_t TracePath(Image *image,MVGInfo *mvg_info,const char *path) { char *next_token, token[MaxTextExtent]; const char *p; double x, y; int attribute, last_attribute; MagickBooleanType status; PointInfo end = {0.0, 0.0}, points[4] = { {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0}, {0.0, 0.0} }, point = {0.0, 0.0}, start = {0.0, 0.0}; PrimitiveInfo *primitive_info; PrimitiveType primitive_type; register PrimitiveInfo *q; register ssize_t i; size_t number_coordinates, z_count; ssize_t subpath_offset; subpath_offset=mvg_info->offset; primitive_info=(*mvg_info->primitive_info)+mvg_info->offset; status=MagickTrue; attribute=0; number_coordinates=0; z_count=0; primitive_type=primitive_info->primitive; q=primitive_info; for (p=path; *p != '\0'; ) { if (status == MagickFalse) break; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == '\0') break; last_attribute=attribute; attribute=(int) (*p++); switch (attribute) { case 'a': case 'A': { double angle = 0.0; MagickBooleanType large_arc = MagickFalse, sweep = MagickFalse; PointInfo arc = {0.0, 0.0}; /* Elliptical arc. 
*/ do { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); arc.x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); arc.y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); angle=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse; GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse; if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); end.x=(double) (attribute == (int) 'A' ? x : point.x+x); end.y=(double) (attribute == (int) 'A' ? y : point.y+y); status&=TraceArcPath(mvg_info,point,end,arc,angle,large_arc,sweep); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'c': case 'C': { /* Cubic Bézier curve. 
*/ do { points[0]=point; for (i=1; i < 4; i++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); end.x=(double) (attribute == (int) 'C' ? x : point.x+x); end.y=(double) (attribute == (int) 'C' ? y : point.y+y); points[i]=end; } for (i=0; i < 4; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,4) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'H': case 'h': { do { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); point.x=(double) (attribute == (int) 'H' ? x: point.x+x); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(0); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'l': case 'L': { /* Line to. 
*/ do { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); point.x=(double) (attribute == (int) 'L' ? x : point.x+x); point.y=(double) (attribute == (int) 'L' ? y : point.y+y); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(0); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'M': case 'm': { /* Move to. */ if (mvg_info->offset != subpath_offset) { primitive_info=(*mvg_info->primitive_info)+subpath_offset; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; primitive_info=q; subpath_offset=mvg_info->offset; } i=0; do { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); point.x=(double) (attribute == (int) 'M' ? x : point.x+x); point.y=(double) (attribute == (int) 'M' ? 
y : point.y+y); if (i == 0) start=point; i++; if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(0); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'q': case 'Q': { /* Quadratic Bézier curve. */ do { points[0]=point; for (i=1; i < 3; i++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); if (*p == ',') p++; end.x=(double) (attribute == (int) 'Q' ? x : point.x+x); end.y=(double) (attribute == (int) 'Q' ? y : point.y+y); points[i]=end; } for (i=0; i < 3; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,3) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 's': case 'S': { /* Cubic Bézier curve. 
*/ do { points[0]=points[3]; points[1].x=2.0*points[3].x-points[2].x; points[1].y=2.0*points[3].y-points[2].y; for (i=2; i < 4; i++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); if (*p == ',') p++; end.x=(double) (attribute == (int) 'S' ? x : point.x+x); end.y=(double) (attribute == (int) 'S' ? y : point.y+y); points[i]=end; } if (strchr("CcSs",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 4; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,4) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; last_attribute=attribute; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 't': case 'T': { /* Quadratic Bézier curve. */ do { points[0]=points[2]; points[1].x=2.0*points[2].x-points[1].x; points[1].y=2.0*points[2].y-points[1].y; for (i=2; i < 3; i++) { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); x=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); end.x=(double) (attribute == (int) 'T' ? x : point.x+x); end.y=(double) (attribute == (int) 'T' ? 
y : point.y+y); points[i]=end; } if (status == MagickFalse) break; if (strchr("QqTt",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 3; i++) (q+i)->point=points[i]; if (TraceBezier(mvg_info,3) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; mvg_info->offset+=q->coordinates; q+=q->coordinates; point=end; last_attribute=attribute; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'v': case 'V': { /* Line to. */ do { GetNextToken(p,&p,MaxTextExtent,token); if (*token == ',') GetNextToken(p,&p,MaxTextExtent,token); y=StringToDouble(token,&next_token); if (token == next_token) ThrowPointExpectedException(image,token); point.y=(double) (attribute == (int) 'V' ? y : point.y+y); if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse) return(0); q=(*mvg_info->primitive_info)+mvg_info->offset; if (TracePoint(q,point) == MagickFalse) return(0); mvg_info->offset+=q->coordinates; q+=q->coordinates; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'z': case 'Z': { /* Close path. 
*/
          /* z/Z: close the current subpath by tracing back to its start. */
          point=start;
          if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
            return(0);
          q=(*mvg_info->primitive_info)+mvg_info->offset;
          if (TracePoint(q,point) == MagickFalse)
            return(0);
          mvg_info->offset+=q->coordinates;
          q+=q->coordinates;
          /*
            Back-patch the subpath header with its final coordinate count and
            mark it closed; the next subpath starts at the current offset.
          */
          primitive_info=(*mvg_info->primitive_info)+subpath_offset;
          primitive_info->coordinates=(size_t) (q-primitive_info);
          primitive_info->closed_subpath=MagickTrue;
          number_coordinates+=primitive_info->coordinates;
          primitive_info=q;
          subpath_offset=mvg_info->offset;
          z_count++;
          break;
        }
        default:
        {
          /* Unrecognized path command: report and stop parsing this token. */
          ThrowPointExpectedException(image,token);
          break;
        }
      }
    }
  if (status == MagickFalse)
    return(0);
  /* Close out the last (possibly unterminated) subpath. */
  primitive_info=(*mvg_info->primitive_info)+subpath_offset;
  primitive_info->coordinates=(size_t) (q-primitive_info);
  number_coordinates+=primitive_info->coordinates;
  /*
    Stamp every generated coordinate with the primitive type; when more than
    one z/Z close occurred, switch the fill method for correct compositing of
    multiple subpaths.
  */
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    q--;
    q->primitive=primitive_type;
    if (z_count > 1)
      q->method=FillToBorderMethod;
  }
  q=primitive_info;
  return(number_coordinates);
}

/*
  TraceRectangle() emits the five points of an axis-aligned rectangle
  (the four corners plus a repeat of the start point to close it) into
  primitive_info.  A rectangle with zero width or height produces no
  coordinates.  Returns MagickFalse if any TracePoint() call fails.
*/
static MagickBooleanType TraceRectangle(PrimitiveInfo *primitive_info,
  const PointInfo start,const PointInfo end)
{
  PointInfo
    point;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /* Degenerate (zero width or height) rectangle: no coordinates. */
  if ((fabs(start.x-end.x) < MagickEpsilon) ||
      (fabs(start.y-end.y) < MagickEpsilon))
    {
      primitive_info->coordinates=0;
      return(MagickTrue);
    }
  p=primitive_info;
  if (TracePoint(p,start) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  point.x=start.x;
  point.y=end.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  if (TracePoint(p,end) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  point.x=end.x;
  point.y=start.y;
  if (TracePoint(p,point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  /* Repeat the start point to close the subpath. */
  if (TracePoint(p,start) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /* Propagate the primitive type to every emitted point, back to front. */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceRoundRectangle() traces a rectangle with rounded corners by stitching
  together four quarter-ellipse arcs (one per corner, 90 degrees each) and a
  final point that closes the path back to the first arc's start.  The corner
  radii in `arc' are clamped to half the rectangle's extent.  A degenerate
  rectangle produces no coordinates.
*/
static MagickBooleanType TraceRoundRectangle(MVGInfo *mvg_info,
  const PointInfo start,const PointInfo end,PointInfo arc)
{
  PointInfo
    degrees,
    point,
    segment;

  PrimitiveInfo
    *primitive_info;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  ssize_t
    offset;

  offset=mvg_info->offset;
  segment.x=fabs(end.x-start.x);
  segment.y=fabs(end.y-start.y);
  if ((segment.x < MagickEpsilon) || (segment.y < MagickEpsilon))
    {
      (*mvg_info->primitive_info+mvg_info->offset)->coordinates=0;
      return(MagickTrue);
    }
  /* Clamp corner radii so opposing corners cannot overlap. */
  if (arc.x > (0.5*segment.x))
    arc.x=0.5*segment.x;
  if (arc.y > (0.5*segment.y))
    arc.y=0.5*segment.y;
  /* Top-right corner: 270..360 degrees. */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+arc.y;
  degrees.x=270.0;
  degrees.y=360.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* Bottom-right corner: 0..90 degrees. */
  point.x=start.x+segment.x-arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=0.0;
  degrees.y=90.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* Bottom-left corner: 90..180 degrees. */
  point.x=start.x+arc.x;
  point.y=start.y+segment.y-arc.y;
  degrees.x=90.0;
  degrees.y=180.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* Top-left corner: 180..270 degrees. */
  point.x=start.x+arc.x;
  point.y=start.y+arc.y;
  degrees.x=180.0;
  degrees.y=270.0;
  if (TraceEllipse(mvg_info,point,arc,degrees) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  mvg_info->offset+=p->coordinates;
  /* Close the path back to the very first traced point. */
  if (CheckPrimitiveExtent(mvg_info,PrimitiveExtentPad) == MagickFalse)
    return(MagickFalse);
  p=(*mvg_info->primitive_info)+mvg_info->offset;
  if (TracePoint(p,(*mvg_info->primitive_info+offset)->point) == MagickFalse)
    return(MagickFalse);
  p+=p->coordinates;
  /* Rewind to the saved offset and back-patch the subpath header. */
  mvg_info->offset=offset;
  primitive_info=(*mvg_info->primitive_info)+offset;
  primitive_info->coordinates=(size_t)
(p-primitive_info);
  primitive_info->closed_subpath=MagickTrue;
  /* Propagate the primitive type to every emitted point, back to front. */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  return(MagickTrue);
}

/*
  TraceSquareLinecap() extends both end points of an open polyline outward by
  `offset' along the direction of its first and last non-degenerate segments,
  producing the geometry of a square line cap.  The scans skip leading and
  trailing vertices that coincide (within MagickEpsilon) with the end points.
  NOTE(review): if every vertex coincides with the end point, `distance' can
  be vanishingly small and the divisions below approach 0/0 -- presumably the
  callers guarantee at least one non-degenerate segment; confirm upstream.
*/
static MagickBooleanType TraceSquareLinecap(PrimitiveInfo *primitive_info,
  const size_t number_vertices,const double offset)
{
  double
    distance;

  register double
    dx,
    dy;

  register ssize_t
    i;

  ssize_t
    j;

  dx=0.0;
  dy=0.0;
  /* Find the first vertex that differs measurably from vertex 0. */
  for (i=1; i < (ssize_t) number_vertices; i++)
  {
    dx=primitive_info[0].point.x-primitive_info[i].point.x;
    dy=primitive_info[0].point.y-primitive_info[i].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  if (i == (ssize_t) number_vertices)
    i=(ssize_t) number_vertices-1L;
  distance=hypot((double) dx,(double) dy);
  /* Push the start point outward by `offset' along the segment direction. */
  primitive_info[0].point.x=(double) (primitive_info[i].point.x+
    dx*(distance+offset)/distance);
  primitive_info[0].point.y=(double) (primitive_info[i].point.y+
    dy*(distance+offset)/distance);
  /* Find the last vertex that differs measurably from the final vertex. */
  for (j=(ssize_t) number_vertices-2; j >= 0; j--)
  {
    dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x;
    dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y;
    if ((fabs((double) dx) >= MagickEpsilon) ||
        (fabs((double) dy) >= MagickEpsilon))
      break;
  }
  distance=hypot((double) dx,(double) dy);
  /* Push the end point outward symmetrically. */
  primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+
    dx*(distance+offset)/distance);
  primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+
    dy*(distance+offset)/distance);
  return(MagickTrue);
}

/*
  TraceStrokePolygon() converts a polyline/polygon into a new closed polygon
  that outlines its stroke: two offset paths (path_p and path_q) are built on
  either side of each segment at half the stroke width, joined per the
  DrawInfo linejoin style (bevel, miter, or round), then concatenated into the
  returned stroke polygon.  The caller owns the returned allocation; NULL is
  returned on a memory failure.
*/
static PrimitiveInfo *TraceStrokePolygon(const Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
{
/*
  Grow path_p/path_q by `pad' entries, bailing out (freeing every local
  allocation and returning NULL from the enclosing function) on overflow or
  allocation failure.
*/
#define CheckPathExtent(pad) \
if ((q+(pad)) >= (ssize_t) max_strokes) \
  { \
    if (~max_strokes < (pad)) \
      { \
        path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
        path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
      } \
    else \
      { \
        max_strokes+=(pad); \
        path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, \
          sizeof(*path_p)); \
        path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, \
          sizeof(*path_q)); \
      } \
    if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) \
      { \
        if (path_p != (PointInfo *) NULL) \
          path_p=(PointInfo *) RelinquishMagickMemory(path_p); \
        if (path_q != (PointInfo *) NULL) \
          path_q=(PointInfo *) RelinquishMagickMemory(path_q); \
        polygon_primitive=(PrimitiveInfo *) \
          RelinquishMagickMemory(polygon_primitive); \
        return((PrimitiveInfo *) NULL); \
      } \
  }

  /* A pair of values, one for the previous (p) and one for the current (q)
     line segment. */
  typedef struct _LineSegment
  {
    double
      p,
      q;
  } LineSegment;

  double
    delta_theta,
    dot_product,
    mid,
    miterlimit;

  LineSegment
    dx = {0,0},
    dy = {0,0},
    inverse_slope = {0,0},
    slope = {0,0},
    theta = {0,0};

  MagickBooleanType
    closed_path;

  PointInfo
    box_p[5],
    box_q[5],
    center,
    offset,
    *path_p,
    *path_q;

  PrimitiveInfo
    *polygon_primitive,
    *stroke_polygon;

  register ssize_t
    i;

  size_t
    arc_segments,
    max_strokes,
    number_vertices;

  ssize_t
    j,
    n,
    p,
    q;

  /*
    Allocate paths.
  */
  number_vertices=primitive_info->coordinates;
  max_strokes=2*number_vertices+6*BezierQuantum+360;
  polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    number_vertices+2UL,sizeof(*polygon_primitive));
  if (polygon_primitive == (PrimitiveInfo *) NULL)
    return((PrimitiveInfo *) NULL);
  (void) memcpy(polygon_primitive,primitive_info,(size_t) number_vertices*
    sizeof(*polygon_primitive));
  closed_path=primitive_info[0].closed_subpath;
  /* For round/miter joins on a closed path, wrap the second vertex so the
     closing corner is joined like any other. */
  if (((draw_info->linejoin == RoundJoin) ||
       (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse))
    {
      polygon_primitive[number_vertices]=primitive_info[1];
      number_vertices++;
    }
  polygon_primitive[number_vertices].primitive=UndefinedPrimitive;
  /*
    Compute the slope for the first line segment, p.
  */
  dx.p=0.0;
  dy.p=0.0;
  for (n=1; n < (ssize_t) number_vertices; n++)
  {
    dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x;
    dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y;
    if ((fabs(dx.p) >= MagickEpsilon) || (fabs(dy.p) >= MagickEpsilon))
      break;
  }
  if (n == (ssize_t) number_vertices)
    {
      if ((draw_info->linecap != RoundCap) || (closed_path != MagickFalse))
        {
          /*
            Zero length subpath.
          */
          stroke_polygon=(PrimitiveInfo *) AcquireCriticalMemory(
            sizeof(*stroke_polygon));
          stroke_polygon[0]=polygon_primitive[0];
          stroke_polygon[0].coordinates=0;
          polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
            polygon_primitive);
          return(stroke_polygon);
        }
      n=(ssize_t) number_vertices-1L;
    }
  path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_p));
  if (path_p == (PointInfo *) NULL)
    {
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes,
    sizeof(*path_q));
  if (path_q == (PointInfo *) NULL)
    {
      path_p=(PointInfo *) RelinquishMagickMemory(path_p);
      polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(
        polygon_primitive);
      return((PrimitiveInfo *) NULL);
    }
  /* Near-vertical and near-horizontal segments get clamped slopes of
     +/- 1/MagickEpsilon instead of dividing by ~0. */
  slope.p=0.0;
  inverse_slope.p=0.0;
  if (fabs(dx.p) < MagickEpsilon)
    {
      if (dx.p >= 0.0)
        slope.p=dy.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
      else
        slope.p=dy.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
    }
  else
    if (fabs(dy.p) < MagickEpsilon)
      {
        if (dy.p >= 0.0)
          inverse_slope.p=dx.p < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          inverse_slope.p=dx.p < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      {
        slope.p=dy.p/dx.p;
        inverse_slope.p=(-1.0/slope.p);
      }
  /* mid is half the stroke width in device space; miterlimit is compared
     against squared distances below, hence the squared form. */
  mid=ExpandAffine(&draw_info->affine)*SaneStrokeWidth(image,draw_info)/2.0;
  miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid);
  if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse))
    (void) TraceSquareLinecap(polygon_primitive,number_vertices,mid);
  /* Perpendicular offset of half the stroke width for the first segment. */
  offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0)));
  offset.y=(double) (offset.x*inverse_slope.p);
  if ((dy.p*offset.x-dx.p*offset.y) > 0.0)
    {
      box_p[0].x=polygon_primitive[0].point.x-offset.x;
      box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p;
      box_p[1].x=polygon_primitive[n].point.x-offset.x;
      box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p;
      box_q[0].x=polygon_primitive[0].point.x+offset.x;
      box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p;
      box_q[1].x=polygon_primitive[n].point.x+offset.x;
      box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p;
    }
  else
    {
      box_p[0].x=polygon_primitive[0].point.x+offset.x;
      box_p[0].y=polygon_primitive[0].point.y+offset.y;
      box_p[1].x=polygon_primitive[n].point.x+offset.x;
      box_p[1].y=polygon_primitive[n].point.y+offset.y;
      box_q[0].x=polygon_primitive[0].point.x-offset.x;
      box_q[0].y=polygon_primitive[0].point.y-offset.y;
      box_q[1].x=polygon_primitive[n].point.x-offset.x;
      box_q[1].y=polygon_primitive[n].point.y-offset.y;
    }
  /*
    Create strokes for the line join attribute: bevel, miter, round.
  */
  p=0;
  q=0;
  /* NOTE(review): the indices look transposed (path_q[p++], path_p[q++]),
     but both p and q are zero here so the effect is identical; confirm
     against upstream before "fixing". */
  path_q[p++]=box_q[0];
  path_p[q++]=box_p[0];
  for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++)
  {
    /*
      Compute the slope for this line segment, q.
    */
    dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x;
    dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y;
    dot_product=dx.q*dx.q+dy.q*dy.q;
    /* Skip segments shorter than half a pixel (0.5^2 = 0.25). */
    if (dot_product < 0.25)
      continue;
    slope.q=0.0;
    inverse_slope.q=0.0;
    if (fabs(dx.q) < MagickEpsilon)
      {
        if (dx.q >= 0.0)
          slope.q=dy.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
        else
          slope.q=dy.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
      }
    else
      if (fabs(dy.q) < MagickEpsilon)
        {
          if (dy.q >= 0.0)
            inverse_slope.q=dx.q < 0.0 ? -1.0/MagickEpsilon : 1.0/MagickEpsilon;
          else
            inverse_slope.q=dx.q < 0.0 ? 1.0/MagickEpsilon : -1.0/MagickEpsilon;
        }
      else
        {
          slope.q=dy.q/dx.q;
          inverse_slope.q=(-1.0/slope.q);
        }
    offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0)));
    offset.y=(double) (offset.x*inverse_slope.q);
    dot_product=dy.q*offset.x-dx.q*offset.y;
    /* Offset corners of the current segment on both sides of the stroke. */
    if (dot_product > 0.0)
      {
        box_p[2].x=polygon_primitive[n].point.x-offset.x;
        box_p[2].y=polygon_primitive[n].point.y-offset.y;
        box_p[3].x=polygon_primitive[i].point.x-offset.x;
        box_p[3].y=polygon_primitive[i].point.y-offset.y;
        box_q[2].x=polygon_primitive[n].point.x+offset.x;
        box_q[2].y=polygon_primitive[n].point.y+offset.y;
        box_q[3].x=polygon_primitive[i].point.x+offset.x;
        box_q[3].y=polygon_primitive[i].point.y+offset.y;
      }
    else
      {
        box_p[2].x=polygon_primitive[n].point.x+offset.x;
        box_p[2].y=polygon_primitive[n].point.y+offset.y;
        box_p[3].x=polygon_primitive[i].point.x+offset.x;
        box_p[3].y=polygon_primitive[i].point.y+offset.y;
        box_q[2].x=polygon_primitive[n].point.x-offset.x;
        box_q[2].y=polygon_primitive[n].point.y-offset.y;
        box_q[3].x=polygon_primitive[i].point.x-offset.x;
        box_q[3].y=polygon_primitive[i].point.y-offset.y;
      }
    /* box_[pq][4] is the intersection of the two offset lines (the miter
       point); parallel segments reuse the previous end point. */
    if (fabs((double) (slope.p-slope.q)) < MagickEpsilon)
      {
        box_p[4]=box_p[1];
        box_q[4]=box_q[1];
      }
    else
      {
        box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+
          box_p[3].y)/(slope.p-slope.q));
        box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y);
        box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+
          box_q[3].y)/(slope.p-slope.q));
        box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y);
      }
    CheckPathExtent(6*BezierQuantum+360);
    /* Sign of the cross product selects which side is the outer corner. */
    dot_product=dx.q*dy.p-dx.p*dy.q;
    if (dot_product <= 0.0)
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_q[q++]=box_q[1];
          path_q[q++]=box_q[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case MiterJoin:
        {
          /* Fall back to a bevel when the miter would exceed the limit. */
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_p[p++]=box_p[4];
          else
            {
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          /* Approximate the round join with a fan of short chords. */
          center=polygon_primitive[n].point;
          theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x);
          theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x);
          if (theta.q < theta.p)
            theta.q+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_q[q].x=box_q[1].x;
          path_q[q].y=box_q[1].y;
          q++;
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_q[q].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_q[q].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            q++;
          }
          path_q[q++]=box_q[2];
          break;
        }
        default:
          break;
      }
    else
      /* Mirror image of the switch above for a turn in the other
         direction: the roles of path_p and path_q swap. */
      switch (draw_info->linejoin)
      {
        case BevelJoin:
        {
          path_p[p++]=box_p[1];
          path_p[p++]=box_p[2];
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          break;
        }
        case MiterJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            {
              path_q[q++]=box_q[4];
              path_p[p++]=box_p[4];
            }
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
              path_p[p++]=box_p[1];
              path_p[p++]=box_p[2];
            }
          break;
        }
        case RoundJoin:
        {
          dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+
            (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y);
          if (dot_product <= miterlimit)
            path_q[q++]=box_q[4];
          else
            {
              path_q[q++]=box_q[1];
              path_q[q++]=box_q[2];
            }
          center=polygon_primitive[n].point;
          theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x);
          theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x);
          if (theta.p < theta.q)
            theta.p+=2.0*MagickPI;
          arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/
            (2.0*sqrt((double) (1.0/mid)))));
          CheckPathExtent(arc_segments+6*BezierQuantum+360);
          path_p[p++]=box_p[1];
          for (j=1; j < (ssize_t) arc_segments; j++)
          {
            delta_theta=(double) (j*(theta.q-theta.p)/arc_segments);
            path_p[p].x=(double) (center.x+mid*cos(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            path_p[p].y=(double) (center.y+mid*sin(fmod((double)
              (theta.p+delta_theta),DegreesToRadians(360.0))));
            p++;
          }
          path_p[p++]=box_p[2];
          break;
        }
        default:
          break;
      }
    /* Shift the "current" segment into the "previous" slot. */
    slope.p=slope.q;
    inverse_slope.p=inverse_slope.q;
    box_p[0]=box_p[2];
    box_p[1]=box_p[3];
    box_q[0]=box_q[2];
    box_q[1]=box_q[3];
    dx.p=dx.q;
    dy.p=dy.q;
    n=i;
  }
  path_p[p++]=box_p[1];
  path_q[q++]=box_q[1];
  /*
    Trace stroked polygon.
  */
  stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon));
  if (stroke_polygon != (PrimitiveInfo *) NULL)
    {
      /* path_p forward, then path_q reversed, so the two offset paths form
         one continuous closed outline. */
      for (i=0; i < (ssize_t) p; i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_p[i];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[0].point;
          i++;
        }
      for ( ; i < (ssize_t) (p+q+closed_path); i++)
      {
        stroke_polygon[i]=polygon_primitive[0];
        stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)];
      }
      if (closed_path != MagickFalse)
        {
          stroke_polygon[i]=polygon_primitive[0];
          stroke_polygon[i].point=stroke_polygon[p+closed_path].point;
          i++;
        }
      stroke_polygon[i]=polygon_primitive[0];
      stroke_polygon[i].point=stroke_polygon[0].point;
      i++;
      stroke_polygon[i].primitive=UndefinedPrimitive;
      stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1);
    }
  path_p=(PointInfo *) RelinquishMagickMemory(path_p);
  path_q=(PointInfo *) RelinquishMagickMemory(path_q);
  polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive);
  return(stroke_polygon);
}
/* omp_for_collapse_mini.c */
// RUN: %libomp-compile-and-run #include <stdio.h> #include <math.h> #include "omp_testsuite.h" /* Utility function to check that i is increasing monotonically with each call */ static int check_i_islarger (int i) { static int last_i; int islarger; if (i==1) last_i=0; islarger = ((i >= last_i)&&(i - last_i<=1)); last_i = i; return (islarger); } int test_omp_for_collapse() { int is_larger = 1; #pragma omp parallel { int i,j; int my_islarger = 1; #pragma omp for private(i,j) schedule(static,1) collapse(2) ordered for (i = 1; i < 5; i++) { for (j =1; j < 5; j++) { #pragma omp ordered my_islarger = check_i_islarger(i)&&my_islarger; } } #pragma omp critical is_larger = is_larger && my_islarger; } return (is_larger); } int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_for_collapse()) { num_failed++; } } return num_failed; }
/* linalg.h */
/* Software SPAMS v2.1 - Copyright 2009-2011 Julien Mairal
 *
 * This file is part of SPAMS.
 *
 * SPAMS is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * SPAMS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with SPAMS.  If not, see <http://www.gnu.org/licenses/>.
 */

/* \file
 *                toolbox Linalg
 *
 *                by Julien Mairal
 *                julien.mairal@inria.fr
 *
 *                File linalg.h
 * \brief Contains Matrix, Vector classes */

#ifndef LINALG_H
#define LINALG_H

#include "misc.h"
#ifdef USE_BLAS_LIB
#include "cblas_alt_template.h"
#else
#include "cblas_template.h" // this is obsolete
#endif
#include <fstream>
#ifdef WINDOWS
#include <string>
#else
#include <cstring>
#endif
#include <list>
#include <vector>

/// Index type: ptrdiff_t for newer MATLAB (64-bit indices), int otherwise.
#ifdef NEW_MATLAB
typedef ptrdiff_t INTT;
#else
typedef int INTT;
#endif

#include <utils.h>

/// Guard against max/min macros (e.g. from windows.h) shadowing std versions.
#undef max
#undef min

/// Dense Matrix class
template<typename T> class Matrix;
/// Sparse Matrix class
template<typename T> class SpMatrix;
/// Dense Vector class
template<typename T> class Vector;
/// Sparse Vector class
template<typename T> class SpVector;

/// A group is a list of indices; collections of groups come as list/vector.
typedef std::list< int > group;
typedef std::list< group > list_groups;
typedef std::vector< group > vector_groups;

/// true when |lambda| is below the 1e-99 zero tolerance
template <typename T>
static inline bool isZero(const T lambda) {
   return static_cast<double>(abs<T>(lambda)) < 1e-99;
}

/// true when lambda1 and lambda2 differ by less than the 1e-99 tolerance
template <typename T>
static inline bool isEqual(const T lambda1, const T lambda2) {
   return static_cast<double>(abs<T>(lambda1-lambda2)) < 1e-99;
}

/// soft-thresholding (shrinkage) operator: shrink x towards 0 by lambda
template <typename T>
static inline T softThrs(const T x, const T lambda) {
   if (x > lambda) {
      return x-lambda;
   } else if (x < -lambda) {
      return x+lambda;
   } else {
      return 0;
   }
};

/// hard-thresholding operator: zero x unless |x| exceeds lambda
template <typename T>
static inline T hardThrs(const T x, const T lambda) {
   return (x > lambda || x < -lambda) ? x : 0;
};

/// natural logarithm dispatched to log/logf by element type
template <typename T>
static inline T alt_log(const T x);
template <> inline double alt_log<double>(const double x) { return log(x); };
template <> inline float alt_log<float>(const float x) { return logf(x); };

/// x*log(x) with the convention 0*log(0) = 0; INFINITY for x < -1e-20
template <typename T>
static inline T xlogx(const T x) {
   if (x < -1e-20) {
      return INFINITY;
   } else if (x < 1e-20) {
      return 0;
   } else {
      return x*alt_log<T>(x);
   }
}

/// numerically stable log(1+exp(x)): clamps the underflow/overflow tails
template <typename T>
static inline T logexp(const T x) {
   if (x < -30) {
      return 0;
   } else if (x < 30) {
      return alt_log<T>( T(1.0) + exp_alt<T>( x ) );
   } else {
      return x;
   }
}

/// Data class, abstract class, useful in the class image.
template <typename T> class Data {
   public:
      virtual void getData(Vector<T>& data, const int i) const = 0;
      virtual void getGroup(Matrix<T>& data, const vector_groups& groups,
            const int i) const = 0;
      virtual inline T operator[](const int index) const = 0;
      virtual int n() const = 0;
      virtual int m() const = 0;
      virtual int V() const = 0;
      virtual void norm_2sq_cols(Vector<T>& norms) const { };
      virtual ~Data() { };
};

/// Abstract matrix class
template <typename T> class AbstractMatrixB {
   public:
      virtual int n() const = 0;
      virtual int m() const = 0;

      /// b <- alpha A'x + beta b
      virtual void multTrans(const Vector<T>& x, Vector<T>& b,
            const T alpha = 1.0, const T beta = 0.0) const = 0;

      /// perform b = alpha*A*x + beta*b, when x is sparse
      virtual void mult(const SpVector<T>& x, Vector<T>& b,
            const T alpha = 1.0, const T beta = 0.0) const = 0;

      virtual void mult(const Vector<T>& x, Vector<T>& b,
            const T alpha = 1.0, const T beta = 0.0) const = 0;

      /// perform C = a*A*B + b*C, possibly transposing A or B.
virtual void mult(const Matrix<T>& B, Matrix<T>& C,
            const bool transA = false, const bool transB = false,
            const T a = 1.0, const T b = 0.0) const = 0;

      /// perform C = a*A*B + b*C, with B sparse, possibly transposing A or B.
      virtual void mult(const SpMatrix<T>& B, Matrix<T>& C,
            const bool transA = false, const bool transB = false,
            const T a = 1.0, const T b = 0.0) const = 0;

      /// perform C = a*B*A + b*C, possibly transposing A or B.
      virtual void multSwitch(const Matrix<T>& B, Matrix<T>& C,
            const bool transA = false, const bool transB = false,
            const T a = 1.0, const T b = 0.0) const = 0;

      /// XtX = A'*A
      virtual void XtX(Matrix<T>& XtX) const = 0;

      virtual void copyRow(const int i, Vector<T>& x) const = 0;

      virtual void copyTo(Matrix<T>& copy) const = 0;

      virtual T dot(const Matrix<T>& x) const = 0;

      virtual void print(const string& name) const = 0;

      virtual ~AbstractMatrixB() { };
};

/// Abstract matrix class
template <typename T> class AbstractMatrix {
   public:
      virtual int n() const = 0;
      virtual int m() const = 0;
      /// copy X(:,i) into Xi
      virtual void copyCol(const int i, Vector<T>& Xi) const = 0;
      /// compute X(:,i) <- X(:,i)+a*col;
      virtual void add_rawCol(const int i, T* col, const T a) const = 0;
      /// copy X(:,i) into the raw buffer Xi
      virtual void extract_rawCol(const int i,T* Xi) const = 0;
      /// extract diagonal
      virtual void diag(Vector<T>& diag) const = 0;
      /// extract X(index1,index2)
      virtual inline T operator()(const int index1, const int index2) const = 0;
      virtual ~AbstractMatrix() { };
};

/// Class Matrix: dense column-major matrix implementing the abstract
/// interfaces above.  Declarations only; definitions follow later in
/// this header.
template<typename T> class Matrix : public Data<T>, public AbstractMatrix<T>,
      public AbstractMatrixB<T> {
   friend class SpMatrix<T>;
   public:

   /// Constructor with existing data X of an m x n matrix
   Matrix(T* X, int m, int n);
   /// Constructor for a new m x n matrix
   Matrix(int m, int n);
   /// Empty constructor
   Matrix();

   /// Destructor
   virtual ~Matrix();

   /// Accessors
   /// Number of rows
   inline int m() const { return _m; };
   /// Number of columns
   inline int n() const { return _n; };
   /// Return a modifiable reference to X(i,j)
   inline T& operator()(const int i, const int j);
   /// Return the value X(i,j)
   inline T operator()(const int i, const int j) const;
   /// Return a modifiable reference to X(i) (1D indexing)
   inline T& operator[](const int index) { return _X[index]; };
   /// Return the value X(i) (1D indexing)
   inline T operator[](const int index) const { return _X[index]; };
   /// Copy the column i into x
   inline void copyCol(const int i, Vector<T>& x) const;
   /// Copy the row i into x
   inline void copyRow(const int i, Vector<T>& x) const;
   /// Copy the column i into the raw buffer x
   inline void extract_rawCol(const int i, T* x) const;
   /// Add a*DtXi to the column i
   virtual void add_rawCol(const int i, T* DtXi, const T a) const;
   /// Copy the column i into data
   inline void getData(Vector<T>& data, const int i) const;
   /// extract the group i
   virtual void getGroup(Matrix<T>& data, const vector_groups& groups,
         const int i) const;
   /// Reference the column i into the vector x
   inline void refCol(int i, Vector<T>& x) const;
   /// Reference the column i to i+n into the Matrix mat
   inline void refSubMat(int i, int n, Matrix<T>& mat) const;
   /// extract a sub-matrix of a symmetric matrix
   inline void subMatrixSym(const Vector<int>& indices,
         Matrix<T>& subMatrix) const;
   /// reference a modifiable reference to the data, DANGEROUS
   inline T* rawX() const { return _X; };
   /// return a non-modifiable reference to the data
   inline const T* X() const { return _X; };
   /// make a copy of the matrix mat in the current matrix
   inline void copy(const Matrix<T>& mat);
   /// copy the current matrix into mat
   inline void copyTo(Matrix<T>& mat) const { mat.copy(*this); };
   /// reference the data of the matrix mat (no deep copy)
   inline void copyRef(const Matrix<T>& mat);

   /// Debugging function
   /// Print the matrix to std::cout
   inline void print(const string& name) const;

   /// Modifiers
   /// clean a dictionary matrix
   inline void clean();
   /// Resize the matrix
   inline void resize(int m, int n);
   /// Change the data in the matrix
   inline void setData(T* X, int m, int n);
   /// modify _m
   inline void setm(const int m) { _m = m; }; //DANGEROUS
   /// modify _n
   inline void setn(const int n) { _n = n; }; //DANGEROUS
   /// Set all the values to zero
   inline void setZeros();
   /// Set all the values to a scalar
   inline void set(const T a);
   /// Clear the matrix
   inline void clear();
   /// Put white Gaussian noise in the matrix
   inline void setAleat();
   /// set the matrix to the identity;
   inline void eye();
   /// Normalize all columns to unit l2 norm
   inline void normalize();
   /// Normalize all columns which l2 norm is greater than one.
   inline void normalize2();
   /// center the columns of the matrix
   inline void center();
   /// center the rows of the matrix
   inline void center_rows();
   /// center the columns of the matrix and keep the center values
   inline void center(Vector<T>& centers);
   /// scale the matrix by a
   inline void scal(const T a);
   /// make the matrix symmetric by copying the upper-right part
   /// into the lower-left part
   inline void fillSymmetric();
   inline void fillSymmetric2();
   /// change artificially the size of the matrix, DANGEROUS
   inline void fakeSize(const int m, const int n) { _n = n; _m=m;};
   /// whiten
   inline void whiten(const int V);
   /// whiten
   inline void whiten(Vector<T>& mean, const bool pattern = false);
   /// whiten
   inline void whiten(Vector<T>& mean, const Vector<T>& mask);
   /// unwhiten (inverse of whiten)
   inline void unwhiten(Vector<T>& mean, const bool pattern = false);
   /// sum of the matrix columns into sum -- TODO confirm orientation
   inline void sum_cols(Vector<T>& sum) const;

   /// Analysis functions
   /// Check whether the columns of the matrix are normalized or not
   inline bool isNormalized() const;
   /// return the 1D-index of the value of greatest magnitude
   inline int fmax() const;
   /// return the value of greatest magnitude
   inline T fmaxval() const;
   /// return the 1D-index of the value of lowest magnitude
   inline int fmin() const;

   // Algebraic operations
   /// Transpose the current matrix and put the result in the matrix
   /// trans
   inline void transpose(Matrix<T>& trans);
   /// A <- -A
   inline void neg();
   /// add one to the diagonal
   inline void incrDiag();
   inline void addDiag(const Vector<T>& diag);
   inline void addDiag(const T diag);
   inline void addToCols(const Vector<T>& diag);
   inline void addVecToCols(const Vector<T>& diag, const T a = 1.0);
   /// perform a rank one approximation uv' using the power method
   /// u0 is an initial guess for u (can be empty).
   inline void svdRankOne(const Vector<T>& u0, Vector<T>& u,
         Vector<T>& v) const;
   inline void singularValues(Vector<T>& u) const;
   inline void svd(Matrix<T>& U, Vector<T>& S, Matrix<T>&V) const;
   /// find the eigenvector corresponding to the largest eigenvalue
   /// when the current matrix is symmetric. u0 is the initial guess.
   /// using two iterations of the power method
   inline void eigLargestSymApprox(const Vector<T>& u0, Vector<T>& u) const;
   /// find the eigenvector corresponding to the eigenvalue with the
   /// largest magnitude when the current matrix is symmetric,
   /// using the power method. It
   /// returns the eigenvalue. u0 is an initial guess for the
   /// eigenvector.
   inline T eigLargestMagnSym(const Vector<T>& u0, Vector<T>& u) const;
   /// returns the value of the eigenvalue with the largest magnitude
   /// using the power iteration.
   inline T eigLargestMagnSym() const;
   /// inverse the matrix when it is symmetric
   inline void invSym();
   /// perform b = alpha*A'x + beta*b
   inline void multTrans(const Vector<T>& x, Vector<T>& b,
         const T alpha = 1.0, const T beta = 0.0) const;
   /// perform b = alpha*A'x + beta*b, restricted to the active entries
   inline void multTrans(const Vector<T>& x, Vector<T>& b,
         const Vector<bool>& active) const;
   /// perform b = A'x, when x is sparse
   inline void multTrans(const SpVector<T>& x, Vector<T>& b,
         const T alpha =1.0, const T beta = 0.0) const;
   /// perform b = alpha*A*x+beta*b
   inline void mult(const Vector<T>& x, Vector<T>& b,
         const T alpha = 1.0, const T beta = 0.0) const;
   /// perform b = alpha*A*x + beta*b, when x is sparse
   inline void mult(const SpVector<T>& x, Vector<T>& b,
         const T alpha = 1.0, const T beta = 0.0) const;
   /// perform C = a*A*B + b*C, possibly transposing A or B.
   inline void mult(const Matrix<T>& B, Matrix<T>& C,
         const bool transA = false, const bool transB = false,
         const T a = 1.0, const T b = 0.0) const;
   /// perform C = a*B*A + b*C, possibly transposing A or B.
inline void multSwitch(const Matrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const; /// perform C = A*B, when B is sparse inline void mult(const SpMatrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const; /// mult by a diagonal matrix on the left inline void multDiagLeft(const Vector<T>& diag); /// mult by a diagonal matrix on the right inline void multDiagRight(const Vector<T>& diag); /// C = A .* B, elementwise multiplication inline void mult_elementWise(const Matrix<T>& B, Matrix<T>& C) const; inline void div_elementWise(const Matrix<T>& B, Matrix<T>& C) const; /// XtX = A'*A inline void XtX(Matrix<T>& XtX) const; /// XXt = A*A' inline void XXt(Matrix<T>& XXt) const; /// XXt = A*A' where A is an upper triangular matrix inline void upperTriXXt(Matrix<T>& XXt, const int L) const; /// extract the diagonal inline void diag(Vector<T>& d) const; /// set the diagonal inline void setDiag(const Vector<T>& d); /// set the diagonal inline void setDiag(const T val); /// each element of the matrix is replaced by its exponential inline void exp(); /// each element of the matrix is replaced by its square root inline void Sqrt(); inline void Invsqrt(); /// return vec1'*A*vec2, where vec2 is sparse inline T quad(const Vector<T>& vec1, const SpVector<T>& vec2) const; /// return vec1'*A*vec2, where vec2 is sparse inline void quad_mult(const Vector<T>& vec1, const SpVector<T>& vec2, Vector<T>& y, const T a = 1.0, const T b = 0.0) const; /// return vec'*A*vec when vec is sparse inline T quad(const SpVector<T>& vec) const; /// add alpha*mat to the current matrix inline void add(const Matrix<T>& mat, const T alpha = 1.0); /// add alpha to the current matrix inline void add(const T alpha); /// add alpha*mat to the current matrix inline T dot(const Matrix<T>& mat) const; /// substract the matrix mat to the current matrix inline void sub(const Matrix<T>& 
mat); /// inverse the elements of the matrix inline void inv_elem(); /// inverse the elements of the matrix inline void inv() { this->inv_elem(); }; /// return the trace of the matrix inline T trace() const; /// compute the sum of the magnitude of the matrix values inline T asum() const; /// return ||A||_F inline T normF() const; /// whiten inline T mean() const; /// return ||A||_F^2 inline T normFsq() const; /// return ||A||_F^2 inline T nrm2sq() const { return this->normFsq(); }; /// return ||At||_{inf,2} (max of l2 norm of the columns) inline T norm_inf_2_col() const; /// return ||At||_{1,2} (max of l2 norm of the columns) inline T norm_1_2_col() const; /// returns the l2 norms of the columns inline void norm_2_cols(Vector<T>& norms) const; /// returns the l2 norms of the columns inline void norm_2_rows(Vector<T>& norms) const; /// returns the linf norms of the columns inline void norm_inf_cols(Vector<T>& norms) const; /// returns the linf norms of the columns inline void norm_inf_rows(Vector<T>& norms) const; /// returns the linf norms of the columns inline void norm_l1_rows(Vector<T>& norms) const; /// returns the l2 norms ^2 of the columns inline void norm_2sq_cols(Vector<T>& norms) const; /// returns the l2 norms of the columns inline void norm_2sq_rows(Vector<T>& norms) const; inline void thrsmax(const T nu); inline void thrsmin(const T nu); inline void thrsabsmin(const T nu); /// perform soft-thresholding of the matrix, with the threshold nu inline void softThrshold(const T nu); inline void hardThrshold(const T nu); /// perform soft-thresholding of the matrix, with the threshold nu inline void thrsPos(); /// perform A <- A + alpha*vec1*vec2' inline void rank1Update(const Vector<T>& vec1, const Vector<T>& vec2, const T alpha = 1.0); /// perform A <- A + alpha*vec1*vec2', when vec1 is sparse inline void rank1Update(const SpVector<T>& vec1, const Vector<T>& vec2, const T alpha = 1.0); /// perform A <- A + alpha*vec1*vec2', when vec2 is sparse inline void 
rank1Update(const Vector<T>& vec1, const SpVector<T>& vec2, const T alpha = 1.0); inline void rank1Update_mult(const Vector<T>& vec1, const Vector<T>& vec1b, const SpVector<T>& vec2, const T alpha = 1.0); /// perform A <- A + alpha*vec*vec', when vec2 is sparse inline void rank1Update(const SpVector<T>& vec, const T alpha = 1.0); /// perform A <- A + alpha*vec*vec', when vec2 is sparse inline void rank1Update(const SpVector<T>& vec, const SpVector<T>& vec2, const T alpha = 1.0); /// Compute the mean of the columns inline void meanCol(Vector<T>& mean) const; /// Compute the mean of the rows inline void meanRow(Vector<T>& mean) const; /// fill the matrix with the row given inline void fillRow(const Vector<T>& row); /// fill the matrix with the row given inline void extractRow(const int i, Vector<T>& row) const; inline void setRow(const int i, const Vector<T>& row); inline void addRow(const int i, const Vector<T>& row, const T a=1.0); /// compute x, such that b = Ax, WARNING this function needs to be u /// updated inline void conjugateGradient(const Vector<T>& b, Vector<T>& x, const T tol = 1e-4, const int = 4) const; /// compute x, such that b = Ax, WARNING this function needs to be u /// updated, the temporary vectors are given. 
inline void drop(char* fileName) const;
      /// compute a Nadaraya Watson estimator
      inline void NadarayaWatson(const Vector<int>& ind, const T sigma);
      /// perform thresholding of the matrix by blocks of size sizeGroup
      inline void blockThrshold(const T nu, const int sizeGroup);
      /// performs sparse projections of the columns
      inline void sparseProject(Matrix<T>& out,
            const T thrs, const int mode = 1, const T lambda1 = 0,
            const T lambda2 = 0, const T lambda3 = 0, const bool pos = false,
            const int numThreads=-1);
      inline void transformFilter();

      /// Conversion
      /// make a sparse copy of the current matrix
      inline void toSparse(SpMatrix<T>& matrix) const;
      /// make a sparse copy of the transpose of the current matrix
      inline void toSparseTrans(SpMatrix<T>& matrixTrans);
      /// make a reference of the matrix to a vector vec
      inline void toVect(Vector<T>& vec) const;
      /// Accessor: number of views/channels (always 1 for Matrix)
      inline int V() const { return 1;};
      /// merge two dictionaries
      inline void merge(const Matrix<T>& B, Matrix<T>& C) const;
      /// extract the rows of a matrix corresponding to a binary mask
      inline void copyMask(Matrix<T>& out, Vector<bool>& mask) const;

   protected:
      /// Forbid lazy copies
      explicit Matrix<T>(const Matrix<T>& matrix);
      /// Forbid lazy copies
      Matrix<T>& operator=(const Matrix<T>& matrix);

      /// is the data allocation external or not
      bool _externAlloc;
      /// pointer to the data
      T* _X;
      /// number of rows
      int _m;
      /// number of columns
      int _n;
};

/// Class for dense vector
template<typename T> class Vector {
   friend class SpMatrix<T>;
   friend class Matrix<T>;
   friend class SpVector<T>;
   public:
   /// Empty constructor
   Vector();
   /// Constructor. Create a new vector of size n
   Vector(int n);
   /// Constructor with existing data
   Vector(T* X, int n);
   /// Copy constructor
   explicit Vector<T>(const Vector<T>& vec);
   /// Destructor
   virtual ~Vector();

   /// Accessors
   /// Print the vector to std::cout
   inline void print(const char* name) const;
   /// returns the index of the largest value
   inline int max() const;
   /// returns the index of the minimum value
   inline int min() const;
   /// returns the maximum value
   inline T maxval() const;
   /// returns the minimum value
   inline T minval() const;
   /// returns the index of the value with largest magnitude
   inline int fmax() const;
   /// returns the index of the value with smallest magnitude
   inline int fmin() const;
   /// returns the maximum magnitude
   inline T fmaxval() const;
   /// returns the minimum magnitude
   inline T fminval() const;
   /// returns a reference to X[index]
   inline T& operator[](const int index);
   /// returns X[index]
   inline T operator[](const int index) const;
   /// make a copy of x
   inline void copy(const Vector<T>& x);
   /// returns the size of the vector
   inline int n() const { return _n; };
   /// returns a modifiable pointer to the data, DANGEROUS
   inline T* rawX() const { return _X; };
   /// change artificially the size of the vector, DANGEROUS
   inline void fakeSize(const int n) { _n = n; };
   /// generate logarithmically spaced values
   inline void logspace(const int n, const T a, const T b);
   /// number of non-zero entries
   inline int nnz() const;

   /// Modifiers
   /// Set all values to zero
   inline void setZeros();
   /// resize the vector
   inline void resize(const int n);
   /// change the data of the vector (references an external buffer)
   inline void setPointer(T* X, const int n);
   inline void setData(T* X, const int n) { this->setPointer(X,n); };
   /// put a random permutation of size n (for integral vectors)
   inline void randperm(int n);
   /// put random values in the vector (White Gaussian Noise)
   inline void setAleat();
   /// clear the vector
   inline void clear();
   /// performs soft-thresholding of the vector
   inline void softThrshold(const T nu);
   /// performs hard-thresholding of the vector
   inline void hardThrshold(const T nu);
   /// thresholding operations; exact semantics per implementation
   /// (original comments were copy-pasted)
   inline void thrsmax(const T nu);
   inline void thrsmin(const T nu);
   inline void thrsabsmin(const T nu);
   inline void thrshold(const T nu);
   /// keep only the positive part of the vector
   inline void thrsPos();
   /// set each value of the vector to val
   inline void set(const T val);
   inline void setn(const int n) { _n = n; }; //DANGEROUS
   inline bool alltrue() const;
   inline bool allfalse() const;

   /// Algebraic operations
   /// returns ||A||_2
   inline T nrm2() const;
   /// returns ||A||_2^2
   inline T nrm2sq() const;
   /// returns A'x
   inline T dot(const Vector<T>& x) const;
   /// returns A'x, when x is sparse
   inline T dot(const SpVector<T>& x) const;
   /// A <- A + a*x
   inline void add(const Vector<T>& x, const T a = 1.0);
   /// A <- A + a*x
   inline void add(const SpVector<T>& x, const T a = 1.0);
   /// adds a to each value in the vector
   inline void add(const T a);
   /// A <- A - x
   inline void sub(const Vector<T>& x);
   /// A <- A - x, when x is sparse
   inline void sub(const SpVector<T>& x);
   /// A <- A ./ x
   inline void div(const Vector<T>& x);
   /// A <- x ./ y
   inline void div(const Vector<T>& x, const Vector<T>& y);
   /// A <- x .^ 2
   inline void sqr(const Vector<T>& x);
   /// A <- sqrt(x)
   inline void Sqrt(const Vector<T>& x);
   /// A <- sqrt(A)
   inline void Sqrt();
   /// A <- 1 ./ sqrt(x)
   inline void Invsqrt(const Vector<T>& x);
   /// A <- 1 ./ sqrt(A)
   inline void Invsqrt();
   /// A <- 1./x
   inline void inv(const Vector<T>& x);
   /// A <- 1./A
   inline void inv();
   /// A <- x .* y
   inline void mult(const Vector<T>& x, const Vector<T>& y);
   /// C <- A .* B
   inline void mult_elementWise(const Vector<T>& B, Vector<T>& C) const { C.mult(*this,B); };
   /// normalize the vector to unit l2 norm
   inline void normalize();
   /// scale to the l2 ball (only when the norm exceeds one,
   /// cf. Matrix<T>::normalize2)
   inline void normalize2();
   /// remove the mean and return it (whitening helpers)
   inline void whiten(Vector<T>& mean, const bool pattern = false);
   inline void whiten(Vector<T>& mean, const Vector<T>& mask);
   inline void whiten(const int V);
   /// return the mean of the values
   inline T mean();
   /// return the standard deviation of the values
   inline T std();
   /// compute the Kullback-Leibler divergence
   inline T KL(const Vector<T>& X);
   /// add the mean back (inverse of whiten)
   inline void unwhiten(Vector<T>& mean, const bool pattern = false);
   /// scale the vector by a
   inline void scal(const T a);
   /// A <- -A
   inline void neg();
   /// replace each value by its exponential
   inline void exp();
   /// replace each value by its logarithm
   inline void log();
   /// log-exp transform; NOTE(review): original comment was copy-pasted
   /// ("exponential"); confirm exact semantics against the implementation
   inline void logexp();
   /// soft-max with respect to the index y
   inline T softmax(const int y);
   /// computes the sum of the magnitudes of the vector
   inline T asum() const;
   /// number of non-zero values (l0 pseudo-norm)
   inline T lzero() const;
   /// compute the sum of the differences
   inline T afused() const;
   /// returns the sum of the vector
   inline T sum() const;
   /// puts in signs, the sign of each point in the vector
   inline void sign(Vector<T>& signs) const;
   /// projects the vector onto the l1 ball of radius thrs,
   /// returns true if the returned vector is null
   inline void l1project(Vector<T>& out, const T thrs,
         const bool simplex = false) const;
   inline void l1project_weighted(Vector<T>& out, const Vector<T>& weights,
         const T thrs, const bool residual = false) const;
   inline void l1l2projectb(Vector<T>& out, const T thrs, const T gamma,
         const bool pos = false, const int mode = 1);
   inline void sparseProject(Vector<T>& out,
         const T thrs, const int mode = 1, const T lambda1 = 0,
         const T lambda2 = 0, const T lambda3 = 0, const bool pos = false);
   inline void project_sft(const Vector<int>& labels, const int clas);
   inline void project_sft_binary(const Vector<T>& labels);
   /// projects the vector onto the elastic-net (l1l2) ball,
   /// returns true if the returned vector is null
   inline void l1l2project(Vector<T>& out, const T thrs, const T gamma,
         const bool pos = false) const;
   inline void fusedProject(Vector<T>& out, const T lambda1,
         const T lambda2, const int itermax);
   inline void fusedProjectHomotopy(Vector<T>& out,
const T lambda1,const T lambda2,const T lambda3 = 0,
         const bool penalty = true);

   /// sort the vector into out (mode selects the ordering)
   inline void sort(Vector<T>& out, const bool mode) const;
   /// sort the vector in place
   inline void sort(const bool mode);
   /// sort the vector into out, returning the permutation in key
   inline void sort2(Vector<T>& out, Vector<int>& key, const bool mode) const;
   /// sort the vector in place, returning the permutation in key
   inline void sort2(Vector<int>& key, const bool mode);
   /// apply a Bayer pattern transform with the given offset
   inline void applyBayerPattern(const int offset);

   /// Conversion
   /// make a sparse copy
   inline void toSparse(SpVector<T>& vec) const;
   /// extract the entries of the vector corresponding to a binary mask
   inline void copyMask(Vector<T>& out, Vector<bool>& mask) const;

   private:
   /// = operator, forbidden
   Vector<T>& operator=(const Vector<T>& vec);

   /// if the data has been externally allocated
   bool _externAlloc;
   /// data
   T* _X;
   /// size of the vector
   int _n;
};

/// Sparse Matrix class, CSC format
template<typename T> class SpMatrix : public Data<T>, public AbstractMatrixB<T> {
   friend class Matrix<T>;
   friend class SpVector<T>;
   public:
   /// Constructor, CSC format, existing data
   SpMatrix(T* v, int* r, int* pB, int* pE, int m, int n, int nzmax);
   /// Constructor, new m x n matrix, with at most nzmax non-zeros values
   SpMatrix(int m, int n, int nzmax);
   /// Empty constructor
   SpMatrix();
   /// Destructor
   ~SpMatrix();

   /// Accessors
   /// reference the column i into vec (no copy)
   inline void refCol(int i, SpVector<T>& vec) const;
   /// returns pB[i]
   inline int pB(const int i) const { return _pB[i]; };
   /// returns r[i]
   inline int r(const int i) const { return _r[i]; };
   /// returns v[i]
   inline T v(const int i) const { return _v[i]; };
   /// returns the maximum number of non-zero elements
   inline int nzmax() const { return _nzmax; };
   /// returns the number of columns
   inline int n() const { return _n; };
   /// returns the number of rows
   inline int m() const { return _m; };
   /// returns the number of views/channels (always 1)
   inline int V() const { return 1; };
   /// returns X[index]
   inline T operator[](const int index) const;
   /// copy the column index into data (Data<T> interface)
   void getData(Vector<T>& data, const int index) const;
   /// extract the group i
   void getGroup(Matrix<T>& data, const vector_groups& groups,
         const int i) const ;
   /// print the sparse matrix
   inline void print(const string& name) const;
   /// compute the sum of the magnitude of the matrix elements
   inline T asum() const;
   /// return ||A||_F^2
   inline T normFsq() const;
   /// Direct access to _pB
   inline int* pB() const { return _pB; };
   /// Direct access to _pE
   inline int* pE() const { return _pE; };
   /// Direct access to _r
   inline int* r() const { return _r; };
   /// Direct access to _v
   inline T* v() const { return _v; };
   /// number of nonzeros elements
   inline int nnz() const { return _pB[_n]; };
   /// A <- A + a*mat, assuming identical sparsity patterns
   inline void add_direct(const SpMatrix<T>& mat, const T a);
   /// copy mat, assuming identical sparsity patterns
   inline void copy_direct(const SpMatrix<T>& mat);
   /// inner product, assuming identical sparsity patterns
   inline T dot_direct(const SpMatrix<T>& mat) const;

   /// Modifiers
   /// clear the matrix
   inline void clear();
   /// resize the matrix
   inline void resize(const int m, const int n, const int nzmax);
   /// scale the matrix by a
   inline void scal(const T a) const;

   /// Algebraic operations
   /// aat <- A*A'
   inline void AAt(Matrix<T>& aat) const;
   /// aat <- A(:,indices)*A(:,indices)'
   inline void AAt(Matrix<T>& aat, const Vector<int>& indices) const;
   /// aat <- sum_i w_i A(:,i)*A(:,i)'
   inline void wAAt(const Vector<T>& w, Matrix<T>& aat) const;
   /// XAt <- X*A'
   inline void XAt(const Matrix<T>& X, Matrix<T>& XAt) const;
   /// XAt <- X(:,indices)*A(:,indices)'
   inline void XAt(const Matrix<T>& X, Matrix<T>& XAt,
         const Vector<int>& indices) const;
   /// XAt <- sum_i w_i X(:,i)*A(:,i)'
   inline void wXAt( const Vector<T>& w, const Matrix<T>& X,
         Matrix<T>& XAt, const int numthreads=-1) const;
   /// XtX <- A'*A
   inline void XtX(Matrix<T>& XtX) const;
   /// y <- alpha*A'*x + beta*y
   inline void multTrans(const Vector<T>& x, Vector<T>& y,
         const T alpha = 1.0, const T beta = 0.0) const;
   /// y <- alpha*A'*x + beta*y, when x is sparse
   inline void multTrans(const SpVector<T>& x, Vector<T>& y,
         const T alpha = 1.0, const T beta = 0.0) const;
   /// perform b = alpha*A*x + beta*b, when x is sparse
   inline void mult(const SpVector<T>& x, Vector<T>& b,
         const T alpha = 1.0, const T beta = 0.0) const;
   /// perform b = alpha*A*x + beta*b
   inline void mult(const Vector<T>& x, Vector<T>& b,
         const T alpha = 1.0, const T beta = 0.0) const;
   /// perform C = a*A*B + b*C, possibly transposing A or B.
   inline void mult(const Matrix<T>& B, Matrix<T>& C,
         const bool transA = false, const bool transB = false,
         const T a = 1.0, const T b = 0.0) const;
   /// perform C = a*B*A + b*C, possibly transposing A or B.
   inline void multSwitch(const Matrix<T>& B, Matrix<T>& C,
         const bool transA = false, const bool transB = false,
         const T a = 1.0, const T b = 0.0) const;
   /// perform C = a*A*B + b*C, when B is sparse
   inline void mult(const SpMatrix<T>& B, Matrix<T>& C,
         const bool transA = false, const bool transB = false,
         const T a = 1.0, const T b = 0.0) const;
   /// make a dense copy of the current matrix into mat
   inline void copyTo(Matrix<T>& mat) const { this->toFull(mat); };
   /// dot product;
   inline T dot(const Matrix<T>& x) const;
   /// copy the row i into x
   inline void copyRow(const int i, Vector<T>& x) const;
   /// sum <- sum of the columns
   inline void sum_cols(Vector<T>& sum) const;
   /// copy the sparse matrix mat
   inline void copy(const SpMatrix<T>& mat);

   /// Conversions
   /// copy the sparse matrix into a dense matrix
   inline void toFull(Matrix<T>& matrix) const;
   /// copy the sparse matrix into a dense transposed matrix
   inline void toFullTrans(Matrix<T>& matrix) const;
   /// use the data from v, r for _v, _r
   inline void convert(const Matrix<T>&v, const Matrix<int>& r,
         const int K);
   /// use the data from v, r for _v, _r
   inline void convert2(const Matrix<T>&v, const Vector<int>& r,
         const int K);
   /// returns the l2 norms ^2 of the columns
   inline void norm_2sq_cols(Vector<T>& norms) const;
   /// returns the l0 norms of the columns
   inline void norm_0_cols(Vector<T>& norms) const;
   /// returns the l1 norms of the columns
   inline void norm_1_cols(Vector<T>& norms) const;
   /// add a*diag[j] to every entry of column j
   inline void addVecToCols(const Vector<T>& diag, const T a = 1.0);
   inline void
addVecToColsWeighted(const Vector<T>& diag, const T* weights,
         const T a = 1.0);

   private:
   /// forbid copy constructor
   explicit SpMatrix(const SpMatrix<T>& matrix);
   SpMatrix<T>& operator=(const SpMatrix<T>& matrix);

   /// if the data has been externally allocated
   bool _externAlloc;
   /// data
   T* _v;
   /// row indices
   int* _r;
   /// indices of the beginning of columns
   int* _pB;
   /// indices of the end of columns
   int* _pE;
   /// number of rows
   int _m;
   /// number of columns
   int _n;
   /// number of non-zero values
   int _nzmax;
};

/// Sparse vector class
template <typename T> class SpVector {
   friend class Matrix<T>;
   friend class SpMatrix<T>;
   friend class Vector<T>;
   public:
   /// Constructor, of the sparse vector of size L.
   SpVector(T* v, int* r, int L, int nzmax);
   /// Constructor, allocates nzmax slots
   SpVector(int nzmax);
   /// Empty constructor
   SpVector();
   /// Destructor
   ~SpVector();

   /// Accessors
   /// returns the maximum number of non-zero elements
   /// NOTE(review): declared to return T, not int
   inline T nzmax() const { return _nzmax; };
   /// returns the length of the vector
   /// NOTE(review): declared to return T, not int
   inline T length() const { return _L; };
   /// computes the sum of the magnitude of the elements
   inline T asum() const;
   /// computes the l2 norm ^2 of the vector
   inline T nrm2sq() const;
   /// computes the l2 norm of the vector
   inline T nrm2() const;
   /// computes the linf norm of the vector
   inline T fmaxval() const;
   /// print the vector to std::cerr
   inline void print(const string& name) const;
   /// create a reference on the index table r
   inline void refIndices(Vector<int>& indices) const;
   /// creates a reference on the value table v
   inline void refVal(Vector<T>& val) const;
   /// access table r
   inline int r(const int i) const { return _r[i]; };
   /// access table v
   inline T v(const int i) const { return _v[i]; };
   /// direct access to the values
   inline T* rawX() const { return _v; };
   /// returns the length _L
   inline int L() const { return _L; };
   /// sets the length _L
   inline void setL(const int L) { _L=L; };
   /// a <- a.^2
   inline void sqr();
   /// dot product
   inline T dot(const SpVector<T>& vec) const;

   /// Modifiers
   /// clears the vector
   inline void clear();
   /// resizes the vector
   inline void resize(const int nzmax);

   /// copy the vector into a sparse m x n matrix
   void inline toSpMatrix(SpMatrix<T>& out,
         const int m, const int n) const;
   /// copy the vector into a dense vector
   void inline toFull(Vector<T>& out) const;

   private:
   /// forbids lazy copies
   explicit SpVector(const SpVector<T>& vector);
   SpVector<T>& operator=(const SpVector<T>& vector);

   /// external allocation
   bool _externAlloc;
   /// data
   T* _v;
   /// indices
   int* _r;
   /// length
   int _L;
   /// maximum number of nonzeros elements
   int _nzmax;
};

/// Class representing the product of two matrices
template<typename T> class ProdMatrix : public AbstractMatrix<T> {
   public:
   ProdMatrix();
   /// Constructor. Matrix D'*D is represented
   ProdMatrix(const Matrix<T>& D, const bool high_memory = true);
   /// Constructor. Matrix D'*X is represented
   ProdMatrix(const Matrix<T>& D, const Matrix<T>& X,
         const bool high_memory = true);
   /// Constructor, D'*X is represented, with optional transpositions
   /*ProdMatrix(const SpMatrix<T>& D, const Matrix<T>& X,
     const bool transD = false, const bool transX = false);*/
   /// Destructor
   ~ProdMatrix() { delete(_DtX);} ;
   /// set_matrices
   inline void setMatrices(const Matrix<T>& D, const bool high_memory=true);
   inline void setMatrices(const Matrix<T>& D, const Matrix<T>& X,
         const bool high_memory=true);
   /// compute DtX(:,i)
   inline void copyCol(const int i, Vector<T>& DtXi) const;
   /// compute DtX(:,i) into a raw buffer
   inline void extract_rawCol(const int i,T* DtXi) const;
   /// DtXi <- DtXi + a*DtX(:,i)
   virtual void add_rawCol(const int i, T* DtXi, const T a) const;
   /// add something to the diagonal
   void inline addDiag(const T diag);
   /// extract the diagonal
   void inline diag(Vector<T>& diag) const;
   /// returns the number of columns
   inline int n() const { return _n;};
   /// returns the number of rows
   inline int m() const { return _m;};
   /// returns the value of an index
   inline T operator()(const int index1, const int index2) const;
   /// returns the value of an index (1D indexing)
   inline T operator[](const int
index) const;
   private:
   /// Depending on the mode, DtX is a matrix, or two matrices
   Matrix<T>* _DtX;
   const Matrix<T>* _X;
   const Matrix<T>* _D;
   bool _high_memory;
   int _n;
   int _m;
   T _addDiag;
};

/* ************************************
 * Implementation of the class Matrix
 * ************************************/

/// Constructor with existing data X of an m x n matrix
/// (the matrix does NOT take ownership of X)
template <typename T> Matrix<T>::Matrix(T* X, int m, int n) :
   _externAlloc(true), _X(X), _m(m), _n(n) {  };

/// Constructor for a new m x n matrix (owns its data)
/// NOTE(review): the allocation is wrapped in an unnamed omp critical
/// section — presumably to serialize allocations across threads; confirm
/// the allocator really requires it
template <typename T> Matrix<T>::Matrix(int m, int n) :
   _externAlloc(false), _m(m), _n(n)  {
#pragma omp critical
      {
         _X= new T[_n*_m];
      }
   };

/// Empty constructor
template <typename T> Matrix<T>::Matrix() :
   _externAlloc(false), _X(NULL), _m(0), _n(0) { };

/// Destructor (frees the data only when owned, see clear())
template <typename T> Matrix<T>::~Matrix() {
   clear();
};

/// Return a modifiable reference to X(i,j) (column-major storage)
template <typename T> inline T& Matrix<T>::operator()(const int i,
      const int j) {
   return _X[j*_m+i];
};

/// Return the value X(i,j) (column-major storage)
template <typename T> inline T Matrix<T>::operator()(const int i,
      const int j) const {
   return _X[j*_m+i];
};

/// Print the matrix: header goes to std::cerr, values to stdout via printf
template <typename T> inline void Matrix<T>::print(const string& name) const {
   std::cerr << name << std::endl;
   std::cerr << _m << " x " << _n << std::endl;
   for (int i = 0; i<_m; ++i) {
      for (int j = 0; j<_n; ++j) {
         printf("%10.5g ",static_cast<double>(_X[j*_m+i]));
         //         std::cerr << _X[j*_m+i] << " ";
      }
      printf("\n ");
      //std::cerr << std::endl;
   }
   printf("\n ");
};

/// Copy the column i into x
template <typename T> inline void Matrix<T>::copyCol(const int i,
      Vector<T>& x) const {
   assert(i >= 0 && i<_n);
   x.resize(_m);
   cblas_copy<T>(_m,_X+i*_m,1,x._X,1);
};

/// Copy the row i into x (stride _m walks along the row)
template <typename T> inline void Matrix<T>::copyRow(const int i,
      Vector<T>& x) const {
   assert(i >= 0 && i<_m);
   x.resize(_n);
   cblas_copy<T>(_n,_X+i,_m,x._X,1);
};

/// Copy the column i into the raw buffer x
template <typename T> inline void Matrix<T>::extract_rawCol(const
int i, T* x) const {
   assert(i >= 0 && i<_n);
   cblas_copy<T>(_m,_X+i*_m,1,x,1);
};

/// x <- x + a * (column i)
template <typename T> inline void Matrix<T>::add_rawCol(const int i, T* x,
      const T a) const {
   assert(i >= 0 && i<_n);
   cblas_axpy<T>(_m,a,_X+i*_m,1,x,1);
};

/// Copy the column i into x (Data<T> interface; alias of copyCol)
template <typename T> inline void Matrix<T>::getData(Vector<T>& x,
      const int i) const {
   this->copyCol(i,x);
};

/// extract the group i: copy the columns listed in groups[i] into data
template <typename T> inline void Matrix<T>::getGroup(Matrix<T>& data,
      const vector_groups& groups, const int i) const {
   const group& gr = groups[i];
   const int N = gr.size();
   data.resize(_m,N);
   int count=0;
   for (group::const_iterator it = gr.begin(); it != gr.end(); ++it) {
      cblas_copy<T>(_m,_X+(*it)*_m,1,data._X+count*_m,1);
      ++count;
   }
};

/// Reference the column i into the vector x (no copy; x does not own it)
template <typename T> inline void Matrix<T>::refCol(int i, Vector<T>& x) const {
   assert(i >= 0 && i<_n);
   x.clear();
   x._X=_X+i*_m;
   x._n=_m;
   x._externAlloc=true;
};

/// Reference the columns i to i+n into the Matrix mat (no copy)
template <typename T> inline void Matrix<T>::refSubMat(int i, int n,
      Matrix<T>& mat) const {
   mat.setData(_X+i*_m,_m,n);
}

/// Check whether the columns of the matrix are normalized or not
template <typename T> inline bool Matrix<T>::isNormalized() const {
   for (int i = 0; i<_n; ++i) {
      T norm=cblas_nrm2<T>(_m,_X+_m*i,1);
      if (fabs(norm - 1.0) > 1e-6) return false;
   }
   return true;
};

/// clean a dictionary matrix: normalize the columns, then replace any
/// column whose correlation with an earlier column exceeds 0.99 by a
/// random normalized column
template <typename T> inline void Matrix<T>::clean() {
   this->normalize();
   Matrix<T> G;
   this->XtX(G);
   T* prG = G._X;
   /// scan the off-diagonal entries of the Gram matrix
   for (int i = 0; i<_n; ++i) {
      for (int j = i+1; j<_n; ++j) {
         if (prG[i*_n+j] > 0.99) {
            // remove nasty column j and put random values inside
            Vector<T> col;
            this->refCol(j,col);
            col.setAleat();
            col.normalize();
         }
      }
   }
};

/// return the 1D-index of the value of greatest magnitude
template <typename T> inline int Matrix<T>::fmax() const {
   return cblas_iamax<T>(_n*_m,_X,1);
};

/// return the value of greatest magnitude
template <typename T>
inline T Matrix<T>::fmaxval() const {
   return _X[cblas_iamax<T>(_n*_m,_X,1)];
};

/// return the 1D-index of the value of lowest magnitude
template <typename T> inline int Matrix<T>::fmin() const {
   return cblas_iamin<T>(_n*_m,_X,1);
};

/// extract a sub-matrix of a symmetric matrix
/// (uses _n as the stride, so it assumes a square matrix: _m == _n)
template <typename T>
inline void Matrix<T>::subMatrixSym(
      const Vector<int>& indices, Matrix<T>& subMatrix) const {
   int L = indices.n();
   subMatrix.resize(L,L);
   T* out = subMatrix._X;
   int* rawInd = indices.rawX();
   for (int i = 0; i<L; ++i)
      for (int j = 0; j<=i; ++j)
         out[i*L+j]=_X[rawInd[i]*_n+rawInd[j]];
   subMatrix.fillSymmetric();
};

/// Resize the matrix (no-op when the size is unchanged; otherwise the
/// old data is discarded and the new storage is zero-initialized)
template <typename T> inline void Matrix<T>::resize(int m, int n) {
   if (_n==n && _m==m) return;
   clear();
   _n=n;
   _m=m;
   _externAlloc=false;
#pragma omp critical
   {
      _X=new T[_n*_m];
   }
   setZeros();
};

/// Change the data in the matrix (references an external buffer, no copy)
template <typename T> inline void Matrix<T>::setData(T* X, int m, int n) {
   clear();
   _X=X;
   _m=m;
   _n=n;
   _externAlloc=true;
};

/// Set all the values to zero
template <typename T> inline void Matrix<T>::setZeros() {
   memset(_X,0,_n*_m*sizeof(T));
};

/// Set all the values to a scalar
template <typename T> inline void Matrix<T>::set(const T a) {
   for (int i = 0; i<_n*_m; ++i) _X[i]=a;
};

/// Clear the matrix (frees the data only when owned)
template <typename T> inline void Matrix<T>::clear() {
   if (!_externAlloc) delete[](_X);
   _n=0;
   _m=0;
   _X=NULL;
   _externAlloc=true;
};

/// Put white Gaussian noise in the matrix
template <typename T> inline void Matrix<T>::setAleat() {
   for (int i = 0; i<_n*_m; ++i) _X[i]=normalDistrib<T>();
};

/// set the matrix to the identity
template <typename T> inline void Matrix<T>::eye() {
   this->setZeros();
   for (int i = 0; i<MIN(_n,_m); ++i) _X[i*_m+i] = T(1.0);
};

/// Normalize all columns to unit l2 norm; a (near-)zero column is
/// replaced by a random normalized one
template <typename T> inline void Matrix<T>::normalize() {
   //T constant = 1.0/sqrt(_m);
   for (int i = 0; i<_n; ++i) {
      T norm=cblas_nrm2<T>(_m,_X+_m*i,1);
      if (norm > 1e-10) {
         T invNorm=1.0/norm;
cblas_scal<T>(_m,invNorm,_X+_m*i,1);
      } else {
         // for (int j = 0; j<_m; ++j) _X[_m*i+j]=constant;
         Vector<T> d;
         this->refCol(i,d);
         d.setAleat();
         d.normalize();
      }
   }
};

/// Normalize all columns which l2 norm is greater than one.
template <typename T> inline void Matrix<T>::normalize2() {
   for (int i = 0; i<_n; ++i) {
      T norm=cblas_nrm2<T>(_m,_X+_m*i,1);
      if (norm > 1.0) {
         T invNorm=1.0/norm;
         cblas_scal<T>(_m,invNorm,_X+_m*i,1);
      }
   }
};

/// center the columns of the matrix (subtract each column's mean)
template <typename T> inline void Matrix<T>::center() {
   for (int i = 0; i<_n; ++i) {
      Vector<T> col;
      this->refCol(i,col);
      T sum = col.sum();
      col.add(-sum/static_cast<T>(_m));
   }
};

/// center the rows of the matrix (subtract each row's mean)
template <typename T> inline void Matrix<T>::center_rows() {
   Vector<T> mean_rows(_m);
   mean_rows.setZeros();
   for (int i = 0; i<_n; ++i)
      for (int j = 0; j<_m; ++j)
         mean_rows[j] += _X[i*_m+j];
   mean_rows.scal(T(1.0)/_n);
   for (int i = 0; i<_n; ++i)
      for (int j = 0; j<_m; ++j)
         _X[i*_m+j] -= mean_rows[j];
};

/// center the columns of the matrix and keep the center values
template <typename T> inline void Matrix<T>::center(Vector<T>& centers) {
   centers.resize(_n);
   for (int i = 0; i<_n; ++i) {
      Vector<T> col;
      this->refCol(i,col);
      T sum = col.sum()/static_cast<T>(_m);
      centers[i]=sum;
      col.add(-sum);
   }
};

/// scale the matrix by a
template <typename T> inline void Matrix<T>::scal(const T a) {
   cblas_scal<T>(_n*_m,a,_X,1);
};

/// make a copy of the matrix mat in the current matrix
template <typename T> inline void Matrix<T>::copy(const Matrix<T>& mat) {
   resize(mat._m,mat._n);
   cblas_copy<T>(_m*_n,mat._X,1,_X,1);
};

/// reference the data of mat in the current matrix (no copy)
template <typename T> inline void Matrix<T>::copyRef(const Matrix<T>& mat) {
   this->setData(mat.rawX(),mat.m(),mat.n());
};

/// make the matrix symmetric by copying the upper-right part
/// into the lower-left part
template <typename T> inline void Matrix<T>::fillSymmetric() {
   for (int i = 0; i<_n; ++i) {
      for (int j =0; j<i; ++j) {
         _X[j*_m+i]=_X[i*_m+j];
      }
   }
};

/// make the matrix symmetric by copying the lower-left part
/// into the upper-right part
template <typename T>
inline void Matrix<T>::fillSymmetric2() {
   for (int i = 0; i<_n; ++i) {
      for (int j =0; j<i; ++j) {
         _X[i*_m+j]=_X[j*_m+i];
      }
   }
};

/// remove the mean of each of the V sub-patches of every column
template <typename T>
inline void Matrix<T>::whiten(const int V) {
   const int sizePatch=_m/V;
   for (int i = 0; i<_n; ++i) {
      for (int j = 0; j<V; ++j) {
         T mean = 0;
         for (int k = 0; k<sizePatch; ++k) {
            mean+=_X[i*_m+sizePatch*j+k];
         }
         mean /= sizePatch;
         for (int k = 0; k<sizePatch; ++k) {
            _X[i*_m+sizePatch*j+k]-=mean;
         }
      }
   }
};

/// remove the per-channel mean of every column and return it in mean;
/// with pattern=true, columns are treated as sqrt(_m) x sqrt(_m) patches
/// and four means are computed on a 2x2 (Bayer-style) sub-grid
template <typename T>
inline void Matrix<T>::whiten(Vector<T>& mean, const bool pattern) {
   mean.setZeros();
   if (pattern) {
      const int n =static_cast<int>(sqrt(static_cast<T>(_m)));
      int count[4];
      for (int i = 0; i<4; ++i) count[i]=0;
      // accumulate the four sub-grid means over all columns
      for (int i = 0; i<_n; ++i) {
         int offsetx=0;
         for (int j = 0; j<n; ++j) {
            offsetx= (offsetx+1) % 2;
            int offsety=0;
            for (int k = 0; k<n; ++k) {
               offsety= (offsety+1) % 2;
               mean[2*offsetx+offsety]+=_X[i*_m+j*n+k];
               count[2*offsetx+offsety]++;
            }
         }
      }
      for (int i = 0; i<4; ++i)
         mean[i] /= count[i];
      // subtract the corresponding mean from every entry
      for (int i = 0; i<_n; ++i) {
         int offsetx=0;
         for (int j = 0; j<n; ++j) {
            offsetx= (offsetx+1) % 2;
            int offsety=0;
            for (int k = 0; k<n; ++k) {
               offsety= (offsety+1) % 2;
               _X[i*_m+j*n+k]-=mean[2*offsetx+offsety];
            }
         }
      }
   } else  {
      const int V = mean.n();
      const int sizePatch=_m/V;
      // accumulate per-channel means over all columns
      for (int i = 0; i<_n; ++i) {
         for (int j = 0; j<V; ++j) {
            for (int k = 0; k<sizePatch; ++k) {
               mean[j]+=_X[i*_m+sizePatch*j+k];
            }
         }
      }
      mean.scal(T(1.0)/(_n*sizePatch));
      for (int i = 0; i<_n; ++i) {
         for (int j = 0; j<V; ++j) {
            for (int k = 0; k<sizePatch; ++k) {
               _X[i*_m+sizePatch*j+k]-=mean[j];
            }
         }
      }
   }
};

/// masked variant of whiten: the per-channel means are normalized by the
/// l1 mass of the corresponding segment of mask
template <typename T>
inline void Matrix<T>::whiten(Vector<T>& mean, const Vector<T>& mask) {
   const int V = mean.n();
   const int sizePatch=_m/V;
   mean.setZeros();
   for (int i = 0; i<_n; ++i) {
      for (int j = 0; j<V; ++j) {
         for (int k = 0; k<sizePatch; ++k) {
            mean[j]+=_X[i*_m+sizePatch*j+k];
         }
      }
   }
   for (int i = 0; i<V; ++i)
      mean[i] /= _n*cblas_asum(sizePatch,mask._X+i*sizePatch,1);
   for (int i = 0; i<_n; ++i) {
      for (int j = 0; j<V; ++j) {
         for (int k = 0;
k<sizePatch; ++k) { if (mask[sizePatch*j+k]) _X[i*_m+sizePatch*j+k]-=mean[j]; } } } }; template <typename T> inline void Matrix<T>::unwhiten(Vector<T>& mean, const bool pattern) { if (pattern) { const int n =static_cast<int>(sqrt(static_cast<T>(_m))); for (int i = 0; i<_n; ++i) { int offsetx=0; for (int j = 0; j<n; ++j) { offsetx= (offsetx+1) % 2; int offsety=0; for (int k = 0; k<n; ++k) { offsety= (offsety+1) % 2; _X[i*_m+j*n+k]+=mean[2*offsetx+offsety]; } } } } else { const int V = mean.n(); const int sizePatch=_m/V; for (int i = 0; i<_n; ++i) { for (int j = 0; j<V; ++j) { for (int k = 0; k<sizePatch; ++k) { _X[i*_m+sizePatch*j+k]+=mean[j]; } } } } }; /// Transpose the current matrix and put the result in the matrix /// trans template <typename T> inline void Matrix<T>::transpose(Matrix<T>& trans) { trans.resize(_n,_m); T* out = trans._X; for (int i = 0; i<_n; ++i) for (int j = 0; j<_m; ++j) out[j*_n+i] = _X[i*_m+j]; }; /// A <- -A template <typename T> inline void Matrix<T>::neg() { for (int i = 0; i<_n*_m; ++i) _X[i]=-_X[i]; }; template <typename T> inline void Matrix<T>::incrDiag() { for (int i = 0; i<MIN(_n,_m); ++i) ++_X[i*_m+i]; }; template <typename T> inline void Matrix<T>::addDiag( const Vector<T>& diag) { T* d= diag.rawX(); for (int i = 0; i<MIN(_n,_m); ++i) _X[i*_m+i] += d[i]; }; template <typename T> inline void Matrix<T>::addDiag( const T diag) { for (int i = 0; i<MIN(_n,_m); ++i) _X[i*_m+i] += diag; }; template <typename T> inline void Matrix<T>::addToCols( const Vector<T>& cent) { Vector<T> col; for (int i = 0; i<_n; ++i) { this->refCol(i,col); col.add(cent[i]); } }; template <typename T> inline void Matrix<T>::addVecToCols( const Vector<T>& vec, const T a) { Vector<T> col; for (int i = 0; i<_n; ++i) { this->refCol(i,col); col.add(vec,a); } }; /// perform a rank one approximation uv' using the power method /// u0 is an initial guess for u (can be empty). 
template <typename T> inline void Matrix<T>::svdRankOne(const Vector<T>& u0, Vector<T>& u, Vector<T>& v) const { int i; const int max_iter=MAX(_m,MAX(_n,200)); const T eps=1e-10; u.resize(_m); v.resize(_n); T norm=u0.nrm2(); Vector<T> up(u0); if (norm < EPSILON) up.setAleat(); up.normalize(); multTrans(up,v); for (i = 0; i<max_iter; ++i) { mult(v,u); norm=u.nrm2(); u.scal(1.0/norm); multTrans(u,v); T theta=u.dot(up); if (i > 10 && (1 - fabs(theta)) < eps) break; up.copy(u); } }; template <typename T> inline void Matrix<T>::singularValues(Vector<T>& u) const { u.resize(MIN(_m,_n)); if (_m > 10*_n) { Matrix<T> XtX; this->XtX(XtX); syev<T>(no,lower,_n,XtX.rawX(),_n,u.rawX()); u.thrsPos(); u.Sqrt(); } else if (_n > 10*_m) { Matrix<T> XXt; this->XXt(XXt); syev<T>(no,lower,_m,XXt.rawX(),_m,u.rawX()); u.thrsPos(); u.Sqrt(); } else { T* vu, *vv; Matrix<T> copyX; copyX.copy(*this); gesvd<T>(no,no,_m,_n,copyX._X,_m,u.rawX(),vu,1,vv,1); } }; template <typename T> inline void Matrix<T>::svd(Matrix<T>& U, Vector<T>& S, Matrix<T>&V) const { const int num_eig=MIN(_m,_n); S.resize(num_eig); U.resize(_m,num_eig); V.resize(num_eig,_n); if (_m > 10*_n) { Matrix<T> Vt(_n,_n); this->XtX(Vt); syev<T>(allV,lower,_n,Vt.rawX(),_n,S.rawX()); S.thrsPos(); S.Sqrt(); this->mult(Vt,U); Vt.transpose(V); Vector<T> inveigs; inveigs.copy(S); for (int i = 0; i<num_eig; ++i) if (S[i] > 1e-10) { inveigs[i]=T(1.0)/S[i]; } else { inveigs[i]=T(1.0); } U.multDiagRight(inveigs); } else if (_n > 10*_m) { this->XXt(U); syev<T>(allV,lower,_m,U.rawX(),_m,S.rawX()); S.thrsPos(); S.Sqrt(); U.mult(*this,V,true,false); Vector<T> inveigs; inveigs.copy(S); for (int i = 0; i<num_eig; ++i) if (S[i] > 1e-10) { inveigs[i]=T(1.0)/S[i]; } else { inveigs[i]=T(1.0); } V.multDiagLeft(inveigs); } else { Matrix<T> copyX; copyX.copy(*this); gesvd<T>(reduced,reduced,_m,_n,copyX._X,_m,S.rawX(),U.rawX(),_m,V.rawX(),num_eig); } }; /// find the eigenvector corresponding to the largest eigenvalue /// when the current matrix is 
symmetric. u0 is the initial guess. /// using two iterations of the power method template <typename T> inline void Matrix<T>::eigLargestSymApprox( const Vector<T>& u0, Vector<T>& u) const { int i,j; const int max_iter=100; const T eps=10e-6; u.copy(u0); T norm = u.nrm2(); T theta; u.scal(1.0/norm); Vector<T> up(u); Vector<T> uor(u); T lambda=T(); for (j = 0; j<2;++j) { up.copy(u); for (i = 0; i<max_iter; ++i) { mult(up,u); norm = u.nrm2(); u.scal(1.0/norm); theta=u.dot(up); if ((1 - fabs(theta)) < eps) break; up.copy(u); } lambda+=theta*norm; if isnan(lambda) { std::cerr << "eigLargestSymApprox failed" << std::endl; exit(1); } if (j == 1 && lambda < eps) { u.copy(uor); break; } if (theta >= 0) break; u.copy(uor); for (i = 0; i<_m; ++i) _X[i*_m+i]-=lambda; } }; /// find the eigenvector corresponding to the eivenvalue with the /// largest magnitude when the current matrix is symmetric, /// using the power method. It /// returns the eigenvalue. u0 is an initial guess for the /// eigenvector. template <typename T> inline T Matrix<T>::eigLargestMagnSym( const Vector<T>& u0, Vector<T>& u) const { const int max_iter=1000; const T eps=10e-6; u.copy(u0); T norm = u.nrm2(); u.scal(1.0/norm); Vector<T> up(u); T lambda=T(); for (int i = 0; i<max_iter; ++i) { mult(u,up); u.copy(up); norm=u.nrm2(); if (norm > 0) u.scal(1.0/norm); if (norm == 0 || fabs(norm-lambda)/norm < eps) break; lambda=norm; } return norm; }; /// returns the value of the eigenvalue with the largest magnitude /// using the power iteration. 
template <typename T>
inline T Matrix<T>::eigLargestMagnSym() const {
   const int max_iter=1000;
   const T eps=10e-6;
   // power iteration from a random starting vector
   Vector<T> u(_m);
   u.setAleat();
   T norm = u.nrm2();
   u.scal(1.0/norm);
   Vector<T> up(u);
   T lambda=T();
   for (int i = 0; i<max_iter; ++i) {
      mult(u,up);
      u.copy(up);
      norm=u.nrm2();
      if (fabs(norm-lambda) < eps) break;
      lambda=norm;
      u.scal(1.0/norm);
   }
   return norm;
};

/// inverse the matrix when it is symmetric (in place), using the
/// LAPACK sytri wrapper on the upper triangle; the result is then
/// mirrored so the full storage is consistent
template <typename T>
inline void Matrix<T>::invSym() {
   // (an earlier variant allocated a workspace and called sytrf first;
   // the current wrappers only require the sytri call below)
   sytri<T>(upper,_n,_X,_n);
   this->fillSymmetric();
};

/// perform b = a*A'x + c*b
template <typename T>
inline void Matrix<T>::multTrans(const Vector<T>& x,
      Vector<T>& b, const T a, const T c) const {
   b.resize(_n);
   // assert(x._n == _m && b._n == _n);
   cblas_gemv<T>(CblasColMajor,CblasTrans,_m,_n,a,_X,_m,x._X,1,c,b._X,1);
};

/// perform b = alpha*A'x + beta*b, when x is sparse
template <typename T>
inline void Matrix<T>::multTrans(const SpVector<T>& x,
      Vector<T>& b, const T alpha, const T beta) const {
   b.resize(_n);
   Vector<T> col;
   // BUG FIX: the two branches were swapped — with beta != 0 the
   // previous contents of b were discarded (beta ignored), and with
   // beta == 0 the code read the possibly-uninitialized values that
   // resize() may leave in b. The condition now matches the dense
   // cblas_gemv overload's alpha/beta semantics.
   if (beta) {
      for (int i = 0; i<_n; ++i) {
         refCol(i,col);
         b._X[i] = beta*b._X[i]+alpha*col.dot(x);
      }
   } else {
      for (int i = 0; i<_n; ++i) {
         refCol(i,col);
         b._X[i] = alpha*col.dot(x);
      }
   }
};

/// b = A'x restricted to the active columns; inactive entries of b
/// are left at zero
template <typename T>
inline void Matrix<T>::multTrans(
      const Vector<T>& x, Vector<T>& b, const Vector<bool>& active) const {
   b.setZeros();
   Vector<T> col;
   bool* pr_active=active.rawX();
   for (int i = 0; i<_n; ++i) {
      if (pr_active[i]) {
         this->refCol(i,col);
         b._X[i]=col.dot(x);
      }
   }
};

/// perform b = alpha*A*x+beta*b
template <typename T>
inline void Matrix<T>::mult(const Vector<T>& x, Vector<T>& b, const T a, const
T c) const {
   // assert(x._n == _n && b._n == _m);
   b.resize(_m);
   cblas_gemv<T>(CblasColMajor,CblasNoTrans,_m,_n,a,_X,_m,x._X,1,c,b._X,1);
};

/// perform b = a*A*x + a2*b, when x is sparse
template <typename T>
inline void Matrix<T>::mult(const SpVector<T>& x,
      Vector<T>& b, const T a, const T a2) const {
   if (!a2) {
      b.setZeros();
   } else if (a2 != 1.0) {
      b.scal(a2);
   }
   // accumulate the selected columns with axpy
   if (a == 1.0) {
      for (int i = 0; i<x._L; ++i) {
         cblas_axpy<T>(_m,x._v[i],_X+x._r[i]*_m,1,b._X,1);
      }
   } else {
      for (int i = 0; i<x._L; ++i) {
         cblas_axpy<T>(_m,a*x._v[i],_X+x._r[i]*_m,1,b._X,1);
      }
   }
};

/// perform C = a*A*B + b*C, possibly transposing A or B.
template <typename T>
inline void Matrix<T>::mult(const Matrix<T>& B,
      Matrix<T>& C, const bool transA, const bool transB,
      const T a, const T b) const {
   CBLAS_TRANSPOSE trA,trB;
   int m,k,n;
   if (transA) {
      trA = CblasTrans;
      m = _n;
      k = _m;
   } else {
      trA= CblasNoTrans;
      m = _m;
      k = _n;
   }
   if (transB) {
      trB = CblasTrans;
      n = B._m;
      // assert(B._n == k);
   } else {
      trB = CblasNoTrans;
      n = B._n;
      // assert(B._m == k);
   }
   C.resize(m,n);
   cblas_gemm<T>(CblasColMajor,trA,trB,m,n,k,a,_X,_m,B._X,B._m,
         b,C._X,C._m);
};

/// perform C = a*B*A + b*C, possibly transposing A or B.
template <typename T>
inline void Matrix<T>::multSwitch(const Matrix<T>& B, Matrix<T>& C,
      const bool transA, const bool transB, const T a, const T b) const {
   B.mult(*this,C,transB,transA,a,b);
};

/// perform C = a*A*B + b*C, when B is sparse
template <typename T>
inline void Matrix<T>::mult(const SpMatrix<T>& B, Matrix<T>& C,
      const bool transA, const bool transB, const T a, const T b) const {
   if (transA) {
      if (transB) {
         // C = a*A'*B' + b*C, built one row of C at a time
         C.resize(_n,B.m());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         Vector<T> rowC(B.m());
         Vector<T> colA;
         for (int i = 0; i<_n; ++i) {
            this->refCol(i,colA);
            B.mult(colA,rowC,a);
            C.addRow(i,rowC,a);
         }
      } else {
         // C = a*A'*B + b*C, one sparse column of B at a time
         C.resize(_n,B.n());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         Vector<T> colC;
         SpVector<T> colB;
         for (int i = 0; i<B.n(); ++i) {
            C.refCol(i,colC);
            B.refCol(i,colB);
            this->multTrans(colB,colC,a,T(1.0));
         }
      }
   } else {
      if (transB) {
         // C = a*A*B' + b*C via sparse rank-one updates
         C.resize(_m,B.m());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         Vector<T> colA;
         SpVector<T> colB;
         for (int i = 0; i<_n; ++i) {
            this->refCol(i,colA);
            B.refCol(i,colB);
            C.rank1Update(colA,colB,a);
         }
      } else {
         // C = a*A*B + b*C, one sparse column of B at a time
         C.resize(_m,B.n());
         if (b) {
            C.scal(b);
         } else {
            C.setZeros();
         }
         Vector<T> colC;
         SpVector<T> colB;
         for (int i = 0; i<B.n(); ++i) {
            C.refCol(i,colC);
            B.refCol(i,colB);
            this->mult(colB,colC,a,T(1.0));
         }
      }
   };
}

/// mult by a diagonal matrix on the left
template <typename T>
inline void Matrix<T>::multDiagLeft(const Vector<T>& diag) {
   if (diag.n() != _m)
      return;
   T* d = diag.rawX();
   for (int i = 0; i< _n; ++i) {
      for (int j = 0; j<_m; ++j) {
         _X[i*_m+j] *= d[j];
      }
   }
};

/// mult by a diagonal matrix on the right
template <typename T>
inline void Matrix<T>::multDiagRight(
      const Vector<T>& diag) {
   if (diag.n() != _n)
      return;
   T* d = diag.rawX();
   for (int i = 0; i< _n; ++i) {
      for (int j = 0; j<_m; ++j) {
         _X[i*_m+j] *= d[i];
      }
   }
};

/// C = A .* B, elementwise multiplication
template <typename T>
inline void Matrix<T>::mult_elementWise(
      const Matrix<T>& B, Matrix<T>& C) const {
   assert(_n == B._n && _m == B._m);
   C.resize(_m,_n);
   vMul<T>(_n*_m,_X,B._X,C._X);
};

/// C = A ./ B, elementwise division
template <typename T>
inline void Matrix<T>::div_elementWise(
      const Matrix<T>& B, Matrix<T>& C) const {
   assert(_n == B._n && _m == B._m);
   C.resize(_m,_n);
   vDiv<T>(_n*_m,_X,B._X,C._X);
};

/// XtX = A'*A
template <typename T>
inline void Matrix<T>::XtX(Matrix<T>& xtx) const {
   xtx.resize(_n,_n);
   cblas_syrk<T>(CblasColMajor,CblasUpper,CblasTrans,_n,_m,T(1.0),
         _X,_m,T(),xtx._X,_n);
   xtx.fillSymmetric();
};

/// XXt = A*At
template <typename T>
inline void Matrix<T>::XXt(Matrix<T>& xxt) const {
   xxt.resize(_m,_m);
   cblas_syrk<T>(CblasColMajor,CblasUpper,CblasNoTrans,_m,_n,T(1.0),
         _X,_m,T(),xxt._X,_m);
   xxt.fillSymmetric();
};

/// XXt = A*A' where A is an upper triangular matrix
template <typename T>
inline void Matrix<T>::upperTriXXt(Matrix<T>& XXt, const int L) const {
   XXt.resize(L,L);
   for (int i = 0; i<L; ++i) {
      // column i has i+1 structural non-zeros
      cblas_syr<T>(CblasColMajor,CblasUpper,i+1,T(1.0),_X+i*_m,1,XXt._X,L);
   }
   XXt.fillSymmetric();
}

/// extract the diagonal
template <typename T>
inline void Matrix<T>::diag(Vector<T>& dv) const {
   int size_diag=MIN(_n,_m);
   dv.resize(size_diag);
   T* const d = dv.rawX();
   for (int i = 0; i<size_diag; ++i)
      d[i]=_X[i*_m+i];
};

/// set the diagonal
template <typename T>
inline void Matrix<T>::setDiag(const Vector<T>& dv) {
   int size_diag=MIN(_n,_m);
   T* const d = dv.rawX();
   for (int i = 0; i<size_diag; ++i)
      _X[i*_m+i]=d[i];
};

/// set the diagonal to a constant
template <typename T>
inline void Matrix<T>::setDiag(const T val) {
   int size_diag=MIN(_n,_m);
   for (int i = 0; i<size_diag; ++i)
      _X[i*_m+i]=val;
};

/// each element of the matrix is replaced by its exponential
template <typename T>
inline void Matrix<T>::exp() {
   vExp<T>(_n*_m,_X,_X);
};

/// each element is replaced by its square root
template <typename T>
inline void Matrix<T>::Sqrt() {
   vSqrt<T>(_n*_m,_X,_X);
};

/// each element is replaced by its inverse square root
template <typename T>
inline void Matrix<T>::Invsqrt() {
   vInvSqrt<T>(_n*_m,_X,_X);
};

/// return vec'*A*vec, where vec is sparse
template <typename T>
inline T Matrix<T>::quad(
      const SpVector<T>& vec) const {
   T sum = T();
   int L = vec._L;
   int* r = vec._r;
   T* v = vec._v;
   for (int i = 0; i<L; ++i)
      for (int j = 0; j<L; ++j)
         sum += _X[r[i]*_m+r[j]]*v[i]*v[j];
   return sum;
};

/// y = b*y + a*(per-block quadratic terms); the matrix is viewed as
/// y.n() horizontal blocks of _n/y.n() columns each
template <typename T>
inline void Matrix<T>::quad_mult(const Vector<T>& vec1,
      const SpVector<T>& vec2, Vector<T>& y, const T a, const T b) const {
   const int size_y= y.n();
   const int nn = _n/size_y;
   //y.resize(size_y);
   //y.setZeros();
   Matrix<T> tmp;
   for (int i = 0; i<size_y; ++i) {
      tmp.setData(_X+(i*nn)*_m,_m,nn);
      y[i]=b*y[i]+a*tmp.quad(vec1,vec2);
   }
}

/// return vec1'*A*vec when vec is sparse
template <typename T>
inline T Matrix<T>::quad(
      const Vector<T>& vec1, const SpVector<T>& vec) const {
   T sum = T();
   int L = vec._L;
   int* r = vec._r;
   T* v = vec._v;
   Vector<T> col;
   for (int i = 0; i<L; ++i) {
      this->refCol(r[i],col);
      sum += v[i]*col.dot(vec1);
   }
   return sum;
};

/// add alpha*mat to the current matrix
template <typename T>
inline void Matrix<T>::add(const Matrix<T>& mat, const T alpha) {
   assert(mat._m == _m && mat._n == _n);
   cblas_axpy<T>(_n*_m,alpha,mat._X,1,_X,1);
};

/// return the inner product of the two matrices seen as long vectors
template <typename T>
inline T Matrix<T>::dot(const Matrix<T>& mat) const {
   assert(mat._m == _m && mat._n == _n);
   return cblas_dot<T>(_n*_m,mat._X,1,_X,1);
};

/// add alpha to the current matrix
template <typename T>
inline void Matrix<T>::add(const T alpha) {
   for (int i = 0; i<_n*_m; ++i) _X[i]+=alpha;
};

/// substract the matrix mat to the current matrix
template <typename T>
inline void Matrix<T>::sub(const Matrix<T>& mat) {
   vSub<T>(_n*_m,_X,mat._X,_X);
};

/// compute the sum of the magnitude of the matrix values
template <typename T>
inline T Matrix<T>::asum() const {
   return cblas_asum<T>(_n*_m,_X,1);
};

/// returns the trace of the matrix
template <typename T>
inline T Matrix<T>::trace() const {
   T sum=T();
   int m = MIN(_n,_m);
   for (int i = 0; i<m; ++i)
      sum += _X[i*_m+i];
   return sum;
};

/// return ||A||_F
template <typename T>
inline T Matrix<T>::normF() const {
   return cblas_nrm2<T>(_n*_m,_X,1);
};

/// return the mean of all the entries
template <typename T>
inline T Matrix<T>::mean() const {
   Vector<T> vec;
   this->toVect(vec);
   return vec.mean();
};

/// return ||A||_F^2
template <typename T>
inline T Matrix<T>::normFsq() const {
   return cblas_dot<T>(_n*_m,_X,1,_X,1);
};

/// return ||At||_{inf,2} (the maximum column l2 norm)
template <typename T>
inline T Matrix<T>::norm_inf_2_col() const {
   Vector<T> col;
   T max = -1.0;
   for (int i = 0; i<_n; ++i) {
      refCol(i,col);
      T norm_col = col.nrm2();
      if (norm_col > max)
         max = norm_col;
   }
   return max;
};

/// return ||At||_{1,2} (the sum of the column l2 norms)
template <typename T>
inline T Matrix<T>::norm_1_2_col() const {
   Vector<T> col;
   T sum = 0.0;
   for (int i = 0; i<_n; ++i) {
      refCol(i,col);
      sum += col.nrm2();
   }
   return sum;
};

/// returns the l2 norms of the rows
template <typename T>
inline void Matrix<T>::norm_2_rows(
      Vector<T>& norms) const {
   norms.resize(_m);
   norms.setZeros();
   for (int i = 0; i<_n; ++i)
      for (int j = 0; j<_m; ++j)
         norms[j] += _X[i*_m+j]*_X[i*_m+j];
   for (int j = 0; j<_m; ++j)
      norms[j]=sqrt(norms[j]);
};

/// returns the squared l2 norms of the rows
template <typename T>
inline void Matrix<T>::norm_2sq_rows(
      Vector<T>& norms) const {
   norms.resize(_m);
   norms.setZeros();
   for (int i = 0; i<_n; ++i)
      for (int j = 0; j<_m; ++j)
         norms[j] += _X[i*_m+j]*_X[i*_m+j];
};

/// returns the l2 norms of the columns
template <typename T>
inline void Matrix<T>::norm_2_cols(
      Vector<T>& norms) const {
   norms.resize(_n);
   Vector<T> col;
   for (int i = 0; i<_n; ++i) {
      refCol(i,col);
      norms[i] = col.nrm2();
   }
};

/// returns the linf norms of the columns
template <typename T>
inline void Matrix<T>::norm_inf_cols(Vector<T>& norms) const {
   norms.resize(_n);
   Vector<T> col;
   for (int i = 0; i<_n; ++i) {
      refCol(i,col);
      norms[i] = col.fmaxval();
   }
};

/// returns the linf norms of the rows
template <typename T>
inline void Matrix<T>::norm_inf_rows(Vector<T>& norms) const {
   norms.resize(_m);
   norms.setZeros();
   for (int i = 0; i<_n; ++i)
      for (int j = 0; j<_m; ++j)
         norms[j] = MAX(abs<T>(_X[i*_m+j]),norms[j]);
};

/// returns the l1 norms of the rows
template <typename T>
inline void Matrix<T>::norm_l1_rows(Vector<T>& norms) const {
   norms.resize(_m);
   norms.setZeros();
   for (int i = 0; i<_n; ++i)
      for (int j = 0; j<_m; ++j)
         norms[j] += abs<T>(_X[i*_m+j]);
};

/// returns the squared l2 norms of the columns
template <typename T>
inline void Matrix<T>::norm_2sq_cols(
      Vector<T>& norms) const {
   norms.resize(_n);
   Vector<T> col;
   for (int i = 0; i<_n; ++i) {
      refCol(i,col);
      norms[i] = col.nrm2sq();
   }
};

/// sum of the columns (the result has _m entries)
template <typename T>
inline void Matrix<T>::sum_cols(Vector<T>& sum) const {
   sum.resize(_m);
   sum.setZeros();
   Vector<T> tmp;
   for (int i = 0; i<_n; ++i) {
      this->refCol(i,tmp);
      sum.add(tmp);
   }
};

/// Compute the mean of the columns
template <typename T>
inline void Matrix<T>::meanCol(Vector<T>& mean) const {
   Vector<T> ones(_n);
   ones.set(T(1.0/_n));
   this->mult(ones,mean,1.0,0.0);
};

/// Compute the mean of the rows
template <typename T>
inline void Matrix<T>::meanRow(Vector<T>& mean) const {
   Vector<T> ones(_m);
   ones.set(T(1.0/_m));
   this->multTrans(ones,mean,1.0,0.0);
};

/// fill each column i with the constant value row[i]
template <typename T>
inline void Matrix<T>::fillRow(const Vector<T>& row) {
   for (int i = 0; i<_n; ++i) {
      T val = row[i];
      for (int j = 0; j<_m; ++j) {
         _X[i*_m+j]=val;
      }
   }
};

/// extract row j into the vector row
template <typename T>
inline void Matrix<T>::extractRow(const int j,
      Vector<T>& row) const {
   row.resize(_n);
   for (int i = 0; i<_n; ++i) {
      row[i]=_X[i*_m+j];
   }
};

/// set row j from the vector row
template <typename T>
inline void Matrix<T>::setRow(const int j,
      const Vector<T>& row) {
   for (int i = 0; i<_n; ++i) {
      _X[i*_m+j]=row[i];
   }
};

/// add a*row to row j
template <typename T>
inline void Matrix<T>::addRow(const int j,
      const Vector<T>& row, const T a) {
   if (a==1.0) {
      for (int i = 0; i<_n; ++i) {
         _X[i*_m+j]+=row[i];
      }
   } else {
      for (int i = 0; i<_n; ++i) {
         _X[i*_m+j]+=a*row[i];
      }
   }
};

/// perform soft-thresholding of the matrix, with the threshold nu
template
<typename T> inline void Matrix<T>::softThrshold(const T nu) { Vector<T> vec; toVect(vec); vec.softThrshold(nu); }; /// perform soft-thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::hardThrshold(const T nu) { Vector<T> vec; toVect(vec); vec.hardThrshold(nu); }; /// perform thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::thrsmax(const T nu) { Vector<T> vec; toVect(vec); vec.thrsmax(nu); }; /// perform thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::thrsmin(const T nu) { Vector<T> vec; toVect(vec); vec.thrsmin(nu); }; /// perform soft-thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::inv_elem() { Vector<T> vec; toVect(vec); vec.inv(); }; /// perform soft-thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::blockThrshold(const T nu, const int sizeGroup) { for (int i = 0; i<_n; ++i) { int j; for (j = 0; j<_m-sizeGroup+1; j+=sizeGroup) { T nrm=0; for (int k = 0; k<sizeGroup; ++k) nrm += _X[i*_m +j+k]*_X[i*_m +j+k]; nrm=sqrt(nrm); if (nrm < nu) { for (int k = 0; k<sizeGroup; ++k) _X[i*_m +j+k]=0; } else { T scal = (nrm-nu)/nrm; for (int k = 0; k<sizeGroup; ++k) _X[i*_m +j+k]*=scal; } } j -= sizeGroup; for ( ; j<_m; ++j) _X[j]=softThrs<T>(_X[j],nu); } } template <typename T> inline void Matrix<T>::sparseProject(Matrix<T>& Y, const T thrs, const int mode, const T lambda1, const T lambda2, const T lambda3, const bool pos, const int numThreads) { int NUM_THREADS=init_omp(numThreads); Vector<T>* XXT= new Vector<T>[NUM_THREADS]; for (int i = 0; i<NUM_THREADS; ++i) { XXT[i].resize(_m); } int i; #pragma omp parallel for private(i) for (i = 0; i< _n; ++i) { #ifdef _OPENMP int numT=omp_get_thread_num(); #else int numT=0; #endif Vector<T> Xi; this->refCol(i,Xi); Vector<T> Yi; Y.refCol(i,Yi); Vector<T>& XX = XXT[numT]; XX.copy(Xi); 
XX.sparseProject(Yi,thrs,mode,lambda1,lambda2,lambda3,pos); } delete[](XXT); }; /// perform soft-thresholding of the matrix, with the threshold nu template <typename T> inline void Matrix<T>::thrsPos() { Vector<T> vec; toVect(vec); vec.thrsPos(); }; /// perform A <- A + alpha*vec1*vec2' template <typename T> inline void Matrix<T>::rank1Update( const Vector<T>& vec1, const Vector<T>& vec2, const T alpha) { cblas_ger<T>(CblasColMajor,_m,_n,alpha,vec1._X,1,vec2._X,1,_X,_m); }; /// perform A <- A + alpha*vec1*vec2', when vec1 is sparse template <typename T> inline void Matrix<T>::rank1Update( const SpVector<T>& vec1, const Vector<T>& vec2, const T alpha) { int* r = vec1._r; T* v = vec1._v; T* X2 = vec2._X; assert(vec2._n == _n); if (alpha == 1.0) { for (int i = 0; i<_n; ++i) { for (int j = 0; j<vec1._L; ++j) { _X[i*_m+r[j]] += v[j]*X2[i]; } } } else { for (int i = 0; i<_n; ++i) { for (int j = 0; j<vec1._L; ++j) { _X[i*_m+r[j]] += alpha*v[j]*X2[i]; } } } }; template <typename T> inline void Matrix<T>::rank1Update_mult(const Vector<T>& vec1, const Vector<T>& vec1b, const SpVector<T>& vec2, const T alpha) { const int nn = vec1b.n(); const int size_A = _n/nn; Matrix<T> tmp; for (int i = 0; i<nn; ++i) { tmp.setData(_X+i*size_A*_m,_m,size_A); tmp.rank1Update(vec1,vec2,alpha*vec1b[i]); } }; /// perform A <- A + alpha*vec1*vec2', when vec1 is sparse template <typename T> inline void Matrix<T>::rank1Update( const SpVector<T>& vec1, const SpVector<T>& vec2, const T alpha) { int* r = vec1._r; T* v = vec1._v; T* v2 = vec2._v; int* r2 = vec2._r; if (alpha == 1.0) { for (int i = 0; i<vec2._L; ++i) { for (int j = 0; j<vec1._L; ++j) { _X[r2[i]*_m+r[j]] += v[j]*v2[i]; } } } else { for (int i = 0; i<vec2._L; ++i) { for (int j = 0; j<vec1._L; ++j) { _X[r[i]*_m+r[j]] += alpha*v[j]*v2[i]; } } } }; /// perform A <- A + alpha*vec1*vec2', when vec2 is sparse template <typename T> inline void Matrix<T>::rank1Update( const Vector<T>& vec1, const SpVector<T>& vec2, const T alpha) { int* r = 
vec2._r;
   T* v = vec2._v;
   Vector<T> Xi;
   for (int i = 0; i<vec2._L; ++i) {
      this->refCol(r[i],Xi);
      Xi.add(vec1,v[i]*alpha);
   }
};

/// perform A <- A + alpha*vec1*vec1', when vec1 is sparse
template <typename T>
inline void Matrix<T>::rank1Update(
      const SpVector<T>& vec1, const T alpha) {
   int* r = vec1._r;
   T* v = vec1._v;
   if (alpha == 1.0) {
      for (int i = 0; i<vec1._L; ++i) {
         for (int j = 0; j<vec1._L; ++j) {
            _X[r[i]*_m+r[j]] += v[j]*v[i];
         }
      }
   } else {
      for (int i = 0; i<vec1._L; ++i) {
         for (int j = 0; j<vec1._L; ++j) {
            _X[_m*r[i]+r[j]] += alpha*v[j]*v[i];
         }
      }
   }
};

/// compute x, such that b = Ax, using the conjugate gradient method
/// (x is both the starting point and the result)
template <typename T>
inline void Matrix<T>::conjugateGradient(
      const Vector<T>& b, Vector<T>& x, const T tol, const int itermax) const {
   Vector<T> R,P,AP;
   R.copy(b);
   // R = b - Ax: the initial residual
   this->mult(x,R,T(-1.0),T(1.0));
   P.copy(R);
   int k = 0;
   T normR = R.nrm2sq();
   T alpha;
   while (normR > tol && k < itermax) {
      this->mult(P,AP);
      alpha = normR/P.dot(AP);
      x.add(P,alpha);
      R.add(AP,-alpha);
      T tmp = R.nrm2sq();
      // P <- R + (tmp/normR)*P, done as a scale followed by an add
      P.scal(tmp/normR);
      normR = tmp;
      P.add(R,T(1.0));
      ++k;
   };
};

/// dump the matrix to a text file (one column per line)
template <typename T>
inline void Matrix<T>::drop(char* fileName) const {
   std::ofstream f;
   f.precision(12);
   f.flags(std::ios_base::scientific);
   f.open(fileName, ofstream::trunc);
   std::cout << "Matrix written in " << fileName << std::endl;
   for (int i = 0; i<_n; ++i) {
      for (int j = 0; j<_m; ++j)
         f << _X[i*_m+j] << " ";
      f << std::endl;
   }
   f.close();
};

/// compute a Nadaraya Watson estimator: each column is replaced by a
/// Gaussian-kernel-weighted average of the columns belonging to the
/// same group (group labels in ind, values 1..maxval)
template <typename T>
inline void Matrix<T>::NadarayaWatson(
      const Vector<int>& ind, const T sigma) {
   if (ind.n() != _n)
      return;
   init_omp(MAX_THREADS);
   const int Ngroups=ind.maxval();
   int i;
#pragma omp parallel for private(i)
   for (i = 1; i<=Ngroups; ++i) {
      // gather the columns of group i into Xm
      Vector<int> indicesGroup(_n);
      int count = 0;
      for (int j = 0; j<_n; ++j)
         if (ind[j] == i)
            indicesGroup[count++]=j;
      Matrix<T> Xm(_m,count);
      Vector<T> col, col2;
      for (int j= 0; j<count; ++j) {
         this->refCol(indicesGroup[j],col);
         Xm.refCol(j,col2);
         col2.copy(col);
      }
      Vector<T> norms;
      Xm.norm_2sq_cols(norms);
      Matrix<T> weights;
      Xm.XtX(weights);
      // build exp(-sigma*||xi-xj||^2) via ||xi||^2 - 2 xi'xj + ||xj||^2
      weights.scal(T(-2.0));
      Vector<T> ones(Xm.n());
      ones.set(T(1.0));
      weights.rank1Update(ones,norms);
      weights.rank1Update(norms,ones);
      weights.scal(-sigma);
      weights.exp();
      // normalize the kernel weights column-wise
      Vector<T> den;
      weights.mult(ones,den);
      den.inv();
      weights.multDiagRight(den);
      Matrix<T> num;
      Xm.mult(weights,num);
      // scatter the smoothed columns back
      for (int j= 0; j<count; ++j) {
         this->refCol(indicesGroup[j],col);
         num.refCol(j,col2);
         col.copy(col2);
      }
   }
};

/// make a sparse copy of the current matrix
template <typename T>
inline void Matrix<T>::toSparse(SpMatrix<T>& out) const {
   out.clear();
   int count=0;
   int* pB;
#pragma omp critical
   {
      pB=new int[_n+1];
   }
   int* pE=pB+1;
   // first pass: count the non-zeros
   for (int i = 0; i<_n*_m; ++i)
      if (_X[i] != 0) ++count;
   int* r;
   T* v;
#pragma omp critical
   {
      r=new int[count];
      v=new T[count];
   }
   count=0;
   // second pass: fill the compressed-column arrays
   for (int i = 0; i<_n; ++i) {
      pB[i]=count;
      for (int j = 0; j<_m; ++j) {
         if (_X[i*_m+j] != 0) {
            v[count]=_X[i*_m+j];
            r[count++]=j;
         }
      }
      pE[i]=count;
   }
   out._v=v;
   out._r=r;
   out._pB=pB;
   out._pE=pE;
   out._m=_m;
   out._n=_n;
   out._nzmax=count;
   out._externAlloc=false;
};

/// make a sparse copy of the transpose of the current matrix
template <typename T>
inline void Matrix<T>::toSparseTrans(
      SpMatrix<T>& out) {
   out.clear();
   int count=0;
   int* pB;
#pragma omp critical
   {
      pB=new int[_m+1];
   }
   int* pE=pB+1;
   for (int i = 0; i<_n*_m; ++i)
      if (_X[i] != 0) ++count;
   int* r;
   T* v;
#pragma omp critical
   {
      r=new int[count];
      v=new T[count];
   }
   count=0;
   // iterate over rows of the dense matrix (= columns of the output)
   for (int i = 0; i<_m; ++i) {
      pB[i]=count;
      for (int j = 0; j<_n; ++j) {
         if (_X[i+j*_m] != 0) {
            v[count]=_X[j*_m+i];
            r[count++]=j;
         }
      }
      pE[i]=count;
   }
   out._v=v;
   out._r=r;
   out._pB=pB;
   out._pE=pE;
   out._m=_n;
   out._n=_m;
   out._nzmax=count;
   out._externAlloc=false;
};

/// make a reference of the matrix to a vector vec
template <typename T>
inline void Matrix<T>::toVect(
      Vector<T>& vec) const {
   vec.clear();
   vec._externAlloc=true;
   vec._n=_n*_m;
   vec._X=_X;
};

/// merge two dictionaries: keep every column of *this and append the
/// columns of B whose maximum correlation with *this is below 0.995
template <typename T>
inline void Matrix<T>::merge(const Matrix<T>& B,
      Matrix<T>& C) const {
   const int K =_n;
   Matrix<T> G;
   this->mult(B,G,true,false);
   std::list<int> list;
   for (int i = 0; i<G.n(); ++i) {
      Vector<T> g;
      G.refCol(i,g);
      T fmax=g.fmaxval();
      if (fmax < 0.995)
         list.push_back(i);
   }
   C.resize(_m,K+list.size());
   for (int i = 0; i<K; ++i) {
      Vector<T> d, d2;
      C.refCol(i,d);
      this->refCol(i,d2);
      d.copy(d2);
   }
   int count=0;
   for (std::list<int>::const_iterator it = list.begin();
         it != list.end(); ++it) {
      Vector<T> d, d2;
      C.refCol(K+count,d);
      B.refCol(*it,d2);
      d.copy(d2);
      ++count;
   }
};

/* ***********************************
 * Implementation of the class Vector
 * ***********************************/

/// Empty constructor
template <typename T> Vector<T>::Vector() :
   _externAlloc(true), _X(NULL), _n(0) { };

/// Constructor. Create a new vector of size n
template <typename T> Vector<T>::Vector(int n) :
   _externAlloc(false), _n(n) {
#pragma omp critical
      {
         _X=new T[_n];
      }
   };

/// Constructor with existing data
template <typename T> Vector<T>::Vector(T* X, int n) :
   _externAlloc(true), _X(X), _n(n) { };

/// Copy constructor
template <typename T> Vector<T>::Vector(const Vector<T>& vec) :
   _externAlloc(false), _n(vec._n) {
#pragma omp critical
      {
         _X=new T[_n];
      }
      cblas_copy<T>(_n,vec._X,1,_X,1);
   };

/// Destructor
template <typename T> Vector<T>::~Vector() {
   clear();
};

/// Print the vector to std::cout
template <> inline void Vector<double>::print(const char* name) const {
   printf("%s, %d\n",name,_n);
   for (int i = 0; i<_n; ++i) {
      printf("%g ",_X[i]);
   }
   printf("\n");
};

/// Print the vector to std::cout
template <> inline void Vector<float>::print(const char* name) const {
   printf("%s, %d\n",name,_n);
   for (int i = 0; i<_n; ++i) {
      printf("%g ",_X[i]);
   }
   printf("\n");
};

/// Print the vector to std::cout
template <> inline void Vector<int>::print(const char* name) const {
   printf("%s, %d\n",name,_n);
   for (int i = 0; i<_n; ++i) {
      printf("%d ",_X[i]);
   }
   printf("\n");
};

/// Print the vector to std::cout
template <> inline void Vector<bool>::print(const char* name) const {
   printf("%s, %d\n",name,_n);
   for (int i = 0; i<_n; ++i) {
      printf("%d ",_X[i] ? 1 : 0);
   }
   printf("\n");
};

/// returns the index of the largest value
template <typename T> inline int Vector<T>::max() const {
   int imax=0;
   T max=_X[0];
   for (int j = 1; j<_n; ++j) {
      T cur = _X[j];
      if (cur > max) {
         imax=j;
         max = cur;
      }
   }
   return imax;
};

/// returns the index of the minimum value
template <typename T> inline int Vector<T>::min() const {
   int imin=0;
   T min=_X[0];
   for (int j = 1; j<_n; ++j) {
      T cur = _X[j];
      if (cur < min) {
         imin=j;
         min = cur;
      }
   }
   return imin;
};

/// returns the maximum value
template <typename T> inline T Vector<T>::maxval() const {
   return _X[this->max()];
};

/// returns the minimum value
template <typename T> inline T Vector<T>::minval() const {
   return _X[this->min()];
};

/// returns the maximum magnitude
template <typename T> inline T Vector<T>::fmaxval() const {
   return fabs(_X[this->fmax()]);
};

/// returns the minimum magnitude
template <typename T> inline T Vector<T>::fminval() const {
   return fabs(_X[this->fmin()]);
};

/// fill the vector with n values logarithmically spaced from a to b
template <typename T>
inline void Vector<T>::logspace(const int n, const T a, const T b) {
   T first=log10(a);
   T last=log10(b);
   T step = (last-first)/(n-1);
   this->resize(n);
   _X[0]=first;
   for (int i = 1; i<_n; ++i)
      _X[i]=_X[i-1]+step;
   for (int i = 0; i<_n; ++i)
      _X[i]=pow(T(10.0),_X[i]);
}

/// returns the number of non-zero entries
template <typename T>
inline int Vector<T>::nnz() const {
   int sum=0;
   for (int i = 0; i<_n; ++i)
      if (_X[i] != T()) ++sum;
   return sum;
};

/// generate logarithmically spaced values
template <>
inline void Vector<int>::logspace(const int n, const int a, const int b) {
   Vector<double> tmp(n);
   tmp.logspace(n,double(a),double(b));
   this->resize(n);
   _X[0]=a;
   _X[n-1]=b;
   // keep the sequence strictly increasing despite the rounding
   for (int i = 1; i<_n-1; ++i) {
      int candidate=static_cast<int>(floor(static_cast<double>(tmp[i])));
      _X[i]= candidate > _X[i-1] ? candidate : _X[i-1]+1;
   }
}

/// returns the index of the value with largest magnitude
template <typename T>
inline int Vector<T>::fmax() const {
   return cblas_iamax<T>(_n,_X,1);
};

/// returns the index of the value with smallest magnitude
template <typename T>
inline int Vector<T>::fmin() const {
   return cblas_iamin<T>(_n,_X,1);
};

/// returns a reference to X[index]
template <typename T>
inline T& Vector<T>::operator[] (const int i) {
   assert(i>=0 && i<_n);
   return _X[i];
};

/// returns X[index]
template <typename T>
inline T Vector<T>::operator[] (const int i) const {
   assert(i>=0 && i<_n);
   return _X[i];
};

/// make a copy of x
template <typename T>
inline void Vector<T>::copy(const Vector<T>& x) {
   this->resize(x.n());
   cblas_copy<T>(_n,x._X,1,_X,1);
};

/// Set all values to zero
template <typename T>
inline void Vector<T>::setZeros() {
   memset(_X,0,_n*sizeof(T));
};

/// resize the vector; contents are zeroed (no-op if the size matches)
template <typename T>
inline void Vector<T>::resize(const int n) {
   if (_n == n) return;
   clear();
#pragma omp critical
   {
      _X=new T[n];
   }
   _n=n;
   _externAlloc=false;
   this->setZeros();
};

/// change the data of the vector: wrap an external buffer (not owned)
template <typename T>
inline void Vector<T>::setPointer(T* X, const int n) {
   clear();
   _externAlloc=true;
   _X=X;
   _n=n;
};

/// put a random permutation of size n (for integral vectors)
template <>
inline void Vector<int>::randperm(int n) {
   resize(n);
   Vector<int> table(n);
   for (int i = 0; i<n; ++i)
      table[i]=i;
   int size=n;
   // draw without replacement, compacting the table as we go
   for (int i = 0; i<n; ++i) {
      const int ind=random() % size;
      _X[i]=table[ind];
      table[ind]=table[size-1];
      --size;
   }
};

/// put random values in the vector (white Gaussian Noise)
template <typename T>
inline void Vector<T>::setAleat() {
   for (int i = 0; i<_n; ++i) _X[i]=normalDistrib<T>();
};

/// clear the vector: free owned storage and reset to an empty state
template <typename T>
inline void Vector<T>::clear() {
   if (!_externAlloc) delete[](_X);
   _n=0;
   _X=NULL;
   _externAlloc=true;
};

/// performs soft-thresholding of the vector
template <typename T>
inline void Vector<T>::softThrshold(const T nu) {
   for (int i = 0; i<_n; ++i) {
      if (_X[i] > nu) {
         _X[i] -= nu;
      } else if (_X[i] < -nu) {
         _X[i] += nu;
      } else {
         _X[i] = T();
      }
   }
};

/// performs hard-thresholding of the vector
template <typename T>
inline void Vector<T>::hardThrshold(const T nu) {
   for (int i = 0; i<_n; ++i) {
      if (!(_X[i] > nu || _X[i] < -nu)) {
         _X[i] = 0;
      }
   }
};

/// clip each entry from below at nu
template <typename T>
inline void Vector<T>::thrsmax(const T nu) {
   for (int i = 0; i<_n; ++i)
      _X[i]=MAX(_X[i],nu);
}

/// clip each entry from above at nu
template <typename T>
inline void Vector<T>::thrsmin(const T nu) {
   for (int i = 0; i<_n; ++i)
      _X[i]=MIN(_X[i],nu);
}

/// clip each entry to the interval [-nu, nu]
template <typename T>
inline void Vector<T>::thrsabsmin(const T nu) {
   for (int i = 0; i<_n; ++i)
      _X[i]=MAX(MIN(_X[i],nu),-nu);
}

/// set to zero every entry whose magnitude is below nu
template <typename T>
inline void Vector<T>::thrshold(const T nu) {
   for (int i = 0; i<_n; ++i)
      if (abs<T>(_X[i]) < nu)
         _X[i]=0;
}

/// keep only the positive entries (negative ones are set to zero)
template <typename T>
inline void Vector<T>::thrsPos() {
   for (int i = 0; i<_n; ++i) {
      if (_X[i] < 0) _X[i]=0;
   }
};

/// returns true if every entry is true
template <>
inline bool Vector<bool>::alltrue() const {
   for (int i = 0; i<_n; ++i) {
      if (!_X[i]) return false;
   }
   return true;
};

/// returns true if every entry is false
template <>
inline bool Vector<bool>::allfalse() const {
   for (int i = 0; i<_n; ++i) {
      if (_X[i]) return false;
   }
   return true;
};

/// set each value of the vector to val
template <typename T>
inline void Vector<T>::set(const T val) {
   for (int i = 0; i<_n; ++i) _X[i]=val;
};

/// returns ||A||_2
template <typename T>
inline T Vector<T>::nrm2() const {
   return cblas_nrm2<T>(_n,_X,1);
};

/// returns ||A||_2^2
template <typename T>
inline T Vector<T>::nrm2sq() const {
   return cblas_dot<T>(_n,_X,1,_X,1);
};

/// returns A'x
template <typename T>
inline T Vector<T>::dot(const Vector<T>& x) const {
   assert(_n == x._n);
   return cblas_dot<T>(_n,_X,1,x._X,1);
};

/// returns A'x, when x is sparse
template <typename T>
inline T Vector<T>::dot(const
SpVector<T>& x) const { T sum=0; const T* v = x._v; const int* r = x._r; for (int i = 0; i<x._L; ++i) { sum += _X[r[i]]*v[i]; } return sum; }; /// A <- A + a*x template <typename T> inline void Vector<T>::add(const Vector<T>& x, const T a) { assert(_n == x._n); cblas_axpy<T>(_n,a,x._X,1,_X,1); }; /// A <- A + a*x template <typename T> inline void Vector<T>::add(const SpVector<T>& x, const T a) { if (a == 1.0) { for (int i = 0; i<x._L; ++i) _X[x._r[i]]+=x._v[i]; } else { for (int i = 0; i<x._L; ++i) _X[x._r[i]]+=a*x._v[i]; } }; /// adds a to each value in the vector template <typename T> inline void Vector<T>::add(const T a) { for (int i = 0; i<_n; ++i) _X[i]+=a; }; /// A <- A - x template <typename T> inline void Vector<T>::sub(const Vector<T>& x) { assert(_n == x._n); vSub<T>(_n,_X,x._X,_X); }; /// A <- A + a*x template <typename T> inline void Vector<T>::sub(const SpVector<T>& x) { for (int i = 0; i<x._L; ++i) _X[x._r[i]]-=x._v[i]; }; /// A <- A ./ x template <typename T> inline void Vector<T>::div(const Vector<T>& x) { assert(_n == x._n); vDiv<T>(_n,_X,x._X,_X); }; /// A <- x ./ y template <typename T> inline void Vector<T>::div(const Vector<T>& x, const Vector<T>& y) { assert(_n == x._n); vDiv<T>(_n,x._X,y._X,_X); }; /// A <- x .^ 2 template <typename T> inline void Vector<T>::sqr(const Vector<T>& x) { this->resize(x._n); vSqr<T>(_n,x._X,_X); } /// A <- x .^ 2 template <typename T> inline void Vector<T>::Invsqrt(const Vector<T>& x) { this->resize(x._n); vInvSqrt<T>(_n,x._X,_X); } /// A <- x .^ 2 template <typename T> inline void Vector<T>::Sqrt(const Vector<T>& x) { this->resize(x._n); vSqrt<T>(_n,x._X,_X); } /// A <- x .^ 2 template <typename T> inline void Vector<T>::Invsqrt() { vInvSqrt<T>(_n,_X,_X); } /// A <- x .^ 2 template <typename T> inline void Vector<T>::Sqrt() { vSqrt<T>(_n,_X,_X); } /// A <- 1./x template <typename T> inline void Vector<T>::inv(const Vector<T>& x) { this->resize(x.n()); vInv<T>(_n,x._X,_X); }; /// A <- 1./A template <typename T> 
inline void Vector<T>::inv() { vInv<T>(_n,_X,_X); }; /// A <- x .* y template <typename T> inline void Vector<T>::mult(const Vector<T>& x, const Vector<T>& y) { this->resize(x.n()); vMul<T>(_n,x._X,y._X,_X); }; ; /// normalize the vector template <typename T> inline void Vector<T>::normalize() { T norm=nrm2(); if (norm > EPSILON) scal(1.0/norm); }; /// normalize the vector template <typename T> inline void Vector<T>::normalize2() { T norm=nrm2(); if (norm > T(1.0)) scal(1.0/norm); }; /// whiten template <typename T> inline void Vector<T>::whiten( Vector<T>& meanv, const bool pattern) { if (pattern) { const int n =static_cast<int>(sqrt(static_cast<T>(_n))); int count[4]; for (int i = 0; i<4; ++i) count[i]=0; int offsetx=0; for (int j = 0; j<n; ++j) { offsetx= (offsetx+1) % 2; int offsety=0; for (int k = 0; k<n; ++k) { offsety= (offsety+1) % 2; meanv[2*offsetx+offsety]+=_X[j*n+k]; count[2*offsetx+offsety]++; } } for (int i = 0; i<4; ++i) meanv[i] /= count[i]; offsetx=0; for (int j = 0; j<n; ++j) { offsetx= (offsetx+1) % 2; int offsety=0; for (int k = 0; k<n; ++k) { offsety= (offsety+1) % 2; _X[j*n+k]-=meanv[2*offsetx+offsety]; } } } else { const int V = meanv.n(); const int sizePatch=_n/V; for (int j = 0; j<V; ++j) { T mean = 0; for (int k = 0; k<sizePatch; ++k) { mean+=_X[sizePatch*j+k]; } mean /= sizePatch; for (int k = 0; k<sizePatch; ++k) { _X[sizePatch*j+k]-=mean; } meanv[j]=mean; } } }; /// whiten template <typename T> inline void Vector<T>::whiten( Vector<T>& meanv, const Vector<T>& mask) { const int V = meanv.n(); const int sizePatch=_n/V; for (int j = 0; j<V; ++j) { T mean = 0; for (int k = 0; k<sizePatch; ++k) { mean+=_X[sizePatch*j+k]; } mean /= cblas_asum(sizePatch,mask._X+j*sizePatch,1); for (int k = 0; k<sizePatch; ++k) { if (mask[sizePatch*j+k]) _X[sizePatch*j+k]-=mean; } meanv[j]=mean; } }; /// whiten template <typename T> inline void Vector<T>::whiten(const int V) { const int sizePatch=_n/V; for (int j = 0; j<V; ++j) { T mean = 0; for (int k = 0; 
k<sizePatch; ++k) { mean+=_X[sizePatch*j+k]; } mean /= sizePatch; for (int k = 0; k<sizePatch; ++k) { _X[sizePatch*j+k]-=mean; } } }; template <typename T> inline T Vector<T>::KL(const Vector<T>& Y) { T sum = 0; T* prY = Y.rawX(); // Y.print("Y"); // this->print("X"); // stop(); for (int i = 0; i<_n; ++i) { if (_X[i] > 1e-20) { if (prY[i] < 1e-60) { sum += 1e200; } else { sum += _X[i]*log_alt<T>(_X[i]/prY[i]); } //sum += _X[i]*log_alt<T>(_X[i]/(prY[i]+1e-100)); } } sum += T(-1.0) + Y.sum(); return sum; }; /// unwhiten template <typename T> inline void Vector<T>::unwhiten( Vector<T>& meanv, const bool pattern) { if (pattern) { const int n =static_cast<int>(sqrt(static_cast<T>(_n))); int offsetx=0; for (int j = 0; j<n; ++j) { offsetx= (offsetx+1) % 2; int offsety=0; for (int k = 0; k<n; ++k) { offsety= (offsety+1) % 2; _X[j*n+k]+=meanv[2*offsetx+offsety]; } } } else { const int V = meanv.n(); const int sizePatch=_n/V; for (int j = 0; j<V; ++j) { T mean = meanv[j]; for (int k = 0; k<sizePatch; ++k) { _X[sizePatch*j+k]+=mean; } } } }; /// return the mean template <typename T> inline T Vector<T>::mean() { return this->sum()/_n; } /// return the std template <typename T> inline T Vector<T>::std() { T E = this->mean(); T std=0; for (int i = 0; i<_n; ++i) { T tmp=_X[i]-E; std += tmp*tmp; } std /= _n; return sqr_alt<T>(std); } /// scale the vector by a template <typename T> inline void Vector<T>::scal(const T a) { return cblas_scal<T>(_n,a,_X,1); }; /// A <- -A template <typename T> inline void Vector<T>::neg() { for (int i = 0; i<_n; ++i) _X[i]=-_X[i]; }; /// replace each value by its exponential template <typename T> inline void Vector<T>::exp() { vExp<T>(_n,_X,_X); }; /// replace each value by its logarithm template <typename T> inline void Vector<T>::log() { for (int i=0; i<_n; ++i) _X[i]=alt_log<T>(_X[i]); }; /// replace each value by its exponential template <typename T> inline void Vector<T>::logexp() { for (int i = 0; i<_n; ++i) { if (_X[i] < -30) { _X[i]=0; } else 
if (_X[i] < 30) { _X[i]= alt_log<T>( T(1.0) + exp_alt<T>( _X[i] ) ); } } }; /// replace each value by its exponential template <typename T> inline T Vector<T>::softmax(const int y) { this->add(-_X[y]); _X[y]=-INFINITY; T max=this->maxval(); if (max > 30) { return max; } else if (max < -30) { return 0; } else { _X[y]=T(0.0); this->exp(); return alt_log<T>(this->sum()); } }; /// computes the sum of the magnitudes of the vector template <typename T> inline T Vector<T>::asum() const { return cblas_asum<T>(_n,_X,1); }; template <typename T> inline T Vector<T>::lzero() const { int count=0; for (int i = 0; i<_n; ++i) if (_X[i] != 0) ++count; return count; }; template <typename T> inline T Vector<T>::afused() const { T sum = 0; for (int i = 1; i<_n; ++i) { sum += abs<T>(_X[i]-_X[i-1]); } return sum; } /// returns the sum of the vector template <typename T> inline T Vector<T>::sum() const { T sum=T(); for (int i = 0; i<_n; ++i) sum +=_X[i]; return sum; }; /// puts in signs, the sign of each point in the vector template <typename T> inline void Vector<T>::sign(Vector<T>& signs) const { T* prSign=signs.rawX(); for (int i = 0; i<_n; ++i) { if (_X[i] == 0) { prSign[i]=0.0; } else { prSign[i] = _X[i] > 0 ? 
1.0 : -1.0; } } }; /// projects the vector onto the l1 ball of radius thrs, /// returns true if the returned vector is null template <typename T> inline void Vector<T>::l1project(Vector<T>& out, const T thrs, const bool simplex) const { out.copy(*this); if (simplex) { out.thrsPos(); } else { vAbs<T>(_n,out._X,out._X); } T norm1 = out.sum(); if (norm1 <= thrs) { if (!simplex) out.copy(*this); return; } T* prU = out._X; int sizeU = _n; T sum = T(); int sum_card = 0; while (sizeU > 0) { // put the pivot in prU[0] swap(prU[0],prU[sizeU/2]); T pivot = prU[0]; int sizeG=1; T sumG=pivot; for (int i = 1; i<sizeU; ++i) { if (prU[i] >= pivot) { sumG += prU[i]; swap(prU[sizeG++],prU[i]); } } if (sum + sumG - pivot*(sum_card + sizeG) <= thrs) { sum_card += sizeG; sum += sumG; prU +=sizeG; sizeU -= sizeG; } else { ++prU; sizeU = sizeG-1; } } T lambda = (sum-thrs)/sum_card; out.copy(*this); if (simplex) { out.thrsPos(); } out.softThrshold(lambda); }; /// projects the vector onto the l1 ball of radius thrs, /// returns true if the returned vector is null template <typename T> inline void Vector<T>::l1project_weighted(Vector<T>& out, const Vector<T>& weights, const T thrs, const bool residual) const { out.copy(*this); if (thrs==0) { out.setZeros(); return; } vAbs<T>(_n,out._X,out._X); out.div(weights); Vector<int> keys(_n); for (int i = 0; i<_n; ++i) keys[i]=i; out.sort2(keys,false); T sum1=0; T sum2=0; T lambda=0; for (int i = 0; i<_n; ++i) { const T lambda_old=lambda; const T fact=weights[keys[i]]*weights[keys[i]]; lambda=out[i]; sum2 += fact; sum1 += fact*lambda; if (sum1 - lambda*sum2 >= thrs) { sum2-=fact; sum1-=fact*lambda; lambda=lambda_old; break; } } lambda=MAX(0,(sum1-thrs)/sum2); if (residual) { for (int i = 0; i<_n; ++i) { out._X[i]=_X[i] > 0 ? MIN(_X[i],lambda*weights[i]) : MAX(_X[i],-lambda*weights[i]); } } else { for (int i = 0; i<_n; ++i) { out._X[i]=_X[i] > 0 ? 
MAX(0,_X[i]-lambda*weights[i]) : MIN(0,_X[i]+lambda*weights[i]); } } }; template <typename T> inline void Vector<T>::project_sft_binary(const Vector<T>& y) { T mean = this->mean(); T thrs=mean; while (abs(mean) > EPSILON) { int n_seuils=0; for (int i = 0; i< _n; ++i) { _X[i] = _X[i]-thrs; const T val = y[i]*_X[i]; if (val > 0) { ++n_seuils; _X[i]=0; } else if (val < -1.0) { ++n_seuils; _X[i] = -y[i]; } } mean = this->mean(); thrs= mean * _n/(_n-n_seuils); } }; template <typename T> inline void Vector<T>::project_sft(const Vector<int>& labels, const int clas) { T mean = this->mean(); T thrs=mean; while (abs(mean) > EPSILON) { int n_seuils=0; for (int i = 0; i< _n; ++i) { _X[i] = _X[i]-thrs; if (labels[i]==clas) { if (_X[i] < -1.0) { _X[i]=-1.0; ++n_seuils; } } else { if (_X[i] < 0) { ++n_seuils; _X[i]=0; } } } mean = this->mean(); thrs= mean * _n/(_n-n_seuils); } }; template <typename T> inline void Vector<T>::sparseProject(Vector<T>& out, const T thrs, const int mode, const T lambda1, const T lambda2, const T lambda3, const bool pos) { if (mode == 1) { /// min_u ||b-u||_2^2 / ||u||_1 <= thrs this->l1project(out,thrs,pos); } else if (mode == 2) { /// min_u ||b-u||_2^2 / ||u||_2^2 + lambda1||u||_1 <= thrs if (lambda1 > 1e-10) { this->scal(lambda1); this->l1l2project(out,thrs,2.0/(lambda1*lambda1),pos); this->scal(T(1.0/lambda1)); out.scal(T(1.0/lambda1)); } else { out.copy(*this); out.normalize2(); out.scal(sqrt(thrs)); } } else if (mode == 3) { /// min_u ||b-u||_2^2 / ||u||_1 + (lambda1/2) ||u||_2^2 <= thrs this->l1l2project(out,thrs,lambda1,pos); } else if (mode == 4) { /// min_u 0.5||b-u||_2^2 + lambda1||u||_1 / ||u||_2^2 <= thrs out.copy(*this); if (pos) out.thrsPos(); out.softThrshold(lambda1); T nrm=out.nrm2sq(); if (nrm > thrs) out.scal(sqr_alt<T>(thrs/nrm)); } else if (mode == 5) { /// min_u 0.5||b-u||_2^2 + lambda1||u||_1 +lambda2 Fused(u) / ||u||_2^2 <= thrs // this->fusedProject(out,lambda1,lambda2,100); // T nrm=out.nrm2sq(); // if (nrm > thrs) // 
out.scal(sqr_alt<T>(thrs/nrm)); // } else if (mode == 6) { /// min_u 0.5||b-u||_2^2 + lambda1||u||_1 +lambda2 Fused(u) +0.5lambda_3 ||u||_2^2 this->fusedProjectHomotopy(out,lambda1,lambda2,lambda3,true); } else if (mode==6) { /// min_u ||b-u||_2^2 / lambda1||u||_1 +lambda2 Fused(u) + 0.5lambda3||u||_2^2 <= thrs this->fusedProjectHomotopy(out,lambda1/thrs,lambda2/thrs,lambda3/thrs,false); } else { /// min_u ||b-u||_2^2 / (1-lambda1)*||u||_2^2 + lambda1||u||_1 <= thrs if (lambda1 < 1e-10) { out.copy(*this); if (pos) out.thrsPos(); out.normalize2(); out.scal(sqrt(thrs)); } else if (lambda1 > 0.999999) { this->l1project(out,thrs,pos); } else { this->sparseProject(out,thrs/(1.0-lambda1),2,lambda1/(1-lambda1),0,0,pos); } } }; /// returns true if the returned vector is null template <typename T> inline void Vector<T>::l1l2projectb(Vector<T>& out, const T thrs, const T gamma, const bool pos, const int mode) { if (mode == 1) { /// min_u ||b-u||_2^2 / ||u||_2^2 + gamma ||u||_1 <= thrs this->scal(gamma); this->l1l2project(out,thrs,2.0/(gamma*gamma),pos); this->scal(T(1.0/gamma)); out.scal(T(1.0/gamma)); } else if (mode == 2) { /// min_u ||b-u||_2^2 / ||u||_1 + (gamma/2) ||u||_2^2 <= thrs this->l1l2project(out,thrs,gamma,pos); } else if (mode == 3) { /// min_u 0.5||b-u||_2^2 + gamma||u||_1 / ||u||_2^2 <= thrs out.copy(*this); if (pos) out.thrsPos(); out.softThrshold(gamma); T nrm=out.nrm2(); if (nrm > thrs) out.scal(thrs/nrm); } } /// returns true if the returned vector is null /// min_u ||b-u||_2^2 / ||u||_1 + (gamma/2) ||u||_2^2 <= thrs template <typename T> inline void Vector<T>::l1l2project(Vector<T>& out, const T thrs, const T gamma, const bool pos) const { if (gamma == 0) return this->l1project(out,thrs,pos); out.copy(*this); if (pos) { out.thrsPos(); } else { vAbs<T>(_n,out._X,out._X); } T norm = out.sum() + gamma*out.nrm2sq(); if (norm <= thrs) { if (!pos) out.copy(*this); return; } /// BEGIN T* prU = out._X; int sizeU = _n; T sum = 0; int sum_card = 0; while (sizeU > 
0) { // put the pivot in prU[0] swap(prU[0],prU[sizeU/2]); T pivot = prU[0]; int sizeG=1; T sumG=pivot+0.5*gamma*pivot*pivot; for (int i = 1; i<sizeU; ++i) { if (prU[i] >= pivot) { sumG += prU[i]+0.5*gamma*prU[i]*prU[i]; swap(prU[sizeG++],prU[i]); } } if (sum + sumG - pivot*(1+0.5*gamma*pivot)*(sum_card + sizeG) < thrs*(1+gamma*pivot)*(1+gamma*pivot)) { sum_card += sizeG; sum += sumG; prU +=sizeG; sizeU -= sizeG; } else { ++prU; sizeU = sizeG-1; } } T a = gamma*gamma*thrs+0.5*gamma*sum_card; T b = 2*gamma*thrs+sum_card; T c=thrs-sum; T delta = b*b-4*a*c; T lambda = (-b+sqrt(delta))/(2*a); out.copy(*this); if (pos) { out.thrsPos(); } out.softThrshold(lambda); out.scal(T(1.0/(1+lambda*gamma))); }; template <typename T> static inline T fusedHomotopyAux(const bool& sign1, const bool& sign2, const bool& sign3, const T& c1, const T& c2) { if (sign1) { if (sign2) { return sign3 ? 0 : c2; } else { return sign3 ? -c2-c1 : -c1; } } else { if (sign2) { return sign3 ? c1 : c1+c2; } else { return sign3 ? 
-c2 : 0; } } }; template <typename T> inline void Vector<T>::fusedProjectHomotopy(Vector<T>& alpha, const T lambda1,const T lambda2,const T lambda3, const bool penalty) { T* pr_DtR=_X; const int K = _n; alpha.setZeros(); Vector<T> u(K); // regularization path for gamma Vector<T> Du(K); // regularization path for alpha Vector<T> DDu(K); // regularization path for alpha Vector<T> gamma(K); // auxiliary variable Vector<T> c(K); // auxiliary variables Vector<T> scores(K); // auxiliary variables gamma.setZeros(); T* pr_gamma = gamma.rawX(); T* pr_u = u.rawX(); T* pr_Du = Du.rawX(); T* pr_DDu = DDu.rawX(); T* pr_c = c.rawX(); T* pr_scores = scores.rawX(); Vector<int> ind(K+1); Vector<bool> signs(K); ind.set(K); int* pr_ind = ind.rawX(); bool* pr_signs = signs.rawX(); /// Computation of DtR T sumBeta = this->sum(); /// first element is selected, gamma and alpha are updated pr_gamma[0]=sumBeta/K; /// update alpha alpha.set(pr_gamma[0]); /// update DtR this->sub(alpha); for (int j = K-2; j>=0; --j) pr_DtR[j] += pr_DtR[j+1]; pr_DtR[0]=0; pr_ind[0]=0; pr_signs[0] = pr_DtR[0] > 0; pr_c[0]=T(1.0)/K; int currentInd=this->fmax(); T currentLambda=abs<T>(pr_DtR[currentInd]); bool newAtom = true; /// Solve the Lasso using simplified LARS for (int i = 1; i<K; ++i) { /// exit if constraints are satisfied /// min_u ||b-u||_2^2 + lambda1||u||_1 +lambda2 Fused(u) + 0.5lambda3||u||_2^2 if (penalty && currentLambda <= lambda2) break; if (!penalty) { /// min_u ||b-u||_2^2 / lambda1||u||_1 +lambda2 Fused(u) + 0.5lambda3||u||_2^2 <= 1.0 scores.copy(alpha); scores.softThrshold(lambda1*currentLambda/lambda2); scores.scal(T(1.0/(1.0+lambda3*currentLambda/lambda2))); if (lambda1*scores.asum()+lambda2*scores.afused()+0.5* lambda3*scores.nrm2sq() >= T(1.0)) break; } /// Update pr_ind and pr_c if (newAtom) { int j; for (j = 1; j<i; ++j) if (pr_ind[j] > currentInd) break; for (int k = i; k>j; --k) { pr_ind[k]=pr_ind[k-1]; pr_c[k]=pr_c[k-1]; pr_signs[k]=pr_signs[k-1]; } pr_ind[j]=currentInd; 
pr_signs[j]=pr_DtR[currentInd] > 0; pr_c[j-1]=T(1.0)/(pr_ind[j]-pr_ind[j-1]); pr_c[j]=T(1.0)/(pr_ind[j+1]-pr_ind[j]); } // Compute u pr_u[0]= pr_signs[1] ? -pr_c[0] : pr_c[0]; if (i == 1) { pr_u[1]=pr_signs[1] ? pr_c[0]+pr_c[1] : -pr_c[0]-pr_c[1]; } else { pr_u[1]=pr_signs[1] ? pr_c[0]+pr_c[1] : -pr_c[0]-pr_c[1]; pr_u[1]+=pr_signs[2] ? -pr_c[1] : pr_c[1]; for (int j = 2; j<i; ++j) { pr_u[j]=2*fusedHomotopyAux<T>(pr_signs[j-1], pr_signs[j],pr_signs[j+1], pr_c[j-1],pr_c[j]); } pr_u[i] = pr_signs[i-1] ? -pr_c[i-1] : pr_c[i-1]; pr_u[i] += pr_signs[i] ? pr_c[i-1]+pr_c[i] : -pr_c[i-1]-pr_c[i]; } // Compute Du pr_Du[0]=pr_u[0]; for (int k = 1; k<pr_ind[1]; ++k) pr_Du[k]=pr_Du[0]; for (int j = 1; j<=i; ++j) { pr_Du[pr_ind[j]]=pr_Du[pr_ind[j]-1]+pr_u[j]; for (int k = pr_ind[j]+1; k<pr_ind[j+1]; ++k) pr_Du[k]=pr_Du[pr_ind[j]]; } /// Compute DDu DDu.copy(Du); for (int j = K-2; j>=0; --j) pr_DDu[j] += pr_DDu[j+1]; /// Check constraints T max_step1 = INFINITY; if (penalty) { max_step1 = currentLambda-lambda2; } /// Check changes of sign T max_step2 = INFINITY; int step_out = -1; for (int j = 1; j<=i; ++j) { T ratio = -pr_gamma[pr_ind[j]]/pr_u[j]; if (ratio > 0 && ratio <= max_step2) { max_step2=ratio; step_out=j; } } T max_step3 = INFINITY; /// Check new variables entering the active set for (int j = 1; j<K; ++j) { T sc1 = (currentLambda-pr_DtR[j])/(T(1.0)-pr_DDu[j]); T sc2 = (currentLambda+pr_DtR[j])/(T(1.0)+pr_DDu[j]); if (sc1 <= 1e-10) sc1=INFINITY; if (sc2 <= 1e-10) sc2=INFINITY; pr_scores[j]= MIN(sc1,sc2); } for (int j = 0; j<=i; ++j) { pr_scores[pr_ind[j]]=INFINITY; } currentInd = scores.fmin(); max_step3 = pr_scores[currentInd]; T step = MIN(max_step1,MIN(max_step3,max_step2)); if (step == 0 || step == INFINITY) break; /// Update gamma, alpha, DtR, currentLambda for (int j = 0; j<=i; ++j) { pr_gamma[pr_ind[j]]+=step*pr_u[j]; } alpha.add(Du,step); this->add(DDu,-step); currentLambda -= step; if (step == max_step2) { /// Update signs,pr_ind, pr_c for (int k = step_out; 
k<=i; ++k) pr_ind[k]=pr_ind[k+1]; pr_ind[i]=K; for (int k = step_out; k<=i; ++k) pr_signs[k]=pr_signs[k+1]; pr_c[step_out-1]=T(1.0)/(pr_ind[step_out]-pr_ind[step_out-1]); pr_c[step_out]=T(1.0)/(pr_ind[step_out+1]-pr_ind[step_out]); i-=2; newAtom=false; } else { newAtom=true; } } if (penalty) { alpha.softThrshold(lambda1); alpha.scal(T(1.0/(1.0+lambda3))); } else { alpha.softThrshold(lambda1*currentLambda/lambda2); alpha.scal(T(1.0/(1.0+lambda3*currentLambda/lambda2))); } }; template <typename T> inline void Vector<T>::fusedProject(Vector<T>& alpha, const T lambda1, const T lambda2, const int itermax) { T* pr_alpha= alpha.rawX(); T* pr_beta=_X; const int K = alpha.n(); T total_alpha =alpha.sum(); /// Modification of beta for (int i = K-2; i>=0; --i) pr_beta[i]+=pr_beta[i+1]; for (int i = 0; i<itermax; ++i) { T sum_alpha=0; T sum_diff = 0; /// Update first coordinate T gamma_old=pr_alpha[0]; pr_alpha[0]=(K*gamma_old+pr_beta[0]- total_alpha)/K; T diff = pr_alpha[0]-gamma_old; sum_diff += diff; sum_alpha += pr_alpha[0]; total_alpha +=K*diff; /// Update alpha_j for (int j = 1; j<K; ++j) { pr_alpha[j]+=sum_diff; T gamma_old=pr_alpha[j]-pr_alpha[j-1]; T gamma_new=softThrs((K-j)*gamma_old+pr_beta[j]- (total_alpha-sum_alpha),lambda2)/(K-j); pr_alpha[j]=pr_alpha[j-1]+gamma_new; T diff = gamma_new-gamma_old; sum_diff += diff; sum_alpha+=pr_alpha[j]; total_alpha +=(K-j)*diff; } } alpha.softThrshold(lambda1); }; /// sort the vector template <typename T> inline void Vector<T>::sort(const bool mode) { if (mode) { lasrt<T>(incr,_n,_X); } else { lasrt<T>(decr,_n,_X); } }; /// sort the vector template <typename T> inline void Vector<T>::sort(Vector<T>& out, const bool mode) const { out.copy(*this); out.sort(mode); }; template <typename T> inline void Vector<T>::sort2(Vector<int>& key, const bool mode) { quick_sort(key.rawX(),_X,0,_n-1,mode); }; template <typename T> inline void Vector<T>::sort2(Vector<T>& out, Vector<int>& key, const bool mode) const { out.copy(*this); 
out.sort2(key,mode); } template <typename T> inline void Vector<T>::applyBayerPattern(const int offset) { int sizePatch=_n/3; int n = static_cast<int>(sqrt(static_cast<T>(sizePatch))); if (offset == 0) { // R for (int i = 0; i<n; ++i) { const int step = (i % 2) ? 1 : 2; const int off = (i % 2) ? 0 : 1; for (int j = off; j<n; j+=step) { _X[i*n+j]=0; } } // G for (int i = 0; i<n; ++i) { const int step = 2; const int off = (i % 2) ? 1 : 0; for (int j = off; j<n; j+=step) { _X[sizePatch+i*n+j]=0; } } // B for (int i = 0; i<n; ++i) { const int step = (i % 2) ? 2 : 1; const int off = 0; for (int j = off; j<n; j+=step) { _X[2*sizePatch+i*n+j]=0; } } } else if (offset == 1) { // R for (int i = 0; i<n; ++i) { const int step = (i % 2) ? 2 : 1; const int off = (i % 2) ? 1 : 0; for (int j = off; j<n; j+=step) { _X[i*n+j]=0; } } // G for (int i = 0; i<n; ++i) { const int step = 2; const int off = (i % 2) ? 0 : 1; for (int j = off; j<n; j+=step) { _X[sizePatch+i*n+j]=0; } } // B for (int i = 0; i<n; ++i) { const int step = (i % 2) ? 1 : 2; const int off = 0; for (int j = off; j<n; j+=step) { _X[2*sizePatch+i*n+j]=0; } } } else if (offset == 2) { // R for (int i = 0; i<n; ++i) { const int step = (i % 2) ? 1 : 2; const int off = 0; for (int j = off; j<n; j+=step) { _X[i*n+j]=0; } } // G for (int i = 0; i<n; ++i) { const int step = 2; const int off = (i % 2) ? 0 : 1; for (int j = off; j<n; j+=step) { _X[sizePatch+i*n+j]=0; } } // B for (int i = 0; i<n; ++i) { const int step = (i % 2) ? 2 : 1; const int off = (i % 2) ? 1 : 0; for (int j = off; j<n; j+=step) { _X[2*sizePatch+i*n+j]=0; } } } else if (offset == 3) { // R for (int i = 0; i<n; ++i) { const int step = (i % 2) ? 2 : 1; const int off = 0; for (int j = off; j<n; j+=step) { _X[i*n+j]=0; } } // G for (int i = 0; i<n; ++i) { const int step = 2; const int off = (i % 2) ? 1 : 0; for (int j = off; j<n; j+=step) { _X[sizePatch+i*n+j]=0; } } // B for (int i = 0; i<n; ++i) { const int step = (i % 2) ? 1 : 2; const int off = (i % 2) ? 
0 : 1; for (int j = off; j<n; j+=step) { _X[2*sizePatch+i*n+j]=0; } } } }; /// make a sparse copy template <typename T> inline void Vector<T>::toSparse( SpVector<T>& vec) const { int L=0; T* v = vec._v; int* r = vec._r; for (int i = 0; i<_n; ++i) { if (_X[i] != T()) { v[L]=_X[i]; r[L++]=i; } } vec._L=L; }; template <typename T> inline void Vector<T>::copyMask(Vector<T>& out, Vector<bool>& mask) const { out.resize(_n); int pointer=0; for (int i = 0; i<_n; ++i) { if (mask[i]) out[pointer++]=_X[i]; } out.setn(pointer); }; template <typename T> inline void Matrix<T>::copyMask(Matrix<T>& out, Vector<bool>& mask) const { out.resize(_m,_n); int count=0; for (int i = 0; i<mask.n(); ++i) if (mask[i]) ++count; out.setm(count); for (int i = 0; i<_n; ++i) { int pointer=0; for (int j = 0; j<_m; ++j) { if (mask[j]) { out[i*count+pointer]=_X[i*_m+j]; ++pointer; } } } }; /* **************************** * Implementation of SpMatrix * ****************************/ /// Constructor, CSC format, existing data template <typename T> SpMatrix<T>::SpMatrix(T* v, int* r, int* pB, int* pE, int m, int n, int nzmax) : _externAlloc(true), _v(v), _r(r), _pB(pB), _pE(pE), _m(m), _n(n), _nzmax(nzmax) { }; /// Constructor, new m x n matrix, with at most nzmax non-zeros values template <typename T> SpMatrix<T>::SpMatrix(int m, int n, int nzmax) : _externAlloc(false), _m(m), _n(n), _nzmax(nzmax) { #pragma omp critical { _v=new T[nzmax]; _r=new int[nzmax]; _pB=new int[_n+1]; } _pE=_pB+1; }; /// Empty constructor template <typename T> SpMatrix<T>::SpMatrix() : _externAlloc(true), _v(NULL), _r(NULL), _pB(NULL), _pE(NULL), _m(0),_n(0),_nzmax(0) { }; template <typename T> inline void SpMatrix<T>::copy(const SpMatrix<T>& mat) { this->resize(mat._m,mat._n,mat._nzmax); memcpy(_v,mat._v,_nzmax*sizeof(T)); memcpy(_r,mat._r,_nzmax*sizeof(int)); memcpy(_pB,mat._pB,(_n+1)*sizeof(int)); } /// Destructor template <typename T> SpMatrix<T>::~SpMatrix() { clear(); }; /// reference the column i into vec template 
<typename T> inline void SpMatrix<T>::refCol(int i, SpVector<T>& vec) const { if (vec._nzmax > 0) vec.clear(); vec._v=_v+_pB[i]; vec._r=_r+_pB[i]; vec._externAlloc=true; vec._L=_pE[i]-_pB[i]; vec._nzmax=vec._L; }; /// print the sparse matrix template<typename T> inline void SpMatrix<T>::print(const string& name) const { cerr << name << endl; cerr << _m << " x " << _n << " , " << _nzmax << endl; for (int i = 0; i<_n; ++i) { for (int j = _pB[i]; j<_pE[i]; ++j) { cerr << "(" <<_r[j] << "," << i << ") = " << _v[j] << endl; } } }; template<typename T> inline T SpMatrix<T>::operator[](const int index) const { const int num_col=(index/_m); const int num_row=index -num_col*_m; T val = 0; for (int j = _pB[num_col]; j<_pB[num_col+1]; ++j) { if (_r[j]==num_row) { val=_v[j]; break; } } return val; }; template<typename T> void SpMatrix<T>::getData(Vector<T>& data, const int index) const { data.resize(_m); data.setZeros(); for (int i = _pB[index]; i< _pB[index+1]; ++i) data[_r[i]]=_v[i]; }; template<typename T> void SpMatrix<T>::getGroup(Matrix<T>& data, const vector_groups& groups, const int i) const { const group& gr = groups[i]; const int N = gr.size(); data.resize(_m,N); int count=0; Vector<T> col; for (group::const_iterator it = gr.begin(); it != gr.end(); ++it) { data.refCol(count,col); this->getData(col,*it); ++count; } }; /// compute the sum of the matrix elements template <typename T> inline T SpMatrix<T>::asum() const { return cblas_asum<T>(_pB[_n],_v,1); }; /// compute the sum of the matrix elements template <typename T> inline T SpMatrix<T>::normFsq() const { return cblas_dot<T>(_pB[_n],_v,1,_v,1); }; template <typename T> inline void SpMatrix<T>::add_direct(const SpMatrix<T>& mat, const T a) { Vector<T> v2(mat._v,mat._nzmax); Vector<T> v1(_v,_nzmax); v1.add(v2,a); } template <typename T> inline void SpMatrix<T>::copy_direct(const SpMatrix<T>& mat) { Vector<T> v2(mat._v,_pB[_n]); Vector<T> v1(_v,_pB[_n]); v1.copy(v2); } template <typename T> inline T 
SpMatrix<T>::dot_direct(const SpMatrix<T>& mat) const { Vector<T> v2(mat._v,_pB[_n]); Vector<T> v1(_v,_pB[_n]); return v1.dot(v2); } /// clear the matrix template <typename T> inline void SpMatrix<T>::clear() { if (!_externAlloc) { delete[](_r); delete[](_v); delete[](_pB); } _n=0; _m=0; _nzmax=0; _v=NULL; _r=NULL; _pB=NULL; _pE=NULL; _externAlloc=true; }; /// resize the matrix template <typename T> inline void SpMatrix<T>::resize(const int m, const int n, const int nzmax) { if (n == _n && m == _m && nzmax == _nzmax) return; this->clear(); _n=n; _m=m; _nzmax=nzmax; _externAlloc=false; #pragma omp critical { _v = new T[nzmax]; _r = new int[nzmax]; _pB = new int[_n+1]; } _pE = _pB+1; for (int i = 0; i<=_n; ++i) _pB[i]=0; }; /// resize the matrix template <typename T> inline void SpMatrix<T>::scal(const T a) const { cblas_scal<T>(_pB[_n],a,_v,1); }; /// y <- A'*x template <typename T> inline void SpMatrix<T>::multTrans(const Vector<T>& x, Vector<T>& y, const T alpha, const T beta) const { y.resize(_n); if (beta) { y.scal(beta); } else { y.setZeros(); } const T* prX = x.rawX(); for (int i = 0; i<_n; ++i) { T sum=T(); for (int j = _pB[i]; j<_pE[i]; ++j) { sum+=_v[j]*prX[_r[j]]; } y[i] += alpha*sum; } }; /// perform b = alpha*A*x + beta*b, when x is sparse template <typename T> inline void SpMatrix<T>::multTrans(const SpVector<T>& x, Vector<T>& y, const T alpha, const T beta) const { y.resize(_n); if (beta) { y.scal(beta); } else { y.setZeros(); } T* prY = y.rawX(); SpVector<T> col; for (int i = 0; i<_n; ++i) { this->refCol(i,col); prY[i] += alpha*x.dot(col); } }; /// y <- A*x template <typename T> inline void SpMatrix<T>::mult(const Vector<T>& x, Vector<T>& y, const T alpha, const T beta) const { y.resize(_m); if (beta) { y.scal(beta); } else { y.setZeros(); } const T* prX = x.rawX(); for (int i = 0; i<_n; ++i) { T sca=alpha* prX[i]; for (int j = _pB[i]; j<_pE[i]; ++j) { y[_r[j]] += sca*_v[j]; } } }; /// perform b = alpha*A*x + beta*b, when x is sparse template 
<typename T> inline void SpMatrix<T>::mult(const SpVector<T>& x, Vector<T>& y, const T alpha, const T beta) const { y.resize(_m); if (beta) { y.scal(beta); } else { y.setZeros(); } T* prY = y.rawX(); for (int i = 0; i<x.L(); ++i) { int ind=x.r(i); T val = alpha * x.v(i); for (int j = _pB[ind]; j<_pE[ind]; ++j) { prY[_r[j]] += val *_v[j]; } } }; /// perform C = a*A*B + b*C, possibly transposing A or B. template <typename T> inline void SpMatrix<T>::mult(const Matrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { if (transA) { if (transB) { C.resize(_n,B.m()); if (b) { C.scal(b); } else { C.setZeros(); } SpVector<T> tmp; Vector<T> row(B.m()); for (int i = 0; i<_n; ++i) { this->refCol(i,tmp); B.mult(tmp,row); C.addRow(i,row,a); } } else { C.resize(_n,B.n()); if (b) { C.scal(b); } else { C.setZeros(); } SpVector<T> tmp; Vector<T> row(B.n()); for (int i = 0; i<_n; ++i) { this->refCol(i,tmp); B.multTrans(tmp,row); C.addRow(i,row,a); } } } else { if (transB) { C.resize(_m,B.m()); if (b) { C.scal(b); } else { C.setZeros(); } Vector<T> row(B.n()); Vector<T> col; for (int i = 0; i<B.m(); ++i) { B.copyRow(i,row); C.refCol(i,col); this->mult(row,col,a,T(1.0)); } } else { C.resize(_m,B.n()); if (b) { C.scal(b); } else { C.setZeros(); } Vector<T> colB; Vector<T> colC; for (int i = 0; i<B.n(); ++i) { B.refCol(i,colB); C.refCol(i,colC); this->mult(colB,colC,a,T(1.0)); } } } }; /// perform C = a*A*B + b*C, possibly transposing A or B. 
/// C = a*op(A)*op(B) + b*C for sparse A (this) and sparse B, where op(X) is
/// X or X' according to transA/transB. Each branch resizes C, applies the
/// b*C term (scal or zero-fill), then accumulates the product:
///   - A'*B' : row i of C receives a * (B * A(:,i))
///   - A'*B  : row i of C receives a * (B' * A(:,i))
///   - A*B'  : sum over i of sparse rank-1 updates A(:,i)*B(:,i)'
///   - A*B   : column i of C accumulates a * A * B(:,i)
template <typename T>
inline void SpMatrix<T>::mult(const SpMatrix<T>& B, Matrix<T>& C,
      const bool transA, const bool transB, const T a, const T b) const {
   if (transA) {
      if (transB) {
         C.resize(_n,B.m());
         if (b) { C.scal(b); } else { C.setZeros(); }
         SpVector<T> tmp;
         Vector<T> row(B.m());
         for (int i = 0; i<_n; ++i) {
            this->refCol(i,tmp);
            B.mult(tmp,row);
            C.addRow(i,row,a);
         }
      } else {
         C.resize(_n,B.n());
         if (b) { C.scal(b); } else { C.setZeros(); }
         SpVector<T> tmp;
         Vector<T> row(B.n());
         for (int i = 0; i<_n; ++i) {
            this->refCol(i,tmp);
            B.multTrans(tmp,row);
            C.addRow(i,row,a);
         }
      }
   } else {
      if (transB) {
         C.resize(_m,B.m());
         if (b) { C.scal(b); } else { C.setZeros(); }
         SpVector<T> colB;
         SpVector<T> colA;
         for (int i = 0; i<_n; ++i) {
            this->refCol(i,colA);
            B.refCol(i,colB);
            C.rank1Update(colA,colB,a);
         }
      } else {
         C.resize(_m,B.n());
         if (b) { C.scal(b); } else { C.setZeros(); }
         SpVector<T> colB;
         Vector<T> colC;
         for (int i = 0; i<B.n(); ++i) {
            B.refCol(i,colB);
            C.refCol(i,colC);
            // Fix: pass beta = 1 so a*A*B(:,i) accumulates on top of the b*C
            // contribution already written into colC. The previous call
            // relied on mult's default beta (0.0 in the AbstractMatrixB
            // interface), which silently discarded b*C in this branch; the
            // dense-B overload passes T(1.0) at the same spot.
            this->mult(colB,colC,a,T(1.0));
         }
      }
   }
};
/// perform C = a*B*A + b*C, possibly transposing A or B.
/// C = a*B*A + b*C (or transposed variants): delegate to the dense-matrix
/// mult with the two operands' roles swapped.
template <typename T> inline void SpMatrix<T>::multSwitch(const Matrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { B.mult(*this,C,transB,transA,a,b); };
/// Inner product of this sparse matrix with a dense matrix x.
/// NOTE(review): x(_r[j],j) uses the nonzero slot j as the column index;
/// x(_r[j],i) (the current column i) looks intended -- TODO confirm.
template <typename T> inline T SpMatrix<T>::dot(const Matrix<T>& x) const { T sum=0; for (int i = 0; i<_n; ++i) for (int j = _pB[i]; j<_pE[i]; ++j) { sum+=_v[j]*x(_r[j],j); } return sum; };
/// densify row `ind` into x (length _n); relies on row indices being sorted
/// within each column (early break once _r[j] > ind)
template <typename T> inline void SpMatrix<T>::copyRow(const int ind, Vector<T>& x) const { x.resize(_n); x.setZeros(); for (int i = 0; i<_n; ++i) { for (int j = _pB[i]; j<_pE[i]; ++j) { if (_r[j]==ind) { x[i]=_v[j]; } else if (_r[j] > ind) { break; } } } };
/// add a*vec (indexed by row) to every stored nonzero, column by column;
/// the a == 1 case is special-cased to skip the multiply
template <typename T> inline void SpMatrix<T>::addVecToCols( const Vector<T>& vec, const T a) { const T* pr_vec = vec.rawX(); if (isEqual(a,T(1.0))) { for (int i = 0; i<_n; ++i) for (int j = _pB[i]; j<_pE[i]; ++j) _v[j] += pr_vec[_r[j]]; } else { for (int i = 0; i<_n; ++i) for (int j = _pB[i]; j<_pE[i]; ++j) _v[j] += a*pr_vec[_r[j]]; } };
/// same as addVecToCols, but each nonzero is additionally scaled by a weight
/// indexed by its position within the column (j - _pB[i])
template <typename T> inline void SpMatrix<T>::addVecToColsWeighted( const Vector<T>& vec, const T* weights, const T a) { const T* pr_vec = vec.rawX(); if (isEqual(a,T(1.0))) { for (int i = 0; i<_n; ++i) for (int j = _pB[i]; j<_pE[i]; ++j) _v[j] += pr_vec[_r[j]]*weights[j-_pB[i]]; } else { for (int i = 0; i<_n; ++i) for (int j = _pB[i]; j<_pE[i]; ++j) _v[j] += a*pr_vec[_r[j]]*weights[j-_pB[i]]; } };
/// sum <- sum of all columns (dense result of length _m)
template <typename T> inline void SpMatrix<T>::sum_cols(Vector<T>& sum) const { sum.resize(_m); sum.setZeros(); SpVector<T> tmp; for (int i = 0; i<_n; ++i) { this->refCol(i,tmp); sum.add(tmp); } };
/// aat <- A*A'. Each OpenMP thread accumulates the lower triangle of the
/// outer products of the columns into its own K*K scratch slab (aatT); the
/// per-thread slabs are then reduced with axpy and mirrored by fillSymmetric().
template <typename T> inline void SpMatrix<T>::AAt(Matrix<T>& aat) const { int i,j,k; int K=_m; int M=_n; /* compute alpha alpha^T */ aat.resize(K,K); int NUM_THREADS=init_omp(MAX_THREADS); T* aatT=new T[NUM_THREADS*K*K]; for (j = 0; j<NUM_THREADS*K*K; ++j) aatT[j]=T();
#pragma omp parallel for private(i,j,k)
for (i = 0; i<M; ++i) {
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
T* write_area=aatT+numT*K*K; for (j = _pB[i]; j<_pE[i]; ++j) { for (k = _pB[i]; k<=j; ++k) { write_area[_r[j]*K+_r[k]]+=_v[j]*_v[k]; } } } cblas_copy<T>(K*K,aatT,1,aat._X,1); for (i = 1; i<NUM_THREADS; ++i) cblas_axpy<T>(K*K,1.0,aatT+K*K*i,1,aat._X,1); aat.fillSymmetric(); delete[](aatT); }
/// XtX <- A'*A, built one column at a time via multTrans
template <typename T> inline void SpMatrix<T>::XtX(Matrix<T>& XtX) const { XtX.resize(_n,_n); XtX.setZeros(); SpVector<T> col; Vector<T> col_out; for (int i = 0; i<_n; ++i) { this->refCol(i,col); XtX.refCol(i,col_out); this->multTrans(col,col_out); } };
/// aat <- A(:,indices)*A(:,indices)' -- same scheme as AAt() but restricted
/// to the selected columns
template <typename T> inline void SpMatrix<T>::AAt(Matrix<T>& aat, const Vector<int>& indices) const { int i,j,k; int K=_m; int M=indices.n(); /* compute alpha alpha^T */ aat.resize(K,K); int NUM_THREADS=init_omp(MAX_THREADS); T* aatT=new T[NUM_THREADS*K*K]; for (j = 0; j<NUM_THREADS*K*K; ++j) aatT[j]=T();
#pragma omp parallel for private(i,j,k)
for (i = 0; i<M; ++i) { int ii = indices[i];
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
T* write_area=aatT+numT*K*K; for (j = _pB[ii]; j<_pE[ii]; ++j) { for (k = _pB[ii]; k<=j; ++k) { write_area[_r[j]*K+_r[k]]+=_v[j]*_v[k]; } } } cblas_copy<T>(K*K,aatT,1,aat._X,1); for (i = 1; i<NUM_THREADS; ++i) cblas_axpy<T>(K*K,1.0,aatT+K*K*i,1,aat._X,1); aat.fillSymmetric(); delete[](aatT); }
/// aat <- sum_i w_i A(:,i)*A(:,i)' -- weighted variant of AAt()
template <typename T> inline void SpMatrix<T>::wAAt(const Vector<T>& w, Matrix<T>& aat) const { int i,j,k; int K=_m; int M=_n; /* compute alpha alpha^T */ aat.resize(K,K); int NUM_THREADS=init_omp(MAX_THREADS); T* aatT=new T[NUM_THREADS*K*K]; for (j = 0; j<NUM_THREADS*K*K; ++j) aatT[j]=T();
#pragma omp parallel for private(i,j,k)
for (i = 0; i<M; ++i) {
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
T* write_area=aatT+numT*K*K; for (j = _pB[i]; j<_pE[i]; ++j) { for (k = _pB[i]; k<=j; ++k) { write_area[_r[j]*K+_r[k]]+=w._X[i]*_v[j]*_v[k]; } } }
cblas_copy<T>(K*K,aatT,1,aat._X,1); for (i = 1; i<NUM_THREADS; ++i) cblas_axpy<T>(K*K,1.0,aatT+K*K*i,1,aat._X,1); aat.fillSymmetric(); delete[](aatT); }
/// XAt <- X*A': accumulate _v[j] * X(:,i) into row-block _r[j] of a
/// per-thread scratch slab, then reduce across threads
template <typename T> inline void SpMatrix<T>::XAt(const Matrix<T>& X, Matrix<T>& XAt) const { int j,i; int n=X._m; int K=_m; int M=_n; XAt.resize(n,K); /* compute X alpha^T */ int NUM_THREADS=init_omp(MAX_THREADS); T* XatT=new T[NUM_THREADS*n*K]; for (j = 0; j<NUM_THREADS*n*K; ++j) XatT[j]=T();
#pragma omp parallel for private(i,j)
for (i = 0; i<M; ++i) {
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
T* write_area=XatT+numT*n*K; for (j = _pB[i]; j<_pE[i]; ++j) { cblas_axpy<T>(n,_v[j],X._X+i*n,1,write_area+_r[j]*n,1); } } cblas_copy<T>(n*K,XatT,1,XAt._X,1); for (i = 1; i<NUM_THREADS; ++i) cblas_axpy<T>(n*K,1.0,XatT+n*K*i,1,XAt._X,1); delete[](XatT); };
/// XAt <- X(:,indices)*A(:,indices)' -- column-restricted variant.
/// NOTE(review): X is indexed with the loop counter (X._X+i*n) while A uses
/// indices[i]; presumably X has one column per selected index -- confirm.
template <typename T> inline void SpMatrix<T>::XAt(const Matrix<T>& X, Matrix<T>& XAt, const Vector<int>& indices) const { int j,i; int n=X._m; int K=_m; int M=indices.n(); XAt.resize(n,K); /* compute X alpha^T */ int NUM_THREADS=init_omp(MAX_THREADS); T* XatT=new T[NUM_THREADS*n*K]; for (j = 0; j<NUM_THREADS*n*K; ++j) XatT[j]=T();
#pragma omp parallel for private(i,j)
for (i = 0; i<M; ++i) { int ii = indices[i];
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
T* write_area=XatT+numT*n*K; for (j = _pB[ii]; j<_pE[ii]; ++j) { cblas_axpy<T>(n,_v[j],X._X+i*n,1,write_area+_r[j]*n,1); } } cblas_copy<T>(n*K,XatT,1,XAt._X,1); for (i = 1; i<NUM_THREADS; ++i) cblas_axpy<T>(n*K,1.0,XatT+n*K*i,1,XAt._X,1); delete[](XatT); };
/// XAt <- sum_i w_i X(:,i)*A(:,i)'. A's _n must be an integer multiple
/// (numRepX) of X's column count (asserted); zero weights are skipped.
template <typename T> inline void SpMatrix<T>::wXAt(const Vector<T>& w, const Matrix<T>& X, Matrix<T>& XAt, const int numThreads) const { int j,l,i; int n=X._m; int K=_m; int M=_n; int Mx = X._n; int numRepX= M/Mx; assert(numRepX*Mx == M); XAt.resize(n,K); /* compute X alpha^T */ int NUM_THREADS=init_omp(numThreads); T*
// (continuation of wXAt -- the `T*` declarator closes the previous line)
XatT=new T[NUM_THREADS*n*K]; for (j = 0; j<NUM_THREADS*n*K; ++j) XatT[j]=T();
#pragma omp parallel for private(i,j,l)
for (i = 0; i<Mx; ++i) {
#ifdef _OPENMP
int numT=omp_get_thread_num();
#else
int numT=0;
#endif
T * write_area=XatT+numT*n*K; for (l = 0; l<numRepX; ++l) { int ind=numRepX*i+l; if (w._X[ind] != 0) for (j = _pB[ind]; j<_pE[ind]; ++j) { cblas_axpy<T>(n,w._X[ind]*_v[j],X._X+i*n,1,write_area+_r[j]*n,1); } } } cblas_copy<T>(n*K,XatT,1,XAt._X,1); for (i = 1; i<NUM_THREADS; ++i) cblas_axpy<T>(n*K,1.0,XatT+n*K*i,1,XAt._X,1); delete[](XatT); };
/// copy the sparse matrix into a dense column-major matrix
template<typename T> inline void SpMatrix<T>::toFull(Matrix<T>& matrix) const { matrix.resize(_m,_n); matrix.setZeros(); T* out = matrix._X; for (int i=0; i<_n; ++i) { for (int j = _pB[i]; j<_pE[i]; ++j) { out[i*_m+_r[j]]=_v[j]; } } };
/// copy the transpose of the sparse matrix into a dense matrix (_n x _m)
template <typename T> inline void SpMatrix<T>::toFullTrans( Matrix<T>& matrix) const { matrix.resize(_n,_m); matrix.setZeros(); T* out = matrix._X; for (int i=0; i<_n; ++i) { for (int j = _pB[i]; j<_pE[i]; ++j) { out[i+_r[j]*_n]=_v[j]; } } };
/// build this K x M sparse matrix from per-column value/row-index matrices;
/// a row index of -1 terminates a column, and each column is sorted by row
template <typename T> inline void SpMatrix<T>::convert(const Matrix<T>&vM, const Matrix<int>& rM, const int K) { const int M = rM.n(); const int L = rM.m(); const int* r = rM.X(); const T* v = vM.X(); int count=0; for (int i = 0; i<M*L; ++i) if (r[i] != -1) ++count; resize(K,M,count); count=0; for (int i = 0; i<M; ++i) { _pB[i]=count; for (int j = 0; j<L; ++j) { if (r[i*L+j] == -1) break; _v[count]=v[i*L+j]; _r[count++]=r[i*L+j]; } _pE[i]=count; } for (int i = 0; i<M; ++i) sort(_r,_v,_pB[i],_pE[i]-1); };
/// same as convert but every column shares one row-index vector rv.
/// NOTE(review): assumes the -1 entries of rv are trailing, so the first LL
/// indices apply to every column -- TODO confirm with callers.
template <typename T> inline void SpMatrix<T>::convert2( const Matrix<T>&vM, const Vector<int>& rv, const int K) { const int M = vM.n(); const int L = vM.m(); int* r = rv.rawX(); const T* v = vM.X(); int LL=0; for (int i = 0; i<L; ++i) if (r[i] != -1) ++LL;
this->resize(K,M,LL*M); int count=0; for (int i = 0; i<M; ++i) { _pB[i]=count; for (int j = 0; j<LL; ++j) { _v[count]=v[i*L+j]; _r[count++]=r[j]; } _pE[i]=count; } for (int i = 0; i<M; ++i) sort(_r,_v,_pB[i],_pE[i]-1); };
/// returns the squared l2 norms of the columns
template <typename T> inline void SpMatrix<T>::norm_2sq_cols(Vector<T>& norms) const { norms.resize(_n); SpVector<T> col; for (int i = 0; i<_n; ++i) { this->refCol(i,col); norms[i] = col.nrm2sq(); } };
/// returns the number of stored nonzeros per column (l0 pseudo-norm)
template <typename T> inline void SpMatrix<T>::norm_0_cols(Vector<T>& norms) const { norms.resize(_n); SpVector<T> col; for (int i = 0; i<_n; ++i) { this->refCol(i,col); norms[i] = static_cast<T>(col.length()); } };
/// returns the l1 norms of the columns
template <typename T> inline void SpMatrix<T>::norm_1_cols(Vector<T>& norms) const { norms.resize(_n); SpVector<T> col; for (int i = 0; i<_n; ++i) { this->refCol(i,col); norms[i] =col.asum(); } };
/* ***************************
 * Implementation of SpVector
 * ***************************/
/// Constructor, of the sparse vector of size L.
/// Constructor wrapping external storage (v = values, r = indices); the
/// buffers are not owned and will not be freed
template <typename T> SpVector<T>::SpVector(T* v, int* r, int L, int nzmax) : _externAlloc(true), _v(v), _r(r), _L(L), _nzmax(nzmax) { };
/// Constructor, allocates nzmax slots (owned; freed by clear()/destructor)
template <typename T> SpVector<T>::SpVector(int nzmax) : _externAlloc(false), _L(0), _nzmax(nzmax) {
// NOTE(review): allocation serialized, presumably for use inside OpenMP
// regions -- same convention as SpMatrix::resize
#pragma omp critical
{ _v = new T[nzmax]; _r = new int[nzmax]; } };
/// Empty constructor
template <typename T> SpVector<T>::SpVector() : _externAlloc(true), _v(NULL), _r(NULL), _L(0), _nzmax(0) { };
/// Destructor: releases owned storage via clear()
template <typename T> SpVector<T>::~SpVector() { clear(); };
/// computes the sum of the magnitudes of the stored elements (l1 norm)
template <typename T> inline T SpVector<T>::asum() const { return cblas_asum<T>(_L,_v,1); };
/// computes the squared l2 norm of the vector
template <typename T> inline T SpVector<T>::nrm2sq() const { return cblas_dot<T>(_L,_v,1,_v,1); };
/// computes the l2 norm of the vector
template <typename T> inline T SpVector<T>::nrm2() const { return cblas_nrm2<T>(_L,_v,1); };
/// largest magnitude among the stored values (delegates to dense fmaxval)
template <typename T> inline T SpVector<T>::fmaxval() const { Vector<T> tmp(_v,_L); return tmp.fmaxval(); };
/// print the vector (capacity then index/value pairs) to std::cerr
template <typename T> inline void SpVector<T>::print(const string& name) const { std::cerr << name << std::endl; std::cerr << _nzmax << std::endl; for (int i = 0; i<_L; ++i) cerr << "(" <<_r[i] << ", " << _v[i] << ")" << endl; };
/// create a reference on the index array (no copy)
template <typename T> inline void SpVector<T>::refIndices( Vector<int>& indices) const { indices.setPointer(_r,_L); };
/// creates a reference on the value array (no copy)
template <typename T> inline void SpVector<T>::refVal( Vector<T>& val) const { val.setPointer(_v,_L); };
/// a <- a.^2 (element-wise square of the stored values)
template <typename T> inline void SpVector<T>::sqr() { vSqr<T>(_L,_v,_v); };
/// sparse-sparse dot product via a sorted-merge walk over both index arrays;
/// requires the index arrays of both vectors to be sorted ascending
template <typename T> inline T SpVector<T>::dot(const SpVector<T>& vec) const { T sum=T(); int countI = 0; int countJ = 0; while (countI < _L && countJ < vec._L) { const int rI = _r[countI]; const int rJ =
vec._r[countJ]; if (rI > rJ) { ++countJ; } else if (rJ > rI) { ++countI; } else { sum+=_v[countI]*vec._v[countJ]; ++countI; ++countJ; } } return sum; };
/// clears the vector: free owned buffers and reset to the empty state
template <typename T> inline void SpVector<T>::clear() { if (!_externAlloc) { delete[](_v); delete[](_r); } _v=NULL; _r=NULL; _L=0; _nzmax=0; _externAlloc=true; };
/// resizes the capacity to nzmax (contents discarded when it changes)
template <typename T> inline void SpVector<T>::resize(const int nzmax) { if (_nzmax != nzmax) { clear(); _nzmax=nzmax; _L=0; _externAlloc=false;
#pragma omp critical
{ _v=new T[nzmax]; _r=new int[nzmax]; } } };
/// reinterpret this sparse vector of length m*n as an m x n sparse matrix:
/// linear index _r[i] maps to (row = _r[i]-col*m, col = _r[i]/m). The i--
/// trick re-examines the same element after closing the previous column, so
/// indices must be sorted ascending.
template <typename T> void inline SpVector<T>::toSpMatrix( SpMatrix<T>& out, const int m, const int n) const { out.resize(m,n,_L); cblas_copy<T>(_L,_v,1,out._v,1); int current_col=0; T* out_v=out._v; int* out_r=out._r; int* out_pB=out._pB; out_pB[0]=current_col; for (int i = 0; i<_L; ++i) { int col=_r[i]/m; if (col > current_col) { out_pB[current_col+1]=i; current_col++; i--; } else { out_r[i]=_r[i]-col*m; } } for (current_col++ ; current_col < n+1; ++current_col) out_pB[current_col]=_L; };
/// scatter the stored values into a dense vector (out must be pre-sized)
template <typename T> void inline SpVector<T>::toFull(Vector<T>& out) const { out.setZeros(); T* X = out.rawX(); for (int i = 0; i<_L; ++i) X[_r[i]]=_v[i]; };
/* ****************************
 * Implementation of ProdMatrix
 * ****************************/
/// Default constructor: no matrices attached, high-memory mode flagged
template <typename T> ProdMatrix<T>::ProdMatrix() { _DtX= NULL; _X=NULL; _D=NULL; _high_memory=true; _n=0; _m=0; _addDiag=0; };
/// Constructor representing D'*D (Gram matrix); when high_memory the product
/// is materialized, otherwise entries are computed on demand
template <typename T> ProdMatrix<T>::ProdMatrix(const Matrix<T>& D, const bool high_memory) { if (high_memory) _DtX = new Matrix<T>(); this->setMatrices(D,high_memory); };
/// Constructor.
// Matrix D'*X is represented (comment text severed from the preceding
// "/// Constructor." by a line split; restored here as a comment).
// When high_memory the product D'*X is materialized up front, otherwise only
// references to D and X are kept and entries are computed lazily.
template <typename T> ProdMatrix<T>::ProdMatrix(const Matrix<T>& D, const Matrix<T>& X, const bool high_memory) { if (high_memory) _DtX = new Matrix<T>(); this->setMatrices(D,X,high_memory); };
/// attach D and X; precompute D'*X when in high-memory mode
template <typename T> inline void ProdMatrix<T>::setMatrices(const Matrix<T>& D, const Matrix<T>& X, const bool high_memory) { _high_memory=high_memory; _m = D.n(); _n = X.n(); if (high_memory) { D.mult(X,*_DtX,true,false); } else { _X=&X; _D=&D; _DtX=NULL; } _addDiag=0; };
/// attach a single matrix D; the represented product is the Gram matrix D'*D
template <typename T> inline void ProdMatrix<T>::setMatrices( const Matrix<T>& D, const bool high_memory) { _high_memory=high_memory; _m = D.n(); _n = D.n(); if (high_memory) { D.XtX(*_DtX); } else { _X=&D; _D=&D; _DtX=NULL; } _addDiag=0; };
/// copy column i of D'*X into DtXi (computed on the fly in low-memory mode);
/// _addDiag is folded into the diagonal entry when the product is square
template <typename T> inline void ProdMatrix<T>::copyCol(const int i, Vector<T>& DtXi) const { if (_high_memory) { _DtX->copyCol(i,DtXi); } else { Vector<T> Xi; _X->refCol(i,Xi); _D->multTrans(Xi,DtXi); if (_addDiag && _m == _n) DtXi[i] += _addDiag; } };
/// same as copyCol but writes through a raw pointer of length _m
template <typename T> inline void ProdMatrix<T>::extract_rawCol(const int i,T* DtXi) const { if (_high_memory) { _DtX->extract_rawCol(i,DtXi); } else { Vector<T> Xi; Vector<T> vDtXi(DtXi,_m); _X->refCol(i,Xi); _D->multTrans(Xi,vDtXi); if (_addDiag && _m == _n) DtXi[i] += _addDiag; } };
/// DtXi += a * column i of D'*X (raw-pointer output)
template <typename T> inline void ProdMatrix<T>::add_rawCol(const int i,T* DtXi, const T a) const { if (_high_memory) { _DtX->add_rawCol(i,DtXi,a); } else { Vector<T> Xi; Vector<T> vDtXi(DtXi,_m); _X->refCol(i,Xi); _D->multTrans(Xi,vDtXi,a,T(1.0)); if (_addDiag && _m == _n) DtXi[i] += a*_addDiag; } };
/// add `diag` to the diagonal: materialized immediately in high-memory mode,
/// otherwise remembered in _addDiag and applied on access
template <typename T> void inline ProdMatrix<T>::addDiag(const T diag) { if (_m == _n) { if (_high_memory) { _DtX->addDiag(diag); } else { _addDiag=diag; } } };
/// linear (column-major) element access: index = index1 + _m*index2
template <typename T> inline T ProdMatrix<T>::operator[](const int index) const { if (_high_memory) { return (*_DtX)[index]; } else { const int index2=index/this->_m; const int index1=index-this->_m*index2;
Vector<T> col1, col2; _D->refCol(index1,col1); _X->refCol(index2,col2); return col1.dot(col2); } };
/// 2-D element access: (D'*X)(index1,index2) = D(:,index1) . X(:,index2)
template <typename T> inline T ProdMatrix<T>::operator()(const int index1, const int index2) const { if (_high_memory) { return (*_DtX)(index1,index2); } else { Vector<T> col1, col2; _D->refCol(index1,col1); _X->refCol(index2,col2); return col1.dot(col2); } };
/// extract the diagonal of the represented (square) product
template <typename T> void inline ProdMatrix<T>::diag(Vector<T>& diag) const { if (_m == _n) { if (_high_memory) { _DtX->diag(diag); } else { Vector<T> col1, col2; for (int i = 0; i <_m; ++i) { _D->refCol(i,col1); _X->refCol(i,col2); diag[i] = col1.dot(col2); } } } };
/// View of G restricted to rows indI and columns indJ; the index vectors are
/// copied, the underlying matrix is only referenced (must outlive this view)
template <typename T> class SubMatrix : public AbstractMatrix<T> { public: SubMatrix(AbstractMatrix<T>& G, Vector<int>& indI, Vector<int>& indJ); void inline convertIndicesI(Vector<int>& ind) const; void inline convertIndicesJ(Vector<int>& ind) const; int inline n() const { return _indicesJ.n(); }; int inline m() const { return _indicesI.n(); }; void inline extract_rawCol(const int i, T* pr) const; /// compute DtX(:,i)
inline void copyCol(const int i, Vector<T>& DtXi) const; /// compute DtX(:,i)
inline void add_rawCol(const int i, T* DtXi, const T a) const; /// compute DtX(:,i)
inline void diag(Vector<T>& diag) const; inline T operator()(const int index1, const int index2) const; private: Vector<int> _indicesI; Vector<int> _indicesJ; AbstractMatrix<T>* _matrix; };
template <typename T> SubMatrix<T>::SubMatrix(AbstractMatrix<T>& G, Vector<int>& indI, Vector<int>& indJ) { _matrix = &G; _indicesI.copy(indI); _indicesJ.copy(indJ); };
/// map local row indices back to indices of the underlying matrix
/// (-1 acts as a terminator)
template <typename T> void inline SubMatrix<T>::convertIndicesI( Vector<int>& ind) const { int* pr_ind = ind.rawX(); for (int i = 0; i<ind.n(); ++i) { if (pr_ind[i] == -1) break; pr_ind[i]=_indicesI[pr_ind[i]]; } };
/// map local column indices back to indices of the underlying matrix
template <typename T> void inline SubMatrix<T>::convertIndicesJ( Vector<int>& ind) const { int* pr_ind = ind.rawX(); for (int i = 0; i<ind.n(); ++i) { if (pr_ind[i] == -1) break;
// (continuation of the ShiftMatrix class declaration begun on the previous
// line; these are the remaining virtual members of the AbstractMatrixB
// interface, the center() helper, and the data members)
virtual void mult(const Matrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const; virtual void mult(const SpMatrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const; /// perform C = a*B*A + b*C, possibly transposing A or B.
virtual void multSwitch(const Matrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const; /// XtX = A'*A
virtual void XtX(Matrix<T>& XtX) const; virtual void copyRow(const int i, Vector<T>& x) const; virtual void copyTo(Matrix<T>& copy) const; virtual T dot(const Matrix<T>& x) const; virtual void print(const string& name) const; virtual ~ShiftMatrix() { }; private:
// cache the per-column means (A' applied to the uniform vector 1/_m) so that
// centered products can subtract them on the fly
void center() { Vector<T> ones(_m); ones.set(T(1.0)/_m); this->multTrans(ones,_means); _centered=true; }; int _m; int _n; int _shifts; bool _centered; Vector<T> _means; const AbstractMatrixB<T>* _inputmatrix; };
/// b <- alpha*A'*x + beta*b for the shifted matrix: each shift i contributes
/// the inner matrix applied to x placed at offset i, written into segment i
/// of b; the mean correction is applied when centered
template <typename T> void ShiftMatrix<T>::multTrans(const Vector<T>& x, Vector<T>& b, const T alpha, const T beta) const { b.resize(_n); if (beta==0) b.setZeros(); Vector<T> tmp(_inputmatrix->m()); Vector<T> subvec; Vector<T> subvec2; const int nn=_inputmatrix->n(); for (int i = 0; i<_shifts; ++i) { tmp.setZeros(); subvec2.setData(tmp.rawX()+i,_m); subvec2.copy(x); subvec.setData(b.rawX()+i*nn,nn); _inputmatrix->multTrans(tmp,subvec,alpha,beta); } if (_centered) { b.add(_means,-alpha*x.sum()); } };
/// perform b = alpha*A*x + beta*b, when x is sparse: densify x, then apply
/// the inner matrix to each shift's segment (re-sparsified) and accumulate
/// the result at offset i of b
template <typename T> void ShiftMatrix<T>::mult(const SpVector<T>& x, Vector<T>& b, const T alpha, const T beta) const { b.resize(_m); if (beta==0) { b.setZeros(); } else { b.scal(beta); } const int nn=_inputmatrix->n(); const int mm=_inputmatrix->m(); Vector<T> fullx(_n); x.toFull(fullx); SpVector<T> sptmp(nn); Vector<T> tmp; Vector<T> tmp2(mm); for (int i = 0; i<_shifts; ++i) { tmp.setData(fullx.rawX()+i*nn,nn); tmp.toSparse(sptmp);
_inputmatrix->mult(sptmp,tmp2,alpha,0); tmp.setData(tmp2.rawX()+i,_m); b.add(tmp); } if (_centered) { b.add(-alpha*_means.dot(x)); } };
/// b = alpha*A*x + beta*b for dense x (same shift-and-accumulate scheme)
template <typename T> void ShiftMatrix<T>::mult(const Vector<T>& x, Vector<T>& b, const T alpha, const T beta) const { b.resize(_m); const int nn=_inputmatrix->n(); const int mm=_inputmatrix->m(); Vector<T> tmp; Vector<T> tmp2(mm); if (beta==0) { b.setZeros(); } else { b.scal(beta); } for (int i = 0; i<_shifts; ++i) { tmp.setData(x.rawX()+i*nn,nn); _inputmatrix->mult(tmp,tmp2,alpha,0); tmp.setData(tmp2.rawX()+i,_m); b.add(tmp); } if (_centered) { b.add(-alpha*_means.dot(x)); } };
/// matrix-matrix products are intentionally unsupported for ShiftMatrix;
/// they only report misuse on stderr
template <typename T> void ShiftMatrix<T>::mult(const Matrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { cerr << "Shift Matrix is used in inadequate setting" << endl; }
template <typename T> void ShiftMatrix<T>::mult(const SpMatrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { cerr << "Shift Matrix is used in inadequate setting" << endl; }
/// perform C = a*B*A + b*C, possibly transposing A or B. (unsupported)
template <typename T> void ShiftMatrix<T>::multSwitch(const Matrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { cerr << "Shift Matrix is used in inadequate setting" << endl; }
template <typename T> void ShiftMatrix<T>::XtX(Matrix<T>& XtX) const { cerr << "Shift Matrix is used in inadequate setting" << endl; };
/// copy row `ind`: segment i of x receives row ind+i of the inner matrix
template <typename T> void ShiftMatrix<T>::copyRow(const int ind, Vector<T>& x) const { Vector<T> sub_vec; const int mm=_inputmatrix->m(); for (int i = 0; i<_shifts; ++i) { sub_vec.setData(x.rawX()+i*mm,mm); _inputmatrix->copyRow(ind+i,sub_vec); } if (_centered) x.sub(_means); };
template <typename T> void ShiftMatrix<T>::copyTo(Matrix<T>& x) const { cerr << "Shift Matrix is used in inadequate setting" << endl; };
template <typename T> T ShiftMatrix<T>::dot(const Matrix<T>& x) const { cerr << "Shift Matrix is used in inadequate setting" << endl; return 0; };
/// diagnostic print: name, shift count, then the wrapped matrix
template <typename T> void ShiftMatrix<T>::print(const string& name) const { cerr << name << endl; cerr << "Shift Matrix: " << _shifts << " shifts" << endl; _inputmatrix->print(name); };
/// Virtual matrix whose every row of the wrapped matrix appears twice
/// (rows 2i and 2i+1 both map to inner row i)
template <typename T> class DoubleRowMatrix : public AbstractMatrixB<T> { public: DoubleRowMatrix(const AbstractMatrixB<T>& inputmatrix) : _inputmatrix(&inputmatrix) { _n=inputmatrix.n(); _m=2*inputmatrix.m(); }; int n() const { return _n; }; int m() const { return _m; }; /// b <- alpha A'x + beta b
void multTrans(const Vector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const; /// perform b = alpha*A*x + beta*b, when x is sparse
virtual void mult(const SpVector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const; virtual void mult(const Vector<T>& x, Vector<T>& b, const T alpha = 1.0, const T beta = 0.0) const; /// perform C = a*A*B + b*C, possibly transposing A or B.
virtual void mult(const Matrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const; virtual void mult(const SpMatrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const; /// perform C = a*B*A + b*C, possibly transposing A or B.
virtual void multSwitch(const Matrix<T>& B, Matrix<T>& C, const bool transA = false, const bool transB = false, const T a = 1.0, const T b = 0.0) const; /// XtX = A'*A
virtual void XtX(Matrix<T>& XtX) const; virtual void copyRow(const int i, Vector<T>& x) const; virtual void copyTo(Matrix<T>& copy) const; virtual T dot(const Matrix<T>& x) const; virtual void print(const string& name) const; virtual ~DoubleRowMatrix() { }; private: int _m; int _n; const AbstractMatrixB<T>* _inputmatrix; };
/// A'x: fold the doubled rows back together (tmp[i] = x[2i] + x[2i+1]) and
/// delegate to the inner matrix
template <typename T> void DoubleRowMatrix<T>::multTrans(const Vector<T>& x, Vector<T>& b, const T alpha, const T beta) const { const int mm = _inputmatrix->m(); Vector<T> tmp(mm); for (int i = 0; i<mm; ++i) tmp[i]=x[2*i]+x[2*i+1]; _inputmatrix->multTrans(tmp,b,alpha,beta); };
/// b = alpha*A*x + beta*b (sparse x): compute the inner product once, then
/// replicate each entry into rows 2i and 2i+1
template <typename T> void DoubleRowMatrix<T>::mult(const SpVector<T>& x, Vector<T>& b, const T alpha, const T beta) const { b.resize(_m); if (beta==0) { b.setZeros(); } else { b.scal(beta); } const int mm = _inputmatrix->m(); Vector<T> tmp(mm); _inputmatrix->mult(x,tmp,alpha); for (int i = 0; i<mm; ++i) { b[2*i]+=tmp[i]; b[2*i+1]+=tmp[i]; } };
/// b = alpha*A*x + beta*b (dense x), same row-duplication scheme
template <typename T> void DoubleRowMatrix<T>::mult(const Vector<T>& x, Vector<T>& b, const T alpha, const T beta) const { b.resize(_m); if (beta==0) { b.setZeros(); } else { b.scal(beta); } const int mm = _inputmatrix->m(); Vector<T> tmp(mm); _inputmatrix->mult(x,tmp,alpha); for (int i = 0; i<mm; ++i) { b[2*i]+=tmp[i]; b[2*i+1]+=tmp[i]; } };
/// matrix-matrix operations are unsupported and only report misuse.
/// NOTE(review): FLAG(n) looks like a debug/tracing macro defined elsewhere
/// in the project and left enabled -- confirm whether it should be removed.
template <typename T> void DoubleRowMatrix<T>::mult(const Matrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { FLAG(5) cerr << "Double Matrix is used in inadequate setting" << endl; }
template <typename T> void DoubleRowMatrix<T>::mult(const SpMatrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { FLAG(4) cerr << "Double Matrix is used in inadequate setting" << endl; }
/// perform C = a*B*A + b*C, possibly transposing A or B. (unsupported)
template <typename T> void DoubleRowMatrix<T>::multSwitch(const Matrix<T>& B, Matrix<T>& C, const bool transA, const bool transB, const T a, const T b) const { FLAG(3) cerr << "Double Matrix is used in inadequate setting" << endl; }
template <typename T> void DoubleRowMatrix<T>::XtX(Matrix<T>& XtX) const { FLAG(2) cerr << "Double Matrix is used in inadequate setting" << endl; };
/// row ind of the doubled matrix is row floor(ind/2) of the inner matrix
template <typename T> void DoubleRowMatrix<T>::copyRow(const int ind, Vector<T>& x) const { const int indd2= static_cast<int>(floor(static_cast<double>(ind)/2.0)); _inputmatrix->copyRow(indd2,x); };
template <typename T> void DoubleRowMatrix<T>::copyTo(Matrix<T>& x) const { FLAG(1) cerr << "Double Matrix is used in inadequate setting" << endl; };
template <typename T> T DoubleRowMatrix<T>::dot(const Matrix<T>& x) const { FLAG(0) cerr << "Double Matrix is used in inadequate setting" << endl; return 0; };
/// diagnostic print: name, banner, then the wrapped matrix
template <typename T> void DoubleRowMatrix<T>::print(const string& name) const { cerr << name << endl; cerr << "Double Row Matrix" << endl; _inputmatrix->print(name); };
#endif
scan.c
/** * scan.c * Authors: Yizhao Gao <yizhaotsccsj@gmail.com> * Date: {08/03/2017} */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> void getCCCount(double * x1, double * y1, double * x2, double * y2, int * nCass, int * nCons, int locCount, double wSize, int wCount, int * casInW, int * conInW, double elimIntersectOD) { double distance; int minWindow; for(int i = 0; i < locCount * wCount; i++) { casInW[i] = 0; conInW[i] = 0; } #pragma omp parallel for private(distance, minWindow) for(int i = 0; i < locCount; i++) { for(int j = 0; j < locCount; j++) { distance = sqrt((x1[i] - x1[j]) * (x1[i] - x1[j]) + (y1[i] - y1[j]) * (y1[i] - y1[j]) + (x2[i] - x2[j]) * (x2[i] - x2[j]) + (y2[i] - y2[j]) * (y2[i] - y2[j])); minWindow = (int)(ceil(distance / wSize)); if(minWindow > 0) minWindow --; for(int k = minWindow; k < wCount; k++) { casInW[i * wCount + k] += nCass[j]; conInW[i * wCount + k] += nCons[j]; } } if(elimIntersectOD > 0) { double ODDistance = sqrt((x1[i] - x2[i]) * (x1[i] - x2[i]) + (y1[i] - y2[i]) * (y1[i] - y2[i])) / elimIntersectOD; int maxWindow = ODDistance / wSize; for(int k = maxWindow; k < wCount; k++) { casInW[i * wCount + k] = -1; } } } return; } void loglikelihood(double * ll, int * casInW, int * conInW, int totalWindow, int casCount, int conCount, int highLow) { double cas, con, tot; double llTemp; int totCount = casCount + conCount; bool highCluster = true; bool lowCluster = true; if(highLow == 1) lowCluster = false; else if(highLow == -1) highCluster = false; #pragma omp parallel for private(cas, con, tot, llTemp) for(int i = 0; i < totalWindow; i++) { cas = casInW[i]; con = conInW[i]; tot = cas + con; if(cas == -1) { ll[i] = 1; } else if(cas * conCount > con * casCount) { //High cluster of cases if(highCluster) { llTemp = cas * log(cas/tot); if(con > 0) llTemp += con * log(con/tot); if(casCount > cas) llTemp += (casCount - cas) * log((casCount - cas)/(totCount - tot)); if(conCount > con) llTemp += (conCount - con) * 
log((conCount - con)/(totCount - tot)); ll[i] = llTemp; } else ll[i] = 1; } else { //Low cluster of cases if(lowCluster) { llTemp = con * log(con/tot); if(cas > 0) llTemp += cas * log(cas/tot); if(casCount > cas) llTemp += (casCount - cas) * log((casCount - cas)/(totCount - tot)); if(conCount > con) llTemp += (conCount - con) * log((conCount - con)/(totCount - tot)); ll[i] = llTemp; } else ll[i] = 1; } } } void findTopNCluster(double * x1, double * y1, double * x2, double * y2, int locCount, double * ll, double wSize, int wCount, int * center, int * radius, double * cLL, int nClusters) { if(nClusters < 1) return; int aCenter = -1; int aRadius = -1; for(int i = 0; i < locCount; i++) { for(int j = 0; j < wCount; j++) { if(ll[i * wCount + j] < 0) { if(aCenter < 0) { aCenter = i; aRadius = j; } else if(ll[i * wCount + j] > ll[aCenter * wCount + aRadius]) { aCenter = i; aRadius = j; } } } } center[0] = aCenter; radius[0] = aRadius; cLL[0] = ll[aCenter * wCount + aRadius]; double lastX1, lastY1, lastX2, lastY2, lastRad; lastX1 = x1[aCenter]; lastY1 = y1[aCenter]; lastX2 = x2[aCenter]; lastY2 = y2[aCenter]; lastRad = (aRadius + 1) * wSize; double distance; int maxWindow; for(int c = 1; c < nClusters; c ++) { //Remove intersecting clusters for(int i = 0; i < locCount; i++) { distance = sqrt((x1[i] - lastX1) * (x1[i] - lastX1) + (y1[i] - lastY1) * (y1[i] - lastY1) + (x2[i] - lastX2) * (x2[i] - lastX2) + (y2[i] - lastY2) * (y2[i] - lastY2)) - lastRad; maxWindow = ceil(distance / wSize) - 1; if(maxWindow < 0) maxWindow = 0; for(int j = maxWindow; j < wCount; j++) { ll[i * wCount + j] = 1; } } //Find secoundary clusters aCenter = -1; aRadius = -1; for(int i = 0; i < locCount; i++) { for(int j = 0; j < wCount; j++) { if(ll[i * wCount + j] < 0) { if(aCenter < 0) { aCenter = i; aRadius = j; } else if(ll[i * wCount + j] > ll[aCenter * wCount + aRadius]) { aCenter = i; aRadius = j; } } } } center[c] = aCenter; radius[c] = aRadius; if(aCenter != -1) { cLL[c] = ll[aCenter * wCount + 
aRadius]; } else { break; } lastX1 = x1[aCenter]; lastY1 = y1[aCenter]; lastX2 = x2[aCenter]; lastY2 = y2[aCenter]; lastRad = (aRadius + 1) * wSize; } return; }
gather_nd_op_cpu_impl.h
/* Copyright 2016 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_CORE_KERNELS_GATHER_ND_OP_CPU_IMPL_H_
#define TENSORFLOW_CORE_KERNELS_GATHER_ND_OP_CPU_IMPL_H_

// Specialization of GatherNdSlice to CPU

#define EIGEN_USE_THREADS

#include <atomic>

#include "tensorflow/core/framework/bounds_check.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/register_types.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/kernels/gather_nd_op.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/mem.h"
#include "tensorflow/core/platform/types.h"
#include "tensorflow/core/util/util.h"

namespace tensorflow {

typedef Eigen::ThreadPoolDevice CPUDevice;

namespace generator {

// Eigen "generator" functor: for each row `loc` of Tindices_, copies the
// addressed `slice_size_`-element slice of Tparams_ into row `loc` of Tout_.
// Out-of-bounds index rows are recorded in *error_loc_ and the corresponding
// output slice is zero-filled instead of copied.
template <typename T, typename Index, int IXDIM>
class GatherNdSliceGenerator {
 public:
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE GatherNdSliceGenerator(
      const Index slice_size, typename TTypes<Index>::ConstMatrix Tindices,
      typename TTypes<T, IXDIM + 1>::ConstTensor Tparams,
      typename TTypes<T>::Matrix Tout, std::atomic<Index>* error_loc)
      : slice_size_(slice_size),
        Tindices_(Tindices),
        Tparams_(Tparams),
        Tout_(Tout),
        error_loc_(error_loc) {}

  // Fills *ix with the IXDIM-dimensional index stored in row `loc` of
  // Tindices_; the trailing coordinate is set to 0 (start of the slice).
  // Returns true iff any index component falls outside the corresponding
  // dimension of Tparams_.
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool GenerateIndices(
      const Index loc, Eigen::array<Eigen::DenseIndex, IXDIM + 1>* ix) const {
    (*ix)[IXDIM] = 0;
    bool out_of_bounds = false;
    for (int i = 0; i < IXDIM; ++i) {
      // SubtleMustCopy guards against the index changing underneath us
      // between the bounds check and the read.
      const Index ix_i = internal::SubtleMustCopy(Tindices_(loc, i));
      (*ix)[i] = ix_i;
      out_of_bounds |= !FastBoundsCheck(ix_i, Tparams_.dimension(i));
    }
    return out_of_bounds;
  }

  // Invoked once per batch element by Eigen's generate(); performs the copy
  // (or zero-fill on OOB) as a side effect.  The int32 return value is a
  // dummy required by the generator protocol.
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE int32 operator()(
      const Eigen::array<Eigen::DenseIndex, 1>& loc_array) const {
    const Index loc = loc_array[0];
    Eigen::array<Eigen::DenseIndex, IXDIM + 1> ix;
    Eigen::array<Eigen::DenseIndex, 2> ix_out;
    ix_out[0] = loc;
    ix_out[1] = 0;
    const bool out_of_bounds = GenerateIndices(loc, &ix);
    if (TF_PREDICT_FALSE(out_of_bounds)) {
      // Remember one offending row (last writer wins) and emit zeros.
      error_loc_->store(loc);
      std::fill_n(&Tout_(ix_out), slice_size_, T());
    } else {
      std::copy_n(&Tparams_(ix), slice_size_, &Tout_(ix_out));
    }

    return static_cast<int32>(0);  // Return something...
  }

 private:
  const Index slice_size_;
  const typename TTypes<Index>::ConstMatrix Tindices_;
  const typename TTypes<T, IXDIM + 1>::ConstTensor Tparams_;
  mutable typename TTypes<T>::Matrix Tout_;
  std::atomic<Index>* error_loc_;
};

}  // namespace generator

namespace functor {

// CPU implementation of GatherNdSlice.  Returns -1 on success, or the row
// of Tindices containing an out-of-bounds index otherwise.
template <typename T, typename Index, int IXDIM>
struct GatherNdSlice<CPUDevice, T, Index, IXDIM> {
  Index operator()(const CPUDevice& d, const Index slice_size,
                   typename TTypes<int32>::Scalar Tscratch,
                   typename TTypes<T, IXDIM + 1>::ConstTensor Tparams,
                   typename TTypes<Index>::ConstMatrix Tindices,
                   typename TTypes<T>::Matrix Tout) {
    std::atomic<Index> error_loc(-1);

    const Eigen::DenseIndex batch_size = Tindices.dimension(0);
#if !defined(EIGEN_HAS_INDEX_LIST)
    Eigen::Tensor<Eigen::DenseIndex, 1>::Dimensions reshape_dims{{ 1 }};
    Eigen::array<Eigen::DenseIndex, 1> broadcast_dims{{ batch_size }};
#else
    Eigen::IndexList<Eigen::type2index<1> > reshape_dims;
    Eigen::IndexList<Eigen::DenseIndex> broadcast_dims;
    broadcast_dims.set(0, batch_size);
#endif
    generator::GatherNdSliceGenerator<T, Index, IXDIM> gather_nd_generator(
        slice_size, Tindices, Tparams, Tout, &error_loc);

#if defined(INTEL_MKL) && defined(ENABLE_MKL)
    // Eigen implementation below is not highly performant. gather_nd_generator
    // does not seem to be called in parallel, leading to very poor performance.
    // Additionally, since it uses scalar (Tscratch) to invoke 'generate', it
    // needs to go through redundant operations like 'reshape', 'broadcast' and
    // 'sum'. OpenMP loop below essentially does same thing as Eigen code, but
    // is considerably more efficient.
#pragma omp parallel for
    for (Eigen::DenseIndex i = 0; i < batch_size; i++) {
      const Eigen::array<Eigen::DenseIndex, 1> loc{i};
      gather_nd_generator(loc);
    }
#else  // INTEL_MKL && ENABLE_MKL
    // Drive the generator via a broadcast-then-sum over a scalar scratch
    // value: generate() is called once per batch element as a side effect.
    Tscratch.device(d) = Tscratch.reshape(reshape_dims)
                             .broadcast(broadcast_dims)
                             .generate(gather_nd_generator)
                             .sum();
#endif  // INTEL_MKL && ENABLE_MKL

    // error_loc() returns -1 if there's no out-of-bounds index,
    // otherwise it returns the location of an OOB index in Tindices.
    return error_loc.load();
  }
};

#define REGISTER_GATHER_ND_FULL(T, Index)                                     \
  template Index GatherNdSlice<CPUDevice, T, Index, CPU_PROVIDED_IXDIM>::     \
  operator()(const CPUDevice& d, const Index slice_size,                      \
             typename TTypes<int32>::Scalar Tscratch,                         \
             typename TTypes<T, CPU_PROVIDED_IXDIM + 1>::ConstTensor Tparams, \
             typename TTypes<Index>::ConstMatrix Tindices,                    \
             typename TTypes<T>::Matrix Tout);

#define REGISTER_GATHER_ND_CPU(type)    \
  REGISTER_GATHER_ND_FULL(type, int32); \
  REGISTER_GATHER_ND_FULL(type, int64)

TF_CALL_ALL_TYPES(REGISTER_GATHER_ND_CPU);

}  // namespace functor

}  // namespace tensorflow

#endif  // TENYSORFLOW_CORE_KERNELS_GATHER_ND_OP_CPU_IMPL_H_
openmp_demo.c
#include <stdio.h> #include <omp.h> int main(int argc, char const *argv[]) { int val = 10; #pragma omp parallel { printf("Starting Thread %d --- %d\n", omp_get_thread_num(), val); printf("Finishing Thread %d --- %d\n", omp_get_thread_num(), val); } return 0; }
ast-dump-openmp-sections.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_zero() { #pragma omp sections {} } void test_one() { #pragma omp sections { ; } } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-sections.c:3:1, line:6:1> line:3:6 test_zero 'void ()' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:18, line:6:1> // CHECK-NEXT: `-FunctionDecl {{.*}} <line:8:1, line:11:1> line:8:6 test_one 'void ()' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:17, line:11:1> // CHECK-NEXT: `-OMPSectionsDirective {{.*}} <line:9:1, col:21> // CHECK-NEXT: `-CapturedStmt {{.*}} <line:10:3, col:7> // CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK-NEXT: |-CompoundStmt {{.*}} <col:3, col:7> openmp_structured_block // CHECK-NEXT: | `-NullStmt {{.*}} <col:5> // CHECK-NEXT: `-ImplicitParamDecl {{.*}} <line:9:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-sections.c:9:1) *const restrict'
y_solve.c
//-------------------------------------------------------------------------// // // // This benchmark is an OpenMP C version of the NPB BT code. This OpenMP // // C version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the OpenMP Fortran versions in // // "NPB3.3-OMP" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this OpenMP C version to // // cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// #include "header.h" #include "work_lhs.h" #include "timers.h" //--------------------------------------------------------------------- // Performs line solves in Y direction by first factoring // the block-tridiagonal matrix into an upper triangular matrix, // and then performing back substitution to solve for the unknow // vectors of each line. // // Make sure we treat elements zero to cell_size in the direction // of the sweep. 
//---------------------------------------------------------------------
// y_solve: block-tridiagonal line solve along the Y direction.
//
// For every (k,i) pencil of the grid this routine:
//   1. builds the flux (fjac) and viscous (njac) 5x5 Jacobians at each j,
//   2. assembles the block-tridiagonal system lhs[j][AA|BB|CC],
//   3. forward-eliminates and back-substitutes into rhs.
//
// NOTE(review): tmp1/tmp2/tmp3 and the fjac/njac/lhs work arrays come
// from work_lhs.h and are written inside the parallel loop below even
// though only (i,j,k,m,n) are listed private; they must be threadprivate
// (or otherwise per-thread) for the loop over k to be race-free --
// confirm against work_lhs.h.
//---------------------------------------------------------------------
void y_solve()
{
  int i, j, k, m, n, jsize;

  //---------------------------------------------------------------------
  //---------------------------------------------------------------------

  if (timeron) timer_start(t_ysolve);

  //---------------------------------------------------------------------
  //---------------------------------------------------------------------

  //---------------------------------------------------------------------
  // This function computes the left hand side for the three y-factors
  //---------------------------------------------------------------------

  jsize = grid_points[1]-1;

  //---------------------------------------------------------------------
  // Compute the indices for storing the tri-diagonal matrix;
  // determine a (labeled f) and n jacobians for cell c
  //---------------------------------------------------------------------
  #pragma scop
#pragma omp parallel for default(shared) shared(jsize) private(i,j,k,m,n)
  for (k = 1; k <= grid_points[2]-2; k++) {
    for (i = 1; i <= grid_points[0]-2; i++) {
      // Build the 5x5 flux (fjac) and viscous (njac) Jacobians for every
      // point of this (k,i) pencil.  Second index = column, third = row.
      for (j = 0; j <= jsize; j++) {
        tmp1 = rho_i[k][j][i];
        tmp2 = tmp1 * tmp1;
        tmp3 = tmp1 * tmp2;

        fjac[j][0][0] = 0.0;
        fjac[j][1][0] = 0.0;
        fjac[j][2][0] = 1.0;
        fjac[j][3][0] = 0.0;
        fjac[j][4][0] = 0.0;

        fjac[j][0][1] = - ( u[k][j][i][1]*u[k][j][i][2] ) * tmp2;
        fjac[j][1][1] = u[k][j][i][2] * tmp1;
        fjac[j][2][1] = u[k][j][i][1] * tmp1;
        fjac[j][3][1] = 0.0;
        fjac[j][4][1] = 0.0;

        fjac[j][0][2] = - ( u[k][j][i][2]*u[k][j][i][2]*tmp2)
          + c2 * qs[k][j][i];
        fjac[j][1][2] = - c2 * u[k][j][i][1] * tmp1;
        fjac[j][2][2] = ( 2.0 - c2 ) * u[k][j][i][2] * tmp1;
        fjac[j][3][2] = - c2 * u[k][j][i][3] * tmp1;
        fjac[j][4][2] = c2;

        fjac[j][0][3] = - ( u[k][j][i][2]*u[k][j][i][3] ) * tmp2;
        fjac[j][1][3] = 0.0;
        fjac[j][2][3] = u[k][j][i][3] * tmp1;
        fjac[j][3][3] = u[k][j][i][2] * tmp1;
        fjac[j][4][3] = 0.0;

        fjac[j][0][4] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] )
          * u[k][j][i][2] * tmp2;
        fjac[j][1][4] = - c2 * u[k][j][i][1]*u[k][j][i][2] * tmp2;
        fjac[j][2][4] = c1 * u[k][j][i][4] * tmp1
          - c2 * ( qs[k][j][i] + u[k][j][i][2]*u[k][j][i][2] * tmp2 );
        fjac[j][3][4] = - c2 * ( u[k][j][i][2]*u[k][j][i][3] ) * tmp2;
        fjac[j][4][4] = c1 * u[k][j][i][2] * tmp1;

        njac[j][0][0] = 0.0;
        njac[j][1][0] = 0.0;
        njac[j][2][0] = 0.0;
        njac[j][3][0] = 0.0;
        njac[j][4][0] = 0.0;

        njac[j][0][1] = - c3c4 * tmp2 * u[k][j][i][1];
        njac[j][1][1] = c3c4 * tmp1;
        njac[j][2][1] = 0.0;
        njac[j][3][1] = 0.0;
        njac[j][4][1] = 0.0;

        njac[j][0][2] = - con43 * c3c4 * tmp2 * u[k][j][i][2];
        njac[j][1][2] = 0.0;
        njac[j][2][2] = con43 * c3c4 * tmp1;
        njac[j][3][2] = 0.0;
        njac[j][4][2] = 0.0;

        njac[j][0][3] = - c3c4 * tmp2 * u[k][j][i][3];
        njac[j][1][3] = 0.0;
        njac[j][2][3] = 0.0;
        njac[j][3][3] = c3c4 * tmp1;
        njac[j][4][3] = 0.0;

        njac[j][0][4] = - ( c3c4
            - c1345 ) * tmp3 * (u[k][j][i][1]*u[k][j][i][1])
          - ( con43 * c3c4
            - c1345 ) * tmp3 * (u[k][j][i][2]*u[k][j][i][2])
          - ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][3]*u[k][j][i][3])
          - c1345 * tmp2 * u[k][j][i][4];

        njac[j][1][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][1];
        njac[j][2][4] = ( con43 * c3c4 - c1345 ) * tmp2 * u[k][j][i][2];
        njac[j][3][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][3];
        njac[j][4][4] = ( c1345 ) * tmp1;
      }

      //---------------------------------------------------------------------
      // now jacobians set, so form left hand side in y direction
      //---------------------------------------------------------------------
      lhsinit(lhs, jsize);
      for (j = 1; j <= jsize-1; j++) {
        tmp1 = dt * ty1;
        tmp2 = dt * ty2;

        // Sub-diagonal block: AA = -tmp2*fjac(j-1) - tmp1*njac(j-1),
        // with the extra -tmp1*dy* damping on the diagonal.
        lhs[j][AA][0][0] = - tmp2 * fjac[j-1][0][0]
          - tmp1 * njac[j-1][0][0]
          - tmp1 * dy1;
        lhs[j][AA][1][0] = - tmp2 * fjac[j-1][1][0]
          - tmp1 * njac[j-1][1][0];
        lhs[j][AA][2][0] = - tmp2 * fjac[j-1][2][0]
          - tmp1 * njac[j-1][2][0];
        lhs[j][AA][3][0] = - tmp2 * fjac[j-1][3][0]
          - tmp1 * njac[j-1][3][0];
        lhs[j][AA][4][0] = - tmp2 * fjac[j-1][4][0]
          - tmp1 * njac[j-1][4][0];

        lhs[j][AA][0][1] = - tmp2 * fjac[j-1][0][1]
          - tmp1 * njac[j-1][0][1];
        lhs[j][AA][1][1] = - tmp2 * fjac[j-1][1][1]
          - tmp1 * njac[j-1][1][1]
          - tmp1 * dy2;
        lhs[j][AA][2][1] = - tmp2 * fjac[j-1][2][1]
          - tmp1 * njac[j-1][2][1];
        lhs[j][AA][3][1] = - tmp2 * fjac[j-1][3][1]
          - tmp1 * njac[j-1][3][1];
        lhs[j][AA][4][1] = - tmp2 * fjac[j-1][4][1]
          - tmp1 * njac[j-1][4][1];

        lhs[j][AA][0][2] = - tmp2 * fjac[j-1][0][2]
          - tmp1 * njac[j-1][0][2];
        lhs[j][AA][1][2] = - tmp2 * fjac[j-1][1][2]
          - tmp1 * njac[j-1][1][2];
        lhs[j][AA][2][2] = - tmp2 * fjac[j-1][2][2]
          - tmp1 * njac[j-1][2][2]
          - tmp1 * dy3;
        lhs[j][AA][3][2] = - tmp2 * fjac[j-1][3][2]
          - tmp1 * njac[j-1][3][2];
        lhs[j][AA][4][2] = - tmp2 * fjac[j-1][4][2]
          - tmp1 * njac[j-1][4][2];

        lhs[j][AA][0][3] = - tmp2 * fjac[j-1][0][3]
          - tmp1 * njac[j-1][0][3];
        lhs[j][AA][1][3] = - tmp2 * fjac[j-1][1][3]
          - tmp1 * njac[j-1][1][3];
        lhs[j][AA][2][3] = - tmp2 * fjac[j-1][2][3]
          - tmp1 * njac[j-1][2][3];
        lhs[j][AA][3][3] = - tmp2 * fjac[j-1][3][3]
          - tmp1 * njac[j-1][3][3]
          - tmp1 * dy4;
        lhs[j][AA][4][3] = - tmp2 * fjac[j-1][4][3]
          - tmp1 * njac[j-1][4][3];

        lhs[j][AA][0][4] = - tmp2 * fjac[j-1][0][4]
          - tmp1 * njac[j-1][0][4];
        lhs[j][AA][1][4] = - tmp2 * fjac[j-1][1][4]
          - tmp1 * njac[j-1][1][4];
        lhs[j][AA][2][4] = - tmp2 * fjac[j-1][2][4]
          - tmp1 * njac[j-1][2][4];
        lhs[j][AA][3][4] = - tmp2 * fjac[j-1][3][4]
          - tmp1 * njac[j-1][3][4];
        lhs[j][AA][4][4] = - tmp2 * fjac[j-1][4][4]
          - tmp1 * njac[j-1][4][4]
          - tmp1 * dy5;

        // Diagonal block: BB = I + 2*tmp1*njac(j) + 2*tmp1*dy* on diagonal.
        lhs[j][BB][0][0] = 1.0
          + tmp1 * 2.0 * njac[j][0][0]
          + tmp1 * 2.0 * dy1;
        lhs[j][BB][1][0] = tmp1 * 2.0 * njac[j][1][0];
        lhs[j][BB][2][0] = tmp1 * 2.0 * njac[j][2][0];
        lhs[j][BB][3][0] = tmp1 * 2.0 * njac[j][3][0];
        lhs[j][BB][4][0] = tmp1 * 2.0 * njac[j][4][0];

        lhs[j][BB][0][1] = tmp1 * 2.0 * njac[j][0][1];
        lhs[j][BB][1][1] = 1.0
          + tmp1 * 2.0 * njac[j][1][1]
          + tmp1 * 2.0 * dy2;
        lhs[j][BB][2][1] = tmp1 * 2.0 * njac[j][2][1];
        lhs[j][BB][3][1] = tmp1 * 2.0 * njac[j][3][1];
        lhs[j][BB][4][1] = tmp1 * 2.0 * njac[j][4][1];

        lhs[j][BB][0][2] = tmp1 * 2.0 * njac[j][0][2];
        lhs[j][BB][1][2] = tmp1 * 2.0 * njac[j][1][2];
        lhs[j][BB][2][2] = 1.0
          + tmp1 * 2.0 * njac[j][2][2]
          + tmp1 * 2.0 * dy3;
        lhs[j][BB][3][2] = tmp1 * 2.0 * njac[j][3][2];
        lhs[j][BB][4][2] = tmp1 * 2.0 * njac[j][4][2];

        lhs[j][BB][0][3] = tmp1 * 2.0 * njac[j][0][3];
        lhs[j][BB][1][3] = tmp1 * 2.0 * njac[j][1][3];
        lhs[j][BB][2][3] = tmp1 * 2.0 * njac[j][2][3];
        lhs[j][BB][3][3] = 1.0
          + tmp1 * 2.0 * njac[j][3][3]
          + tmp1 * 2.0 * dy4;
        lhs[j][BB][4][3] = tmp1 * 2.0 * njac[j][4][3];

        lhs[j][BB][0][4] = tmp1 * 2.0 * njac[j][0][4];
        lhs[j][BB][1][4] = tmp1 * 2.0 * njac[j][1][4];
        lhs[j][BB][2][4] = tmp1 * 2.0 * njac[j][2][4];
        lhs[j][BB][3][4] = tmp1 * 2.0 * njac[j][3][4];
        lhs[j][BB][4][4] = 1.0
          + tmp1 * 2.0 * njac[j][4][4]
          + tmp1 * 2.0 * dy5;

        // Super-diagonal block: CC = tmp2*fjac(j+1) - tmp1*njac(j+1),
        // with the extra -tmp1*dy* damping on the diagonal.
        lhs[j][CC][0][0] =  tmp2 * fjac[j+1][0][0]
          - tmp1 * njac[j+1][0][0]
          - tmp1 * dy1;
        lhs[j][CC][1][0] =  tmp2 * fjac[j+1][1][0]
          - tmp1 * njac[j+1][1][0];
        lhs[j][CC][2][0] =  tmp2 * fjac[j+1][2][0]
          - tmp1 * njac[j+1][2][0];
        lhs[j][CC][3][0] =  tmp2 * fjac[j+1][3][0]
          - tmp1 * njac[j+1][3][0];
        lhs[j][CC][4][0] =  tmp2 * fjac[j+1][4][0]
          - tmp1 * njac[j+1][4][0];

        lhs[j][CC][0][1] =  tmp2 * fjac[j+1][0][1]
          - tmp1 * njac[j+1][0][1];
        lhs[j][CC][1][1] =  tmp2 * fjac[j+1][1][1]
          - tmp1 * njac[j+1][1][1]
          - tmp1 * dy2;
        lhs[j][CC][2][1] =  tmp2 * fjac[j+1][2][1]
          - tmp1 * njac[j+1][2][1];
        lhs[j][CC][3][1] =  tmp2 * fjac[j+1][3][1]
          - tmp1 * njac[j+1][3][1];
        lhs[j][CC][4][1] =  tmp2 * fjac[j+1][4][1]
          - tmp1 * njac[j+1][4][1];

        lhs[j][CC][0][2] =  tmp2 * fjac[j+1][0][2]
          - tmp1 * njac[j+1][0][2];
        lhs[j][CC][1][2] =  tmp2 * fjac[j+1][1][2]
          - tmp1 * njac[j+1][1][2];
        lhs[j][CC][2][2] =  tmp2 * fjac[j+1][2][2]
          - tmp1 * njac[j+1][2][2]
          - tmp1 * dy3;
        lhs[j][CC][3][2] =  tmp2 * fjac[j+1][3][2]
          - tmp1 * njac[j+1][3][2];
        lhs[j][CC][4][2] =  tmp2 * fjac[j+1][4][2]
          - tmp1 * njac[j+1][4][2];

        lhs[j][CC][0][3] =  tmp2 * fjac[j+1][0][3]
          - tmp1 * njac[j+1][0][3];
        lhs[j][CC][1][3] =  tmp2 * fjac[j+1][1][3]
          - tmp1 * njac[j+1][1][3];
        lhs[j][CC][2][3] =  tmp2 * fjac[j+1][2][3]
          - tmp1 * njac[j+1][2][3];
        lhs[j][CC][3][3] =  tmp2 * fjac[j+1][3][3]
          - tmp1 * njac[j+1][3][3]
          - tmp1 * dy4;
        lhs[j][CC][4][3] =  tmp2 * fjac[j+1][4][3]
          - tmp1 * njac[j+1][4][3];

        lhs[j][CC][0][4] =  tmp2 * fjac[j+1][0][4]
          - tmp1 * njac[j+1][0][4];
        lhs[j][CC][1][4] =  tmp2 * fjac[j+1][1][4]
          - tmp1 * njac[j+1][1][4];
        lhs[j][CC][2][4] =  tmp2 * fjac[j+1][2][4]
          - tmp1 * njac[j+1][2][4];
        lhs[j][CC][3][4] =  tmp2 * fjac[j+1][3][4]
          - tmp1 * njac[j+1][3][4];
        lhs[j][CC][4][4] =  tmp2 * fjac[j+1][4][4]
          - tmp1 * njac[j+1][4][4]
          - tmp1 * dy5;
      }

      //---------------------------------------------------------------------
      //---------------------------------------------------------------------

      //---------------------------------------------------------------------
      // performs gaussian elimination on this cell.
      //
      // assumes that unpacking routines for non-first cells
      // preload C' and rhs' from previous cell.
      //
      // assumed send happens outside this routine, but that
      // c'(JMAX) and rhs'(JMAX) will be sent to next cell
      //---------------------------------------------------------------------

      //---------------------------------------------------------------------
      // multiply c[k][0][i] by b_inverse and copy back to c
      // multiply rhs(0) by b_inverse(0) and copy to rhs
      //---------------------------------------------------------------------
      binvcrhs( lhs[0][BB], lhs[0][CC], rhs[k][0][i] );

      //---------------------------------------------------------------------
      // begin inner most do loop
      // do all the elements of the cell unless last
      //---------------------------------------------------------------------
      for (j = 1; j <= jsize-1; j++) {
        //-------------------------------------------------------------------
        // subtract A*lhs_vector(j-1) from lhs_vector(j)
        //
        // rhs(j) = rhs(j) - A*rhs(j-1)
        //-------------------------------------------------------------------
        matvec_sub(lhs[j][AA], rhs[k][j-1][i], rhs[k][j][i]);

        //-------------------------------------------------------------------
        // B(j) = B(j) - C(j-1)*A(j)
        //-------------------------------------------------------------------
        matmul_sub(lhs[j][AA], lhs[j-1][CC], lhs[j][BB]);

        //-------------------------------------------------------------------
        // multiply c[k][j][i] by b_inverse and copy back to c
        // multiply rhs[k][0][i] by b_inverse[k][0][i] and copy to rhs
        //-------------------------------------------------------------------
        binvcrhs( lhs[j][BB], lhs[j][CC], rhs[k][j][i] );
      }

      //---------------------------------------------------------------------
      // rhs(jsize) = rhs(jsize) - A*rhs(jsize-1)
      //---------------------------------------------------------------------
      matvec_sub(lhs[jsize][AA], rhs[k][jsize-1][i], rhs[k][jsize][i]);

      //---------------------------------------------------------------------
      // B(jsize) = B(jsize) - C(jsize-1)*A(jsize)
      // matmul_sub(AA,i,jsize,k,c,
      // $ CC,i,jsize-1,k,c,BB,i,jsize,k)
      //---------------------------------------------------------------------
      matmul_sub(lhs[jsize][AA], lhs[jsize-1][CC], lhs[jsize][BB]);

      //---------------------------------------------------------------------
      // multiply rhs(jsize) by b_inverse(jsize) and copy to rhs
      //---------------------------------------------------------------------
      binvrhs( lhs[jsize][BB], rhs[k][jsize][i] );

      //---------------------------------------------------------------------
      // back solve: if last cell, then generate U(jsize)=rhs(jsize)
      // else assume U(jsize) is loaded in un pack backsub_info
      // so just use it
      // after u(jstart) will be sent to next cell
      //---------------------------------------------------------------------
      for (j = jsize-1; j >= 0; j--) {
        for (m = 0; m < BLOCK_SIZE; m++) {
          for (n = 0; n < BLOCK_SIZE; n++) {
            rhs[k][j][i][m] = rhs[k][j][i][m]
              - lhs[j][CC][n][m]*rhs[k][j+1][i][n];
          }
        }
      }
    }
  }
  #pragma endscop
  if (timeron) timer_stop(t_ysolve);
}
tinyexr.h
/* Copyright (c) 2014 - 2019, Syoyo Fujita and many contributors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Syoyo Fujita nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // TinyEXR contains some OpenEXR code, which is licensed under ------------ /////////////////////////////////////////////////////////////////////////// // // Copyright (c) 2002, Industrial Light & Magic, a division of Lucas // Digital Ltd. LLC // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Industrial Light & Magic nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////// // End of OpenEXR license ------------------------------------------------- #ifndef TINYEXR_H_ #define TINYEXR_H_ // // // Do this: // #define TINYEXR_IMPLEMENTATION // before you include this file in *one* C or C++ file to create the // implementation. // // // i.e. it should look like this: // #include ... // #include ... // #include ... 
// #define TINYEXR_IMPLEMENTATION // #include "tinyexr.h" // // #include <stddef.h> // for size_t #include <stdint.h> // guess stdint.h is available(C99) #ifdef __cplusplus extern "C" { #endif // Use embedded miniz or not to decode ZIP format pixel. Linking with zlib // required if this flas is 0. #ifndef TINYEXR_USE_MINIZ #define TINYEXR_USE_MINIZ (1) #endif // Disable PIZ comporession when applying cpplint. #ifndef TINYEXR_USE_PIZ #define TINYEXR_USE_PIZ (1) #endif #ifndef TINYEXR_USE_ZFP #define TINYEXR_USE_ZFP (0) // TinyEXR extension. // http://computation.llnl.gov/projects/floating-point-compression #endif #ifndef TINYEXR_USE_THREAD #define TINYEXR_USE_THREAD (0) // No threaded loading. // http://computation.llnl.gov/projects/floating-point-compression #endif #ifndef TINYEXR_USE_OPENMP #ifdef _OPENMP #define TINYEXR_USE_OPENMP (1) #else #define TINYEXR_USE_OPENMP (0) #endif #endif #define TINYEXR_SUCCESS (0) #define TINYEXR_ERROR_INVALID_MAGIC_NUMBER (-1) #define TINYEXR_ERROR_INVALID_EXR_VERSION (-2) #define TINYEXR_ERROR_INVALID_ARGUMENT (-3) #define TINYEXR_ERROR_INVALID_DATA (-4) #define TINYEXR_ERROR_INVALID_FILE (-5) #define TINYEXR_ERROR_INVALID_PARAMETER (-6) #define TINYEXR_ERROR_CANT_OPEN_FILE (-7) #define TINYEXR_ERROR_UNSUPPORTED_FORMAT (-8) #define TINYEXR_ERROR_INVALID_HEADER (-9) #define TINYEXR_ERROR_UNSUPPORTED_FEATURE (-10) #define TINYEXR_ERROR_CANT_WRITE_FILE (-11) #define TINYEXR_ERROR_SERIALZATION_FAILED (-12) #define TINYEXR_ERROR_LAYER_NOT_FOUND (-13) // @note { OpenEXR file format: http://www.openexr.com/openexrfilelayout.pdf } // pixel type: possible values are: UINT = 0 HALF = 1 FLOAT = 2 #define TINYEXR_PIXELTYPE_UINT (0) #define TINYEXR_PIXELTYPE_HALF (1) #define TINYEXR_PIXELTYPE_FLOAT (2) #define TINYEXR_MAX_HEADER_ATTRIBUTES (1024) #define TINYEXR_MAX_CUSTOM_ATTRIBUTES (128) #define TINYEXR_COMPRESSIONTYPE_NONE (0) #define TINYEXR_COMPRESSIONTYPE_RLE (1) #define TINYEXR_COMPRESSIONTYPE_ZIPS (2) #define 
TINYEXR_COMPRESSIONTYPE_ZIP (3) #define TINYEXR_COMPRESSIONTYPE_PIZ (4) #define TINYEXR_COMPRESSIONTYPE_ZFP (128) // TinyEXR extension #define TINYEXR_ZFP_COMPRESSIONTYPE_RATE (0) #define TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION (1) #define TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY (2) #define TINYEXR_TILE_ONE_LEVEL (0) #define TINYEXR_TILE_MIPMAP_LEVELS (1) #define TINYEXR_TILE_RIPMAP_LEVELS (2) #define TINYEXR_TILE_ROUND_DOWN (0) #define TINYEXR_TILE_ROUND_UP (1) typedef struct _EXRVersion { int version; // this must be 2 int tiled; // tile format image int long_name; // long name attribute int non_image; // deep image(EXR 2.0) int multipart; // multi-part(EXR 2.0) } EXRVersion; typedef struct _EXRAttribute { char name[256]; // name and type are up to 255 chars long. char type[256]; unsigned char *value; // uint8_t* int size; int pad0; } EXRAttribute; typedef struct _EXRChannelInfo { char name[256]; // less than 255 bytes long int pixel_type; int x_sampling; int y_sampling; unsigned char p_linear; unsigned char pad[3]; } EXRChannelInfo; typedef struct _EXRTile { int offset_x; int offset_y; int level_x; int level_y; int width; // actual width in a tile. int height; // actual height int a tile. unsigned char **images; // image[channels][pixels] } EXRTile; typedef struct _EXRHeader { float pixel_aspect_ratio; int line_order; int data_window[4]; int display_window[4]; float screen_window_center[2]; float screen_window_width; int chunk_count; // Properties for tiled format(`tiledesc`). int tiled; int tile_size_x; int tile_size_y; int tile_level_mode; int tile_rounding_mode; int long_name; int non_image; int multipart; unsigned int header_len; // Custom attributes(exludes required attributes(e.g. `channels`, // `compression`, etc) int num_custom_attributes; EXRAttribute *custom_attributes; // array of EXRAttribute. size = // `num_custom_attributes`. 
EXRChannelInfo *channels; // [num_channels] int *pixel_types; // Loaded pixel type(TINYEXR_PIXELTYPE_*) of `images` for // each channel. This is overwritten with `requested_pixel_types` when // loading. int num_channels; int compression_type; // compression type(TINYEXR_COMPRESSIONTYPE_*) int *requested_pixel_types; // Filled initially by // ParseEXRHeaderFrom(Meomory|File), then users // can edit it(only valid for HALF pixel type // channel) } EXRHeader; typedef struct _EXRMultiPartHeader { int num_headers; EXRHeader *headers; } EXRMultiPartHeader; typedef struct _EXRImage { EXRTile *tiles; // Tiled pixel data. The application must reconstruct image // from tiles manually. NULL if scanline format. unsigned char **images; // image[channels][pixels]. NULL if tiled format. int width; int height; int num_channels; // Properties for tile format. int num_tiles; } EXRImage; typedef struct _EXRMultiPartImage { int num_images; EXRImage *images; } EXRMultiPartImage; typedef struct _DeepImage { const char **channel_names; float ***image; // image[channels][scanlines][samples] int **offset_table; // offset_table[scanline][offsets] int num_channels; int width; int height; int pad0; } DeepImage; // @deprecated { For backward compatibility. Not recommended to use. } // Loads single-frame OpenEXR image. Assume EXR image contains A(single channel // alpha) or RGB(A) channels. // Application must free image data as returned by `out_rgba` // Result image format is: float x RGBA x width x hight // Returns negative value and may set error string in `err` when there's an // error extern int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err); // Loads single-frame OpenEXR image by specifing layer name. Assume EXR image contains A(single channel // alpha) or RGB(A) channels. 
// Application must free image data as returned by `out_rgba` // Result image format is: float x RGBA x width x hight // Returns negative value and may set error string in `err` when there's an // error // When the specified layer name is not found in the EXR file, the function will return `TINYEXR_ERROR_LAYER_NOT_FOUND`. extern int LoadEXRWithLayer(float **out_rgba, int *width, int *height, const char *filename, const char *layer_name, const char **err); // // Get layer infos from EXR file. // // @param[out] layer_names List of layer names. Application must free memory after using this. // @param[out] num_layers The number of layers // @param[out] err Error string(wll be filled when the function returns error code). Free it using FreeEXRErrorMessage after using this value. // // @return TINYEXR_SUCCEES upon success. // extern int EXRLayers(const char *filename, const char **layer_names[], int *num_layers, const char **err); // @deprecated { to be removed. } // Simple wrapper API for ParseEXRHeaderFromFile. // checking given file is a EXR file(by just look up header) // @return TINYEXR_SUCCEES for EXR image, TINYEXR_ERROR_INVALID_HEADER for // others extern int IsEXR(const char *filename); // @deprecated { to be removed. } // Saves single-frame OpenEXR image. Assume EXR image contains RGB(A) channels. // components must be 1(Grayscale), 3(RGB) or 4(RGBA). // Input image format is: `float x width x height`, or `float x RGB(A) x width x // hight` // Save image as fp16(HALF) format when `save_as_fp16` is positive non-zero // value. // Save image as fp32(FLOAT) format when `save_as_fp16` is 0. // Use ZIP compression by default. 
// Returns negative value and may set error string in `err` when there's an // error extern int SaveEXR(const float *data, const int width, const int height, const int components, const int save_as_fp16, const char *filename, const char **err); // Initialize EXRHeader struct extern void InitEXRHeader(EXRHeader *exr_header); // Initialize EXRImage struct extern void InitEXRImage(EXRImage *exr_image); // Free's internal data of EXRHeader struct extern int FreeEXRHeader(EXRHeader *exr_header); // Free's internal data of EXRImage struct extern int FreeEXRImage(EXRImage *exr_image); // Free's error message extern void FreeEXRErrorMessage(const char *msg); // Parse EXR version header of a file. extern int ParseEXRVersionFromFile(EXRVersion *version, const char *filename); // Parse EXR version header from memory-mapped EXR data. extern int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory, size_t size); // Parse single-part OpenEXR header from a file and initialize `EXRHeader`. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromFile(EXRHeader *header, const EXRVersion *version, const char *filename, const char **err); // Parse single-part OpenEXR header from a memory and initialize `EXRHeader`. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRHeaderFromMemory(EXRHeader *header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Parse multi-part OpenEXR headers from a file and initialize `EXRHeader*` // array. 
// When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromFile(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const char *filename, const char **err); // Parse multi-part OpenEXR headers from a memory and initialize `EXRHeader*` // array // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int ParseEXRMultipartHeaderFromMemory(EXRHeader ***headers, int *num_headers, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err); // Loads single-part OpenEXR image from a file. // Application must setup `ParseEXRHeaderFromFile` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromFile(EXRImage *image, const EXRHeader *header, const char *filename, const char **err); // Loads single-part OpenEXR image from a memory. // Application must setup `EXRHeader` with // `ParseEXRHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRImageFromMemory(EXRImage *image, const EXRHeader *header, const unsigned char *memory, const size_t size, const char **err); // Loads multi-part OpenEXR image from a file. // Application must setup `ParseEXRMultipartHeaderFromFile` before calling this // function. 
// Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromFile(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const char *filename, const char **err); // Loads multi-part OpenEXR image from a memory. // Application must setup `EXRHeader*` array with // `ParseEXRMultipartHeaderFromMemory` before calling this function. // Application can free EXRImage using `FreeEXRImage` // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRMultipartImageFromMemory(EXRImage *images, const EXRHeader **headers, unsigned int num_parts, const unsigned char *memory, const size_t size, const char **err); // Saves multi-channel, single-frame OpenEXR image to a file. // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int SaveEXRImageToFile(const EXRImage *image, const EXRHeader *exr_header, const char *filename, const char **err); // Saves multi-channel, single-frame OpenEXR image to a memory. // Image is compressed using EXRImage.compression value. // Return the number of bytes if success. // Return zero and will set error string in `err` when there's an // error. // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern size_t SaveEXRImageToMemory(const EXRImage *image, const EXRHeader *exr_header, unsigned char **memory, const char **err); // Loads single-frame OpenEXR deep image. 
// Application must free memory of variables in DeepImage(image, offset_table) // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadDeepEXR(DeepImage *out_image, const char *filename, const char **err); // NOT YET IMPLEMENTED: // Saves single-frame OpenEXR deep image. // Returns negative value and may set error string in `err` when there's an // error // extern int SaveDeepEXR(const DeepImage *in_image, const char *filename, // const char **err); // NOT YET IMPLEMENTED: // Loads multi-part OpenEXR deep image. // Application must free memory of variables in DeepImage(image, offset_table) // extern int LoadMultiPartDeepEXR(DeepImage **out_image, int num_parts, const // char *filename, // const char **err); // For emscripten. // Loads single-frame OpenEXR image from memory. Assume EXR image contains // RGB(A) channels. // Returns negative value and may set error string in `err` when there's an // error // When there was an error message, Application must free `err` with // FreeEXRErrorMessage() extern int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err); #ifdef __cplusplus } #endif #endif // TINYEXR_H_ #ifdef TINYEXR_IMPLEMENTATION #ifndef TINYEXR_IMPLEMENTATION_DEIFNED #define TINYEXR_IMPLEMENTATION_DEIFNED #include <algorithm> #include <cassert> #include <cstdio> #include <cstdlib> #include <cstring> #include <sstream> // #include <iostream> // debug #include <limits> #include <string> #include <vector> #if __cplusplus > 199711L // C++11 #include <cstdint> #if TINYEXR_USE_THREAD #include <atomic> #include <thread> #endif #endif // __cplusplus > 199711L #if TINYEXR_USE_OPENMP #include <omp.h> #endif #if TINYEXR_USE_MINIZ #else // Issue #46. 
Please include your own zlib-compatible API header before // including `tinyexr.h` //#include "zlib.h" #endif #if TINYEXR_USE_ZFP #include "zfp.h" #endif namespace tinyexr { #if __cplusplus > 199711L // C++11 typedef uint64_t tinyexr_uint64; typedef int64_t tinyexr_int64; #else // Although `long long` is not a standard type pre C++11, assume it is defined // as a compiler's extension. #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #endif typedef unsigned long long tinyexr_uint64; typedef long long tinyexr_int64; #ifdef __clang__ #pragma clang diagnostic pop #endif #endif #if TINYEXR_USE_MINIZ namespace miniz { #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wunused-function" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #pragma clang diagnostic ignored "-Wundef" #if __has_warning("-Wcomma") #pragma clang diagnostic ignored "-Wcomma" #endif #if __has_warning("-Wmacro-redefined") #pragma clang diagnostic ignored "-Wmacro-redefined" #endif #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #if __has_warning("-Wzero-as-null-pointer-constant") #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" #endif #if __has_warning("-Wtautological-constant-compare") #pragma clang diagnostic ignored "-Wtautological-constant-compare" #endif #if __has_warning("-Wextra-semi-stmt") #pragma clang diagnostic ignored "-Wextra-semi-stmt" #endif #endif /* miniz.c v1.15 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing See "unlicense" statement at the end of this file. 
Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013 Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros). * Change History 10/13/13 v1.15 r4 - Interim bugfix release while I work on the next major release with Zip64 support (almost there!): - Critical fix for the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY bug (thanks kahmyong.moon@hp.com) which could cause locate files to not find files. This bug would only have occured in earlier versions if you explicitly used this flag, OR if you used mz_zip_extract_archive_file_to_heap() or mz_zip_add_mem_to_archive_file_in_place() (which used this flag). If you can't switch to v1.15 but want to fix this bug, just remove the uses of this flag from both helper funcs (and of course don't use the flag). - Bugfix in mz_zip_reader_extract_to_mem_no_alloc() from kymoon when pUser_read_buf is not NULL and compressed size is > uncompressed size - Fixing mz_zip_reader_extract_*() funcs so they don't try to extract compressed data from directory entries, to account for weird zipfiles which contain zero-size compressed data on dir entries. Hopefully this fix won't cause any issues on weird zip archives, because it assumes the low 16-bits of zip external attributes are DOS attributes (which I believe they always are in practice). - Fixing mz_zip_reader_is_file_a_directory() so it doesn't check the internal attributes, just the filename and external attributes - mz_zip_reader_init_file() - missing MZ_FCLOSE() call if the seek failed - Added cmake support for Linux builds which builds all the examples, tested with clang v3.3 and gcc v4.6. 
- Clang fix for tdefl_write_image_to_png_file_in_memory() from toffaletti - Merged MZ_FORCEINLINE fix from hdeanclark - Fix <time.h> include before config #ifdef, thanks emil.brink - Added tdefl_write_image_to_png_file_in_memory_ex(): supports Y flipping (super useful for OpenGL apps), and explicit control over the compression level (so you can set it to 1 for real-time compression). - Merged in some compiler fixes from paulharris's github repro. - Retested this build under Windows (VS 2010, including static analysis), tcc 0.9.26, gcc v4.6 and clang v3.3. - Added example6.c, which dumps an image of the mandelbrot set to a PNG file. - Modified example2 to help test the MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY flag more. - In r3: Bugfix to mz_zip_writer_add_file() found during merge: Fix possible src file fclose() leak if alignment bytes+local header file write faiiled - In r4: Minor bugfix to mz_zip_writer_add_from_zip_reader(): Was pushing the wrong central dir header offset, appears harmless in this release, but it became a problem in the zip64 branch 5/20/12 v1.14 - MinGW32/64 GCC 4.6.1 compiler fixes: added MZ_FORCEINLINE, #include <time.h> (thanks fermtect). 5/19/12 v1.13 - From jason@cornsyrup.org and kelwert@mtu.edu - Fix mz_crc32() so it doesn't compute the wrong CRC-32's when mz_ulong is 64-bit. - Temporarily/locally slammed in "typedef unsigned long mz_ulong" and re-ran a randomized regression test on ~500k files. - Eliminated a bunch of warnings when compiling with GCC 32-bit/64. - Ran all examples, miniz.c, and tinfl.c through MSVC 2008's /analyze (static analysis) option and fixed all warnings (except for the silly "Use of the comma-operator in a tested expression.." analysis warning, which I purposely use to work around a MSVC compiler warning). - Created 32-bit and 64-bit Codeblocks projects/workspace. Built and tested Linux executables. The codeblocks workspace is compatible with Linux+Win32/x64. 
- Added miniz_tester solution/project, which is a useful little app derived from LZHAM's tester app that I use as part of the regression test. - Ran miniz.c and tinfl.c through another series of regression testing on ~500,000 files and archives. - Modified example5.c so it purposely disables a bunch of high-level functionality (MINIZ_NO_STDIO, etc.). (Thanks to corysama for the MINIZ_NO_STDIO bug report.) - Fix ftell() usage in examples so they exit with an error on files which are too large (a limitation of the examples, not miniz itself). 4/12/12 v1.12 - More comments, added low-level example5.c, fixed a couple minor level_and_flags issues in the archive API's. level_and_flags can now be set to MZ_DEFAULT_COMPRESSION. Thanks to Bruce Dawson <bruced@valvesoftware.com> for the feedback/bug report. 5/28/11 v1.11 - Added statement from unlicense.org 5/27/11 v1.10 - Substantial compressor optimizations: - Level 1 is now ~4x faster than before. The L1 compressor's throughput now varies between 70-110MB/sec. on a - Core i7 (actual throughput varies depending on the type of data, and x64 vs. x86). - Improved baseline L2-L9 compression perf. Also, greatly improved compression perf. issues on some file types. - Refactored the compression code for better readability and maintainability. - Added level 10 compression level (L10 has slightly better ratio than level 9, but could have a potentially large drop in throughput on some files). 5/15/11 v1.09 - Initial stable release. * Low-level Deflate/Inflate implementation notes: Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses approximately as well as zlib. Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function coroutine: see tinfl_decompress(). 
It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory block large enough to hold the entire file. The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation. * zlib-style API notes: miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in zlib replacement in many apps: The z_stream struct, optional memory allocation callbacks deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound inflateInit/inflateInit2/inflate/inflateEnd compress, compress2, compressBound, uncompress CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines. Supports raw deflate streams or standard zlib streams with adler-32 checking. Limitations: The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries. I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but there are no guarantees that miniz.c pulls this off perfectly. * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by Alex Evans. Supports 1-4 bytes/pixel images. * ZIP archive API notes: The ZIP archive API's where designed with simplicity and efficiency in mind, with just enough abstraction to get the job done with minimal fuss. There are simple API's to retrieve file information, read files from existing archives, create new archives, append new files to existing archives, or clone archive data from one archive to another. It supports archives located in memory or the heap, on disk (using stdio.h), or you can specify custom file read/write callbacks. - Archive reading: Just call this function to read a single file from a disk archive: void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); For more complex cases, use the "mz_zip_reader" functions. 
Upon opening an archive, the entire central directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files. - Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file: int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags); The locate operation can optionally check file comments too, which (as one example) can be used to identify multiple versions of the same file in an archive. This function uses a simple linear search through the central directory, so it's not very fast. Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and retrieve detailed info on each file by calling mz_zip_reader_file_stat(). - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data to disk and builds an exact image of the central directory in memory. The central directory image is written all at once at the end of the archive file when the archive is finalized. The archive writer can optionally align each file's local header and file data to any power of 2 alignment, which can be useful when the archive will be read from optical media. Also, the writer supports placing arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still readable by any ZIP tool. - Archive appending: The simple way to add a single file to an archive is to call this function: mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); The archive will be created if it doesn't already exist, otherwise it'll be appended to. 
Note the appending is done in-place and is not an atomic operation, so if something goes wrong during the operation it's possible the archive could be left without a central directory (although the local file headers and file data will be fine, so the archive will be recoverable). For more complex archive modification scenarios: 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to preserve into a new archive using using the mz_zip_writer_add_from_zip_reader() function (which compiles the compressed file data as-is). When you're done, delete the old archive and rename the newly written archive, and you're done. This is safe but requires a bunch of temporary disk space or heap memory. 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(), append new files as needed, then finalize the archive which will write an updated central directory to the original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a possibility that the archive's central directory could be lost with this method if anything goes wrong, though. - ZIP archive support limitations: No zip64 or spanning support. Extraction functions can only handle unencrypted, stored or deflated files. Requires streams capable of seeking. * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it. * Important: For best perf. be sure to customize the below macros for your target platform: #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_LITTLE_ENDIAN 1 #define MINIZ_HAS_64BIT_REGISTERS 1 * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files (i.e. 
32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/

#ifndef MINIZ_HEADER_INCLUDED
#define MINIZ_HEADER_INCLUDED

//#include <stdlib.h>

// Defines to completely disable specific portions of miniz.c:
// If all macros here are defined the only functionality remaining will be
// CRC-32, adler-32, tinfl, and tdefl.

// Define MINIZ_NO_STDIO to disable all usage and any functions which rely on
// stdio for file I/O.
//#define MINIZ_NO_STDIO

// If MINIZ_NO_TIME is specified then the ZIP archive functions will not be
// able to get the current time, or
// get/set file times, and the C run-time funcs that get/set times won't be
// called.
// The current downside is the times written to your archives will be from
// 1979.
#define MINIZ_NO_TIME

// Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's.
#define MINIZ_NO_ARCHIVE_APIS

// Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing related ZIP
// archive API's.
//#define MINIZ_NO_ARCHIVE_WRITING_APIS

// Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression
// API's.
//#define MINIZ_NO_ZLIB_APIS

// Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib names, to prevent
// conflicts against stock zlib.
//#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES

// Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc.
// Note if MINIZ_NO_MALLOC is defined then the user must always provide custom
// user alloc/free/realloc
// callbacks to the zlib and archive API's, and a few stand-alone helper API's
// which don't provide custom user
// functions (such as tdefl_compress_mem_to_heap() and
// tinfl_decompress_mem_to_heap()) won't work.
//#define MINIZ_NO_MALLOC #if defined(__TINYC__) && (defined(__linux) || defined(__linux__)) // TODO: Work around "error: include file 'sys\utime.h' when compiling with tcc // on Linux #define MINIZ_NO_TIME #endif #if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS) //#include <time.h> #endif #if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || \ defined(__i386) || defined(__i486__) || defined(__i486) || \ defined(i386) || defined(__ia64__) || defined(__x86_64__) // MINIZ_X86_OR_X64_CPU is only used to help set the below macros. #define MINIZ_X86_OR_X64_CPU 1 #endif #if defined(__sparcv9) // Big endian #else #if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU // Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. #define MINIZ_LITTLE_ENDIAN 1 #endif #endif #if MINIZ_X86_OR_X64_CPU // Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient // integer loads and stores from unaligned addresses. //#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1 #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES \ 0 // disable to suppress compiler warnings #endif #if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || \ defined(_LP64) || defined(__LP64__) || defined(__ia64__) || \ defined(__x86_64__) // Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are // reasonably fast (and don't involve compiler generated calls to helper // functions). #define MINIZ_HAS_64BIT_REGISTERS 1 #endif #ifdef __cplusplus extern "C" { #endif // ------------------- zlib-style API Definitions. // For more compatibility with zlib, miniz.c uses unsigned long for some // parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits! typedef unsigned long mz_ulong; // mz_free() internally uses the MZ_FREE() macro (which by default calls free() // unless you've modified the MZ_MALLOC macro) to release a block allocated from // the heap. 
// Releases a block allocated from the heap via the MZ_FREE() macro (which by
// default calls free() unless the MZ_MALLOC macro family has been modified).
void mz_free(void *p);

#define MZ_ADLER32_INIT (1)
// mz_adler32() returns the initial adler-32 value to use when called with
// ptr==NULL.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);

#define MZ_CRC32_INIT (0)
// mz_crc32() returns the initial CRC-32 value to use when called with
// ptr==NULL.
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);

// Compression strategies.
enum {
  MZ_DEFAULT_STRATEGY = 0,
  MZ_FILTERED = 1,
  MZ_HUFFMAN_ONLY = 2,
  MZ_RLE = 3,
  MZ_FIXED = 4
};

// Method
#define MZ_DEFLATED 8

#ifndef MINIZ_NO_ZLIB_APIS

// Heap allocation callbacks.
// Note that mz_alloc_func parameter types purposely differ from zlib's:
// items/size is size_t, not unsigned long.
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items,
                                 size_t size);

#define MZ_VERSION "9.1.15"
#define MZ_VERNUM 0x91F0
#define MZ_VER_MAJOR 9
#define MZ_VER_MINOR 1
#define MZ_VER_REVISION 15
#define MZ_VER_SUBREVISION 0

// Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The
// other values are for advanced use (refer to the zlib docs).
enum {
  MZ_NO_FLUSH = 0,
  MZ_PARTIAL_FLUSH = 1,
  MZ_SYNC_FLUSH = 2,
  MZ_FULL_FLUSH = 3,
  MZ_FINISH = 4,
  MZ_BLOCK = 5
};

// Return status codes. MZ_PARAM_ERROR is non-standard.
enum {
  MZ_OK = 0,
  MZ_STREAM_END = 1,
  MZ_NEED_DICT = 2,
  MZ_ERRNO = -1,
  MZ_STREAM_ERROR = -2,
  MZ_DATA_ERROR = -3,
  MZ_MEM_ERROR = -4,
  MZ_BUF_ERROR = -5,
  MZ_VERSION_ERROR = -6,
  MZ_PARAM_ERROR = -10000
};

// Compression levels: 0-9 are the standard zlib-style levels, 10 is best
// possible compression (not zlib compatible, and may be very slow),
// MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL.
enum { MZ_NO_COMPRESSION = 0, MZ_BEST_SPEED = 1, MZ_BEST_COMPRESSION = 9, MZ_UBER_COMPRESSION = 10, MZ_DEFAULT_LEVEL = 6, MZ_DEFAULT_COMPRESSION = -1 }; // Window bits #define MZ_DEFAULT_WINDOW_BITS 15 struct mz_internal_state; // Compression/decompression stream struct. typedef struct mz_stream_s { const unsigned char *next_in; // pointer to next byte to read unsigned int avail_in; // number of bytes available at next_in mz_ulong total_in; // total number of bytes consumed so far unsigned char *next_out; // pointer to next byte to write unsigned int avail_out; // number of bytes that can be written to next_out mz_ulong total_out; // total number of bytes produced so far char *msg; // error msg (unused) struct mz_internal_state *state; // internal state, allocated by zalloc/zfree mz_alloc_func zalloc; // optional heap allocation function (defaults to malloc) mz_free_func zfree; // optional heap free function (defaults to free) void *opaque; // heap alloc function user pointer int data_type; // data_type (unused) mz_ulong adler; // adler32 of the source or uncompressed data mz_ulong reserved; // not used } mz_stream; typedef mz_stream *mz_streamp; // Returns the version string of miniz.c. const char *mz_version(void); // mz_deflateInit() initializes a compressor with default options: // Parameters: // pStream must point to an initialized mz_stream struct. // level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. // level 1 enables a specially optimized compression function that's been // optimized purely for performance, not ratio. // (This special func. is currently only enabled when // MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if the input parameters are bogus. // MZ_MEM_ERROR on out of memory. 
int mz_deflateInit(mz_streamp pStream, int level); // mz_deflateInit2() is like mz_deflate(), except with more control: // Additional parameters: // method must be MZ_DEFLATED // window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with // zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no // header or footer) // mem_level must be between [1, 9] (it's checked but ignored by miniz.c) int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy); // Quickly resets a compressor without having to reallocate anything. Same as // calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). int mz_deflateReset(mz_streamp pStream); // mz_deflate() compresses the input to output, consuming as much of the input // and producing as much output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or // MZ_FINISH. // Return values: // MZ_OK on success (when flushing, or if more input is needed but not // available, and/or there's more output to be written but the output buffer // is full). // MZ_STREAM_END if all input has been consumed and all output bytes have been // written. Don't call mz_deflate() on the stream anymore. // MZ_STREAM_ERROR if the stream is bogus. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input and/or // output buffers are empty. (Fill up the input buffer or free up some output // space and try again.) int mz_deflate(mz_streamp pStream, int flush); // mz_deflateEnd() deinitializes a compressor: // Return values: // MZ_OK on success. // MZ_STREAM_ERROR if the stream is bogus. 
int mz_deflateEnd(mz_streamp pStream); // mz_deflateBound() returns a (very) conservative upper bound on the amount of // data that could be generated by deflate(), assuming flush is set to only // MZ_NO_FLUSH or MZ_FINISH. mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len); // Single-call compression functions mz_compress() and mz_compress2(): // Returns MZ_OK on success, or one of the error codes from mz_deflate() on // failure. int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level); // mz_compressBound() returns a (very) conservative upper bound on the amount of // data that could be generated by calling mz_compress(). mz_ulong mz_compressBound(mz_ulong source_len); // Initializes a decompressor. int mz_inflateInit(mz_streamp pStream); // mz_inflateInit2() is like mz_inflateInit() with an additional option that // controls the window size and whether or not the stream has been wrapped with // a zlib header/footer: // window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or // -MZ_DEFAULT_WINDOW_BITS (raw deflate). int mz_inflateInit2(mz_streamp pStream, int window_bits); // Decompresses the input stream to the output, consuming only as much of the // input as needed, and writing as much to the output as possible. // Parameters: // pStream is the stream to read from and write to. You must initialize/update // the next_in, avail_in, next_out, and avail_out members. // flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. // On the first call, if flush is MZ_FINISH it's assumed the input and output // buffers are both sized large enough to decompress the entire stream in a // single call (this is slightly faster). 
// MZ_FINISH implies that there are no more source bytes available beside // what's already in the input buffer, and that the output buffer is large // enough to hold the rest of the decompressed data. // Return values: // MZ_OK on success. Either more input is needed but not available, and/or // there's more output to be written but the output buffer is full. // MZ_STREAM_END if all needed input has been consumed and all output bytes // have been written. For zlib streams, the adler-32 of the decompressed data // has also been verified. // MZ_STREAM_ERROR if the stream is bogus. // MZ_DATA_ERROR if the deflate stream is invalid. // MZ_PARAM_ERROR if one of the parameters is invalid. // MZ_BUF_ERROR if no forward progress is possible because the input buffer is // empty but the inflater needs more input to continue, or if the output // buffer is not large enough. Call mz_inflate() again // with more input data, or with more room in the output buffer (except when // using single call decompression, described above). int mz_inflate(mz_streamp pStream, int flush); // Deinitializes a decompressor. int mz_inflateEnd(mz_streamp pStream); // Single-call decompression. // Returns MZ_OK on success, or one of the error codes from mz_inflate() on // failure. int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len); // Returns a string description of the specified error code, or NULL if the // error code is invalid. const char *mz_error(int err); // Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used // as a drop-in replacement for the subset of zlib that miniz.c supports. // Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you // use zlib in the same project. 
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES typedef unsigned char Byte; typedef unsigned int uInt; typedef mz_ulong uLong; typedef Byte Bytef; typedef uInt uIntf; typedef char charf; typedef int intf; typedef void *voidpf; typedef uLong uLongf; typedef void *voidp; typedef void *const voidpc; #define Z_NULL 0 #define Z_NO_FLUSH MZ_NO_FLUSH #define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH #define Z_SYNC_FLUSH MZ_SYNC_FLUSH #define Z_FULL_FLUSH MZ_FULL_FLUSH #define Z_FINISH MZ_FINISH #define Z_BLOCK MZ_BLOCK #define Z_OK MZ_OK #define Z_STREAM_END MZ_STREAM_END #define Z_NEED_DICT MZ_NEED_DICT #define Z_ERRNO MZ_ERRNO #define Z_STREAM_ERROR MZ_STREAM_ERROR #define Z_DATA_ERROR MZ_DATA_ERROR #define Z_MEM_ERROR MZ_MEM_ERROR #define Z_BUF_ERROR MZ_BUF_ERROR #define Z_VERSION_ERROR MZ_VERSION_ERROR #define Z_PARAM_ERROR MZ_PARAM_ERROR #define Z_NO_COMPRESSION MZ_NO_COMPRESSION #define Z_BEST_SPEED MZ_BEST_SPEED #define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION #define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION #define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY #define Z_FILTERED MZ_FILTERED #define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY #define Z_RLE MZ_RLE #define Z_FIXED MZ_FIXED #define Z_DEFLATED MZ_DEFLATED #define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS #define alloc_func mz_alloc_func #define free_func mz_free_func #define internal_state mz_internal_state #define z_stream mz_stream #define deflateInit mz_deflateInit #define deflateInit2 mz_deflateInit2 #define deflateReset mz_deflateReset #define deflate mz_deflate #define deflateEnd mz_deflateEnd #define deflateBound mz_deflateBound #define compress mz_compress #define compress2 mz_compress2 #define compressBound mz_compressBound #define inflateInit mz_inflateInit #define inflateInit2 mz_inflateInit2 #define inflate mz_inflate #define inflateEnd mz_inflateEnd #define uncompress mz_uncompress #define crc32 mz_crc32 #define adler32 mz_adler32 #define MAX_WBITS 15 #define MAX_MEM_LEVEL 9 #define zError mz_error #define ZLIB_VERSION 
MZ_VERSION #define ZLIB_VERNUM MZ_VERNUM #define ZLIB_VER_MAJOR MZ_VER_MAJOR #define ZLIB_VER_MINOR MZ_VER_MINOR #define ZLIB_VER_REVISION MZ_VER_REVISION #define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION #define zlibVersion mz_version #define zlib_version mz_version() #endif // #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES #endif // MINIZ_NO_ZLIB_APIS // ------------------- Types and macros typedef unsigned char mz_uint8; typedef signed short mz_int16; typedef unsigned short mz_uint16; typedef unsigned int mz_uint32; typedef unsigned int mz_uint; typedef long long mz_int64; typedef unsigned long long mz_uint64; typedef int mz_bool; #define MZ_FALSE (0) #define MZ_TRUE (1) // An attempt to work around MSVC's spammy "warning C4127: conditional // expression is constant" message. #ifdef _MSC_VER #define MZ_MACRO_END while (0, 0) #else #define MZ_MACRO_END while (0) #endif // ------------------- ZIP archive reading/writing #ifndef MINIZ_NO_ARCHIVE_APIS enum { MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 260, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 256 }; typedef struct { mz_uint32 m_file_index; mz_uint32 m_central_dir_ofs; mz_uint16 m_version_made_by; mz_uint16 m_version_needed; mz_uint16 m_bit_flag; mz_uint16 m_method; #ifndef MINIZ_NO_TIME time_t m_time; #endif mz_uint32 m_crc32; mz_uint64 m_comp_size; mz_uint64 m_uncomp_size; mz_uint16 m_internal_attr; mz_uint32 m_external_attr; mz_uint64 m_local_header_ofs; mz_uint32 m_comment_size; char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE]; char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE]; } mz_zip_archive_file_stat; typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n); typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n); struct mz_zip_internal_state_tag; typedef struct mz_zip_internal_state_tag mz_zip_internal_state; typedef enum { MZ_ZIP_MODE_INVALID = 0, MZ_ZIP_MODE_READING = 1, MZ_ZIP_MODE_WRITING = 2, 
MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3 } mz_zip_mode; typedef struct mz_zip_archive_tag { mz_uint64 m_archive_size; mz_uint64 m_central_directory_file_ofs; mz_uint m_total_files; mz_zip_mode m_zip_mode; mz_uint m_file_offset_alignment; mz_alloc_func m_pAlloc; mz_free_func m_pFree; mz_realloc_func m_pRealloc; void *m_pAlloc_opaque; mz_file_read_func m_pRead; mz_file_write_func m_pWrite; void *m_pIO_opaque; mz_zip_internal_state *m_pState; } mz_zip_archive; typedef enum { MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100, MZ_ZIP_FLAG_IGNORE_PATH = 0x0200, MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400, MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800 } mz_zip_flags; // ZIP archive reading // Inits a ZIP archive reader. // These functions read and validate the archive's central directory. mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint32 flags); mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags); #endif // Returns the total number of files in the archive. mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip); // Returns detailed information about an archive file entry. mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat); // Determines if an archive file entry is a directory entry. mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index); mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index); // Retrieves the filename of an archive file entry. // Returns the number of bytes written to pFilename, or if filename_buf_size is // 0 this function returns the number of bytes needed to fully store the // filename. 
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename, mz_uint filename_buf_size);

// Attempts to locate a file in the archive's central directory.
// Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH
// Returns -1 if the file cannot be found.
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags);

// Extracts an archive file to a memory buffer using no memory allocation
// (the caller supplies both the output buffer and the scratch read buffer).
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);

// Extracts an archive file to a memory buffer.
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags);

// Extracts an archive file to a dynamically allocated heap buffer.
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags);

// Extracts an archive file using a callback function to output the file's
// data.
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags);

#ifndef MINIZ_NO_STDIO
// Extracts an archive file to a disk file and sets its last accessed and
// modified times.
// This function only extracts files, not archive directory records. mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags); mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags); #endif // Ends archive reading, freeing all allocations, and closing the input archive // file if mz_zip_reader_init_file() was used. mz_bool mz_zip_reader_end(mz_zip_archive *pZip); // ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS // Inits a ZIP archive writer. mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size); mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size); #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning); #endif // Converts a ZIP archive reader object into a writer object, to allow efficient // in-place file appends to occur on an existing archive. // For archives opened using mz_zip_reader_init_file, pFilename must be the // archive's filename so it can be reopened for writing. If the file can't be // reopened, mz_zip_reader_end() will be called. // For archives opened using mz_zip_reader_init_mem, the memory block must be // growable using the realloc callback (which defaults to realloc unless you've // overridden it). // Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's // user provided m_pWrite function cannot be NULL. // Note: In-place archive modification is not recommended unless you know what // you're doing, because if execution stops or something goes wrong before // the archive is finalized the file's central directory will be hosed. mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename); // Adds the contents of a memory buffer to an archive. 
These functions record // the current local time into the archive. // To add a directory entry, call this method with an archive name ending in a // forwardslash with empty buffer. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags); mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32); #ifndef MINIZ_NO_STDIO // Adds the contents of a disk file to an archive. This function also records // the disk file's modified time into the archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); #endif // Adds a file to an archive by fully cloning the data from another archive. // This function fully clones the source file's compressed data (no // recompression), along with its full filename, extra data, and comment fields. mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index); // Finalizes the archive by writing the central directory records followed by // the end of central directory record. // After an archive is finalized, the only valid call on the mz_zip_archive // struct is mz_zip_writer_end(). // An archive must be manually finalized by calling this function for it to be // valid. 
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip); mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize); // Ends archive writing, freeing all allocations, and closing the output file if // mz_zip_writer_init_file() was used. // Note for the archive to be valid, it must have been finalized before ending. mz_bool mz_zip_writer_end(mz_zip_archive *pZip); // Misc. high-level helper functions: // mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically) // appends a memory blob to a ZIP archive. // level_and_flags - compression level (0-10, see MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or // just set to MZ_DEFAULT_COMPRESSION. mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags); // Reads a single file from an archive into a heap block. // Returns NULL on failure. void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint zip_flags); #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS // ------------------- Low-level Decompression API Definitions // Decompression flags used by tinfl_decompress(). // TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and // ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the // input is a raw deflate stream. // TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available // beyond the end of the supplied input buffer. If clear, the input buffer // contains all remaining input. // TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large // enough to hold the entire decompressed stream. If clear, the output buffer is // at least the size of the dictionary (typically 32KB). 
// TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the // decompressed bytes. enum { TINFL_FLAG_PARSE_ZLIB_HEADER = 1, TINFL_FLAG_HAS_MORE_INPUT = 2, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4, TINFL_FLAG_COMPUTE_ADLER32 = 8 }; // High level decompression functions: // tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data // to decompress. // On return: // Function returns a pointer to the decompressed data, or NULL on failure. // *pOut_len will be set to the decompressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must call mz_free() on the returned block when it's no longer // needed. void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tinfl_decompress_mem_to_mem() decompresses a block in memory to another block // in memory. // Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes // written on success. #define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1)) size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // tinfl_decompress_mem_to_callback() decompresses a block in memory to an // internal 32KB buffer, and a user provided callback function will be called to // flush the buffer. // Returns 1 on success or 0 on failure. typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); struct tinfl_decompressor_tag; typedef struct tinfl_decompressor_tag tinfl_decompressor; // Max size of LZ dictionary. #define TINFL_LZ_DICT_SIZE 32768 // Return status. 
typedef enum { TINFL_STATUS_BAD_PARAM = -3, TINFL_STATUS_ADLER32_MISMATCH = -2, TINFL_STATUS_FAILED = -1, TINFL_STATUS_DONE = 0, TINFL_STATUS_NEEDS_MORE_INPUT = 1, TINFL_STATUS_HAS_MORE_OUTPUT = 2 } tinfl_status; // Initializes the decompressor to its initial state. #define tinfl_init(r) \ do { \ (r)->m_state = 0; \ } \ MZ_MACRO_END #define tinfl_get_adler32(r) (r)->m_check_adler32 // Main low-level decompressor coroutine function. This is the only function // actually needed for decompression. All the other functions are just // high-level helpers for improved usability. // This is a universal API, i.e. it can be used as a building block to build any // desired higher level decompression API. In the limit case, it can be called // once per every byte input or output. tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags); // Internal/private bits follow. 
enum { TINFL_MAX_HUFF_TABLES = 3, TINFL_MAX_HUFF_SYMBOLS_0 = 288, TINFL_MAX_HUFF_SYMBOLS_1 = 32, TINFL_MAX_HUFF_SYMBOLS_2 = 19, TINFL_FAST_LOOKUP_BITS = 10, TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS }; typedef struct { mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0]; mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2]; } tinfl_huff_table; #if MINIZ_HAS_64BIT_REGISTERS #define TINFL_USE_64BIT_BITBUF 1 #endif #if TINFL_USE_64BIT_BITBUF typedef mz_uint64 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (64) #else typedef mz_uint32 tinfl_bit_buf_t; #define TINFL_BITBUF_SIZE (32) #endif struct tinfl_decompressor_tag { mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES]; tinfl_bit_buf_t m_bit_buf; size_t m_dist_from_out_buf_start; tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES]; mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137]; }; // ------------------- Low-level Compression API Definitions // Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly // slower, and raw/dynamic blocks will be output more frequently). #define TDEFL_LESS_MEMORY 0 // tdefl_init() compression flags logically OR'd together (low 12 bits contain // the max. number of probes per dictionary search): // TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes // per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap // compression), 4095=Huffman+LZ (slowest/best compression). enum { TDEFL_HUFFMAN_ONLY = 0, TDEFL_DEFAULT_MAX_PROBES = 128, TDEFL_MAX_PROBES_MASK = 0xFFF }; // TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before // the deflate data, and the Adler-32 of the source data at the end. Otherwise, // you'll get raw deflate data. 
// TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even // when not writing zlib headers). // TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more // efficient lazy parsing. // TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's // initialization time to the minimum, but the output may vary from run to run // given the same input (depending on the contents of memory). // TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) // TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. // TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. // TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. // The low 12 bits are reserved to control the max # of hash probes per // dictionary lookup (see TDEFL_MAX_PROBES_MASK). enum { TDEFL_WRITE_ZLIB_HEADER = 0x01000, TDEFL_COMPUTE_ADLER32 = 0x02000, TDEFL_GREEDY_PARSING_FLAG = 0x04000, TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000, TDEFL_RLE_MATCHES = 0x10000, TDEFL_FILTER_MATCHES = 0x20000, TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000, TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000 }; // High level compression functions: // tdefl_compress_mem_to_heap() compresses a block in memory to a heap block // allocated via malloc(). // On entry: // pSrc_buf, src_buf_len: Pointer and size of source block to compress. // flags: The max match finder probes (default is 128) logically OR'd against // the above flags. Higher probes are slower but improve compression. // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pOut_len will be set to the compressed data's size, which could be larger // than src_buf_len on uncompressible data. // The caller must free() the returned block when it's no longer needed. 
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags); // tdefl_compress_mem_to_mem() compresses a block in memory to another block in // memory. // Returns 0 on failure. size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags); // Compresses an image to a compressed PNG file in memory. // On entry: // pImage, w, h, and num_chans describe the image to compress. num_chans may be // 1, 2, 3, or 4. // The image pitch in bytes per scanline will be w*num_chans. The leftmost // pixel on the top scanline is stored first in memory. // level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED, // MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL // If flip is true, the image will be flipped on the Y axis (useful for OpenGL // apps). // On return: // Function returns a pointer to the compressed data, or NULL on failure. // *pLen_out will be set to the size of the PNG image file. // The caller must mz_free() the returned heap block (which will typically be // larger than *pLen_out) when it's no longer needed. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip); void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out); // Output stream interface. The compressor uses this interface to write // compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time. typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser); // tdefl_compress_mem_to_output() compresses a block to an output stream. The // above helpers use this function internally. 
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags); enum { TDEFL_MAX_HUFF_TABLES = 3, TDEFL_MAX_HUFF_SYMBOLS_0 = 288, TDEFL_MAX_HUFF_SYMBOLS_1 = 32, TDEFL_MAX_HUFF_SYMBOLS_2 = 19, TDEFL_LZ_DICT_SIZE = 32768, TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1, TDEFL_MIN_MATCH_LEN = 3, TDEFL_MAX_MATCH_LEN = 258 }; // TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed // output block (using static/fixed Huffman codes). #if TDEFL_LESS_MEMORY enum { TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 12, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #else enum { TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024, TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10, TDEFL_MAX_HUFF_SYMBOLS = 288, TDEFL_LZ_HASH_BITS = 15, TDEFL_LEVEL1_HASH_SIZE_MASK = 4095, TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3, TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS }; #endif // The low-level tdefl functions below may be used directly if the above helper // functions aren't flexible enough. The low-level functions don't make any heap // allocations, unlike the above helper functions. typedef enum { TDEFL_STATUS_BAD_PARAM = -2, TDEFL_STATUS_PUT_BUF_FAILED = -1, TDEFL_STATUS_OKAY = 0, TDEFL_STATUS_DONE = 1 } tdefl_status; // Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums typedef enum { TDEFL_NO_FLUSH = 0, TDEFL_SYNC_FLUSH = 2, TDEFL_FULL_FLUSH = 3, TDEFL_FINISH = 4 } tdefl_flush; // tdefl's compression state structure. 
// tdefl's complete compression state. All buffers (dictionary, Huffman
// tables, LZ code buffer, output buffer) are embedded inline, so the struct
// is large but requires no dynamic allocation.
typedef struct {
  tdefl_put_buf_func_ptr m_pPut_buf_func;
  void *m_pPut_buf_user;
  mz_uint m_flags, m_max_probes[2];
  int m_greedy_parsing;
  mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
  mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
  mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in,
      m_bit_buffer;
  mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit,
      m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index,
      m_wants_to_finish;
  tdefl_status m_prev_return_status;
  const void *m_pIn_buf;
  void *m_pOut_buf;
  size_t *m_pIn_buf_size, *m_pOut_buf_size;
  tdefl_flush m_flush;
  const mz_uint8 *m_pSrc;
  size_t m_src_buf_left, m_out_buf_ofs;
  mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
  mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
  mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
  mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
  mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
  mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;

// Initializes the compressor.
// There is no corresponding deinit() function because the tdefl API's do not
// dynamically allocate memory.
// pPut_buf_func: If non-NULL, output data will be supplied to the specified
// callback. In this case, the user should call the tdefl_compress_buffer() API
// for compression.
// If pPut_buf_func is NULL the user should always call the tdefl_compress()
// API.
// flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER,
// etc.)
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags);

// Compresses a block of data, consuming as much of the specified input buffer
// as possible, and writing as much compressed data to the specified output
// buffer as possible.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush);

// tdefl_compress_buffer() is only usable when tdefl_init() is called with a
// non-NULL tdefl_put_buf_func_ptr.
// tdefl_compress_buffer() always consumes the entire input buffer.
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush);

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);

// Can't use tdefl_create_comp_flags_from_zip_params if MINIZ_NO_ZLIB_APIS is
// defined, because it uses some of its macros.
#ifndef MINIZ_NO_ZLIB_APIS
// Create tdefl_compress() flags given zlib-style compression parameters.
// level may range from [0,10] (where 10 is absolute max compression, but may be
// much slower on some files)
// window_bits may be -15 (raw deflate) or 15 (zlib)
// strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY,
// MZ_RLE, or MZ_FIXED
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy);
#endif // #ifndef MINIZ_NO_ZLIB_APIS

#ifdef __cplusplus
}
#endif

#endif // MINIZ_HEADER_INCLUDED

// ------------------- End of Header: Implementation follows. (If you only want
// the header, define MINIZ_HEADER_FILE_ONLY.)

#ifndef MINIZ_HEADER_FILE_ONLY

// Compile-time size checks: each array gets a negative size (a compile error)
// if the corresponding fixed-width typedef is the wrong width.
typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];

//#include <assert.h>
//#include <string.h>

#define MZ_ASSERT(x) assert(x)

#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif

#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))

// Little-endian 16/32-bit loads: direct (possibly unaligned) reads where the
// target permits them, byte-assembled otherwise.
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p)                      \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p)                               \
  ((mz_uint32)(((const mz_uint8 *)(p))[0]) |          \
   ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) |  \
   ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | \
   ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif

#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE inline __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif

#ifdef __cplusplus
extern "C" {
#endif

// ------------------- zlib-style API's

// Computes/updates an Adler-32 checksum over buf_len bytes at ptr. Bytes are
// processed in 5552-byte blocks so the running 16-bit sums only need to be
// reduced mod 65521 once per block; the inner loop is unrolled 8x.
// Passing a NULL ptr returns the initial seed MZ_ADLER32_INIT.
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len) {
  mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
  size_t block_len = buf_len % 5552;
  if (!ptr)
    return MZ_ADLER32_INIT;
  while (buf_len) {
    for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
      s1 += ptr[0], s2 += s1;
      s1 += ptr[1], s2 += s1;
      s1 += ptr[2], s2 += s1;
      s1 += ptr[3], s2 += s1;
      s1 += ptr[4], s2 += s1;
      s1 += ptr[5], s2 += s1;
      s1 += ptr[6], s2 += s1;
      s1 += ptr[7], s2 += s1;
    }
    for (; i < block_len; ++i)
      s1 += *ptr++, s2 += s1;
    s1 %= 65521U, s2 %= 65521U;
    buf_len -= block_len;
    block_len = 5552;
  }
  return (s2 << 16) + s1;
}

// Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C
// implementation that balances processor cache usage against speed":
// http://www.geocities.com/malbrain/
// Uses a 16-entry nibble table instead of the usual 256-entry byte table.
// Passing a NULL ptr returns the initial seed MZ_CRC32_INIT.
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len) {
  static const mz_uint32 s_crc32[16] = {
      0,          0x1db71064, 0x3b6e20c8, 0x26d930ac,
      0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
      0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
      0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c};
  mz_uint32 crcu32 = (mz_uint32)crc;
  if (!ptr)
    return MZ_CRC32_INIT;
  crcu32 = ~crcu32;
  while (buf_len--) {
    mz_uint8 b = *ptr++;
    // Process low nibble, then high nibble.
    crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
    crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
  }
  return ~crcu32;
}

// Releases a heap block previously allocated through MZ_MALLOC/MZ_REALLOC.
void mz_free(void *p) { MZ_FREE(p); }

#ifndef MINIZ_NO_ZLIB_APIS

// Default zalloc/zfree callbacks installed when the caller leaves
// pStream->zalloc / pStream->zfree NULL.
// NOTE(review): items * size is not checked for size_t overflow here —
// this matches upstream miniz behavior.
static void *def_alloc_func(void *opaque, size_t items, size_t size) {
  (void)opaque, (void)items, (void)size;
  return MZ_MALLOC(items * size);
}
static void def_free_func(void *opaque, void *address) {
  (void)opaque, (void)address;
  MZ_FREE(address);
}
// static void *def_realloc_func(void *opaque, void *address, size_t items,
// size_t size) {
//  (void)opaque, (void)address, (void)items, (void)size;
//  return MZ_REALLOC(address, items * size);
//}

const char *mz_version(void) { return MZ_VERSION; }

// zlib-compatible deflateInit(): mem_level fixed at 9 and method at
// MZ_DEFLATED, matching what mz_deflateInit2() accepts.
int mz_deflateInit(mz_streamp pStream, int level) {
  return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9,
                         MZ_DEFAULT_STRATEGY);
}

// zlib-compatible deflateInit2(): validates parameters, installs default
// allocators if needed, allocates the tdefl_compressor state and initializes
// it. window_bits must be +/-MZ_DEFAULT_WINDOW_BITS (zlib-wrapped vs raw).
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits,
                    int mem_level, int strategy) {
  tdefl_compressor *pComp;
  mz_uint comp_flags =
      TDEFL_COMPUTE_ADLER32 |
      tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
  if (!pStream)
    return MZ_STREAM_ERROR;
  if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) ||
      ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
       (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
    return MZ_PARAM_ERROR;
  pStream->data_type = 0;
  pStream->adler = MZ_ADLER32_INIT;
  pStream->msg = NULL;
  pStream->reserved = 0;
  pStream->total_in = 0;
  pStream->total_out = 0;
  if (!pStream->zalloc)
    pStream->zalloc = def_alloc_func;
  if (!pStream->zfree)
    pStream->zfree = def_free_func;
  pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1,
                                              sizeof(tdefl_compressor));
  if (!pComp)
    return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pComp;
  if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY) {
    // Frees pComp via pStream->zfree before reporting failure.
    mz_deflateEnd(pStream);
    return MZ_PARAM_ERROR;
  }
  return MZ_OK;
}

// Resets an initialized deflate stream for reuse, preserving the compression
// flags chosen at init time (read back from the existing compressor state).
int mz_deflateReset(mz_streamp pStream) {
  if ((!pStream) || (!pStream->state) || (!pStream->zalloc) ||
      (!pStream->zfree))
    return MZ_STREAM_ERROR;
  pStream->total_in = pStream->total_out = 0;
  tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL,
             ((tdefl_compressor *)pStream->state)->m_flags);
  return MZ_OK;
}

// zlib-compatible deflate(): repeatedly feeds next_in/next_out to
// tdefl_compress(), updating the stream counters and adler checksum, until
// the output fills, the input runs dry, compression finishes, or an error
// occurs.
int mz_deflate(mz_streamp pStream, int flush) {
  size_t in_bytes, out_bytes;
  mz_ulong orig_total_in, orig_total_out;
  int mz_status = MZ_OK;
  if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) ||
      (!pStream->next_out))
    return MZ_STREAM_ERROR;
  if (!pStream->avail_out)
    return MZ_BUF_ERROR;
  // tdefl has no partial-flush mode; treat it as a sync flush.
  if (flush == MZ_PARTIAL_FLUSH)
    flush = MZ_SYNC_FLUSH;
  if (((tdefl_compressor *)pStream->state)->m_prev_return_status ==
      TDEFL_STATUS_DONE)
    return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
  orig_total_in = pStream->total_in;
  orig_total_out = pStream->total_out;
  for (;;) {
    tdefl_status defl_status;
    in_bytes = pStream->avail_in;
    out_bytes = pStream->avail_out;
    defl_status = tdefl_compress((tdefl_compressor *)pStream->state,
                                 pStream->next_in, &in_bytes, pStream->next_out,
                                 &out_bytes, (tdefl_flush)flush);
    pStream->next_in += (mz_uint)in_bytes;
    pStream->avail_in -= (mz_uint)in_bytes;
    pStream->total_in += (mz_uint)in_bytes;
    pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
    pStream->next_out += (mz_uint)out_bytes;
    pStream->avail_out -= (mz_uint)out_bytes;
    pStream->total_out += (mz_uint)out_bytes;
    if (defl_status < 0) {
      mz_status = MZ_STREAM_ERROR;
      break;
    } else if (defl_status == TDEFL_STATUS_DONE) {
      mz_status = MZ_STREAM_END;
      break;
    } else if (!pStream->avail_out)
      break;
    else if ((!pStream->avail_in) && (flush != MZ_FINISH)) {
      if ((flush) || (pStream->total_in != orig_total_in) ||
          (pStream->total_out != orig_total_out))
        break;
      return MZ_BUF_ERROR; // Can't make forward progress without some input.
    }
  }
  return mz_status;
}

// Frees the compressor state (if any) via the stream's zfree callback.
int mz_deflateEnd(mz_streamp pStream) {
  if (!pStream)
    return MZ_STREAM_ERROR;
  if (pStream->state) {
    pStream->zfree(pStream->opaque, pStream->state);
    pStream->state = NULL;
  }
  return MZ_OK;
}

mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len) {
  (void)pStream;
  // This is really over conservative. (And lame, but it's actually pretty
  // tricky to compute a true upper bound given the way tdefl's blocking works.)
  return MZ_MAX(128 + (source_len * 110) / 100,
                128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}

// Single-call compression: wraps deflateInit/deflate(MZ_FINISH)/deflateEnd.
// On success *pDest_len is updated to the compressed size.
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len,
                 const unsigned char *pSource, mz_ulong source_len, int level) {
  int status;
  mz_stream stream;
  memset(&stream, 0, sizeof(stream));
  // In case mz_ulong is 64-bits (argh I hate longs).
  if ((source_len | *pDest_len) > 0xFFFFFFFFU)
    return MZ_PARAM_ERROR;
  stream.next_in = pSource;
  stream.avail_in = (mz_uint32)source_len;
  stream.next_out = pDest;
  stream.avail_out = (mz_uint32)*pDest_len;
  status = mz_deflateInit(&stream, level);
  if (status != MZ_OK)
    return status;
  status = mz_deflate(&stream, MZ_FINISH);
  if (status != MZ_STREAM_END) {
    mz_deflateEnd(&stream);
    // MZ_OK here means the output buffer was too small to finish.
    return (status == MZ_OK) ? MZ_BUF_ERROR : status;
  }
  *pDest_len = stream.total_out;
  return mz_deflateEnd(&stream);
}

int mz_compress(unsigned char *pDest, mz_ulong *pDest_len,
                const unsigned char *pSource, mz_ulong source_len) {
  return mz_compress2(pDest, pDest_len, pSource, source_len,
                      MZ_DEFAULT_COMPRESSION);
}

mz_ulong mz_compressBound(mz_ulong source_len) {
  return mz_deflateBound(NULL, source_len);
}

// Per-stream decompression state: wraps the low-level tinfl decompressor with
// a 32KB sliding dictionary and bookkeeping for the streaming mz_inflate API.
typedef struct {
  tinfl_decompressor m_decomp;
  mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
  int m_window_bits;
  mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
  tinfl_status m_last_status;
} inflate_state;

// zlib-compatible inflateInit2(): validates window_bits
// (+/-MZ_DEFAULT_WINDOW_BITS), installs default allocators if needed, and
// allocates/initializes the inflate_state.
int mz_inflateInit2(mz_streamp pStream, int window_bits) {
  inflate_state *pDecomp;
  if (!pStream)
    return MZ_STREAM_ERROR;
  if ((window_bits != MZ_DEFAULT_WINDOW_BITS) &&
      (-window_bits != MZ_DEFAULT_WINDOW_BITS))
    return MZ_PARAM_ERROR;
  pStream->data_type = 0;
  pStream->adler = 0;
  pStream->msg = NULL;
  pStream->total_in = 0;
  pStream->total_out = 0;
  pStream->reserved = 0;
  if (!pStream->zalloc)
    pStream->zalloc = def_alloc_func;
  if (!pStream->zfree)
    pStream->zfree = def_free_func;
  pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1,
                                             sizeof(inflate_state));
  if (!pDecomp)
    return MZ_MEM_ERROR;
  pStream->state = (struct mz_internal_state *)pDecomp;
  tinfl_init(&pDecomp->m_decomp);
  pDecomp->m_dict_ofs = 0;
  pDecomp->m_dict_avail = 0;
  pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
  pDecomp->m_first_call = 1;
  pDecomp->m_has_flushed = 0;
  pDecomp->m_window_bits = window_bits;
  return MZ_OK;
}

int mz_inflateInit(mz_streamp pStream) {
  return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}

int
mz_inflate(mz_streamp pStream, int flush) { inflate_state *pState; mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32; size_t in_bytes, out_bytes, orig_avail_in; tinfl_status status; if ((!pStream) || (!pStream->state)) return MZ_STREAM_ERROR; if (flush == MZ_PARTIAL_FLUSH) flush = MZ_SYNC_FLUSH; if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState = (inflate_state *)pStream->state; if (pState->m_window_bits > 0) decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER; orig_avail_in = pStream->avail_in; first_call = pState->m_first_call; pState->m_first_call = 0; if (pState->m_last_status < 0) return MZ_DATA_ERROR; if (pState->m_has_flushed && (flush != MZ_FINISH)) return MZ_STREAM_ERROR; pState->m_has_flushed |= (flush == MZ_FINISH); if ((flush == MZ_FINISH) && (first_call)) { // MZ_FINISH on the first call implies that the input and output buffers are // large enough to hold the entire compressed/decompressed file. decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF; in_bytes = pStream->avail_in; out_bytes = pStream->avail_out; status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pStream->next_out += (mz_uint)out_bytes; pStream->avail_out -= (mz_uint)out_bytes; pStream->total_out += (mz_uint)out_bytes; if (status < 0) return MZ_DATA_ERROR; else if (status != TINFL_STATUS_DONE) { pState->m_last_status = TINFL_STATUS_FAILED; return MZ_BUF_ERROR; } return MZ_STREAM_END; } // flush != MZ_FINISH then we must assume there's more input. 
if (flush != MZ_FINISH) decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT; if (pState->m_dict_avail) { n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); return ((pState->m_last_status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } for (;;) { in_bytes = pStream->avail_in; out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs; status = tinfl_decompress( &pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict, pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags); pState->m_last_status = status; pStream->next_in += (mz_uint)in_bytes; pStream->avail_in -= (mz_uint)in_bytes; pStream->total_in += (mz_uint)in_bytes; pStream->adler = tinfl_get_adler32(&pState->m_decomp); pState->m_dict_avail = (mz_uint)out_bytes; n = MZ_MIN(pState->m_dict_avail, pStream->avail_out); memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n); pStream->next_out += n; pStream->avail_out -= n; pStream->total_out += n; pState->m_dict_avail -= n; pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1); if (status < 0) return MZ_DATA_ERROR; // Stream is corrupted (there could be some // uncompressed data left in the output dictionary - // oh well). else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in)) return MZ_BUF_ERROR; // Signal caller that we can't make forward progress // without supplying more input or by setting flush // to MZ_FINISH. else if (flush == MZ_FINISH) { // The output buffer MUST be large to hold the remaining uncompressed data // when flush==MZ_FINISH. if (status == TINFL_STATUS_DONE) return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END; // status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's // at least 1 more byte on the way. 
If there's no more room left in the // output buffer then something is wrong. else if (!pStream->avail_out) return MZ_BUF_ERROR; } else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) || (!pStream->avail_out) || (pState->m_dict_avail)) break; } return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK; } int mz_inflateEnd(mz_streamp pStream) { if (!pStream) return MZ_STREAM_ERROR; if (pStream->state) { pStream->zfree(pStream->opaque, pStream->state); pStream->state = NULL; } return MZ_OK; } int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len) { mz_stream stream; int status; memset(&stream, 0, sizeof(stream)); // In case mz_ulong is 64-bits (argh I hate longs). if ((source_len | *pDest_len) > 0xFFFFFFFFU) return MZ_PARAM_ERROR; stream.next_in = pSource; stream.avail_in = (mz_uint32)source_len; stream.next_out = pDest; stream.avail_out = (mz_uint32)*pDest_len; status = mz_inflateInit(&stream); if (status != MZ_OK) return status; status = mz_inflate(&stream, MZ_FINISH); if (status != MZ_STREAM_END) { mz_inflateEnd(&stream); return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? 
MZ_DATA_ERROR : status; } *pDest_len = stream.total_out; return mz_inflateEnd(&stream); } const char *mz_error(int err) { static struct { int m_err; const char *m_pDesc; } s_error_descs[] = {{MZ_OK, ""}, {MZ_STREAM_END, "stream end"}, {MZ_NEED_DICT, "need dictionary"}, {MZ_ERRNO, "file error"}, {MZ_STREAM_ERROR, "stream error"}, {MZ_DATA_ERROR, "data error"}, {MZ_MEM_ERROR, "out of memory"}, {MZ_BUF_ERROR, "buf error"}, {MZ_VERSION_ERROR, "version error"}, {MZ_PARAM_ERROR, "parameter error"}}; mz_uint i; for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i) if (s_error_descs[i].m_err == err) return s_error_descs[i].m_pDesc; return NULL; } #endif // MINIZ_NO_ZLIB_APIS // ------------------- Low-level Decompression (completely independent from all // compression API's) #define TINFL_MEMCPY(d, s, l) memcpy(d, s, l) #define TINFL_MEMSET(p, c, l) memset(p, c, l) #define TINFL_CR_BEGIN \ switch (r->m_state) { \ case 0: #define TINFL_CR_RETURN(state_index, result) \ do { \ status = result; \ r->m_state = state_index; \ goto common_exit; \ case state_index:; \ } \ MZ_MACRO_END #define TINFL_CR_RETURN_FOREVER(state_index, result) \ do { \ for (;;) { \ TINFL_CR_RETURN(state_index, result); \ } \ } \ MZ_MACRO_END #define TINFL_CR_FINISH } // TODO: If the caller has indicated that there's no more input, and we attempt // to read beyond the input buf, then something is wrong with the input because // the inflator never // reads ahead more than it needs to. Currently TINFL_GET_BYTE() pads the end of // the stream with 0's in this scenario. 
#define TINFL_GET_BYTE(state_index, c) \ do { \ if (pIn_buf_cur >= pIn_buf_end) { \ for (;;) { \ if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) { \ TINFL_CR_RETURN(state_index, TINFL_STATUS_NEEDS_MORE_INPUT); \ if (pIn_buf_cur < pIn_buf_end) { \ c = *pIn_buf_cur++; \ break; \ } \ } else { \ c = 0; \ break; \ } \ } \ } else \ c = *pIn_buf_cur++; \ } \ MZ_MACRO_END #define TINFL_NEED_BITS(state_index, n) \ do { \ mz_uint c; \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < (mz_uint)(n)) #define TINFL_SKIP_BITS(state_index, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END #define TINFL_GET_BITS(state_index, b, n) \ do { \ if (num_bits < (mz_uint)(n)) { \ TINFL_NEED_BITS(state_index, n); \ } \ b = bit_buf & ((1 << (n)) - 1); \ bit_buf >>= (n); \ num_bits -= (n); \ } \ MZ_MACRO_END // TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes // remaining in the input buffer falls below 2. // It reads just enough bytes from the input stream that are needed to decode // the next Huffman code (and absolutely no more). It works by trying to fully // decode a // Huffman code by using whatever bits are currently present in the bit buffer. // If this fails, it reads another byte, and tries again until it succeeds or // until the // bit buffer contains >=15 bits (deflate's max. Huffman code size). 
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \ do { \ temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \ if (temp >= 0) { \ code_len = temp >> 9; \ if ((code_len) && (num_bits >= code_len)) break; \ } else if (num_bits > TINFL_FAST_LOOKUP_BITS) { \ code_len = TINFL_FAST_LOOKUP_BITS; \ do { \ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \ } while ((temp < 0) && (num_bits >= (code_len + 1))); \ if (temp >= 0) break; \ } \ TINFL_GET_BYTE(state_index, c); \ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \ num_bits += 8; \ } while (num_bits < 15); // TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex // than you would initially expect because the zlib API expects the decompressor // to never read // beyond the final byte of the deflate stream. (In other words, when this macro // wants to read another byte from the input, it REALLY needs another byte in // order to fully // decode the next Huffman code.) Handling this properly is particularly // important on raw deflate (non-zlib) streams, which aren't followed by a byte // aligned adler-32. // The slow path is only executed at the very end of the input buffer. 
// Fast-path Huffman decode (see the comment block above): bulk-loads two bytes
// when >= 2 input bytes remain, otherwise falls back to the careful
// byte-at-a-time TINFL_HUFF_BITBUF_FILL path. The looked-up entry packs the
// code length in the high bits (>> 9) and the symbol in the low 9 bits;
// negative entries index into the overflow binary tree.
#define TINFL_HUFF_DECODE(state_index, sym, pHuff)                             \
  do {                                                                         \
    int temp;                                                                  \
    mz_uint code_len, c;                                                       \
    if (num_bits < 15) {                                                       \
      if ((pIn_buf_end - pIn_buf_cur) < 2) {                                   \
        TINFL_HUFF_BITBUF_FILL(state_index, pHuff);                            \
      } else {                                                                 \
        bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) |           \
                   (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8));      \
        pIn_buf_cur += 2;                                                      \
        num_bits += 16;                                                        \
      }                                                                        \
    }                                                                          \
    if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= \
        0)                                                                     \
      code_len = temp >> 9, temp &= 511;                                       \
    else {                                                                     \
      code_len = TINFL_FAST_LOOKUP_BITS;                                       \
      do {                                                                     \
        temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)];         \
      } while (temp < 0);                                                      \
    }                                                                          \
    sym = temp;                                                                \
    bit_buf >>= code_len;                                                      \
    num_bits -= code_len;                                                      \
  }                                                                            \
  MZ_MACRO_END

// Main low-level inflate entry point, written as a resumable coroutine (see
// the TINFL_CR_* macros). On each call it consumes up to *pIn_buf_size input
// bytes and produces up to *pOut_buf_size output bytes, updating both counts
// to the amounts actually used. pOut_buf_start is the beginning of the LZ
// window; unless TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF is set, the window
// size (start..next + size) must be a power of 2 so match offsets can wrap
// with a mask. Returns a tinfl_status; negative statuses are fatal.
tinfl_status tinfl_decompress(tinfl_decompressor *r,
                              const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size,
                              mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next,
                              size_t *pOut_buf_size,
                              const mz_uint32 decomp_flags) {
  // Base lengths/distances and extra-bit counts from the DEFLATE spec
  // (RFC 1951), plus the code-length alphabet permutation.
  static const int s_length_base[31] = {
      3,  4,  5,  6,  7,  8,  9,  10,  11,  13,  15,  17,  19,  23, 27, 31,
      35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0,  0};
  static const int s_length_extra[31] = {0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
                                         1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4,
                                         4, 4, 5, 5, 5, 5, 0, 0, 0};
  static const int s_dist_base[32] = {
      1,    2,    3,    4,    5,    7,     9,     13,    17,  25,   33,
      49,   65,   97,   129,  193,  257,   385,   513,   769, 1025, 1537,
      2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0,   0};
  static const int s_dist_extra[32] = {0, 0, 0,  0,  1,  1,  2,  2,  3,  3,
                                       4, 4, 5,  5,  6,  6,  7,  7,  8,  8,
                                       9, 9, 10, 10, 11, 11, 12, 12, 13, 13};
  static const mz_uint8 s_length_dezigzag[19] = {
      16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};
  // Minimum symbol counts for the literal/length, distance, and code-length
  // tables respectively.
  static const int s_min_table_sizes[3] = {257, 1, 4};

  tinfl_status status = TINFL_STATUS_FAILED;
  mz_uint32 num_bits, dist, counter, num_extra;
  tinfl_bit_buf_t bit_buf;
  const mz_uint8 *pIn_buf_cur = pIn_buf_next,
                 *const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
  mz_uint8 *pOut_buf_cur = pOut_buf_next,
           *const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
  size_t out_buf_size_mask =
             (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)
                 ? (size_t)-1
                 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1,
         dist_from_out_buf_start;

  // Ensure the output buffer's size is a power of 2, unless the output buffer
  // is large enough to hold the entire output file (in which case it doesn't
  // matter).
  if (((out_buf_size_mask + 1) & out_buf_size_mask) ||
      (pOut_buf_next < pOut_buf_start)) {
    *pIn_buf_size = *pOut_buf_size = 0;
    return TINFL_STATUS_BAD_PARAM;
  }

  // Restore the coroutine's register state from the decompressor struct.
  num_bits = r->m_num_bits;
  bit_buf = r->m_bit_buf;
  dist = r->m_dist;
  counter = r->m_counter;
  num_extra = r->m_num_extra;
  dist_from_out_buf_start = r->m_dist_from_out_buf_start;
  TINFL_CR_BEGIN

  bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
  r->m_z_adler32 = r->m_check_adler32 = 1;
  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
    // Validate the 2-byte zlib header (RFC 1950): checksum multiple of 31,
    // no preset dictionary, method 8 (deflate), and a window that fits the
    // caller's output buffer when wrapping.
    TINFL_GET_BYTE(1, r->m_zhdr0);
    TINFL_GET_BYTE(2, r->m_zhdr1);
    counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) ||
               (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
    if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
      counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) ||
                  ((out_buf_size_mask + 1) <
                   (size_t)(1ULL << (8U + (r->m_zhdr0 >> 4)))));
    if (counter) {
      TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
    }
  }

  // One iteration per deflate block; m_final bit 0 marks the last block,
  // bits 1-2 are the block type.
  do {
    TINFL_GET_BITS(3, r->m_final, 3);
    r->m_type = r->m_final >> 1;
    if (r->m_type == 0) {
      // Type 0: stored (uncompressed) block. Align to a byte boundary, read
      // LEN/NLEN, verify NLEN == ~LEN, then copy LEN literal bytes.
      TINFL_SKIP_BITS(5, num_bits & 7);
      for (counter = 0; counter < 4; ++counter) {
        if (num_bits)
          TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
        else
          TINFL_GET_BYTE(7, r->m_raw_header[counter]);
      }
      if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) !=
          (mz_uint)(0xFFFF ^
                    (r->m_raw_header[2] | (r->m_raw_header[3] << 8)))) {
        TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
      }
      // First drain any bytes still sitting in the bit buffer.
      while ((counter) && (num_bits)) {
        TINFL_GET_BITS(51, dist, 8);
        while (pOut_buf_cur >= pOut_buf_end) {
          TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
        }
        *pOut_buf_cur++ = (mz_uint8)dist;
        counter--;
      }
      // Then bulk-copy directly from the input buffer.
      while (counter) {
        size_t n;
        while (pOut_buf_cur >= pOut_buf_end) {
          TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
        }
        while (pIn_buf_cur >= pIn_buf_end) {
          if (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) {
            TINFL_CR_RETURN(38, TINFL_STATUS_NEEDS_MORE_INPUT);
          } else {
            TINFL_CR_RETURN_FOREVER(40, TINFL_STATUS_FAILED);
          }
        }
        n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur),
                          (size_t)(pIn_buf_end - pIn_buf_cur)),
                   counter);
        TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
        pIn_buf_cur += n;
        pOut_buf_cur += n;
        counter -= (mz_uint)n;
      }
    } else if (r->m_type == 3) {
      // Type 3 is reserved/invalid.
      TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
    } else {
      if (r->m_type == 1) {
        // Type 1: fixed Huffman tables as defined by RFC 1951.
        mz_uint8 *p = r->m_tables[0].m_code_size;
        mz_uint i;
        r->m_table_sizes[0] = 288;
        r->m_table_sizes[1] = 32;
        TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
        for (i = 0; i <= 143; ++i)
          *p++ = 8;
        for (; i <= 255; ++i)
          *p++ = 9;
        for (; i <= 279; ++i)
          *p++ = 7;
        for (; i <= 287; ++i)
          *p++ = 8;
      } else {
        // Type 2: dynamic tables; read the three table sizes, then the
        // code-length code sizes in "dezigzag" order.
        for (counter = 0; counter < 3; counter++) {
          TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
          r->m_table_sizes[counter] += s_min_table_sizes[counter];
        }
        MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
        for (counter = 0; counter < r->m_table_sizes[2]; counter++) {
          mz_uint s;
          TINFL_GET_BITS(14, s, 3);
          r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
        }
        r->m_table_sizes[2] = 19;
      }
      // Build each Huffman table (code-length table first for type 2, then
      // distance, then literal/length): canonical codes + fast lookup table
      // with an overflow binary tree for codes longer than
      // TINFL_FAST_LOOKUP_BITS.
      for (; (int)r->m_type >= 0; r->m_type--) {
        int tree_next, tree_cur;
        tinfl_huff_table *pTable;
        mz_uint i, j, used_syms, total, sym_index, next_code[17],
            total_syms[16];
        pTable = &r->m_tables[r->m_type];
        MZ_CLEAR_OBJ(total_syms);
        MZ_CLEAR_OBJ(pTable->m_look_up);
        MZ_CLEAR_OBJ(pTable->m_tree);
        for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
          total_syms[pTable->m_code_size[i]]++;
        used_syms = 0, total = 0;
        next_code[0] = next_code[1] = 0;
        for (i = 1; i <= 15; ++i) {
          used_syms += total_syms[i];
          next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
        }
        // An over/under-subscribed code is invalid (except the degenerate
        // single-symbol case).
        if ((65536 != total) && (used_syms > 1)) {
          TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
        }
        for (tree_next = -1, sym_index = 0;
             sym_index < r->m_table_sizes[r->m_type]; ++sym_index) {
          mz_uint rev_code = 0, l, cur_code,
                  code_size = pTable->m_code_size[sym_index];
          if (!code_size)
            continue;
          cur_code = next_code[code_size]++;
          // Huffman codes are stored bit-reversed relative to the lookup
          // index.
          for (l = code_size; l > 0; l--, cur_code >>= 1)
            rev_code = (rev_code << 1) | (cur_code & 1);
          if (code_size <= TINFL_FAST_LOOKUP_BITS) {
            mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
            while (rev_code < TINFL_FAST_LOOKUP_SIZE) {
              pTable->m_look_up[rev_code] = k;
              rev_code += (1 << code_size);
            }
            continue;
          }
          if (0 == (tree_cur = pTable->m_look_up[rev_code &
                                                 (TINFL_FAST_LOOKUP_SIZE -
                                                  1)])) {
            pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] =
                (mz_int16)tree_next;
            tree_cur = tree_next;
            tree_next -= 2;
          }
          rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
          for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--) {
            tree_cur -= ((rev_code >>= 1) & 1);
            if (!pTable->m_tree[-tree_cur - 1]) {
              pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
              tree_cur = tree_next;
              tree_next -= 2;
            } else
              tree_cur = pTable->m_tree[-tree_cur - 1];
          }
          tree_cur -= ((rev_code >>= 1) & 1);
          pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
        }
        if (r->m_type == 2) {
          // Decode the run-length-encoded literal/length + distance code
          // sizes using the just-built code-length table (symbols 16-18 are
          // repeats/zero-runs).
          for (counter = 0;
               counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);) {
            mz_uint s;
            TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
            if (dist < 16) {
              r->m_len_codes[counter++] = (mz_uint8)dist;
              continue;
            }
            if ((dist == 16) && (!counter)) {
              TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
            }
            num_extra = "\02\03\07"[dist - 16];
            TINFL_GET_BITS(18, s, num_extra);
            s += "\03\03\013"[dist - 16];
            TINFL_MEMSET(r->m_len_codes + counter,
                         (dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
            counter += s;
          }
          if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter) {
            TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
          }
          TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes,
                       r->m_table_sizes[0]);
          TINFL_MEMCPY(r->m_tables[1].m_code_size,
                       r->m_len_codes + r->m_table_sizes[0],
                       r->m_table_sizes[1]);
        }
      }
      // Main literal/match decode loop for compressed blocks.
      for (;;) {
        mz_uint8 *pSrc;
        for (;;) {
          if (((pIn_buf_end - pIn_buf_cur) < 4) ||
              ((pOut_buf_end - pOut_buf_cur) < 2)) {
            // Careful path near buffer edges: decode one symbol at a time.
            TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
            if (counter >= 256)
              break;
            while (pOut_buf_cur >= pOut_buf_end) {
              TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
            }
            *pOut_buf_cur++ = (mz_uint8)counter;
          } else {
            // Fast path: bulk bit-buffer refills and up to two literals
            // decoded per iteration.
            int sym2;
            mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
            if (num_bits < 30) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 4;
              num_bits += 32;
            }
#else
            if (num_bits < 15) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 2;
              num_bits += 16;
            }
#endif
            if ((sym2 = r->m_tables[0]
                            .m_look_up[bit_buf &
                                       (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
              code_len = sym2 >> 9;
            else {
              code_len = TINFL_FAST_LOOKUP_BITS;
              do {
                sym2 = r->m_tables[0]
                           .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
              } while (sym2 < 0);
            }
            counter = sym2;
            bit_buf >>= code_len;
            num_bits -= code_len;
            if (counter & 256)
              break; // length/end-of-block symbol

#if !TINFL_USE_64BIT_BITBUF
            if (num_bits < 15) {
              bit_buf |=
                  (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
              pIn_buf_cur += 2;
              num_bits += 16;
            }
#endif
            if ((sym2 = r->m_tables[0]
                            .m_look_up[bit_buf &
                                       (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
              code_len = sym2 >> 9;
            else {
              code_len = TINFL_FAST_LOOKUP_BITS;
              do {
                sym2 = r->m_tables[0]
                           .m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
              } while (sym2 < 0);
            }
            bit_buf >>= code_len;
            num_bits -= code_len;

            pOut_buf_cur[0] = (mz_uint8)counter;
            if (sym2 & 256) {
              pOut_buf_cur++;
              counter = sym2;
              break;
            }
            pOut_buf_cur[1] = (mz_uint8)sym2;
            pOut_buf_cur += 2;
          }
        }
        if ((counter &= 511) == 256)
          break; // end-of-block symbol

        // Decode the match length and distance (base + extra bits).
        num_extra = s_length_extra[counter - 257];
        counter = s_length_base[counter - 257];
        if (num_extra) {
          mz_uint extra_bits;
          TINFL_GET_BITS(25, extra_bits, num_extra);
          counter += extra_bits;
        }

        TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
        num_extra = s_dist_extra[dist];
        dist = s_dist_base[dist];
        if (num_extra) {
          mz_uint extra_bits;
          TINFL_GET_BITS(27, extra_bits, num_extra);
          dist += extra_bits;
        }

        dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
        if ((dist > dist_from_out_buf_start) &&
            (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)) {
          TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
        }

        pSrc = pOut_buf_start +
               ((dist_from_out_buf_start - dist) & out_buf_size_mask);

        if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end) {
          // Match would run past the output buffer: copy byte-by-byte,
          // suspending whenever the output fills up.
          while (counter--) {
            while (pOut_buf_cur >= pOut_buf_end) {
              TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
            }
            *pOut_buf_cur++ =
                pOut_buf_start[(dist_from_out_buf_start++ - dist) &
                               out_buf_size_mask];
          }
          continue;
        }
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
        else if ((counter >= 9) && (counter <= dist)) {
          // Non-overlapping match: copy 8 bytes per iteration via 32-bit
          // loads/stores.
          const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
          do {
            ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
            ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
            pOut_buf_cur += 8;
          } while ((pSrc += 8) < pSrc_end);
          if ((counter &= 7) < 3) {
            if (counter) {
              pOut_buf_cur[0] = pSrc[0];
              if (counter > 1)
                pOut_buf_cur[1] = pSrc[1];
              pOut_buf_cur += counter;
            }
            continue;
          }
        }
#endif
        // Generic copy, 3 bytes at a time (safe for overlapping matches).
        do {
          pOut_buf_cur[0] = pSrc[0];
          pOut_buf_cur[1] = pSrc[1];
          pOut_buf_cur[2] = pSrc[2];
          pOut_buf_cur += 3;
          pSrc += 3;
        } while ((int)(counter -= 3) > 2);
        if ((int)counter > 0) {
          pOut_buf_cur[0] = pSrc[0];
          if ((int)counter > 1)
            pOut_buf_cur[1] = pSrc[1];
          pOut_buf_cur += counter;
        }
      }
    }
  } while (!(r->m_final & 1));
  if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) {
    // Read the trailing big-endian adler-32 from the zlib footer.
    TINFL_SKIP_BITS(32, num_bits & 7);
    for (counter = 0; counter < 4; ++counter) {
      mz_uint s;
      if (num_bits)
        TINFL_GET_BITS(41, s, 8);
      else
        TINFL_GET_BYTE(42, s);
      r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
    }
  }
  TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
  TINFL_CR_FINISH

common_exit:
  // Persist the coroutine's register state for the next call.
  r->m_num_bits = num_bits;
  r->m_bit_buf = bit_buf;
  r->m_dist = dist;
  r->m_counter = counter;
  r->m_num_extra = num_extra;
  r->m_dist_from_out_buf_start = dist_from_out_buf_start;
  *pIn_buf_size = pIn_buf_cur - pIn_buf_next;
  *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
  if ((decomp_flags &
       (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) &&
      (status >= 0)) {
    // Fold this call's output into the running adler-32 (unrolled, with the
    // standard 5552-byte modulo deferral) and compare against the stream's
    // stored checksum once done.
    const mz_uint8 *ptr = pOut_buf_next;
    size_t buf_len = *pOut_buf_size;
    mz_uint32 i, s1 = r->m_check_adler32 & 0xffff,
                 s2 = r->m_check_adler32 >> 16;
    size_t block_len = buf_len % 5552;
    while (buf_len) {
      for (i = 0; i + 7 < block_len; i += 8, ptr += 8) {
        s1 += ptr[0], s2 += s1;
        s1 += ptr[1], s2 += s1;
        s1 += ptr[2], s2 += s1;
        s1 += ptr[3], s2 += s1;
        s1 += ptr[4], s2 += s1;
        s1 += ptr[5], s2 += s1;
        s1 += ptr[6], s2 += s1;
        s1 += ptr[7], s2 += s1;
      }
      for (; i < block_len; ++i)
        s1 += *ptr++, s2 += s1;
      s1 %= 65521U, s2 %= 65521U;
      buf_len -= block_len;
      block_len = 5552;
    }
    r->m_check_adler32 = (s2 << 16) + s1;
    if ((status == TINFL_STATUS_DONE) &&
        (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) &&
        (r->m_check_adler32 != r->m_z_adler32))
      status = TINFL_STATUS_ADLER32_MISMATCH;
  }
  return status;
}

// Higher level helper functions.

// Decompresses an entire in-memory stream into a heap buffer that grows by
// doubling (starting at 128 bytes). Returns the malloc'd buffer (caller frees
// with mz_free/MZ_FREE) and sets *pOut_len, or returns NULL on failure.
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                   size_t *pOut_len, int flags) {
  tinfl_decompressor decomp;
  void *pBuf = NULL, *pNew_buf;
  size_t src_buf_ofs = 0, out_buf_capacity = 0;
  *pOut_len = 0;
  tinfl_init(&decomp);
  for (;;) {
    size_t src_buf_size = src_buf_len - src_buf_ofs,
           dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
    tinfl_status status = tinfl_decompress(
        &decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size,
        (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL,
        &dst_buf_size,
        (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
            TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
    // All input is present up front, so NEEDS_MORE_INPUT means the stream is
    // truncated -> failure.
    if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT)) {
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    src_buf_ofs += src_buf_size;
    *pOut_len += dst_buf_size;
    if (status == TINFL_STATUS_DONE)
      break;
    new_out_buf_capacity = out_buf_capacity * 2;
    if (new_out_buf_capacity < 128)
      new_out_buf_capacity = 128;
    pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
    if (!pNew_buf) {
      MZ_FREE(pBuf);
      *pOut_len = 0;
      return NULL;
    }
    pBuf = pNew_buf;
    out_buf_capacity = new_out_buf_capacity;
  }
  return pBuf;
}

// One-shot decompression into a caller-supplied buffer. Returns the number of
// bytes written, or TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on any error.
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                   const void *pSrc_buf, size_t src_buf_len,
                                   int flags) {
  tinfl_decompressor decomp;
  tinfl_status status;
  tinfl_init(&decomp);
  status =
      tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len,
                       (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len,
                       (flags & ~TINFL_FLAG_HAS_MORE_INPUT) |
                           TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
  return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED
                                       : out_buf_len;
}

// Decompresses via a caller callback: output is produced into an internal
// TINFL_LZ_DICT_SIZE circular dictionary and handed to pPut_buf_func in
// chunks. Returns 1 on success, 0 on failure (including the callback
// returning false); *pIn_buf_size is updated to the input consumed.
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size,
                                     tinfl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  int result = 0;
  tinfl_decompressor decomp;
  mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
  size_t in_buf_ofs = 0, dict_ofs = 0;
  if (!pDict)
    return TINFL_STATUS_FAILED;
  tinfl_init(&decomp);
  for (;;) {
    size_t in_buf_size = *pIn_buf_size - in_buf_ofs,
           dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
    tinfl_status status =
        tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs,
                         &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
                         (flags &
                          ~(TINFL_FLAG_HAS_MORE_INPUT |
                            TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
    in_buf_ofs += in_buf_size;
    if ((dst_buf_size) &&
        (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
      break;
    if (status != TINFL_STATUS_HAS_MORE_OUTPUT) {
      result = (status == TINFL_STATUS_DONE);
      break;
    }
    dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
  }
  MZ_FREE(pDict);
  *pIn_buf_size = in_buf_ofs;
  return result;
}

// ------------------- Low-level Compression (independent from all decompression
// API's)

// Purposely making these tables static for faster init and thread safety.
// Maps (match_len - TDEFL_MIN_MATCH_LEN) in [0,255] to its DEFLATE length
// symbol 257..285 (RFC 1951, section 3.2.5).
static const mz_uint16 s_tdefl_len_sym[256] = {
    257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268,
    269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272,
    273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274,
    275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276,
    277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277,
    278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
    279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279,
    280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284,
    284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285};

// Number of extra bits transmitted after each length symbol (indexed like
// s_tdefl_len_sym); the final entry (len 258) carries no extra bits.
static const mz_uint8 s_tdefl_len_extra[256] = {
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0};

// Distance-to-symbol tables. Distances below 512 index the "small" tables
// directly; larger distances index the "large" tables by (dist >> 8).
static const mz_uint8 s_tdefl_small_dist_sym[512] = {
    0,  1,  2,  3,  4,  4,  5,  5,  6,  6,  6,  6,  7,  7,  7,  7,
    8,  8,  8,  8,  8,  8,  8,  8,  9,  9,  9,  9,  9,  9,  9,  9,
    10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
    17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17};

// Extra distance bits for distances < 512 (indexed directly).
static const mz_uint8 s_tdefl_small_dist_extra[512] = {
    0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2,
    3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7};

// Distance symbol for distances >= 512, indexed by (dist >> 8).
static const mz_uint8 s_tdefl_large_dist_sym[128] = {
    0,  0,  18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23,
    24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25,
    26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
    27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
    29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29};

// Extra distance bits for distances >= 512, indexed by (dist >> 8).
static const mz_uint8 s_tdefl_large_dist_extra[128] = {
    0,  0,  8,  8,  9,  9,  9,  9,  10, 10, 10, 10, 10, 10, 10, 10,
    11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
    13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13};

// Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted
// values.
typedef struct { mz_uint16 m_key, m_sym_index; } tdefl_sym_freq; static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1) { mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2]; tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1; MZ_CLEAR_OBJ(hist); for (i = 0; i < num_syms; i++) { mz_uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; } while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--; for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8) { const mz_uint32 *pHist = &hist[pass << 8]; mz_uint offsets[256], cur_ofs = 0; for (i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; } for (i = 0; i < num_syms; i++) pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i]; { tdefl_sym_freq *t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t; } } return pCur_syms; } // tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, // alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996. 
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n) { int root, leaf, next, avbl, used, dpth; if (n == 0) return; else if (n == 1) { A[0].m_key = 1; return; } A[0].m_key += A[1].m_key; root = 0; leaf = 2; for (next = 1; next < n - 1; next++) { if (leaf >= n || A[root].m_key < A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = (mz_uint16)next; } else A[next].m_key = A[leaf++].m_key; if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key)) { A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key); A[root++].m_key = (mz_uint16)next; } else A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key); } A[n - 2].m_key = 0; for (next = n - 3; next >= 0; next--) A[next].m_key = A[A[next].m_key].m_key + 1; avbl = 1; used = dpth = 0; root = n - 2; next = n - 1; while (avbl > 0) { while (root >= 0 && (int)A[root].m_key == dpth) { used++; root--; } while (avbl > used) { A[next--].m_key = (mz_uint16)(dpth); avbl--; } avbl = 2 * used; dpth++; used = 0; } } // Limits canonical Huffman code table's max code size. 
enum { TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32 };

// Clamps a code-length histogram so no code exceeds max_code_size while
// keeping the Kraft sum exactly 2^max_code_size (a valid prefix code).
// pNum_codes[i] = number of symbols with code length i.
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes,
                                                int code_list_len,
                                                int max_code_size) {
  int i;
  mz_uint32 total = 0;
  if (code_list_len <= 1) return;
  // Fold all over-long codes down to max_code_size...
  for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
    pNum_codes[max_code_size] += pNum_codes[i];
  for (i = max_code_size; i > 0; i--)
    total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
  // ...then rebalance until the Kraft inequality holds with equality.
  while (total != (1UL << max_code_size)) {
    pNum_codes[max_code_size]--;
    for (i = max_code_size - 1; i > 0; i--)
      if (pNum_codes[i]) {
        pNum_codes[i]--;
        pNum_codes[i + 1] += 2;
        break;
      }
    total--;
  }
}

// Builds the canonical Huffman code for compressor table table_num: computes
// optimal code lengths from m_huff_count (unless static_table, in which case
// the preset lengths in m_huff_code_sizes are used), limits them to
// code_size_limit, and fills m_huff_codes with bit-reversed canonical codes.
static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num,
                                         int table_len, int code_size_limit,
                                         int static_table) {
  int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
  mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
  MZ_CLEAR_OBJ(num_codes);
  if (static_table) {
    // Preset lengths: just histogram them.
    for (i = 0; i < table_len; i++)
      num_codes[d->m_huff_code_sizes[table_num][i]]++;
  } else {
    tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS],
        *pSyms;
    int num_used_syms = 0;
    const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
    // Collect only the symbols that actually occurred.
    for (i = 0; i < table_len; i++)
      if (pSym_count[i]) {
        syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
        syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
      }
    pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
    tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
    for (i = 0; i < num_used_syms; i++) num_codes[pSyms[i].m_key]++;
    tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms,
                                        code_size_limit);
    MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
    MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
    // Reassign code lengths to symbols: shortest codes go to the most
    // frequent symbols (pSyms is sorted ascending by frequency).
    for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
      for (l = num_codes[i]; l > 0; l--)
        d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
  }
  // Generate canonical codes, emitted LSB-first (hence the bit reversal).
  next_code[1] = 0;
  for (j = 0, i = 2; i <= code_size_limit; i++)
    next_code[i] = j = ((j + num_codes[i - 1]) << 1);
  for (i = 0; i <
17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15};

// Emits the header of a dynamic-Huffman DEFLATE block: optimizes the
// literal/length and distance tables, run-length encodes their code sizes
// (symbols 16/17/18 per RFC 1951), and writes the code-length code.
static void tdefl_start_dynamic_block(tdefl_compressor *d) {
  int num_lit_codes, num_dist_codes, num_bit_lengths;
  mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count,
      rle_repeat_count, packed_code_sizes_index;
  mz_uint8
      code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1],
      prev_code_size = 0xFF;
  // The end-of-block symbol (256) always occurs exactly once.
  d->m_huff_count[0][256] = 1;
  tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
  tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
  // Trim trailing unused codes (minimums: 257 literal/length, 1 distance).
  for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
    if (d->m_huff_code_sizes[0][num_lit_codes - 1]) break;
  for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
    if (d->m_huff_code_sizes[1][num_dist_codes - 1]) break;
  memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
  memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0],
         num_dist_codes);
  total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
  num_packed_code_sizes = 0;
  rle_z_count = 0;
  rle_repeat_count = 0;
  memset(&d->m_huff_count[2][0], 0,
         sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
  // RLE-pack the concatenated code sizes, tracking runs of zeros
  // (rle_z_count, max 138) and repeats of the previous size
  // (rle_repeat_count, max 6).
  for (i = 0; i < total_code_sizes_to_pack; i++) {
    mz_uint8 code_size = code_sizes_to_pack[i];
    if (!code_size) {
      TDEFL_RLE_PREV_CODE_SIZE();
      if (++rle_z_count == 138) {
        TDEFL_RLE_ZERO_CODE_SIZE();
      }
    } else {
      TDEFL_RLE_ZERO_CODE_SIZE();
      if (code_size != prev_code_size) {
        TDEFL_RLE_PREV_CODE_SIZE();
        d->m_huff_count[2][code_size] =
            (mz_uint16)(d->m_huff_count[2][code_size] + 1);
        packed_code_sizes[num_packed_code_sizes++] = code_size;
      } else if (++rle_repeat_count == 6) {
        TDEFL_RLE_PREV_CODE_SIZE();
      }
    }
    prev_code_size = code_size;
  }
  // Flush whichever run is still pending.
  if (rle_repeat_count) {
    TDEFL_RLE_PREV_CODE_SIZE();
  } else {
    TDEFL_RLE_ZERO_CODE_SIZE();
  }
  tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
  // Block type 2 = dynamic Huffman.
  TDEFL_PUT_BITS(2, 2);
  TDEFL_PUT_BITS(num_lit_codes - 257, 5);
  TDEFL_PUT_BITS(num_dist_codes - 1, 5);
  // Code-length code sizes are sent in the RFC 1951 swizzled order, trailing
  // zeros trimmed (minimum of 4 entries).
  for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
    if (d->m_huff_code_sizes
            [2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
      break;
  num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
  TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
  for (i = 0; (int)i < num_bit_lengths; i++)
    TDEFL_PUT_BITS(
        d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
  // Emit the packed code sizes; symbols 16/17/18 carry 2/3/7 extra bits.
  for (packed_code_sizes_index = 0;
       packed_code_sizes_index < num_packed_code_sizes;) {
    mz_uint code = packed_code_sizes[packed_code_sizes_index++];
    MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
    TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
    if (code >= 16)
      TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++],
                     "\02\03\07"[code - 16]);
  }
}

// Emits the header of a static-Huffman block using the fixed code lengths
// mandated by RFC 1951 section 3.2.6.
static void tdefl_start_static_block(tdefl_compressor *d) {
  mz_uint i;
  mz_uint8 *p = &d->m_huff_code_sizes[0][0];
  for (i = 0; i <= 143; ++i) *p++ = 8;
  for (; i <= 255; ++i) *p++ = 9;
  for (; i <= 279; ++i) *p++ = 7;
  for (; i <= 287; ++i) *p++ = 8;
  memset(d->m_huff_code_sizes[1], 5, 32);
  tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
  tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
  // Block type 1 = static Huffman.
  TDEFL_PUT_BITS(1, 2);
}

// mz_bitmasks[n] has the low n bits set.
static const mz_uint mz_bitmasks[17] = {
    0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF,
    0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF};

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && \
    MINIZ_HAS_64BIT_REGISTERS
// Fast path: Huffman-encodes the buffered LZ codes using a 64-bit bit buffer
// flushed with unaligned little-endian stores.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  mz_uint8 *pOutput_buf = d->m_pOutput_buf;
  mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
  mz_uint64 bit_buffer = d->m_bit_buffer;
  mz_uint bits_in = d->m_bits_in;

#define TDEFL_PUT_BITS_FAST(b, l)                \
  {                                              \
    bit_buffer |= (((mz_uint64)(b)) << bits_in); \
    bits_in += (l);                              \
  }

  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end;
       flags >>= 1) {
    if (flags == 1)
      flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      // Match: 1 length byte + 2 distance bytes in the LZ code buffer.
      mz_uint s0, s1, n0, n1, sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                          d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                          s_tdefl_len_extra[match_len]);
      // This sequence coaxes MSVC into using cmov's vs. jmp's.
      s0 = s_tdefl_small_dist_sym[match_dist & 511];
      n0 = s_tdefl_small_dist_extra[match_dist & 511];
      s1 = s_tdefl_large_dist_sym[match_dist >> 8];
      n1 = s_tdefl_large_dist_extra[match_dist >> 8];
      sym = (match_dist < 512) ? s0 : s1;
      num_extra_bits = (match_dist < 512) ? n0 : n1;
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym],
                          d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits],
                          num_extra_bits);
    } else {
      // Literal; opportunistically encode up to two more literals if the
      // next flag bits indicate them (fewer loop iterations).
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                          d->m_huff_code_sizes[0][lit]);
      if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
        flags >>= 1;
        lit = *pLZ_codes++;
        MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
        TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                            d->m_huff_code_sizes[0][lit]);
        if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end)) {
          flags >>= 1;
          lit = *pLZ_codes++;
          MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
          TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit],
                              d->m_huff_code_sizes[0][lit]);
        }
      }
    }
    if (pOutput_buf >= d->m_pOutput_buf_end) return MZ_FALSE;
    // Flush whole bytes of the 64-bit bit buffer with one unaligned store.
    *(mz_uint64 *)pOutput_buf = bit_buffer;
    pOutput_buf += (bits_in >> 3);
    bit_buffer >>= (bits_in & ~7);
    bits_in &= 7;
  }
#undef TDEFL_PUT_BITS_FAST
  d->m_pOutput_buf = pOutput_buf;
  d->m_bits_in = 0;
  d->m_bit_buffer = 0;
  // Drain the leftover bits through the slow path, then the end-of-block code.
  while (bits_in) {
    mz_uint32 n = MZ_MIN(bits_in, 16);
    TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
    bit_buffer >>= n;
    bits_in -= n;
  }
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
// Portable path: same encoding, one code at a time via TDEFL_PUT_BITS.
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d) {
  mz_uint flags;
  mz_uint8 *pLZ_codes;
  flags = 1;
  for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf;
       flags >>= 1) {
    if (flags == 1) flags = *pLZ_codes++ | 0x100;
    if (flags & 1) {
      mz_uint sym, num_extra_bits;
      mz_uint match_len = pLZ_codes[0],
              match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
      pLZ_codes += 3;
      MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]],
                     d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
      TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]],
                     s_tdefl_len_extra[match_len]);
      if (match_dist < 512) {
        sym = s_tdefl_small_dist_sym[match_dist];
        num_extra_bits = s_tdefl_small_dist_extra[match_dist];
      } else {
        sym = s_tdefl_large_dist_sym[match_dist >> 8];
        num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
      }
      MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
      TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
    } else {
      mz_uint lit = *pLZ_codes++;
      MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
      TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
    }
  }
  TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
  return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif  // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN &&
        // MINIZ_HAS_64BIT_REGISTERS

// Writes one complete DEFLATE block: header (static or dynamic) followed by
// the Huffman-coded LZ codes.
static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block) {
  if (static_block)
    tdefl_start_static_block(d);
  else
    tdefl_start_dynamic_block(d);
  return tdefl_compress_lz_codes(d);
}

// Flushes the buffered LZ codes as a finished DEFLATE block, falling back to
// a stored (raw) block if compression would expand the data. Returns the
// number of output bytes still pending, or a negative tdefl status.
static int tdefl_flush_block(tdefl_compressor *d, int flush) {
  mz_uint saved_bit_buf, saved_bits_in;
  mz_uint8 *pSaved_output_buf;
  mz_bool comp_block_succeeded = MZ_FALSE;
  int n,
      use_raw_block =
          ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) &&
          (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
  // Write straight into the caller's buffer when it is big enough; otherwise
  // stage into the internal output buffer and copy out afterwards.
  mz_uint8 *pOutput_buf_start =
      ((d->m_pPut_buf_func == NULL) &&
       ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE))
          ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs)
          : d->m_output_buf;
  d->m_pOutput_buf = pOutput_buf_start;
  d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
  MZ_ASSERT(!d->m_output_flush_remaining);
  d->m_output_flush_ofs = 0;
  d->m_output_flush_remaining = 0;
  // Finalize the partially filled flags byte.
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
  d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
  // zlib header (0x78 0x01) goes in front of the very first block only.
  if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index)) {
    TDEFL_PUT_BITS(0x78, 8);
    TDEFL_PUT_BITS(0x01, 8);
  }
  // BFINAL bit.
  TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
  // Snapshot the output state so the compressed attempt can be rolled back.
  pSaved_output_buf = d->m_pOutput_buf;
  saved_bit_buf = d->m_bit_buffer;
  saved_bits_in = d->m_bits_in;
  if (!use_raw_block)
    comp_block_succeeded =
        tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) ||
                                    (d->m_total_lz_bytes < 48));
  // If the block gets expanded, forget the current contents of the output
  // buffer and send a raw block instead.
  if (((use_raw_block) ||
       ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >=
                                  d->m_total_lz_bytes))) &&
      ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size)) {
    mz_uint i;
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    // Stored block: type 00, align to byte, then LEN and NLEN (ones
    // complement, produced by the ^= 0xFFFF in the loop below).
    TDEFL_PUT_BITS(0, 2);
    if (d->m_bits_in) {
      TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
    }
    for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF) {
      TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
    }
    for (i = 0; i < d->m_total_lz_bytes; ++i) {
      TDEFL_PUT_BITS(
          d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK],
          8);
    }
  }
  // Check for the extremely unlikely (if not impossible) case of the compressed
  // block not fitting into the output buffer when using dynamic codes.
  else if (!comp_block_succeeded) {
    d->m_pOutput_buf = pSaved_output_buf;
    d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
    tdefl_compress_block(d, MZ_TRUE);
  }
  if (flush) {
    if (flush == TDEFL_FINISH) {
      // Final flush: byte-align and append the big-endian Adler-32 when
      // writing a zlib stream.
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER) {
        mz_uint i, a = d->m_adler32;
        for (i = 0; i < 4; i++) {
          TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
          a <<= 8;
        }
      }
    } else {
      // Non-final flush: append an empty stored block so the decompressor
      // can deliver all data produced so far.
      mz_uint i, z = 0;
      TDEFL_PUT_BITS(0, 3);
      if (d->m_bits_in) {
        TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
      }
      for (i = 2; i; --i, z ^= 0xFFFF) {
        TDEFL_PUT_BITS(z & 0xFFFF, 16);
      }
    }
  }
  MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
  // Reset per-block state for the next block.
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
  d->m_total_lz_bytes = 0;
  d->m_block_index++;
  // Deliver the produced bytes: via the callback, or by copying from the
  // staging buffer (tracking any overflow for a later flush), or by simply
  // advancing the offset when we wrote in place.
  if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0) {
    if (d->m_pPut_buf_func) {
      *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
      if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
        return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
    } else if (pOutput_buf_start == d->m_output_buf) {
      int bytes_to_copy = (int)MZ_MIN(
          (size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
      memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf,
             bytes_to_copy);
      d->m_out_buf_ofs += bytes_to_copy;
      if ((n -= bytes_to_copy) != 0) {
        d->m_output_flush_ofs = bytes_to_copy;
        d->m_output_flush_remaining = n;
      }
    } else {
      d->m_out_buf_ofs += n;
    }
  }
  return d->m_output_flush_remaining;
}

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
// Hash-chain match finder, 16-bits-at-a-time variant (relies on unaligned
// little-endian loads being legal on this target).
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint
    max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
  // c01: the word straddling the current best match's last byte — a cheap
  // filter that rejects probes that cannot improve on match_len.
  mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]),
            s01 = TDEFL_READ_UNALIGNED_WORD(s);
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
#define TDEFL_PROBE                                                            \
  next_probe_pos = d->m_next[probe_pos];                                       \
  if ((!next_probe_pos) ||                                                     \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist))       \
    return;                                                                    \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                        \
  if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    q = (const mz_uint16 *)(d->m_dict + probe_pos);
    if (TDEFL_READ_UNALIGNED_WORD(q) != s01) continue;
    // Compare 16 bits at a time, up to 32 word pairs (TDEFL_MAX_MATCH_LEN).
    p = s;
    probe_len = 32;
    do {
    } while (
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) &&
        (--probe_len > 0));
    if (!probe_len) {
      // Ran the full distance: maximum-length match.
      *pMatch_dist = dist;
      *pMatch_len = MZ_MIN(max_match_len, TDEFL_MAX_MATCH_LEN);
      break;
    } else if ((probe_len = ((mz_uint)(p - s) * 2) +
                            (mz_uint)(*(const mz_uint8 *)p ==
                                      *(const mz_uint8 *)q)) > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) ==
          max_match_len)
        break;
      c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
    }
  }
}
#else
// Portable byte-at-a-time hash-chain match finder.
static MZ_FORCEINLINE void tdefl_find_match(
    tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist,
    mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len) {
  mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK,
                match_len = *pMatch_len, probe_pos = pos, next_probe_pos,
                probe_len;
  mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
  const mz_uint8 *s = d->m_dict + pos, *p, *q;
  // c0/c1 filter: bytes at and just before the current best match length.
  mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
  MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
  if (max_match_len <= match_len) return;
  for (;;) {
    for (;;) {
      if (--num_probes_left == 0) return;
#define TDEFL_PROBE                                                      \
  next_probe_pos = d->m_next[probe_pos];                                 \
  if ((!next_probe_pos) ||                                               \
      ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
    return;                                                              \
  probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                  \
  if ((d->m_dict[probe_pos + match_len] == c0) &&                        \
      (d->m_dict[probe_pos + match_len - 1] == c1))                      \
    break;
      TDEFL_PROBE;
      TDEFL_PROBE;
      TDEFL_PROBE;
    }
    if (!dist) break;
    p = s;
    q = d->m_dict + probe_pos;
    for (probe_len = 0; probe_len < max_match_len; probe_len++)
      if (*p++ != *q++) break;
    if (probe_len > match_len) {
      *pMatch_dist = dist;
      if ((*pMatch_len = match_len = probe_len) == max_match_len) return;
      c0 = d->m_dict[pos + match_len];
      c1 = d->m_dict[pos + match_len - 1];
    }
  }
}
#endif  // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d) {
  // Faster, minimally featured LZRW1-style match+parse loop with better
  // register utilization. Intended for applications where raw throughput is
  // valued more highly than ratio.
mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left; mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags; mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size))) { const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096; mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size); d->m_src_buf_left -= num_bytes_to_process; lookahead_size += num_bytes_to_process; while (num_bytes_to_process) { mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process); memcpy(d->m_dict + dst_pos, d->m_pSrc, n); if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos)); d->m_pSrc += n; dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK; num_bytes_to_process -= n; } dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size); if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE)) break; while (lookahead_size >= 4) { mz_uint cur_match_dist, cur_match_len = 1; mz_uint8 *pCur_dict = d->m_dict + cur_pos; mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF; mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK; mz_uint probe_pos = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)lookahead_pos; if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((*(const mz_uint32 *)(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram)) { const mz_uint16 *p = (const mz_uint16 *)pCur_dict; const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos); mz_uint32 probe_len = 32; do { } while 
((TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (TDEFL_READ_UNALIGNED_WORD(++p) == TDEFL_READ_UNALIGNED_WORD(++q)) && (--probe_len > 0)); cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q); if (!probe_len) cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0; if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U))) { cur_match_len = 1; *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } else { mz_uint32 s0, s1; cur_match_len = MZ_MIN(cur_match_len, lookahead_size); MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE)); cur_match_dist--; pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN); *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist; pLZ_code_buf += 3; *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80); s0 = s_tdefl_small_dist_sym[cur_match_dist & 511]; s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8]; d->m_huff_count[1][(cur_match_dist < 512) ? 
s0 : s1]++; d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++; } } else { *pLZ_code_buf++ = (mz_uint8)first_trigram; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); d->m_huff_count[0][(mz_uint8)first_trigram]++; } if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } total_lz_bytes += cur_match_len; lookahead_pos += cur_match_len; dict_size = MZ_MIN(dict_size + cur_match_len, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK; MZ_ASSERT(lookahead_size >= cur_match_len); lookahead_size -= cur_match_len; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? MZ_FALSE : MZ_TRUE; total_lz_bytes = d->m_total_lz_bytes; pLZ_code_buf = d->m_pLZ_code_buf; pLZ_flags = d->m_pLZ_flags; num_flags_left = d->m_num_flags_left; } } while (lookahead_size) { mz_uint8 lit = d->m_dict[cur_pos]; total_lz_bytes++; *pLZ_code_buf++ = lit; *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1); if (--num_flags_left == 0) { num_flags_left = 8; pLZ_flags = pLZ_code_buf++; } d->m_huff_count[0][lit]++; lookahead_pos++; dict_size = MZ_MIN(dict_size + 1, TDEFL_LZ_DICT_SIZE); cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; lookahead_size--; if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) { int n; d->m_lookahead_pos = lookahead_pos; d->m_lookahead_size = lookahead_size; d->m_dict_size = dict_size; d->m_total_lz_bytes = total_lz_bytes; d->m_pLZ_code_buf = pLZ_code_buf; d->m_pLZ_flags = pLZ_flags; d->m_num_flags_left = num_flags_left; if ((n = tdefl_flush_block(d, 0)) != 0) return (n < 0) ? 
MZ_FALSE : MZ_TRUE;
        /* tdefl_flush_block() may have moved the buffer pointers; reload the
           cached working copies before continuing. */
        total_lz_bytes = d->m_total_lz_bytes;
        pLZ_code_buf = d->m_pLZ_code_buf;
        pLZ_flags = d->m_pLZ_flags;
        num_flags_left = d->m_num_flags_left;
      }
    }
  }
  /* Tail of tdefl_compress_fast() (its definition starts earlier in the
     file): write the cached working variables back into the compressor
     state and report success. */
  d->m_lookahead_pos = lookahead_pos;
  d->m_lookahead_size = lookahead_size;
  d->m_dict_size = dict_size;
  d->m_total_lz_bytes = total_lz_bytes;
  d->m_pLZ_code_buf = pLZ_code_buf;
  d->m_pLZ_flags = pLZ_flags;
  d->m_num_flags_left = num_flags_left;
  return MZ_TRUE;
}
#endif // MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN

// Appends one literal byte to the LZ code buffer: shifts a 0 bit (== literal)
// into the current flag byte, starts a fresh flag byte every 8 codes, and
// bumps the literal's Huffman frequency count.
static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d,
                                                mz_uint8 lit) {
  d->m_total_lz_bytes++;
  *d->m_pLZ_code_buf++ = lit;
  // Flags fill from the LSB up; bit 0 of the finished byte is the oldest code.
  *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++; // reserve next flag byte in-stream
  }
  d->m_huff_count[0][lit]++;
}

// Appends a (length, distance) match as 3 bytes (len-TDEFL_MIN_MATCH_LEN,
// dist low byte, dist high byte), shifts a 1 bit (== match) into the flag
// byte, and updates the length/distance Huffman frequency tables.
static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d,
                                              mz_uint match_len,
                                              mz_uint match_dist) {
  mz_uint32 s0, s1;
  MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) &&
            (match_dist <= TDEFL_LZ_DICT_SIZE));
  d->m_total_lz_bytes += match_len;
  d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
  match_dist -= 1; // distances are stored biased by 1
  d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
  d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
  d->m_pLZ_code_buf += 3;
  *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
  if (--d->m_num_flags_left == 0) {
    d->m_num_flags_left = 8;
    d->m_pLZ_flags = d->m_pLZ_code_buf++;
  }
  // Small distances index the symbol table directly; large ones by high byte.
  s0 = s_tdefl_small_dist_sym[match_dist & 511];
  s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
  d->m_huff_count[1][(match_dist < 512) ?
s0 : s1]++; if (match_len >= TDEFL_MIN_MATCH_LEN) d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++; } static mz_bool tdefl_compress_normal(tdefl_compressor *d) { const mz_uint8 *pSrc = d->m_pSrc; size_t src_buf_left = d->m_src_buf_left; tdefl_flush flush = d->m_flush; while ((src_buf_left) || ((flush) && (d->m_lookahead_size))) { mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos; // Update dictionary and hash chains. Keeps the lookahead size equal to // TDEFL_MAX_MATCH_LEN. if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1)) { mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2; mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK]; mz_uint num_bytes_to_process = (mz_uint)MZ_MIN( src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size); const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process; src_buf_left -= num_bytes_to_process; d->m_lookahead_size += num_bytes_to_process; while (pSrc != pSrc_end) { mz_uint8 c = *pSrc++; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK; ins_pos++; } } else { while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) { mz_uint8 c = *pSrc++; mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK; src_buf_left--; d->m_dict[dst_pos] = c; if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1)) d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c; if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN) { mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2; mz_uint hash = 
((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1); d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash]; d->m_hash[hash] = (mz_uint16)(ins_pos); } } } d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size); if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN)) break; // Simple lazy/greedy parsing state machine. len_to_move = 1; cur_match_dist = 0; cur_match_len = d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1); cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK; if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS)) { if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))) { mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK]; cur_match_len = 0; while (cur_match_len < d->m_lookahead_size) { if (d->m_dict[cur_pos + cur_match_len] != c) break; cur_match_len++; } if (cur_match_len < TDEFL_MIN_MATCH_LEN) cur_match_len = 0; else cur_match_dist = 1; } } else { tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len); } if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5))) { cur_match_dist = cur_match_len = 0; } if (d->m_saved_match_len) { if (cur_match_len > d->m_saved_match_len) { tdefl_record_literal(d, (mz_uint8)d->m_saved_lit); if (cur_match_len >= 128) { tdefl_record_match(d, cur_match_len, cur_match_dist); d->m_saved_match_len = 0; len_to_move = cur_match_len; } else { d->m_saved_lit = d->m_dict[cur_pos]; d->m_saved_match_dist = cur_match_dist; d->m_saved_match_len = cur_match_len; } } else { tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist); len_to_move = d->m_saved_match_len - 1; d->m_saved_match_len = 0; } } else if 
(!cur_match_dist)
      // No usable match: emit the current dictionary byte as a literal.
      tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
    else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) ||
             (cur_match_len >= 128)) {
      // Greedy mode (or a long-enough match): take the match immediately.
      tdefl_record_match(d, cur_match_len, cur_match_dist);
      len_to_move = cur_match_len;
    } else {
      // Lazy parsing: defer this match one byte to see if the next position
      // yields a longer one.
      d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
      d->m_saved_match_dist = cur_match_dist;
      d->m_saved_match_len = cur_match_len;
    }
    // Move the lookahead forward by len_to_move bytes.
    d->m_lookahead_pos += len_to_move;
    MZ_ASSERT(d->m_lookahead_size >= len_to_move);
    d->m_lookahead_size -= len_to_move;
    d->m_dict_size =
        MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
    // Check if it's time to flush the current LZ codes to the internal output
    // buffer.
    if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
        ((d->m_total_lz_bytes > 31 * 1024) &&
         (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >=
           d->m_total_lz_bytes) ||
          (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))) {
      int n;
      d->m_pSrc = pSrc;
      d->m_src_buf_left = src_buf_left;
      if ((n = tdefl_flush_block(d, 0)) != 0)
        return (n < 0) ? MZ_FALSE : MZ_TRUE;
    }
  }
  d->m_pSrc = pSrc;
  d->m_src_buf_left = src_buf_left;
  return MZ_TRUE;
}

// Copies as much pending compressed output as fits from the internal output
// buffer to the caller's buffer (when one was supplied), updates the caller's
// in/out size counters, and reports DONE only once the stream is finished and
// fully drained.
static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d) {
  if (d->m_pIn_buf_size) {
    // Tell the caller how many input bytes were actually consumed.
    *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
  }
  if (d->m_pOut_buf_size) {
    size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs,
                      d->m_output_flush_remaining);
    memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs,
           d->m_output_buf + d->m_output_flush_ofs, n);
    d->m_output_flush_ofs += (mz_uint)n;
    d->m_output_flush_remaining -= (mz_uint)n;
    d->m_out_buf_ofs += n;
    *d->m_pOut_buf_size = d->m_out_buf_ofs;
  }
  return (d->m_finished && !d->m_output_flush_remaining) ?
TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY;
}

// Low-level streaming compression entry point.
//
// d              - initialized compressor state (see tdefl_init).
// pIn_buf        - next chunk of input data (may be NULL when *pIn_buf_size
//                  is 0).
// pIn_buf_size   - in: available input bytes; out: bytes actually consumed.
// pOut_buf       - caller-supplied output buffer; mutually exclusive with a
//                  put-buf callback installed at init time.
// pOut_buf_size  - in: output capacity; out: bytes written.
// flush          - TDEFL_NO_FLUSH / TDEFL_SYNC_FLUSH / TDEFL_FULL_FLUSH /
//                  TDEFL_FINISH.
//
// Returns TDEFL_STATUS_OKAY while more work remains, TDEFL_STATUS_DONE once a
// TDEFL_FINISH'd stream is fully drained, or TDEFL_STATUS_BAD_PARAM on
// invalid usage. The result is also latched into m_prev_return_status.
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf,
                            size_t *pIn_buf_size, void *pOut_buf,
                            size_t *pOut_buf_size, tdefl_flush flush) {
  if (!d) {
    if (pIn_buf_size)
      *pIn_buf_size = 0;
    if (pOut_buf_size)
      *pOut_buf_size = 0;
    return TDEFL_STATUS_BAD_PARAM;
  }
  d->m_pIn_buf = pIn_buf;
  d->m_pIn_buf_size = pIn_buf_size;
  d->m_pOut_buf = pOut_buf;
  d->m_pOut_buf_size = pOut_buf_size;
  d->m_pSrc = (const mz_uint8 *)(pIn_buf);
  d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
  d->m_out_buf_ofs = 0;
  d->m_flush = flush;
  // Validate usage: exactly one output mechanism (callback XOR buffer) must
  // be active, no prior error, and FINISH must not be followed by non-FINISH.
  if (((d->m_pPut_buf_func != NULL) ==
       ((pOut_buf != NULL) || (pOut_buf_size != NULL))) ||
      (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
      (d->m_wants_to_finish && (flush != TDEFL_FINISH)) ||
      (pIn_buf_size && *pIn_buf_size && !pIn_buf) ||
      (pOut_buf_size && *pOut_buf_size && !pOut_buf)) {
    if (pIn_buf_size)
      *pIn_buf_size = 0;
    if (pOut_buf_size)
      *pOut_buf_size = 0;
    return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
  }
  d->m_wants_to_finish |= (flush == TDEFL_FINISH);
  // If output from a previous call is still queued (or we're done), just keep
  // draining it.
  if ((d->m_output_flush_remaining) || (d->m_finished))
    return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  // Fast path: single-probe greedy parsing with no filtering/RLE/raw modes.
  if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
      ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
      ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS |
                      TDEFL_RLE_MATCHES)) == 0)) {
    if (!tdefl_compress_fast(d))
      return d->m_prev_return_status;
  } else
#endif // #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
  {
    if (!tdefl_compress_normal(d))
      return d->m_prev_return_status;
  }
  // Keep the adler-32 of the raw input up to date when a zlib header/checksum
  // was requested.
  if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) &&
      (pIn_buf))
    d->m_adler32 =
        (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf,
                              d->m_pSrc - (const mz_uint8 *)pIn_buf);
  // All input consumed and nothing queued: emit the final/flush block.
  if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) &&
      (!d->m_output_flush_remaining)) {
    if (tdefl_flush_block(d, flush) < 0)
      return
d->m_prev_return_status;
    d->m_finished = (flush == TDEFL_FINISH);
    if (flush == TDEFL_FULL_FLUSH) {
      // A full flush resets the dictionary so the next block is independent
      // of all previously compressed data.
      MZ_CLEAR_OBJ(d->m_hash);
      MZ_CLEAR_OBJ(d->m_next);
      d->m_dict_size = 0;
    }
  }
  return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}

// Convenience wrapper around tdefl_compress() for callers using the put-buf
// callback installed at init time (no user output buffer involved).
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf,
                                   size_t in_buf_size, tdefl_flush flush) {
  MZ_ASSERT(d->m_pPut_buf_func);
  return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}

// (Re)initializes a compressor.
//
// pPut_buf_func - optional output callback; when NULL the caller must supply
//                 output buffers to tdefl_compress().
// pPut_buf_user - opaque pointer passed through to the callback.
// flags         - low 12 bits = max match-finder probes; plus TDEFL_* flags
//                 (greedy parsing, zlib header, RLE, etc.).
//
// Always returns TDEFL_STATUS_OKAY.
tdefl_status tdefl_init(tdefl_compressor *d,
                        tdefl_put_buf_func_ptr pPut_buf_func,
                        void *pPut_buf_user, int flags) {
  d->m_pPut_buf_func = pPut_buf_func;
  d->m_pPut_buf_user = pPut_buf_user;
  d->m_flags = (mz_uint)(flags);
  // Probe budgets derived from the low 12 bits of flags.
  d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
  d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
  d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
  // Skipping the hash clear is an opt-in speed hack that makes output depend
  // on prior state.
  if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG))
    MZ_CLEAR_OBJ(d->m_hash);
  d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size =
      d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
  d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished =
      d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
  // Byte 0 of the LZ code buffer is the first flag byte; codes start at 1.
  d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
  d->m_pLZ_flags = d->m_lz_code_buf;
  d->m_num_flags_left = 8;
  d->m_pOutput_buf = d->m_output_buf;
  d->m_pOutput_buf_end = d->m_output_buf;
  d->m_prev_return_status = TDEFL_STATUS_OKAY;
  d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
  d->m_adler32 = 1; // adler-32 initial value
  d->m_pIn_buf = NULL;
  d->m_pOut_buf = NULL;
  d->m_pIn_buf_size = NULL;
  d->m_pOut_buf_size = NULL;
  d->m_flush = TDEFL_NO_FLUSH;
  d->m_pSrc = NULL;
  d->m_src_buf_left = 0;
  d->m_out_buf_ofs = 0;
  memset(&d->m_huff_count[0][0], 0,
         sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
  memset(&d->m_huff_count[1][0], 0,
         sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
  return TDEFL_STATUS_OKAY;
}

// Returns the status latched by the most recent tdefl_compress() call.
tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d) {
  return d->m_prev_return_status;
}

// Returns the running adler-32 of the input compressed so far.
mz_uint32 tdefl_get_adler32(tdefl_compressor *d) { return d->m_adler32; }

// One-shot helper: compresses buf_len bytes of pBuf, delivering all output
// through pPut_buf_func. Heap-allocates a temporary compressor. Returns
// MZ_TRUE only if the whole stream compressed to completion.
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len,
                                     tdefl_put_buf_func_ptr pPut_buf_func,
                                     void *pPut_buf_user, int flags) {
  tdefl_compressor *pComp;
  mz_bool succeeded;
  if (((buf_len) && (!pBuf)) || (!pPut_buf_func))
    return MZ_FALSE;
  pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
  if (!pComp)
    return MZ_FALSE;
  succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) ==
               TDEFL_STATUS_OKAY);
  succeeded =
      succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) ==
                    TDEFL_STATUS_DONE);
  MZ_FREE(pComp);
  return succeeded;
}

// Growable (or fixed) byte sink used by the mem-to-heap / mem-to-mem helpers.
typedef struct {
  size_t m_size, m_capacity; // bytes written / bytes allocated
  mz_uint8 *m_pBuf;          // destination storage
  mz_bool m_expandable;      // may m_pBuf be realloc'd on overflow?
} tdefl_output_buffer;

// tdefl_put_buf_func_ptr adapter that appends to a tdefl_output_buffer,
// doubling its capacity (min 128 bytes) when expandable.
static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len,
                                          void *pUser) {
  tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
  size_t new_size = p->m_size + len;
  if (new_size > p->m_capacity) {
    size_t new_capacity = p->m_capacity;
    mz_uint8 *pNew_buf;
    if (!p->m_expandable)
      return MZ_FALSE; // fixed buffer full: signal failure to the compressor
    do {
      new_capacity = MZ_MAX(128U, new_capacity << 1U);
    } while (new_size > new_capacity);
    pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
    if (!pNew_buf)
      return MZ_FALSE;
    p->m_pBuf = pNew_buf;
    p->m_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
  p->m_size = new_size;
  return MZ_TRUE;
}

// Compresses pSrc_buf into a freshly heap-allocated buffer (caller frees with
// MZ_FREE). On success sets *pOut_len and returns the buffer; returns NULL on
// failure.
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len,
                                 size_t *pOut_len, int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_len)
    return MZ_FALSE; // NOTE(review): MZ_FALSE (0) used as a NULL pointer here
  else
    *pOut_len = 0;
  out_buf.m_expandable = MZ_TRUE;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return NULL;
  *pOut_len = out_buf.m_size;
  return out_buf.m_pBuf;
}

// Compresses pSrc_buf into the caller's fixed-size pOut_buf. Returns the
// number of compressed bytes, or 0 on failure (including output overflow).
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len,
                                 const void *pSrc_buf, size_t src_buf_len,
                                 int flags) {
  tdefl_output_buffer out_buf;
  MZ_CLEAR_OBJ(out_buf);
  if (!pOut_buf)
    return 0;
  // Non-expandable sink: compression fails cleanly if out_buf_len is exceeded.
  out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
  out_buf.m_capacity = out_buf_len;
  if (!tdefl_compress_mem_to_output(
          pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
    return 0;
  return out_buf.m_size;
}

#ifndef MINIZ_NO_ZLIB_APIS
// Match-finder probe counts per compression level 0..10.
static const mz_uint s_tdefl_num_probes[11] = {0,   1,   6,   32,  16, 32,
                                               128, 256, 512, 768, 1500};

// level may actually range from [0,10] (10 is a "hidden" max level, where we
// want a bit more compression and it's fine if throughput to fall off a cliff
// on some files).
// Maps zlib-style (level, window_bits, strategy) parameters onto the TDEFL_*
// flag word expected by tdefl_init().
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits,
                                                int strategy) {
  mz_uint comp_flags =
      s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] |
      ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
  // Positive window_bits selects a zlib wrapper (header + adler-32 trailer).
  if (window_bits > 0)
    comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
  if (!level)
    comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
  else if (strategy == MZ_FILTERED)
    comp_flags |= TDEFL_FILTER_MATCHES;
  else if (strategy == MZ_HUFFMAN_ONLY)
    comp_flags &= ~TDEFL_MAX_PROBES_MASK; // zero probes: no LZ matching at all
  else if (strategy == MZ_FIXED)
    comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
  else if (strategy == MZ_RLE)
    comp_flags |= TDEFL_RLE_MATCHES;
  return comp_flags;
}
#endif // MINIZ_NO_ZLIB_APIS

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) // nonstandard extension used : non-constant
                                // aggregate initializer (also supported by GNU
                                // C and C99, so no big deal)
#pragma warning(disable : 4244) // 'initializing': conversion from '__int64' to
                                // 'int', possible loss of data
#pragma warning(disable : 4267) // 'argument': conversion from '__int64' to
                                // 'int', possible loss of data
#pragma warning(disable : 4996) // 'strdup': The POSIX name for this item is
                                // deprecated. Instead, use the ISO C and C++
                                // conformant name: _strdup.
#endif

// Simple PNG writer function by Alex Evans, 2011. Released into the public
// domain: https://gist.github.com/908299, more context at
// http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
// This is actually a modification of Alex's original code so PNG files // generated by this function pass pngcheck. void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip) { // Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was // defined. static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500}; tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor)); tdefl_output_buffer out_buf; int i, bpl = w * num_chans, y, z; mz_uint32 c; *pLen_out = 0; if (!pComp) return NULL; MZ_CLEAR_OBJ(out_buf); out_buf.m_expandable = MZ_TRUE; out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h); if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity))) { MZ_FREE(pComp); return NULL; } // write dummy header for (z = 41; z; --z) tdefl_output_buffer_putter(&z, 1, &out_buf); // compress image data tdefl_init( pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER); for (y = 0; y < h; ++y) { tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH); tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? 
(h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH); } if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE) { MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } // write real header *pLen_out = out_buf.m_size - 41; { static const mz_uint8 chans[] = {0x00, 0x00, 0x04, 0x02, 0x06}; mz_uint8 pnghdr[41] = {0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x48, 0x44, 0x52, 0, 0, (mz_uint8)(w >> 8), (mz_uint8)w, 0, 0, (mz_uint8)(h >> 8), (mz_uint8)h, 8, chans[num_chans], 0, 0, 0, 0, 0, 0, 0, (mz_uint8)(*pLen_out >> 24), (mz_uint8)(*pLen_out >> 16), (mz_uint8)(*pLen_out >> 8), (mz_uint8)*pLen_out, 0x49, 0x44, 0x41, 0x54}; c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17); for (i = 0; i < 4; ++i, c <<= 8) ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24); memcpy(out_buf.m_pBuf, pnghdr, 41); } // write footer (IDAT CRC-32, followed by IEND chunk) if (!tdefl_output_buffer_putter( "\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf)) { *pLen_out = 0; MZ_FREE(pComp); MZ_FREE(out_buf.m_pBuf); return NULL; } c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, *pLen_out + 4); for (i = 0; i < 4; ++i, c <<= 8) (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24); // compute final size of file, grab compressed data buffer and return *pLen_out += 57; MZ_FREE(pComp); return out_buf.m_pBuf; } void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out) { // Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we // can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's // where #defined out) return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE); } // ------------------- .ZIP archive reading #ifndef MINIZ_NO_ARCHIVE_APIS #error "No arvhive APIs" #ifdef MINIZ_NO_STDIO #define MZ_FILE void * #else #include <stdio.h> #include <sys/stat.h> #if defined(_MSC_VER) || 
defined(__MINGW64__) static FILE *mz_fopen(const char *pFilename, const char *pMode) { FILE *pFile = NULL; fopen_s(&pFile, pFilename, pMode); return pFile; } static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream) { FILE *pFile = NULL; if (freopen_s(&pFile, pPath, pMode, pStream)) return NULL; return pFile; } #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN mz_fopen #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 _ftelli64 #define MZ_FSEEK64 _fseeki64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN mz_freopen #define MZ_DELETE_FILE remove #elif defined(__MINGW32__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT _stat #define MZ_FILE_STAT _stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif defined(__TINYC__) #ifndef MINIZ_NO_TIME #include <sys/utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftell #define MZ_FSEEK64 fseek #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #elif defined(__GNUC__) && defined(_LARGEFILE64_SOURCE) && _LARGEFILE64_SOURCE #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen64(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello64 #define MZ_FSEEK64 fseeko64 #define MZ_FILE_STAT_STRUCT stat64 #define MZ_FILE_STAT stat64 #define MZ_FFLUSH fflush #define MZ_FREOPEN(p, m, s) freopen64(p, m, s) 
#define MZ_DELETE_FILE remove #else #ifndef MINIZ_NO_TIME #include <utime.h> #endif #define MZ_FILE FILE #define MZ_FOPEN(f, m) fopen(f, m) #define MZ_FCLOSE fclose #define MZ_FREAD fread #define MZ_FWRITE fwrite #define MZ_FTELL64 ftello #define MZ_FSEEK64 fseeko #define MZ_FILE_STAT_STRUCT stat #define MZ_FILE_STAT stat #define MZ_FFLUSH fflush #define MZ_FREOPEN(f, m, s) freopen(f, m, s) #define MZ_DELETE_FILE remove #endif // #ifdef _MSC_VER #endif // #ifdef MINIZ_NO_STDIO #define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c)) // Various ZIP archive enums. To completely avoid cross platform compiler // alignment and platform endian issues, miniz.c doesn't use structs for any of // this stuff. enum { // ZIP archive identifiers and record sizes MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50, MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50, MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50, MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22, // Central directory header record offsets MZ_ZIP_CDH_SIG_OFS = 0, MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4, MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6, MZ_ZIP_CDH_BIT_FLAG_OFS = 8, MZ_ZIP_CDH_METHOD_OFS = 10, MZ_ZIP_CDH_FILE_TIME_OFS = 12, MZ_ZIP_CDH_FILE_DATE_OFS = 14, MZ_ZIP_CDH_CRC32_OFS = 16, MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20, MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24, MZ_ZIP_CDH_FILENAME_LEN_OFS = 28, MZ_ZIP_CDH_EXTRA_LEN_OFS = 30, MZ_ZIP_CDH_COMMENT_LEN_OFS = 32, MZ_ZIP_CDH_DISK_START_OFS = 34, MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36, MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38, MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42, // Local directory header offsets MZ_ZIP_LDH_SIG_OFS = 0, MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4, MZ_ZIP_LDH_BIT_FLAG_OFS = 6, MZ_ZIP_LDH_METHOD_OFS = 8, MZ_ZIP_LDH_FILE_TIME_OFS = 10, MZ_ZIP_LDH_FILE_DATE_OFS = 12, MZ_ZIP_LDH_CRC32_OFS = 14, MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18, MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22, MZ_ZIP_LDH_FILENAME_LEN_OFS = 26, 
MZ_ZIP_LDH_EXTRA_LEN_OFS = 28, // End of central directory offsets MZ_ZIP_ECDH_SIG_OFS = 0, MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4, MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6, MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8, MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10, MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12, MZ_ZIP_ECDH_CDIR_OFS_OFS = 16, MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20, }; typedef struct { void *m_p; size_t m_size, m_capacity; mz_uint m_element_size; } mz_zip_array; struct mz_zip_internal_state_tag { mz_zip_array m_central_dir; mz_zip_array m_central_dir_offsets; mz_zip_array m_sorted_central_dir_offsets; MZ_FILE *m_pFile; void *m_pMem; size_t m_mem_size; size_t m_mem_capacity; }; #define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) \ (array_ptr)->m_element_size = element_size #define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) \ ((element_type *)((array_ptr)->m_p))[index] static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip, mz_zip_array *pArray) { pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p); memset(pArray, 0, sizeof(mz_zip_array)); } static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip, mz_zip_array *pArray, size_t min_new_capacity, mz_uint growing) { void *pNew_p; size_t new_capacity = min_new_capacity; MZ_ASSERT(pArray->m_element_size); if (pArray->m_capacity >= min_new_capacity) return MZ_TRUE; if (growing) { new_capacity = MZ_MAX(1, pArray->m_capacity); while (new_capacity < min_new_capacity) new_capacity *= 2; } if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p, pArray->m_element_size, new_capacity))) return MZ_FALSE; pArray->m_p = pNew_p; pArray->m_capacity = new_capacity; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_capacity, mz_uint growing) { if (new_capacity > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing)) return MZ_FALSE; } return MZ_TRUE; } static MZ_FORCEINLINE mz_bool 
mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size, mz_uint growing) { if (new_size > pArray->m_capacity) { if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing)) return MZ_FALSE; } pArray->m_size = new_size; return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n) { return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE); } static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray, const void *pElements, size_t n) { size_t orig_size = pArray->m_size; if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE)) return MZ_FALSE; memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size, pElements, n * pArray->m_element_size); return MZ_TRUE; } #ifndef MINIZ_NO_TIME static time_t mz_zip_dos_to_time_t(int dos_time, int dos_date) { struct tm tm; memset(&tm, 0, sizeof(tm)); tm.tm_isdst = -1; tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900; tm.tm_mon = ((dos_date >> 5) & 15) - 1; tm.tm_mday = dos_date & 31; tm.tm_hour = (dos_time >> 11) & 31; tm.tm_min = (dos_time >> 5) & 63; tm.tm_sec = (dos_time << 1) & 62; return mktime(&tm); } static void mz_zip_time_to_dos_time(time_t time, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef _MSC_VER struct tm tm_struct; struct tm *tm = &tm_struct; errno_t err = localtime_s(tm, &time); if (err) { *pDOS_date = 0; *pDOS_time = 0; return; } #else struct tm *tm = localtime(&time); #endif *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) + ((tm->tm_sec) >> 1)); *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) + ((tm->tm_mon + 1) << 5) + tm->tm_mday); } #endif #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_get_file_modified_time(const char *pFilename, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date) { #ifdef MINIZ_NO_TIME (void)pFilename; *pDOS_date = *pDOS_time = 0; #else struct MZ_FILE_STAT_STRUCT file_stat; // On Linux with x86 glibc, 
this call will fail on large files (>= 0x80000000 // bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh. if (MZ_FILE_STAT(pFilename, &file_stat) != 0) return MZ_FALSE; mz_zip_time_to_dos_time(file_stat.st_mtime, pDOS_time, pDOS_date); #endif // #ifdef MINIZ_NO_TIME return MZ_TRUE; } #ifndef MINIZ_NO_TIME static mz_bool mz_zip_set_file_times(const char *pFilename, time_t access_time, time_t modified_time) { struct utimbuf t; t.actime = access_time; t.modtime = modified_time; return !utime(pFilename, &t); } #endif // #ifndef MINIZ_NO_TIME #endif // #ifndef MINIZ_NO_STDIO static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip, mz_uint32 flags) { (void)flags; if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID)) return MZ_FALSE; if (!pZip->m_pAlloc) pZip->m_pAlloc = def_alloc_func; if (!pZip->m_pFree) pZip->m_pFree = def_free_func; if (!pZip->m_pRealloc) pZip->m_pRealloc = def_realloc_func; pZip->m_zip_mode = MZ_ZIP_MODE_READING; pZip->m_archive_size = 0; pZip->m_central_directory_file_ofs = 0; pZip->m_total_files = 0; if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state)))) return MZ_FALSE; memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32)); MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32)); return MZ_TRUE; } static MZ_FORCEINLINE mz_bool mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, mz_uint r_index) { const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE; const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT( pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, 
mz_uint32, r_index)); mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS), r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS); mz_uint8 l = 0, r = 0; pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE; pE = pL + MZ_MIN(l_len, r_len); while (pL < pE) { if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR))) break; pL++; pR++; } return (pL == pE) ? (l_len < r_len) : (l < r); } #define MZ_SWAP_UINT32(a, b) \ do { \ mz_uint32 t = a; \ a = b; \ b = t; \ } \ MZ_MACRO_END // Heap sort of lowercased filenames, used to help accelerate plain central // directory searches by mz_zip_reader_locate_file(). (Could also use qsort(), // but it could allocate memory.) static void mz_zip_reader_sort_central_dir_offsets_by_filename( mz_zip_archive *pZip) { mz_zip_internal_state *pState = pZip->m_pState; const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets; const mz_zip_array *pCentral_dir = &pState->m_central_dir; mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT( &pState->m_sorted_central_dir_offsets, mz_uint32, 0); const int size = pZip->m_total_files; int start = (size - 2) >> 1, end; while (start >= 0) { int child, root = start; for (;;) { if ((child = (root << 1) + 1) >= size) break; child += (((child + 1) < size) && (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1]))); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], pIndices[child]); root = child; } start--; } end = size - 1; while (end > 0) { int child, root = 0; MZ_SWAP_UINT32(pIndices[end], pIndices[0]); for (;;) { if ((child = (root << 1) + 1) >= end) break; child += (((child + 1) < end) && mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1])); if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child])) break; MZ_SWAP_UINT32(pIndices[root], 
pIndices[child]);
      root = child;
    }
    end--;
  }
}

/* Reads and validates the end-of-central-directory (EOCD) record, loads the
   entire central directory into memory, and builds per-file offset indices
   (plus an optional filename-sorted index used for binary-search lookups).
   Returns MZ_FALSE on any validation failure. Zip64 archives (0xFFFFFFFF
   sentinel sizes) and most multi-disk archives are rejected. */
static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip,
                                              mz_uint32 flags) {
  mz_uint cdir_size, num_this_disk, cdir_disk_index;
  mz_uint64 cdir_ofs;
  mz_int64 cur_file_ofs;
  const mz_uint8 *p;
  mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];  // scan buffer, mz_uint32-aligned
  mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
  mz_bool sort_central_dir =
      ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
  // Basic sanity checks - reject files which are too small, and check the first
  // 4 bytes of the file to make sure a local header is there.
  if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  // Find the end of central directory record by scanning the file from the end
  // towards the beginning.
  cur_file_ofs =
      MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
  for (;;) {
    int i,
        n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
      return MZ_FALSE;
    // Scan this buffer backwards for the EOCD signature.
    for (i = n - 4; i >= 0; --i)
      if (MZ_READ_LE32(pBuf + i) == MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG)
        break;
    if (i >= 0) {
      cur_file_ofs += i;
      break;
    }
    // Give up at the start of the file, or once we've scanned back further
    // than the maximum possible archive comment (0xFFFF bytes) allows.
    if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >=
                            (0xFFFF + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
      return MZ_FALSE;
    // Step back with a 3-byte overlap so a signature straddling two reads is
    // still detected.
    cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
  }
  // Read and verify the end of central directory record.
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                    MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  // Signature must match, and the archive-wide entry count must equal the
  // on-this-disk entry count (i.e. not a spanned archive).
  if ((MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) !=
       MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG) ||
      ((pZip->m_total_files =
            MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS)) !=
       MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS)))
    return MZ_FALSE;
  num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
  cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
  // Only single-disk archives are supported (disk numbers 0/0 or 1/1).
  if (((num_this_disk | cdir_disk_index) != 0) &&
      ((num_this_disk != 1) || (cdir_disk_index != 1)))
    return MZ_FALSE;
  // The central dir must be large enough to hold a header for every entry.
  if ((cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS)) <
      pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);
  if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size)
    return MZ_FALSE;
  pZip->m_central_directory_file_ofs = cdir_ofs;
  if (pZip->m_total_files) {
    mz_uint i, n;
    // Read the entire central directory into a heap block, and allocate another
    // heap block to hold the unsorted central dir file record offsets, and
    // another to hold the sorted indices.
    if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size,
                              MZ_FALSE)) ||
        (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets,
                              pZip->m_total_files, MZ_FALSE)))
      return MZ_FALSE;
    if (sort_central_dir) {
      if (!mz_zip_array_resize(pZip,
                               &pZip->m_pState->m_sorted_central_dir_offsets,
                               pZip->m_total_files, MZ_FALSE))
        return MZ_FALSE;
    }
    if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs,
                      pZip->m_pState->m_central_dir.m_p,
                      cdir_size) != cdir_size)
      return MZ_FALSE;
    // Now create an index into the central directory file records, do some
    // basic sanity checking on each record, and check for zip64 entries (which
    // are not yet supported).
    p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
    // n tracks the bytes remaining in the in-memory central dir buffer.
    for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i) {
      mz_uint total_header_size, comp_size, decomp_size, disk_index;
      if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) ||
          (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
        return MZ_FALSE;
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           i) =
          (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);
      if (sort_central_dir)
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets,
                             mz_uint32, i) = i;
      comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
      decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
      // Reject inconsistent stored-entry sizes, and the 0xFFFFFFFF zip64
      // sentinel values.
      if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) &&
           (decomp_size != comp_size)) ||
          (decomp_size && !comp_size) || (decomp_size == 0xFFFFFFFF) ||
          (comp_size == 0xFFFFFFFF))
        return MZ_FALSE;
      disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
      if ((disk_index != num_this_disk) && (disk_index != 1))
        return MZ_FALSE;
      // The entry's local header plus its compressed data must fit inside the
      // archive.
      if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) +
           MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
        return MZ_FALSE;
      // Fixed header plus variable-length filename/extra/comment fields must
      // fit in the remaining buffer.
      if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) +
                               MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) >
          n)
        return MZ_FALSE;
      n -= total_header_size;
      p += total_header_size;
    }
  }
  if (sort_central_dir)
    mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);
  return MZ_TRUE;
}

/* Initializes a reader over a user-supplied m_pRead callback covering `size`
   bytes, then parses the central directory. On failure the archive is ended
   (state freed) and MZ_FALSE is returned. */
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size,
                           mz_uint32 flags) {
  if ((!pZip) || (!pZip->m_pRead))
    return MZ_FALSE;
  if (!mz_zip_reader_init_internal(pZip, flags))
    return MZ_FALSE;
  pZip->m_archive_size = size;
  if (!mz_zip_reader_read_central_dir(pZip, flags)) {
    mz_zip_reader_end(pZip);
    return MZ_FALSE;
  }
  return MZ_TRUE;
}

/* m_pRead callback for archives held entirely in memory: copies up to n bytes
   starting at file_ofs, clamped to the archive bounds. */
static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs,
                                   void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
size_t s = (file_ofs >= pZip->m_archive_size) ? 0 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n); memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s); return s; } mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint32 flags) { if (!mz_zip_reader_init_internal(pZip, flags)) return MZ_FALSE; pZip->m_archive_size = size; pZip->m_pRead = mz_zip_mem_read_func; pZip->m_pIO_opaque = pZip; #ifdef __cplusplus pZip->m_pState->m_pMem = const_cast<void *>(pMem); #else pZip->m_pState->m_pMem = (void *)pMem; #endif pZip->m_pState->m_mem_size = size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #ifndef MINIZ_NO_STDIO static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n) { mz_zip_archive *pZip = (mz_zip_archive *)pOpaque; mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile); if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET)))) return 0; return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile); } mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint32 flags) { mz_uint64 file_size; MZ_FILE *pFile = MZ_FOPEN(pFilename, "rb"); if (!pFile) return MZ_FALSE; if (MZ_FSEEK64(pFile, 0, SEEK_END)) { MZ_FCLOSE(pFile); return MZ_FALSE; } file_size = MZ_FTELL64(pFile); if (!mz_zip_reader_init_internal(pZip, flags)) { MZ_FCLOSE(pFile); return MZ_FALSE; } pZip->m_pRead = mz_zip_file_read_func; pZip->m_pIO_opaque = pZip; pZip->m_pState->m_pFile = pFile; pZip->m_archive_size = file_size; if (!mz_zip_reader_read_central_dir(pZip, flags)) { mz_zip_reader_end(pZip); return MZ_FALSE; } return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip) { return pZip ? 
pZip->m_total_files : 0;
}

/* Returns a pointer to the central-directory header record for file_index,
   or NULL if the index/state is invalid. The pointer aliases the in-memory
   central directory owned by the archive state. */
static MZ_FORCEINLINE const mz_uint8 *mz_zip_reader_get_cdh(
    mz_zip_archive *pZip, mz_uint file_index) {
  if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return NULL;
  return &MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir, mz_uint8,
      MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                           file_index));
}

/* True if the entry's general-purpose bit 0 (traditional encryption) is set. */
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip,
                                        mz_uint file_index) {
  mz_uint m_bit_flag;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p)
    return MZ_FALSE;
  m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
  return (m_bit_flag & 1);
}

/* True if the entry looks like a directory: either its name ends in '/' or
   its external attributes carry the DOS directory bit (0x10). */
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip,
                                          mz_uint file_index) {
  mz_uint filename_len, external_attr;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p)
    return MZ_FALSE;
  // First see if the filename ends with a '/' character.
  filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_len) {
    if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
      return MZ_TRUE;
  }
  // Bugfix: This code was also checking if the internal attribute was non-zero,
  // which wasn't correct.
  // Most/all zip writers (hopefully) set DOS file/directory attributes in the
  // low 16-bits, so check for the DOS directory flag and ignore the source OS
  // ID in the created by field.
  // FIXME: Remove this check? Is it necessary - we already check the filename.
  external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  if ((external_attr & 0x10) != 0)
    return MZ_TRUE;
  return MZ_FALSE;
}

/* Fills *pStat with the decoded central-directory fields for file_index:
   sizes, CRC, method, flags, attributes, local-header offset, and truncated
   copies of the filename and comment. Returns MZ_FALSE on bad index/args. */
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index,
                                mz_zip_archive_file_stat *pStat) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if ((!p) || (!pStat))
    return MZ_FALSE;
  // Unpack the central directory record.
  pStat->m_file_index = file_index;
  pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(
      &pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
  pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
  pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
  pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
  pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
  // Convert the entry's DOS time/date pair to a time_t.
  pStat->m_time =
      mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS),
                           MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
  pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
  pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
  pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
  pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
  // Copy as much of the filename and comment as possible.
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
  memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
  pStat->m_filename[n] = '\0';
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
  n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
  pStat->m_comment_size = n;
  // The comment follows the filename and extra fields in the record.
  memcpy(pStat->m_comment,
         p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
             MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) +
             MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS),
         n);
  pStat->m_comment[n] = '\0';
  return MZ_TRUE;
}

/* Copies the entry's filename (truncated + NUL-terminated) into pFilename and
   returns the buffer size needed to hold the full name including the NUL. */
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index,
                                   char *pFilename,
                                   mz_uint filename_buf_size) {
  mz_uint n;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  if (!p) {
    if (filename_buf_size)
      pFilename[0] = '\0';
    return 0;
  }
  n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  if (filename_buf_size) {
    n = MZ_MIN(n, filename_buf_size - 1);
    memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
    pFilename[n] = '\0';
  }
  return n + 1;
}

/* Compares len bytes of two names; case-sensitive only when the flag is set. */
static MZ_FORCEINLINE mz_bool mz_zip_reader_string_equal(const char *pA,
                                                         const char *pB,
                                                         mz_uint len,
                                                         mz_uint flags) {
  mz_uint i;
  if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE)
    return 0 == memcmp(pA, pB, len);
  for (i = 0; i < len; ++i)
    if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i]))
      return MZ_FALSE;
  return MZ_TRUE;
}

/* Case-insensitive ordering between the filename of central-dir entry l_index
   and the string pR (length r_len); <0 / 0 / >0 like strcmp. Used by the
   filename binary search. */
static MZ_FORCEINLINE int mz_zip_reader_filename_compare(
    const mz_zip_array *pCentral_dir_array,
    const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR,
    mz_uint r_len) {
  const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(
                     pCentral_dir_array, mz_uint8,
                     MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32,
                                          l_index)),
                 *pE;
  mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
  mz_uint8 l = 0, r = 0;
  pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
  pE = pL + MZ_MIN(l_len, r_len);
  while (pL < pE) {
    if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR)))
      break;
    pL++;
    pR++;
  }
  // Equal prefix: shorter name sorts first; otherwise the differing bytes
  // decide.
  return (pL == pE) ?
(int)(l_len - r_len) : (l - r);
}

/* Binary search over the filename-sorted index for pFilename (case
   insensitive, full path). Returns the file index or -1 if not found. */
static int mz_zip_reader_locate_file_binary_search(mz_zip_archive *pZip,
                                                   const char *pFilename) {
  mz_zip_internal_state *pState = pZip->m_pState;
  const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
  const mz_zip_array *pCentral_dir = &pState->m_central_dir;
  mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(
      &pState->m_sorted_central_dir_offsets, mz_uint32, 0);
  const int size = pZip->m_total_files;
  const mz_uint filename_len = (mz_uint)strlen(pFilename);
  int l = 0, h = size - 1;
  while (l <= h) {
    int m = (l + h) >> 1, file_index = pIndices[m],
        comp = mz_zip_reader_filename_compare(pCentral_dir,
                                              pCentral_dir_offsets, file_index,
                                              pFilename, filename_len);
    if (!comp)
      return file_index;
    else if (comp < 0)
      l = m + 1;
    else
      h = m - 1;
  }
  return -1;
}

/* Finds the index of the entry named pName (optionally also matching
   pComment). Honors MZ_ZIP_FLAG_IGNORE_PATH / _CASE_SENSITIVE. Uses the
   sorted-index binary search when possible, else a linear scan. Returns -1 if
   not found or on invalid arguments. */
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName,
                              const char *pComment, mz_uint flags) {
  mz_uint file_index;
  size_t name_len, comment_len;
  if ((!pZip) || (!pZip->m_pState) || (!pName) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return -1;
  // Fast path: default flags, no comment filter, and a sorted index exists.
  if (((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) &&
      (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
    return mz_zip_reader_locate_file_binary_search(pZip, pName);
  name_len = strlen(pName);
  if (name_len > 0xFFFF)  // zip name/comment lengths are 16-bit
    return -1;
  comment_len = pComment ? strlen(pComment) : 0;
  if (comment_len > 0xFFFF)
    return -1;
  for (file_index = 0; file_index < pZip->m_total_files; file_index++) {
    const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(
        &pZip->m_pState->m_central_dir, mz_uint8,
        MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32,
                             file_index));
    mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    const char *pFilename =
        (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    if (filename_len < name_len)
      continue;
    if (comment_len) {
      // The comment follows the filename and extra data in the record.
      mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS),
              file_comment_len =
                  MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
      const char *pFile_comment = pFilename + filename_len + file_extra_len;
      if ((file_comment_len != comment_len) ||
          (!mz_zip_reader_string_equal(pComment, pFile_comment,
                                       file_comment_len, flags)))
        continue;
    }
    if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len)) {
      // Strip everything up to and including the last path separator.
      int ofs = filename_len - 1;
      do {
        if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') ||
            (pFilename[ofs] == ':'))
          break;
      } while (--ofs >= 0);
      ofs++;
      pFilename += ofs;
      filename_len -= ofs;
    }
    if ((filename_len == name_len) &&
        (mz_zip_reader_string_equal(pName, pFilename, filename_len, flags)))
      return file_index;
  }
  return -1;
}

/* Extracts file_index into the caller-provided buffer pBuf (no heap output
   allocation). Supports stored and deflate entries only; verifies CRC32 of
   the uncompressed data unless MZ_ZIP_FLAG_COMPRESSED_DATA is set. An
   optional pUser_read_buf avoids the internal temporary read buffer. */
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip,
                                              mz_uint file_index, void *pBuf,
                                              size_t buf_size, mz_uint flags,
                                              void *pUser_read_buf,
                                              size_t user_read_buf_size) {
  int status = TINFL_STATUS_DONE;
  mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0,
                                       read_buf_size, read_buf_ofs = 0,
                                       read_buf_avail;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf;
  // Local-header scratch, mz_uint32-aligned.
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  tinfl_decompressor inflator;
  if ((buf_size) && (!pBuf))
    return MZ_FALSE;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  // Empty file, or a directory (but not
  // always a directory - I've seen odd zips
  // with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size)
    return MZ_TRUE;
  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index))
    return MZ_TRUE;
  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32))
    return MZ_FALSE;
  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;
  // Ensure supplied output buffer is large enough.
  needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size
                                                      : file_stat.m_uncomp_size;
  if (buf_size < needed_size)
    return MZ_FALSE;
  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  // Skip the local header's variable-length filename/extra fields to reach
  // the compressed data.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;
  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf,
                      (size_t)needed_size) != needed_size)
      return MZ_FALSE;
    // Verify CRC only when returning uncompressed data.
    return ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) != 0) ||
           (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                     (size_t)file_stat.m_uncomp_size) == file_stat.m_crc32);
  }
  // Decompress the file either directly from memory or from a file input
  // buffer.
  tinfl_init(&inflator);
  if (pZip->m_pState->m_pMem) {
    // Read directly from the archive in memory.
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else if (pUser_read_buf) {
    // Use a user provided read buffer.
    if (!user_read_buf_size)
      return MZ_FALSE;
    pRead_buf = (mz_uint8 *)pUser_read_buf;
    read_buf_size = user_read_buf_size;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  } else {
    // Temporarily allocate a read buffer.
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
    // On 32-bit size_t targets, refuse sizes that can't be represented.
    // (The "(0, ...)" form silences MSVC's constant-conditional warning.)
#ifdef _MSC_VER
    if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
        (read_buf_size > 0x7FFFFFFF))
#else
    if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
#endif
      return MZ_FALSE;
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }
  // Inflate loop: refill the input buffer as needed, writing straight into
  // the caller's output buffer (non-wrapping mode).
  do {
    size_t in_buf_size,
        out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
    if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
      read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
      if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                        (size_t)read_buf_avail) != read_buf_avail) {
        status = TINFL_STATUS_FAILED;
        break;
      }
      cur_file_ofs += read_buf_avail;
      comp_remaining -= read_buf_avail;
      read_buf_ofs = 0;
    }
    in_buf_size = (size_t)read_buf_avail;
    status = tinfl_decompress(
        &inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
        (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size,
        TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF |
            (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
    read_buf_avail -= in_buf_size;
    read_buf_ofs += in_buf_size;
    out_buf_ofs += out_buf_size;
  } while (status == TINFL_STATUS_NEEDS_MORE_INPUT);
  if (status == TINFL_STATUS_DONE) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf,
                  (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }
  // Free the temporary read buffer only if we allocated it ourselves.
  if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  return status == TINFL_STATUS_DONE;
}

/* Convenience wrapper: locate by filename, then extract to a caller buffer. */
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(
    mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size,
    mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
                                               flags, pUser_read_buf,
                                               user_read_buf_size);
}

/* Extracts by index into pBuf using an internally allocated read buffer. */
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index,
                                     void *pBuf, size_t buf_size,
                                     mz_uint flags) {
  return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size,
                                               flags, NULL, 0);
}

/* Extracts by filename into pBuf using an internally allocated read buffer. */
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip,
                                          const char *pFilename, void *pBuf,
                                          size_t buf_size, mz_uint flags) {
  return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf,
                                                    buf_size, flags, NULL, 0);
}

/* Extracts file_index into a freshly allocated heap block (caller frees via
   the archive's m_pFree). On success *pSize receives the byte count; returns
   NULL on failure. */
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index,
                                    size_t *pSize, mz_uint flags) {
  mz_uint64 comp_size, uncomp_size, alloc_size;
  const mz_uint8 *p = mz_zip_reader_get_cdh(pZip, file_index);
  void *pBuf;
  if (pSize)
    *pSize = 0;
  if (!p)
    return NULL;
  comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
  uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
  alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ?
comp_size : uncomp_size;
  // Refuse allocations not representable in a 32-bit size_t.
#ifdef _MSC_VER
  if (((0, sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#else
  if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
#endif
    return NULL;
  if (NULL ==
      (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
    return NULL;
  if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size,
                                    flags)) {
    pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
    return NULL;
  }
  if (pSize)
    *pSize = (size_t)alloc_size;
  return pBuf;
}

/* Locates pFilename and extracts it to a freshly allocated heap block. */
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip,
                                         const char *pFilename, size_t *pSize,
                                         mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0) {
    if (pSize)
      *pSize = 0;
    return MZ_FALSE;
  }
  return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}

/* Streams the extracted (or raw compressed, per flags) data of file_index to
   pCallback in chunks, using a TINFL_LZ_DICT_SIZE circular window for deflate
   entries. Verifies length and CRC32 of the uncompressed stream unless
   MZ_ZIP_FLAG_COMPRESSED_DATA is set. */
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip,
                                          mz_uint file_index,
                                          mz_file_write_func pCallback,
                                          void *pOpaque, mz_uint flags) {
  int status = TINFL_STATUS_DONE;
  mz_uint file_crc32 = MZ_CRC32_INIT;
  mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining,
            out_buf_ofs = 0, cur_file_ofs;
  mz_zip_archive_file_stat file_stat;
  void *pRead_buf = NULL;
  void *pWrite_buf = NULL;
  // Local-header scratch, mz_uint32-aligned.
  mz_uint32
      local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) /
                       sizeof(mz_uint32)];
  mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  // Empty file, or a directory (but not always a directory - I've seen odd zips
  // with directories that have compressed data which inflates to 0 bytes)
  if (!file_stat.m_comp_size)
    return MZ_TRUE;
  // Entry is a subdirectory (I've seen old zips with dir entries which have
  // compressed deflate data which inflates to 0 bytes, but these entries claim
  // to uncompress to 512 bytes in the headers).
  // I'm torn how to handle this case - should it fail instead?
  if (mz_zip_reader_is_file_a_directory(pZip, file_index))
    return MZ_TRUE;
  // Encryption and patch files are not supported.
  if (file_stat.m_bit_flag & (1 | 32))
    return MZ_FALSE;
  // This function only supports stored and deflate.
  if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) &&
      (file_stat.m_method != MZ_DEFLATED))
    return MZ_FALSE;
  // Read and parse the local directory entry.
  cur_file_ofs = file_stat.m_local_header_ofs;
  if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header,
                    MZ_ZIP_LOCAL_DIR_HEADER_SIZE) !=
      MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    return MZ_FALSE;
  if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    return MZ_FALSE;
  // Skip the local header's variable-length fields to reach the data.
  cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) +
                  MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
  if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
    return MZ_FALSE;
  // Decompress the file either directly from memory or from a file input
  // buffer.
  if (pZip->m_pState->m_pMem) {
    pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
    read_buf_size = read_buf_avail = file_stat.m_comp_size;
    comp_remaining = 0;
  } else {
    read_buf_size =
        MZ_MIN(file_stat.m_comp_size, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE);
    if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                            (size_t)read_buf_size)))
      return MZ_FALSE;
    read_buf_avail = 0;
    comp_remaining = file_stat.m_comp_size;
  }
  if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method)) {
    // The file is stored or the caller has requested the compressed data.
    if (pZip->m_pState->m_pMem) {
      // In-memory archive: hand the whole span to the callback at once.
#ifdef _MSC_VER
      if (((0, sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#else
      if (((sizeof(size_t) == sizeof(mz_uint32))) &&
          (file_stat.m_comp_size > 0xFFFFFFFF))
#endif
        return MZ_FALSE;
      if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                    (size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
        status = TINFL_STATUS_FAILED;
      else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
        file_crc32 =
            (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf,
                                (size_t)file_stat.m_comp_size);
      cur_file_ofs += file_stat.m_comp_size;
      out_buf_ofs += file_stat.m_comp_size;
      comp_remaining = 0;
    } else {
      // Streamed archive: read/forward fixed-size chunks.
      while (comp_remaining) {
        read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
        if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                          (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
          file_crc32 = (mz_uint32)mz_crc32(
              file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
        if (pCallback(pOpaque, out_buf_ofs, pRead_buf,
                      (size_t)read_buf_avail) != read_buf_avail) {
          status = TINFL_STATUS_FAILED;
          break;
        }
        cur_file_ofs += read_buf_avail;
        out_buf_ofs += read_buf_avail;
        comp_remaining -= read_buf_avail;
      }
    }
  } else {
    // Deflate entry: inflate through a dictionary-sized circular window,
    // flushing each produced span to the callback.
    tinfl_decompressor inflator;
    tinfl_init(&inflator);
    if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1,
                                             TINFL_LZ_DICT_SIZE)))
      status = TINFL_STATUS_FAILED;
    else {
      do {
        mz_uint8 *pWrite_buf_cur =
            (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        size_t in_buf_size,
            out_buf_size =
                TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
        if ((!read_buf_avail) && (!pZip->m_pState->m_pMem)) {
          read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
          if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf,
                            (size_t)read_buf_avail) != read_buf_avail) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          cur_file_ofs += read_buf_avail;
          comp_remaining -= read_buf_avail;
          read_buf_ofs = 0;
        }
        in_buf_size = (size_t)read_buf_avail;
        status = tinfl_decompress(
            &inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size,
            (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size,
            comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
        read_buf_avail -= in_buf_size;
        read_buf_ofs += in_buf_size;
        if (out_buf_size) {
          if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) !=
              out_buf_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
          file_crc32 =
              (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
          // Guard against output exceeding the declared uncompressed size.
          if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size) {
            status = TINFL_STATUS_FAILED;
            break;
          }
        }
      } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) ||
               (status == TINFL_STATUS_HAS_MORE_OUTPUT));
    }
  }
  if ((status == TINFL_STATUS_DONE) &&
      (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))) {
    // Make sure the entire file was decompressed, and check its CRC.
    if ((out_buf_ofs != file_stat.m_uncomp_size) ||
        (file_crc32 != file_stat.m_crc32))
      status = TINFL_STATUS_FAILED;
  }
  // pRead_buf points into the archive itself for in-memory archives; only
  // free it when it was allocated here.
  if (!pZip->m_pState->m_pMem)
    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
  if (pWrite_buf)
    pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);
  return status == TINFL_STATUS_DONE;
}

/* Locates pFilename and streams its data to pCallback. */
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip,
                                               const char *pFilename,
                                               mz_file_write_func pCallback,
                                               void *pOpaque, mz_uint flags) {
  int file_index = mz_zip_reader_locate_file(pZip, pFilename, NULL, flags);
  if (file_index < 0)
    return MZ_FALSE;
  return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque,
                                           flags);
}

#ifndef MINIZ_NO_STDIO
/* mz_file_write_func adapter that appends to a stdio FILE* (pOpaque). The
   offset is ignored because extraction writes sequentially. */
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs,
                                         const void *pBuf, size_t n) {
  (void)ofs;
  return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}

/* Extracts file_index to a new disk file pDst_filename, then (when time
   support is compiled in) stamps it with the entry's modification time. */
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index,
                                      const char *pDst_filename,
                                      mz_uint flags) {
  mz_bool status;
  mz_zip_archive_file_stat file_stat;
  MZ_FILE *pFile;
  if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
    return MZ_FALSE;
  pFile = MZ_FOPEN(pDst_filename,
"wb"); if (!pFile) return MZ_FALSE; status = mz_zip_reader_extract_to_callback( pZip, file_index, mz_zip_file_write_callback, pFile, flags); if (MZ_FCLOSE(pFile) == EOF) return MZ_FALSE; #ifndef MINIZ_NO_TIME if (status) mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time); #endif return status; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_end(mz_zip_archive *pZip) { if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING)) return MZ_FALSE; if (pZip->m_pState) { mz_zip_internal_state *pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO pZip->m_pFree(pZip->m_pAlloc_opaque, pState); } pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags) { int file_index = mz_zip_reader_locate_file(pZip, pArchive_filename, NULL, flags); if (file_index < 0) return MZ_FALSE; return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags); } #endif // ------------------- .ZIP archive writing #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS static void mz_write_le16(mz_uint8 *p, mz_uint16 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); } static void mz_write_le32(mz_uint8 *p, mz_uint32 v) { p[0] = (mz_uint8)v; p[1] = (mz_uint8)(v >> 8); p[2] = (mz_uint8)(v >> 16); p[3] = (mz_uint8)(v >> 24); } #define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v)) #define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v)) mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size) { if ((!pZip) || 
(pZip->m_pState) || (!pZip->m_pWrite) ||
      (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
    return MZ_FALSE;
  if (pZip->m_file_offset_alignment) {
    // Ensure user specified file offset alignment is a power of 2.
    if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
      return MZ_FALSE;
  }
  // Fall back to the default allocator callbacks where none were provided.
  if (!pZip->m_pAlloc)
    pZip->m_pAlloc = def_alloc_func;
  if (!pZip->m_pFree)
    pZip->m_pFree = def_free_func;
  if (!pZip->m_pRealloc)
    pZip->m_pRealloc = def_realloc_func;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_archive_size = existing_size;
  pZip->m_central_directory_file_ofs = 0;
  pZip->m_total_files = 0;
  if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(
                   pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
    return MZ_FALSE;
  memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir,
                                sizeof(mz_uint8));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets,
                                sizeof(mz_uint32));
  MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets,
                                sizeof(mz_uint32));
  return MZ_TRUE;
}

/* m_pWrite callback for heap-backed archives: grows the memory block
   (doubling capacity) as needed and copies n bytes at file_ofs. Returns the
   bytes written, or 0 on failure / zero-length writes. */
static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_zip_internal_state *pState = pZip->m_pState;
  mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);
  // Refuse growth beyond what a 32-bit size_t can represent.
#ifdef _MSC_VER
  if ((!n) ||
      ((0, sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#else
  if ((!n) ||
      ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF)))
#endif
    return 0;
  if (new_size > pState->m_mem_capacity) {
    void *pNew_block;
    size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);
    while (new_capacity < new_size)
      new_capacity *= 2;
    if (NULL == (pNew_block = pZip->m_pRealloc(
                     pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
      return 0;
    pState->m_pMem = pNew_block;
    pState->m_mem_capacity = new_capacity;
  }
  memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
  pState->m_mem_size = (size_t)new_size;
  return n;
}

/* Initializes a writer that builds the archive in a growable heap block,
   optionally reserving space at the beginning and/or pre-allocating. */
mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip,
                                size_t size_to_reserve_at_beginning,
                                size_t initial_allocation_size) {
  pZip->m_pWrite = mz_zip_heap_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size,
                                             size_to_reserve_at_beginning))) {
    if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(
                     pZip->m_pAlloc_opaque, 1, initial_allocation_size))) {
      mz_zip_writer_end(pZip);
      return MZ_FALSE;
    }
    pZip->m_pState->m_mem_capacity = initial_allocation_size;
  }
  return MZ_TRUE;
}

#ifndef MINIZ_NO_STDIO
/* m_pWrite callback for stdio-backed archives: seeks only when the current
   position differs from file_ofs, then writes n bytes. */
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs,
                                     const void *pBuf, size_t n) {
  mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
  mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
  if (((mz_int64)file_ofs < 0) ||
      (((cur_ofs != (mz_int64)file_ofs)) &&
       (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    return 0;
  return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}

/* Creates pFilename for writing and initializes a writer over it. Any
   reserved prefix is zero-filled immediately so later writes can seek past
   it. */
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename,
                                mz_uint64 size_to_reserve_at_beginning) {
  MZ_FILE *pFile;
  pZip->m_pWrite = mz_zip_file_write_func;
  pZip->m_pIO_opaque = pZip;
  if (!mz_zip_writer_init(pZip, size_to_reserve_at_beginning))
    return MZ_FALSE;
  if (NULL == (pFile = MZ_FOPEN(pFilename, "wb"))) {
    mz_zip_writer_end(pZip);
    return MZ_FALSE;
  }
  pZip->m_pState->m_pFile = pFile;
  if (size_to_reserve_at_beginning) {
    mz_uint64 cur_ofs = 0;
    char buf[4096];
    MZ_CLEAR_OBJ(buf);
    // Zero-fill the reserved region in 4 KB chunks.
    do {
      size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
      if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n) {
        mz_zip_writer_end(pZip);
        return MZ_FALSE;
      }
      cur_ofs += n;
      size_to_reserve_at_beginning -= n;
    } while (size_to_reserve_at_beginning);
  }
  return MZ_TRUE;
}
#endif  // #ifndef MINIZ_NO_STDIO

/* Converts an archive opened for reading into one that can be appended to.
   pFilename is required (and used to reopen read/write) only for stdio-backed
   archives; in-memory and user-callback archives are switched in place. */
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip,
                                       const char *pFilename) {
  mz_zip_internal_state *pState;
  if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    return MZ_FALSE;
  // No sense in trying to write to an archive that's already at the support max
  // size
  if ((pZip->m_total_files == 0xFFFF) ||
      ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE +
        MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > 0xFFFFFFFF))
    return MZ_FALSE;
  pState = pZip->m_pState;
  if (pState->m_pFile) {
#ifdef MINIZ_NO_STDIO
    pFilename;  // reference the parameter to silence unused warnings
    return MZ_FALSE;
#else
    // Archive is being read from stdio - try to reopen as writable.
    if (pZip->m_pIO_opaque != pZip)
      return MZ_FALSE;
    if (!pFilename)
      return MZ_FALSE;
    pZip->m_pWrite = mz_zip_file_write_func;
    if (NULL ==
        (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile))) {
      // The mz_zip_archive is now in a bogus state because pState->m_pFile is
      // NULL, so just close it.
      mz_zip_reader_end(pZip);
      return MZ_FALSE;
    }
#endif  // #ifdef MINIZ_NO_STDIO
  } else if (pState->m_pMem) {
    // Archive lives in a memory block. Assume it's from the heap that we can
    // resize using the realloc callback.
    if (pZip->m_pIO_opaque != pZip)
      return MZ_FALSE;
    pState->m_mem_capacity = pState->m_mem_size;
    pZip->m_pWrite = mz_zip_heap_write_func;
  }
  // Archive is being read via a user provided read function - make sure the
  // user has specified a write function too.
  else if (!pZip->m_pWrite)
    return MZ_FALSE;
  // Start writing new files at the archive's current central directory
  // location (the old central directory will be rewritten at the end).
  pZip->m_archive_size = pZip->m_central_directory_file_ofs;
  pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;
  pZip->m_central_directory_file_ofs = 0;
  return MZ_TRUE;
}

/* Adds a memory buffer as a new archive entry with no comment and default
   attributes; thin wrapper over mz_zip_writer_add_mem_ex. */
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name,
                              const void *pBuf, size_t buf_size,
                              mz_uint level_and_flags) {
  return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0,
                                  level_and_flags, 0, 0);
}

/* Bookkeeping passed through tdefl's put-buf callback while compressing a new
   entry: destination archive, current write offset, and compressed-bytes
   running total. */
typedef struct {
  mz_zip_archive *m_pZip;
  mz_uint64 m_cur_archive_file_ofs;
  mz_uint64 m_comp_size;
} mz_zip_writer_add_state;

/* tdefl output callback: appends len compressed bytes to the archive and
   advances the offset/size counters. */
static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len,
                                                  void *pUser) {
  mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
  if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque,
                                    pState->m_cur_archive_file_ofs, pBuf,
                                    len) != len)
    return MZ_FALSE;
  pState->m_cur_archive_file_ofs += len;
  pState->m_comp_size += len;
  return MZ_TRUE;
}

/* Serializes a local directory header into pDst
   (MZ_ZIP_LOCAL_DIR_HEADER_SIZE bytes), little-endian regardless of host. */
static mz_bool mz_zip_writer_create_local_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size,
    mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags,
    mz_uint16 dos_time, mz_uint16 dos_date) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
  // "Version needed to extract": 2.0 for deflate, 0 for stored.
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, comp_size);
  MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, uncomp_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
  MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
  return MZ_TRUE;
}

/* Serializes a central directory header into pDst
   (MZ_ZIP_CENTRAL_DIR_HEADER_SIZE bytes); continues past this chunk. */
static mz_bool mz_zip_writer_create_central_dir_header(
    mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size,
    mz_uint16 extra_size, mz_uint16 comment_size, mz_uint64 uncomp_size,
    mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method,
    mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
    mz_uint64 local_header_ofs, mz_uint32 ext_attributes) {
  (void)pZip;
  memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
  MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
  MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ?
20 : 0); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, comp_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, uncomp_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size); MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes); MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_header_ofs); return MZ_TRUE; } static mz_bool mz_zip_writer_add_to_central_dir( mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size, const void *pExtra, mz_uint16 extra_size, const void *pComment, mz_uint16 comment_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date, mz_uint64 local_header_ofs, mz_uint32 ext_attributes) { mz_zip_internal_state *pState = pZip->m_pState; mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size; size_t orig_central_dir_size = pState->m_central_dir.m_size; mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; // No zip64 support yet if ((local_header_ofs > 0xFFFFFFFF) || (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size + comment_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_central_dir_header( pZip, central_dir_header, filename_size, extra_size, comment_size, uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date, local_header_ofs, ext_attributes)) return MZ_FALSE; if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) || 
(!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename,
                               filename_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra,
                               extra_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment,
                               comment_size)) ||
      (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets,
                               &central_dir_ofs, 1))) {
    // Try to push the central directory array back into its original state.
    mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size,
                        MZ_FALSE);
    return MZ_FALSE;
  }

  return MZ_TRUE;
}

// Returns MZ_TRUE if pArchive_name is acceptable as a name inside a ZIP.
static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name) {
  // Basic ZIP archive filename validity checks: Valid filenames cannot start
  // with a forward slash, cannot contain a drive letter, and cannot use
  // DOS-style backward slashes.
  if (*pArchive_name == '/') return MZ_FALSE;
  while (*pArchive_name) {
    if ((*pArchive_name == '\\') || (*pArchive_name == ':')) return MZ_FALSE;
    pArchive_name++;
  }
  return MZ_TRUE;
}

// Returns how many padding bytes must be written before the next local file
// header so it lands on an m_file_offset_alignment boundary; 0 when alignment
// is disabled. The bit-masking assumes m_file_offset_alignment is a power of
// two.
static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(
    mz_zip_archive *pZip) {
  mz_uint32 n;
  if (!pZip->m_file_offset_alignment) return 0;
  n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
  return (pZip->m_file_offset_alignment - n) &
         (pZip->m_file_offset_alignment - 1);
}

// Writes n zero bytes to the archive starting at cur_file_ofs, in 4 KB
// chunks, using the archive's write callback.
static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip,
                                         mz_uint64 cur_file_ofs, mz_uint32 n) {
  char buf[4096];
  memset(buf, 0, MZ_MIN(sizeof(buf), n));
  while (n) {
    mz_uint32 s = MZ_MIN(sizeof(buf), n);
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
      return MZ_FALSE;
    cur_file_ofs += s;
    n -= s;
  }
  return MZ_TRUE;
}

// Adds a memory buffer to the archive under pArchive_name, compressing it at
// the requested level unless level 0 / MZ_ZIP_FLAG_COMPRESSED_DATA requests
// that the bytes be stored as-is (body continues on the following lines).
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip,
                                 const char *pArchive_name, const void *pBuf,
                                 size_t buf_size, const void *pComment,
                                 mz_uint16 comment_size,
                                 mz_uint level_and_flags, mz_uint64 uncomp_size,
                                 mz_uint32 uncomp_crc32) {
  mz_uint16 method = 0, dos_time = 0, dos_date = 0;
  mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
  mz_uint64
local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; tdefl_compressor *pComp = NULL; mz_bool store_data_uncompressed; mz_zip_internal_state *pState; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; store_data_uncompressed = ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)); if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) || (!pArchive_name) || ((comment_size) && (!pComment)) || (pZip->m_total_files == 0xFFFF) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; pState = pZip->m_pState; if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size)) return MZ_FALSE; // No zip64 support yet if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; #ifndef MINIZ_NO_TIME { time_t cur_time; time(&cur_time); mz_zip_time_to_dos_time(cur_time, &dos_time, &dos_date); } #endif // #ifndef MINIZ_NO_TIME archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/')) { // Set DOS Subdirectory attribute bit. ext_attributes |= 0x10; // Subdirectories cannot contain data. if ((buf_size) || (uncomp_size)) return MZ_FALSE; } // Try to do any allocations before writing to the archive, so if an // allocation fails the file remains unmodified. (A good idea if we're doing // an in-place modification.) 
if ((!mz_zip_array_ensure_room( pZip, &pState->m_central_dir, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size)) || (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1))) return MZ_FALSE; if ((!store_data_uncompressed) && (buf_size)) { if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)))) return MZ_FALSE; } if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) { uncomp_crc32 = (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size); uncomp_size = buf_size; if (uncomp_size <= 3) { level = 0; store_data_uncompressed = MZ_TRUE; } } if (store_data_uncompressed) { if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, buf_size) != buf_size) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } cur_archive_file_ofs += buf_size; comp_size = buf_size; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) method = MZ_DEFLATED; } else if (buf_size) { mz_zip_writer_add_state state; state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) || (tdefl_compress_buffer(pComp, pBuf, 
buf_size, TDEFL_FINISH) != TDEFL_STATUS_DONE)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pComp = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes; mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0; mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = 0, comp_size = 0; size_t archive_name_size; mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE]; MZ_FILE *pSrc_file = NULL; if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; level = level_and_flags & 0xF; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA) return 
MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; archive_name_size = strlen(pArchive_name); if (archive_name_size > 0xFFFF) return MZ_FALSE; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + comment_size + archive_name_size) > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_get_file_modified_time(pSrc_filename, &dos_time, &dos_date)) return MZ_FALSE; pSrc_file = MZ_FOPEN(pSrc_filename, "rb"); if (!pSrc_file) return MZ_FALSE; MZ_FSEEK64(pSrc_file, 0, SEEK_END); uncomp_size = MZ_FTELL64(pSrc_file); MZ_FSEEK64(pSrc_file, 0, SEEK_SET); if (uncomp_size > 0xFFFFFFFF) { // No zip64 support yet MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (uncomp_size <= 3) level = 0; if (!mz_zip_writer_write_zeros( pZip, cur_archive_file_ofs, num_alignment_padding_bytes + sizeof(local_dir_header))) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } local_dir_header_ofs += num_alignment_padding_bytes; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } cur_archive_file_ofs += num_alignment_padding_bytes + sizeof(local_dir_header); MZ_CLEAR_OBJ(local_dir_header); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } cur_archive_file_ofs += archive_name_size; if (uncomp_size) { mz_uint64 uncomp_remaining = uncomp_size; void *pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE); if (!pRead_buf) { MZ_FCLOSE(pSrc_file); return MZ_FALSE; } if (!level) { while (uncomp_remaining) { mz_uint n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining); if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) || (pZip->m_pWrite(pZip->m_pIO_opaque, 
cur_archive_file_ofs, pRead_buf, n) != n)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n); uncomp_remaining -= n; cur_archive_file_ofs += n; } comp_size = uncomp_size; } else { mz_bool result = MZ_FALSE; mz_zip_writer_add_state state; tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor)); if (!pComp) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } state.m_pZip = pZip; state.m_cur_archive_file_ofs = cur_archive_file_ofs; state.m_comp_size = 0; if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params( level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) { pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } for (;;) { size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining, (mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE); tdefl_status status; if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size) break; uncomp_crc32 = (mz_uint32)mz_crc32( uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size); uncomp_remaining -= in_buf_size; status = tdefl_compress_buffer( pComp, pRead_buf, in_buf_size, uncomp_remaining ? 
TDEFL_NO_FLUSH : TDEFL_FINISH); if (status == TDEFL_STATUS_DONE) { result = MZ_TRUE; break; } else if (status != TDEFL_STATUS_OKAY) break; } pZip->m_pFree(pZip->m_pAlloc_opaque, pComp); if (!result) { pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); MZ_FCLOSE(pSrc_file); return MZ_FALSE; } comp_size = state.m_comp_size; cur_archive_file_ofs = state.m_cur_archive_file_ofs; method = MZ_DEFLATED; } pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf); } MZ_FCLOSE(pSrc_file); pSrc_file = NULL; // no zip64 support yet if ((comp_size > 0xFFFFFFFF) || (cur_archive_file_ofs > 0xFFFFFFFF)) return MZ_FALSE; if (!mz_zip_writer_create_local_dir_header( pZip, local_dir_header, (mz_uint16)archive_name_size, 0, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date)) return MZ_FALSE; if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header)) return MZ_FALSE; if (!mz_zip_writer_add_to_central_dir( pZip, pArchive_name, (mz_uint16)archive_name_size, NULL, 0, pComment, comment_size, uncomp_size, comp_size, uncomp_crc32, method, 0, dos_time, dos_date, local_dir_header_ofs, ext_attributes)) return MZ_FALSE; pZip->m_total_files++; pZip->m_archive_size = cur_archive_file_ofs; return MZ_TRUE; } #endif // #ifndef MINIZ_NO_STDIO mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint file_index) { mz_uint n, bit_flags, num_alignment_padding_bytes; mz_uint64 comp_bytes_remaining, local_dir_header_ofs; mz_uint64 cur_src_file_ofs, cur_dst_file_ofs; mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)]; mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32; mz_uint8 central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE]; size_t orig_central_dir_size; mz_zip_internal_state *pState; void *pBuf; const mz_uint8 *pSrc_central_header; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; if 
(NULL == (pSrc_central_header = mz_zip_reader_get_cdh(pSource_zip, file_index))) return MZ_FALSE; pState = pZip->m_pState; num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip); // no zip64 support yet if ((pZip->m_total_files == 0xFFFF) || ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; cur_src_file_ofs = MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS); cur_dst_file_ofs = pZip->m_archive_size; if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG) return MZ_FALSE; cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs, num_alignment_padding_bytes)) return MZ_FALSE; cur_dst_file_ofs += num_alignment_padding_bytes; local_dir_header_ofs = cur_dst_file_ofs; if (pZip->m_file_offset_alignment) { MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0); } if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE) return MZ_FALSE; cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE; n = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS); comp_bytes_remaining = n + MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS); if (NULL == (pBuf = pZip->m_pAlloc( pZip->m_pAlloc_opaque, 1, (size_t)MZ_MAX(sizeof(mz_uint32) * 4, MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining))))) return MZ_FALSE; while (comp_bytes_remaining) { n = (mz_uint)MZ_MIN((mz_uint)MZ_ZIP_MAX_IO_BUF_SIZE, comp_bytes_remaining); if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); 
return MZ_FALSE; } cur_src_file_ofs += n; if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_dst_file_ofs += n; comp_bytes_remaining -= n; } bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS); if (bit_flags & 8) { // Copy data descriptor if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == 0x08074b50) ? 4 : 3); if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n) { pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); return MZ_FALSE; } cur_src_file_ofs += n; cur_dst_file_ofs += n; } pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf); // no zip64 support yet if (cur_dst_file_ofs > 0xFFFFFFFF) return MZ_FALSE; orig_central_dir_size = pState->m_central_dir.m_size; memcpy(central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE); MZ_WRITE_LE32(central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_dir_header_ofs); if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) return MZ_FALSE; n = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS); if (!mz_zip_array_push_back( pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } if (pState->m_central_dir.m_size > 0xFFFFFFFF) return MZ_FALSE; n = (mz_uint32)orig_central_dir_size; if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1)) { mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE); return MZ_FALSE; } pZip->m_total_files++; pZip->m_archive_size = 
cur_dst_file_ofs; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_uint64 central_dir_ofs, central_dir_size; mz_uint8 hdr[MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE]; if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING)) return MZ_FALSE; pState = pZip->m_pState; // no zip64 support yet if ((pZip->m_total_files > 0xFFFF) || ((pZip->m_archive_size + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > 0xFFFFFFFF)) return MZ_FALSE; central_dir_ofs = 0; central_dir_size = 0; if (pZip->m_total_files) { // Write central directory central_dir_ofs = pZip->m_archive_size; central_dir_size = pState->m_central_dir.m_size; pZip->m_central_directory_file_ofs = central_dir_ofs; if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs, pState->m_central_dir.m_p, (size_t)central_dir_size) != central_dir_size) return MZ_FALSE; pZip->m_archive_size += central_dir_size; } // Write end of central directory record MZ_CLEAR_OBJ(hdr); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, pZip->m_total_files); MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, central_dir_size); MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, central_dir_ofs); if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, sizeof(hdr)) != sizeof(hdr)) return MZ_FALSE; #ifndef MINIZ_NO_STDIO if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF)) return MZ_FALSE; #endif // #ifndef MINIZ_NO_STDIO pZip->m_archive_size += sizeof(hdr); pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED; return MZ_TRUE; } mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **pBuf, size_t *pSize) { if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pSize)) return MZ_FALSE; if (pZip->m_pWrite != mz_zip_heap_write_func) return MZ_FALSE; if 
(!mz_zip_writer_finalize_archive(pZip)) return MZ_FALSE; *pBuf = pZip->m_pState->m_pMem; *pSize = pZip->m_pState->m_mem_size; pZip->m_pState->m_pMem = NULL; pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0; return MZ_TRUE; } mz_bool mz_zip_writer_end(mz_zip_archive *pZip) { mz_zip_internal_state *pState; mz_bool status = MZ_TRUE; if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))) return MZ_FALSE; pState = pZip->m_pState; pZip->m_pState = NULL; mz_zip_array_clear(pZip, &pState->m_central_dir); mz_zip_array_clear(pZip, &pState->m_central_dir_offsets); mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets); #ifndef MINIZ_NO_STDIO if (pState->m_pFile) { MZ_FCLOSE(pState->m_pFile); pState->m_pFile = NULL; } #endif // #ifndef MINIZ_NO_STDIO if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem)) { pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem); pState->m_pMem = NULL; } pZip->m_pFree(pZip->m_pAlloc_opaque, pState); pZip->m_zip_mode = MZ_ZIP_MODE_INVALID; return status; } #ifndef MINIZ_NO_STDIO mz_bool mz_zip_add_mem_to_archive_file_in_place( const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags) { mz_bool status, created_new_archive = MZ_FALSE; mz_zip_archive zip_archive; struct MZ_FILE_STAT_STRUCT file_stat; MZ_CLEAR_OBJ(zip_archive); if ((int)level_and_flags < 0) level_and_flags = MZ_DEFAULT_LEVEL; if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION)) return MZ_FALSE; if (!mz_zip_writer_validate_archive_name(pArchive_name)) return MZ_FALSE; if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0) { // Create a new archive. 
if (!mz_zip_writer_init_file(&zip_archive, pZip_filename, 0)) return MZ_FALSE; created_new_archive = MZ_TRUE; } else { // Append to an existing archive. if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return MZ_FALSE; if (!mz_zip_writer_init_from_reader(&zip_archive, pZip_filename)) { mz_zip_reader_end(&zip_archive); return MZ_FALSE; } } status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0); // Always finalize, even if adding failed for some reason, so we have a valid // central directory. (This may not always succeed, but we can try.) if (!mz_zip_writer_finalize_archive(&zip_archive)) status = MZ_FALSE; if (!mz_zip_writer_end(&zip_archive)) status = MZ_FALSE; if ((!status) && (created_new_archive)) { // It's a new archive and something went wrong, so just delete it. int ignoredStatus = MZ_DELETE_FILE(pZip_filename); (void)ignoredStatus; } return status; } void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags) { int file_index; mz_zip_archive zip_archive; void *p = NULL; if (pSize) *pSize = 0; if ((!pZip_filename) || (!pArchive_name)) return NULL; MZ_CLEAR_OBJ(zip_archive); if (!mz_zip_reader_init_file( &zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY)) return NULL; if ((file_index = mz_zip_reader_locate_file(&zip_archive, pArchive_name, NULL, flags)) >= 0) p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags); mz_zip_reader_end(&zip_archive); return p; } #endif // #ifndef MINIZ_NO_STDIO #endif // #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS #endif // #ifndef MINIZ_NO_ARCHIVE_APIS #ifdef __cplusplus } #endif #ifdef _MSC_VER #pragma warning(pop) #endif #endif // MINIZ_HEADER_FILE_ONLY /* This is free and unencumbered software released into the public domain. 
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.

In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

For more information, please refer to <http://unlicense.org/>
*/
// ---------------------- end of miniz ----------------------------------------

#ifdef __clang__
#pragma clang diagnostic pop
#endif

}  // namespace miniz
#else

// Reuse MINIZ_LITTLE_ENDIAN macro
#if defined(__sparcv9)
// Big endian
#else
#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
// Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian.
#define MINIZ_LITTLE_ENDIAN 1 #endif #endif #endif // TINYEXR_USE_MINIZ // static bool IsBigEndian(void) { // union { // unsigned int i; // char c[4]; // } bint = {0x01020304}; // // return bint.c[0] == 1; //} static void SetErrorMessage(const std::string &msg, const char **err) { if (err) { #ifdef _WIN32 (*err) = _strdup(msg.c_str()); #else (*err) = strdup(msg.c_str()); #endif } } static const int kEXRVersionSize = 8; static void cpy2(unsigned short *dst_val, const unsigned short *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; } static void swap2(unsigned short *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned short tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[1]; dst[1] = src[0]; #endif } #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-function" #endif #ifdef __GNUC__ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-function" #endif static void cpy4(int *dst_val, const int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(unsigned int *dst_val, const unsigned int *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } static void cpy4(float *dst_val, const float *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; } #ifdef __clang__ 
#pragma clang diagnostic pop #endif #ifdef __GNUC__ #pragma GCC diagnostic pop #endif static void swap4(unsigned int *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else unsigned int tmp = *val; unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[3]; dst[1] = src[2]; dst[2] = src[1]; dst[3] = src[0]; #endif } #if 0 static void cpy8(tinyexr::tinyexr_uint64 *dst_val, const tinyexr::tinyexr_uint64 *src_val) { unsigned char *dst = reinterpret_cast<unsigned char *>(dst_val); const unsigned char *src = reinterpret_cast<const unsigned char *>(src_val); dst[0] = src[0]; dst[1] = src[1]; dst[2] = src[2]; dst[3] = src[3]; dst[4] = src[4]; dst[5] = src[5]; dst[6] = src[6]; dst[7] = src[7]; } #endif static void swap8(tinyexr::tinyexr_uint64 *val) { #ifdef MINIZ_LITTLE_ENDIAN (void)val; #else tinyexr::tinyexr_uint64 tmp = (*val); unsigned char *dst = reinterpret_cast<unsigned char *>(val); unsigned char *src = reinterpret_cast<unsigned char *>(&tmp); dst[0] = src[7]; dst[1] = src[6]; dst[2] = src[5]; dst[3] = src[4]; dst[4] = src[3]; dst[5] = src[2]; dst[6] = src[1]; dst[7] = src[0]; #endif } // https://gist.github.com/rygorous/2156668 // Reuse MINIZ_LITTLE_ENDIAN flag from miniz. 
// Bit-level view of an IEEE-754 single-precision float, used by the
// half<->float conversion routines below. Bit-field order depends on the
// host's endianness.
union FP32 {
  unsigned int u;
  float f;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 23;
    unsigned int Exponent : 8;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 8;
    unsigned int Mantissa : 23;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wpadded"
#endif

// Bit-level view of an IEEE-754 half-precision (binary16) value.
union FP16 {
  unsigned short u;
  struct {
#if MINIZ_LITTLE_ENDIAN
    unsigned int Mantissa : 10;
    unsigned int Exponent : 5;
    unsigned int Sign : 1;
#else
    unsigned int Sign : 1;
    unsigned int Exponent : 5;
    unsigned int Mantissa : 10;
#endif
  } s;
};

#ifdef __clang__
#pragma clang diagnostic pop
#endif

// Convert half -> float using the branch-light bit-manipulation technique
// from https://gist.github.com/rygorous/2156668. Handles Inf/NaN and
// denormals via the `magic` renormalization constant.
static FP32 half_to_float(FP16 h) {
  static const FP32 magic = {113 << 23};
  static const unsigned int shifted_exp = 0x7c00 << 13;  // exponent mask after shift
  FP32 o;

  o.u = (h.u & 0x7fffU) << 13U;           // exponent/mantissa bits
  unsigned int exp_ = shifted_exp & o.u;  // just the exponent
  o.u += (127 - 15) << 23;                // exponent adjust

  // handle exponent special cases
  if (exp_ == shifted_exp)    // Inf/NaN?
    o.u += (128 - 16) << 23;  // extra exp adjust
  else if (exp_ == 0)         // Zero/Denormal?
  {
    o.u += 1 << 23;  // extra exp adjust
    o.f -= magic.f;  // renormalize
  }

  o.u |= (h.u & 0x8000U) << 16U;  // sign bit
  return o;
}

// Convert float -> half with full special-case handling: signed zero,
// denormal underflow, Inf/NaN, overflow to infinity, and round-to-nearest.
static FP16 float_to_half_full(FP32 f) {
  FP16 o = {0};

  // Based on ISPC reference code (with minor modifications)
  if (f.s.Exponent == 0)  // Signed zero/denormal (which will underflow)
    o.s.Exponent = 0;
  else if (f.s.Exponent == 255)  // Inf or NaN (all exponent bits set)
  {
    o.s.Exponent = 31;
    o.s.Mantissa = f.s.Mantissa ? 0x200 : 0;  // NaN->qNaN and Inf->Inf
  } else                                      // Normalized number
  {
    // Exponent unbias the single, then bias the halfp
    int newexp = f.s.Exponent - 127 + 15;
    if (newexp >= 31)  // Overflow, return signed infinity
      o.s.Exponent = 31;
    else if (newexp <= 0)  // Underflow
    {
      if ((14 - newexp) <= 24)  // Mantissa might be non-zero
      {
        unsigned int mant = f.s.Mantissa | 0x800000;  // Hidden 1 bit
        o.s.Mantissa = mant >> (14 - newexp);
        if ((mant >> (13 - newexp)) & 1)  // Check for rounding
          o.u++;  // Round, might overflow into exp bit, but this is OK
      }
    } else {
      o.s.Exponent = static_cast<unsigned int>(newexp);
      o.s.Mantissa = f.s.Mantissa >> 13;
      if (f.s.Mantissa & 0x1000)  // Check for rounding
        o.u++;                    // Round, might overflow to inf, this is OK
    }
  }

  o.s.Sign = f.s.Sign;
  return o;
}

// NOTE: From OpenEXR code
// #define IMF_INCREASING_Y  0
// #define IMF_DECREASING_Y  1
// #define IMF_RAMDOM_Y  2
//
// #define IMF_NO_COMPRESSION  0
// #define IMF_RLE_COMPRESSION 1
// #define IMF_ZIPS_COMPRESSION  2
// #define IMF_ZIP_COMPRESSION 3
// #define IMF_PIZ_COMPRESSION 4
// #define IMF_PXR24_COMPRESSION 5
// #define IMF_B44_COMPRESSION 6
// #define IMF_B44A_COMPRESSION  7

#ifdef __clang__
#pragma clang diagnostic push
#if __has_warning("-Wzero-as-null-pointer-constant")
#pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant"
#endif
#endif

// Read a NUL-terminated string starting at `ptr`, scanning at most `len`
// bytes. On success stores the string in *s and returns a pointer just past
// the terminating '\0'; returns NULL (and clears *s) if no terminator was
// found within `len` bytes.
static const char *ReadString(std::string *s, const char *ptr, size_t len) {
  // Read until NULL(\0).
  const char *p = ptr;
  const char *q = ptr;
  while ((size_t(q - ptr) < len) && (*q) != 0) {
    q++;
  }

  if (size_t(q - ptr) >= len) {
    (*s) = std::string();
    return NULL;
  }

  (*s) = std::string(p, q);

  return q + 1;  // skip '\0'
}

// Parse one EXR attribute record (name '\0' type '\0' int32 length, payload)
// from `marker`/`size`. On success fills name/type/data, sets *marker_size to
// the number of bytes consumed and returns true. Returns false on truncated
// or malformed input. A zero-length payload is accepted only for "string"
// attributes (stored as a single '\0' byte).
static bool ReadAttribute(std::string *name, std::string *type,
                          std::vector<unsigned char> *data, size_t *marker_size,
                          const char *marker, size_t size) {
  size_t name_len = strnlen(marker, size);
  if (name_len == size) {
    // String does not have a terminating character.
    return false;
  }
  *name = std::string(marker, name_len);

  marker += name_len + 1;
  size -= name_len + 1;

  size_t type_len = strnlen(marker, size);
  if (type_len == size) {
    return false;
  }
  *type = std::string(marker, type_len);

  marker += type_len + 1;
  size -= type_len + 1;

  if (size < sizeof(uint32_t)) {
    return false;
  }

  uint32_t data_len;
  memcpy(&data_len, marker, sizeof(uint32_t));
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

  if (data_len == 0) {
    if ((*type).compare("string") == 0) {
      // Accept empty string attribute.

      marker += sizeof(uint32_t);
      size -= sizeof(uint32_t);

      *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t);

      data->resize(1);
      (*data)[0] = '\0';

      return true;
    } else {
      return false;
    }
  }

  marker += sizeof(uint32_t);
  size -= sizeof(uint32_t);

  if (size < data_len) {
    return false;
  }

  data->resize(static_cast<size_t>(data_len));
  memcpy(&data->at(0), marker, static_cast<size_t>(data_len));

  *marker_size = name_len + 1 + type_len + 1 + sizeof(uint32_t) + data_len;
  return true;
}

// Append one attribute record to `out` in EXR wire format:
// name '\0' type '\0' little-endian int32 length, then the raw payload.
static void WriteAttributeToMemory(std::vector<unsigned char> *out,
                                   const char *name, const char *type,
                                   const unsigned char *data, int len) {
  out->insert(out->end(), name, name + strlen(name) + 1);
  out->insert(out->end(), type, type + strlen(type) + 1);

  int outLen = len;
  tinyexr::swap4(reinterpret_cast<unsigned int *>(&outLen));
  out->insert(out->end(), reinterpret_cast<unsigned char *>(&outLen),
              reinterpret_cast<unsigned char *>(&outLen) + sizeof(int));
  out->insert(out->end(), data, data + len);
}

// One entry of the EXR "chlist" attribute.
typedef struct {
  std::string name;  // less than 255 bytes long
  int pixel_type;    // TINYEXR_PIXELTYPE_* (stored as raw int from the file)
  int x_sampling;    // horizontal subsampling factor
  int y_sampling;    // vertical subsampling factor
  unsigned char p_linear;
  unsigned char pad[3];
} ChannelInfo;

// All header fields tinyexr parses from an EXR part, plus any unrecognized
// attributes (kept verbatim in `attributes`).
struct HeaderInfo {
  std::vector<tinyexr::ChannelInfo> channels;
  std::vector<EXRAttribute> attributes;

  int data_window[4];
  int line_order;
  int display_window[4];
  float screen_window_center[2];
  float screen_window_width;
  float pixel_aspect_ratio;

  int chunk_count;

  // Tiled format
  int tile_size_x;
  int tile_size_y;
  int tile_level_mode;
  int tile_rounding_mode;

  unsigned int header_len;

  int compression_type;

  // Reset every field to its zero/default state before parsing a header.
  void clear() {
    channels.clear();
    attributes.clear();

    data_window[0] = 0;
    data_window[1] = 0;
    data_window[2] = 0;
    data_window[3] = 0;
    line_order = 0;
    display_window[0] = 0;
    display_window[1] = 0;
    display_window[2] = 0;
    display_window[3] = 0;
    screen_window_center[0] = 0.0f;
    screen_window_center[1] = 0.0f;
    screen_window_width = 0.0f;
    pixel_aspect_ratio = 0.0f;

    chunk_count = 0;

    // Tiled format
    tile_size_x = 0;
    tile_size_y = 0;
    tile_level_mode = 0;
    tile_rounding_mode = 0;

    header_len = 0;
    compression_type = 0;
  }
};

// Parse the payload of a "chlist" attribute into `channels`. The list is a
// sequence of (name '\0', int pixel_type, uchar p_linear, uchar[3] reserved,
// int x_sampling, int y_sampling) records terminated by a single '\0' byte.
// Returns false on truncated input (bounds checks for issue #51).
static bool ReadChannelInfo(std::vector<ChannelInfo> &channels,
                            const std::vector<unsigned char> &data) {
  const char *p = reinterpret_cast<const char *>(&data.at(0));

  for (;;) {
    if ((*p) == 0) {
      break;
    }
    ChannelInfo info;

    tinyexr_int64 data_len = static_cast<tinyexr_int64>(data.size()) -
                             (p - reinterpret_cast<const char *>(data.data()));
    if (data_len < 0) {
      return false;
    }

    p = ReadString(&info.name, p, size_t(data_len));
    if ((p == NULL) && (info.name.empty())) {
      // Buffer overrun. Issue #51.
      return false;
    }

    // Each record carries 16 fixed bytes after the name; make sure they fit.
    const unsigned char *data_end =
        reinterpret_cast<const unsigned char *>(p) + 16;
    if (data_end >= (data.data() + data.size())) {
      return false;
    }

    memcpy(&info.pixel_type, p, sizeof(int));
    p += 4;
    info.p_linear = static_cast<unsigned char>(p[0]);  // uchar
    p += 1 + 3;                                        // reserved: uchar[3]
    memcpy(&info.x_sampling, p, sizeof(int));          // int
    p += 4;
    memcpy(&info.y_sampling, p, sizeof(int));          // int
    p += 4;

    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.pixel_type));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.x_sampling));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&info.y_sampling));

    channels.push_back(info);
  }

  return true;
}

// Serialize `channels` into the wire format described above (ReadChannelInfo)
// including the trailing '\0' list terminator.
static void WriteChannelInfo(std::vector<unsigned char> &data,
                             const std::vector<ChannelInfo> &channels) {
  size_t sz = 0;

  // Calculate total size.
  for (size_t c = 0; c < channels.size(); c++) {
    sz += strlen(channels[c].name.c_str()) + 1;  // +1 for \0
    sz += 16;                                    // 4 * int
  }
  data.resize(sz + 1);

  unsigned char *p = &data.at(0);

  for (size_t c = 0; c < channels.size(); c++) {
    memcpy(p, channels[c].name.c_str(), strlen(channels[c].name.c_str()));
    p += strlen(channels[c].name.c_str());
    (*p) = '\0';
    p++;

    int pixel_type = channels[c].pixel_type;
    int x_sampling = channels[c].x_sampling;
    int y_sampling = channels[c].y_sampling;
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&pixel_type));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&x_sampling));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&y_sampling));

    memcpy(p, &pixel_type, sizeof(int));
    p += sizeof(int);

    (*p) = channels[c].p_linear;
    p += 4;  // p_linear (1 byte) + 3 reserved bytes

    memcpy(p, &x_sampling, sizeof(int));
    p += sizeof(int);

    memcpy(p, &y_sampling, sizeof(int));
    p += sizeof(int);
  }

  (*p) = '\0';  // list terminator
}

// Compress `src_size` bytes with the EXR ZIP scheme: split/interleave +
// delta predictor (from OpenEXR's ImfZipCompressor.cpp), then deflate via
// miniz or zlib. `dst` must be at least compressBound(src_size) bytes.
// Falls back to storing the data uncompressed if deflate would grow it.
static void CompressZip(unsigned char *dst,
                        tinyexr::tinyexr_uint64 &compressedSize,
                        const unsigned char *src, unsigned long src_size) {
  std::vector<unsigned char> tmpBuf(src_size);

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  //
  // Reorder the pixel data.
  //

  const char *srcPtr = reinterpret_cast<const char *>(src);

  {
    char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0));
    char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2;
    const char *stop = srcPtr + src_size;

    for (;;) {
      if (srcPtr < stop)
        *(t1++) = *(srcPtr++);
      else
        break;

      if (srcPtr < stop)
        *(t2++) = *(srcPtr++);
      else
        break;
    }
  }

  //
  // Predictor.
  //

  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + src_size;
    int p = t[-1];

    while (t < stop) {
      int d = int(t[0]) - p + (128 + 256);
      p = t[0];
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

#if TINYEXR_USE_MINIZ
  //
  // Compress the data using miniz
  //

  miniz::mz_ulong outSize = miniz::mz_compressBound(src_size);
  int ret = miniz::mz_compress(
      dst, &outSize, static_cast<const unsigned char *>(&tmpBuf.at(0)),
      src_size);
  assert(ret == miniz::MZ_OK);
  (void)ret;

  compressedSize = outSize;
#else
  uLong outSize = compressBound(static_cast<uLong>(src_size));
  int ret = compress(dst, &outSize, static_cast<const Bytef *>(&tmpBuf.at(0)),
                     src_size);
  assert(ret == Z_OK);

  compressedSize = outSize;
#endif

  // Use uncompressed data when compressed data is larger than uncompressed.
  // (Issue 40)
  if (compressedSize >= src_size) {
    compressedSize = src_size;
    memcpy(dst, src, src_size);
  }
}

// Inverse of CompressZip: inflate, undo the delta predictor, then
// de-interleave. *uncompressed_size is the expected output size on input
// and the actual size on output. Returns false if inflate fails.
static bool DecompressZip(unsigned char *dst,
                          unsigned long *uncompressed_size /* inout */,
                          const unsigned char *src, unsigned long src_size) {
  if ((*uncompressed_size) == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    return true;
  }
  std::vector<unsigned char> tmpBuf(*uncompressed_size);

#if TINYEXR_USE_MINIZ
  int ret =
      miniz::mz_uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (miniz::MZ_OK != ret) {
    return false;
  }
#else
  int ret = uncompress(&tmpBuf.at(0), uncompressed_size, src, src_size);
  if (Z_OK != ret) {
    return false;
  }
#endif

  //
  // Apply EXR-specific? postprocess. Grabbed from OpenEXR's
  // ImfZipCompressor.cpp
  //

  // Predictor.
  {
    unsigned char *t = &tmpBuf.at(0) + 1;
    unsigned char *stop = &tmpBuf.at(0) + (*uncompressed_size);

    while (t < stop) {
      int d = int(t[-1]) + int(t[0]) - 128;
      t[0] = static_cast<unsigned char>(d);
      ++t;
    }
  }

  // Reorder the pixel data.
  {
    const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0));
    const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) +
                     (*uncompressed_size + 1) / 2;
    char *s = reinterpret_cast<char *>(dst);
    char *stop = s + (*uncompressed_size);

    for (;;) {
      if (s < stop)
        *(s++) = *(t1++);
      else
        break;

      if (s < stop)
        *(s++) = *(t2++);
      else
        break;
    }
  }

  return true;
}

// RLE code from OpenEXR --------------------------------------

#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-conversion"
#if __has_warning("-Wextra-semi-stmt")
#pragma clang diagnostic ignored "-Wextra-semi-stmt"
#endif
#endif

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204)  // nonstandard extension used : non-constant
                                 // aggregate initializer (also supported by GNU
                                 // C and C99, so no big deal)
#pragma warning(disable : 4244)  // 'initializing': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4267)  // 'argument': conversion from '__int64' to
                                 // 'int', possible loss of data
#pragma warning(disable : 4996)  // 'strdup': The POSIX name for this item is
                                 // deprecated. Instead, use the ISO C and C++
                                 // conformant name: _strdup.
#endif

const int MIN_RUN_LENGTH = 3;
const int MAX_RUN_LENGTH = 127;

//
// Compress an array of bytes, using run-length encoding,
// and return the length of the compressed data.
// static int rleCompress(int inLength, const char in[], signed char out[]) { const char *inEnd = in + inLength; const char *runStart = in; const char *runEnd = in + 1; signed char *outWrite = out; while (runStart < inEnd) { while (runEnd < inEnd && *runStart == *runEnd && runEnd - runStart - 1 < MAX_RUN_LENGTH) { ++runEnd; } if (runEnd - runStart >= MIN_RUN_LENGTH) { // // Compressable run // *outWrite++ = static_cast<char>(runEnd - runStart) - 1; *outWrite++ = *(reinterpret_cast<const signed char *>(runStart)); runStart = runEnd; } else { // // Uncompressable run // while (runEnd < inEnd && ((runEnd + 1 >= inEnd || *runEnd != *(runEnd + 1)) || (runEnd + 2 >= inEnd || *(runEnd + 1) != *(runEnd + 2))) && runEnd - runStart < MAX_RUN_LENGTH) { ++runEnd; } *outWrite++ = static_cast<char>(runStart - runEnd); while (runStart < runEnd) { *outWrite++ = *(reinterpret_cast<const signed char *>(runStart++)); } } ++runEnd; } return static_cast<int>(outWrite - out); } // // Uncompress an array of bytes compressed with rleCompress(). // Returns the length of the oncompressed data, or 0 if the // length of the uncompressed data would be more than maxLength. // static int rleUncompress(int inLength, int maxLength, const signed char in[], char out[]) { char *outStart = out; while (inLength > 0) { if (*in < 0) { int count = -(static_cast<int>(*in++)); inLength -= count + 1; // Fixes #116: Add bounds check to in buffer. 
if ((0 > (maxLength -= count)) || (inLength < 0)) return 0; memcpy(out, in, count); out += count; in += count; } else { int count = *in++; inLength -= 2; if (0 > (maxLength -= count + 1)) return 0; memset(out, *reinterpret_cast<const char *>(in), count + 1); out += count + 1; in++; } } return static_cast<int>(out - outStart); } #ifdef __clang__ #pragma clang diagnostic pop #endif // End of RLE code from OpenEXR ----------------------------------- static void CompressRle(unsigned char *dst, tinyexr::tinyexr_uint64 &compressedSize, const unsigned char *src, unsigned long src_size) { std::vector<unsigned char> tmpBuf(src_size); // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // // Reorder the pixel data. // const char *srcPtr = reinterpret_cast<const char *>(src); { char *t1 = reinterpret_cast<char *>(&tmpBuf.at(0)); char *t2 = reinterpret_cast<char *>(&tmpBuf.at(0)) + (src_size + 1) / 2; const char *stop = srcPtr + src_size; for (;;) { if (srcPtr < stop) *(t1++) = *(srcPtr++); else break; if (srcPtr < stop) *(t2++) = *(srcPtr++); else break; } } // // Predictor. // { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + src_size; int p = t[-1]; while (t < stop) { int d = int(t[0]) - p + (128 + 256); p = t[0]; t[0] = static_cast<unsigned char>(d); ++t; } } // outSize will be (srcSiz * 3) / 2 at max. int outSize = rleCompress(static_cast<int>(src_size), reinterpret_cast<const char *>(&tmpBuf.at(0)), reinterpret_cast<signed char *>(dst)); assert(outSize > 0); compressedSize = static_cast<tinyexr::tinyexr_uint64>(outSize); // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if (compressedSize >= src_size) { compressedSize = src_size; memcpy(dst, src, src_size); } } static bool DecompressRle(unsigned char *dst, const unsigned long uncompressed_size, const unsigned char *src, unsigned long src_size) { if (uncompressed_size == src_size) { // Data is not compressed(Issue 40). 
memcpy(dst, src, src_size); return true; } // Workaround for issue #112. // TODO(syoyo): Add more robust out-of-bounds check in `rleUncompress`. if (src_size <= 2) { return false; } std::vector<unsigned char> tmpBuf(uncompressed_size); int ret = rleUncompress(static_cast<int>(src_size), static_cast<int>(uncompressed_size), reinterpret_cast<const signed char *>(src), reinterpret_cast<char *>(&tmpBuf.at(0))); if (ret != static_cast<int>(uncompressed_size)) { return false; } // // Apply EXR-specific? postprocess. Grabbed from OpenEXR's // ImfRleCompressor.cpp // // Predictor. { unsigned char *t = &tmpBuf.at(0) + 1; unsigned char *stop = &tmpBuf.at(0) + uncompressed_size; while (t < stop) { int d = int(t[-1]) + int(t[0]) - 128; t[0] = static_cast<unsigned char>(d); ++t; } } // Reorder the pixel data. { const char *t1 = reinterpret_cast<const char *>(&tmpBuf.at(0)); const char *t2 = reinterpret_cast<const char *>(&tmpBuf.at(0)) + (uncompressed_size + 1) / 2; char *s = reinterpret_cast<char *>(dst); char *stop = s + uncompressed_size; for (;;) { if (s < stop) *(s++) = *(t1++); else break; if (s < stop) *(s++) = *(t2++); else break; } } return true; } #if TINYEXR_USE_PIZ #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wc++11-long-long" #pragma clang diagnostic ignored "-Wold-style-cast" #pragma clang diagnostic ignored "-Wpadded" #pragma clang diagnostic ignored "-Wsign-conversion" #pragma clang diagnostic ignored "-Wc++11-extensions" #pragma clang diagnostic ignored "-Wconversion" #pragma clang diagnostic ignored "-Wc++98-compat-pedantic" #if __has_warning("-Wcast-qual") #pragma clang diagnostic ignored "-Wcast-qual" #endif #if __has_warning("-Wextra-semi-stmt") #pragma clang diagnostic ignored "-Wextra-semi-stmt" #endif #endif // // PIZ compress/uncompress, based on OpenEXR's ImfPizCompressor.cpp // // ----------------------------------------------------------------- // Copyright (c) 2004, Industrial Light & Magic, a division of Lucas 
// Digital Ltd. LLC)
// (3 clause BSD license)
//

// Per-channel bookkeeping used by the PIZ compressor while walking the
// interleaved scanline buffer.
struct PIZChannelData {
  unsigned short *start;
  unsigned short *end;
  int nx;
  int ny;
  int ys;
  int size;
};

//-----------------------------------------------------------------------------
//
// 16-bit Haar Wavelet encoding and decoding
//
// The source code in this file is derived from the encoding
// and decoding routines written by Christian Rouet for his
// PIZ image file format.
//
//-----------------------------------------------------------------------------

//
// Wavelet basis functions without modulo arithmetic; they produce
// the best compression ratios when the wavelet-transformed data are
// Huffman-encoded, but the wavelet transform works only for 14-bit
// data (untransformed data values must be less than (1 << 14)).
//

inline void wenc14(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  // Treat the 14-bit inputs as signed; the low-pass output is the truncated
  // average, the high-pass output is the signed difference.
  const short sa = static_cast<short>(a);
  const short sb = static_cast<short>(b);

  l = static_cast<unsigned short>(static_cast<short>((sa + sb) >> 1));
  h = static_cast<unsigned short>(static_cast<short>(sa - sb));
}

inline void wdec14(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  // Invert wenc14: reconstruct the original pair from the average (l) and
  // the difference (h); the (diff & 1) term restores the truncated bit.
  const int diff = static_cast<short>(h);
  const int first = static_cast<short>(l) + (diff & 1) + (diff >> 1);

  a = static_cast<unsigned short>(static_cast<short>(first));
  b = static_cast<unsigned short>(static_cast<short>(first - diff));
}

//
// Wavelet basis functions with modulo arithmetic; they work with full
// 16-bit data, but Huffman-encoding the wavelet-transformed data doesn't
// compress the data quite as well.
//
const int NBITS = 16;
const int A_OFFSET = 1 << (NBITS - 1);
const int M_OFFSET = 1 << (NBITS - 1);
const int MOD_MASK = (1 << NBITS) - 1;

// 16-bit-safe wavelet pair encode: average/difference computed modulo 2^16
// with a bias (A_OFFSET) so the full unsigned 16-bit range round-trips.
inline void wenc16(unsigned short a, unsigned short b, unsigned short &l,
                   unsigned short &h) {
  int ao = (a + A_OFFSET) & MOD_MASK;
  int m = ((ao + b) >> 1);
  int d = ao - b;

  if (d < 0) m = (m + M_OFFSET) & MOD_MASK;

  d &= MOD_MASK;

  l = static_cast<unsigned short>(m);
  h = static_cast<unsigned short>(d);
}

// Exact inverse of wenc16.
inline void wdec16(unsigned short l, unsigned short h, unsigned short &a,
                   unsigned short &b) {
  int m = l;
  int d = h;
  int bb = (m - (d >> 1)) & MOD_MASK;
  int aa = (d + bb - A_OFFSET) & MOD_MASK;
  b = static_cast<unsigned short>(bb);
  a = static_cast<unsigned short>(aa);
}

//
// 2D Wavelet encoding:
//
// In-place multi-level Haar transform over an nx * ny grid stored with
// strides ox (between columns) and oy (between rows). The 14-bit basis
// (wenc14) is used when all values fit in 14 bits (mx < 1 << 14), otherwise
// the modulo-arithmetic 16-bit basis (wenc16) is used.
//

static void wav2Encode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;   // == 1 <<  level
  int p2 = 2;  // == 1 << (level+1)

  //
  // Hierachical loop on smaller dimension n
  //

  while (p2 <= n) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet encoding
        //

        if (w14) {
          wenc14(*px, *p01, i00, i01);
          wenc14(*p10, *p11, i10, i11);
          wenc14(i00, i10, *px, *p10);
          wenc14(i01, i11, *p01, *p11);
        } else {
          wenc16(*px, *p01, i00, i01);
          wenc16(*p10, *p11, i10, i11);
          wenc16(i00, i10, *px, *p10);
          wenc16(i01, i11, *p01, *p11);
        }
      }

      //
      // Encode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wenc14(*px, *p10, i00, *p10);
        else
          wenc16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Encode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wenc14(*px, *p01, i00, *p01);
        else
          wenc16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p = p2;
    p2 <<= 1;
  }
}

//
// 2D Wavelet decoding:
//
// Exact inverse of wav2Encode: walks the levels from coarsest to finest,
// applying the matching (14-bit or 16-bit) inverse basis in place.
//

static void wav2Decode(
    unsigned short *in,  // io: values are transformed in place
    int nx,              // i : x size
    int ox,              // i : x offset
    int ny,              // i : y size
    int oy,              // i : y offset
    unsigned short mx)   // i : maximum in[x][y] value
{
  bool w14 = (mx < (1 << 14));
  int n = (nx > ny) ? ny : nx;
  int p = 1;
  int p2;

  //
  // Search max level
  //

  while (p <= n) p <<= 1;

  p >>= 1;
  p2 = p;
  p >>= 1;

  //
  // Hierarchical loop on smaller dimension n
  //

  while (p >= 1) {
    unsigned short *py = in;
    unsigned short *ey = in + oy * (ny - p2);
    int oy1 = oy * p;
    int oy2 = oy * p2;
    int ox1 = ox * p;
    int ox2 = ox * p2;
    unsigned short i00, i01, i10, i11;

    //
    // Y loop
    //

    for (; py <= ey; py += oy2) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      //
      // X loop
      //

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;
        unsigned short *p10 = px + oy1;
        unsigned short *p11 = p10 + ox1;

        //
        // 2D wavelet decoding
        //

        if (w14) {
          wdec14(*px, *p10, i00, i10);
          wdec14(*p01, *p11, i01, i11);
          wdec14(i00, i01, *px, *p01);
          wdec14(i10, i11, *p10, *p11);
        } else {
          wdec16(*px, *p10, i00, i10);
          wdec16(*p01, *p11, i01, i11);
          wdec16(i00, i01, *px, *p01);
          wdec16(i10, i11, *p10, *p11);
        }
      }

      //
      // Decode (1D) odd column (still in Y loop)
      //

      if (nx & p) {
        unsigned short *p10 = px + oy1;

        if (w14)
          wdec14(*px, *p10, i00, *p10);
        else
          wdec16(*px, *p10, i00, *p10);

        *px = i00;
      }
    }

    //
    // Decode (1D) odd line (must loop in X)
    //

    if (ny & p) {
      unsigned short *px = py;
      unsigned short *ex = py + ox * (nx - p2);

      for (; px <= ex; px += ox2) {
        unsigned short *p01 = px + ox1;

        if (w14)
          wdec14(*px, *p01, i00, *p01);
        else
          wdec16(*px, *p01, i00, *p01);

        *px = i00;
      }
    }

    //
    // Next level
    //

    p2 = p;
    p >>= 1;
  }
}

//-----------------------------------------------------------------------------
//
// 16-bit Huffman compression and decompression.
//
// The source code in this file is derived from the 8-bit
// Huffman compression and decompression routines written
// by Christian Rouet for his PIZ image file format.
//
//-----------------------------------------------------------------------------

// Adds some modification for tinyexr.
const int HUF_ENCBITS = 16; // literal (value) bit length const int HUF_DECBITS = 14; // decoding bit size (>= 8) const int HUF_ENCSIZE = (1 << HUF_ENCBITS) + 1; // encoding table size const int HUF_DECSIZE = 1 << HUF_DECBITS; // decoding table size const int HUF_DECMASK = HUF_DECSIZE - 1; struct HufDec { // short code long code //------------------------------- int len : 8; // code length 0 int lit : 24; // lit p size int *p; // 0 lits }; inline long long hufLength(long long code) { return code & 63; } inline long long hufCode(long long code) { return code >> 6; } inline void outputBits(int nBits, long long bits, long long &c, int &lc, char *&out) { c <<= nBits; lc += nBits; c |= bits; while (lc >= 8) *out++ = static_cast<char>((c >> (lc -= 8))); } inline long long getBits(int nBits, long long &c, int &lc, const char *&in) { while (lc < nBits) { c = (c << 8) | *(reinterpret_cast<const unsigned char *>(in++)); lc += 8; } lc -= nBits; return (c >> lc) & ((1 << nBits) - 1); } // // ENCODING TABLE BUILDING & (UN)PACKING // // // Build a "canonical" Huffman code table: // - for each (uncompressed) symbol, hcode contains the length // of the corresponding code (in the compressed data) // - canonical codes are computed and stored in hcode // - the rules for constructing canonical codes are as follows: // * shorter codes (if filled with zeroes to the right) // have a numerically higher value than longer codes // * for codes with the same length, numerical values // increase with numerical symbol values // - because the canonical code table can be constructed from // symbol lengths alone, the code table can be transmitted // without sending the actual code values // - see http://www.compressconsult.com/huffman/ // static void hufCanonicalCodeTable(long long hcode[HUF_ENCSIZE]) { long long n[59]; // // For each i from 0 through 58, count the // number of different codes of length i, and // store the count in n[i]. 
// for (int i = 0; i <= 58; ++i) n[i] = 0; for (int i = 0; i < HUF_ENCSIZE; ++i) n[hcode[i]] += 1; // // For each i from 58 through 1, compute the // numerically lowest code with length i, and // store that code in n[i]. // long long c = 0; for (int i = 58; i > 0; --i) { long long nc = ((c + n[i]) >> 1); n[i] = c; c = nc; } // // hcode[i] contains the length, l, of the // code for symbol i. Assign the next available // code of length l to the symbol and store both // l and the code in hcode[i]. // for (int i = 0; i < HUF_ENCSIZE; ++i) { int l = static_cast<int>(hcode[i]); if (l > 0) hcode[i] = l | (n[l]++ << 6); } } // // Compute Huffman codes (based on frq input) and store them in frq: // - code structure is : [63:lsb - 6:msb] | [5-0: bit length]; // - max code length is 58 bits; // - codes outside the range [im-iM] have a null length (unused values); // - original frequencies are destroyed; // - encoding tables are used by hufEncode() and hufBuildDecTable(); // struct FHeapCompare { bool operator()(long long *a, long long *b) { return *a > *b; } }; static void hufBuildEncTable( long long *frq, // io: input frequencies [HUF_ENCSIZE], output table int *im, // o: min frq index int *iM) // o: max frq index { // // This function assumes that when it is called, array frq // indicates the frequency of all possible symbols in the data // that are to be Huffman-encoded. (frq[i] contains the number // of occurrences of symbol i in the data.) // // The loop below does three things: // // 1) Finds the minimum and maximum indices that point // to non-zero entries in frq: // // frq[im] != 0, and frq[i] == 0 for all i < im // frq[iM] != 0, and frq[i] == 0 for all i > iM // // 2) Fills array fHeap with pointers to all non-zero // entries in frq. // // 3) Initializes array hlink such that hlink[i] == i // for all array entries. 
// std::vector<int> hlink(HUF_ENCSIZE); std::vector<long long *> fHeap(HUF_ENCSIZE); *im = 0; while (!frq[*im]) (*im)++; int nf = 0; for (int i = *im; i < HUF_ENCSIZE; i++) { hlink[i] = i; if (frq[i]) { fHeap[nf] = &frq[i]; nf++; *iM = i; } } // // Add a pseudo-symbol, with a frequency count of 1, to frq; // adjust the fHeap and hlink array accordingly. Function // hufEncode() uses the pseudo-symbol for run-length encoding. // (*iM)++; frq[*iM] = 1; fHeap[nf] = &frq[*iM]; nf++; // // Build an array, scode, such that scode[i] contains the number // of bits assigned to symbol i. Conceptually this is done by // constructing a tree whose leaves are the symbols with non-zero // frequency: // // Make a heap that contains all symbols with a non-zero frequency, // with the least frequent symbol on top. // // Repeat until only one symbol is left on the heap: // // Take the two least frequent symbols off the top of the heap. // Create a new node that has first two nodes as children, and // whose frequency is the sum of the frequencies of the first // two nodes. Put the new node back into the heap. // // The last node left on the heap is the root of the tree. For each // leaf node, the distance between the root and the leaf is the length // of the code for the corresponding symbol. // // The loop below doesn't actually build the tree; instead we compute // the distances of the leaves from the root on the fly. When a new // node is added to the heap, then that node's descendants are linked // into a single linear list that starts at the new node, and the code // lengths of the descendants (that is, their distance from the root // of the tree) are incremented by one. 
// std::make_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); std::vector<long long> scode(HUF_ENCSIZE); memset(scode.data(), 0, sizeof(long long) * HUF_ENCSIZE); while (nf > 1) { // // Find the indices, mm and m, of the two smallest non-zero frq // values in fHeap, add the smallest frq to the second-smallest // frq, and remove the smallest frq value from fHeap. // int mm = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); --nf; int m = fHeap[0] - frq; std::pop_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); frq[m] += frq[mm]; std::push_heap(&fHeap[0], &fHeap[nf], FHeapCompare()); // // The entries in scode are linked into lists with the // entries in hlink serving as "next" pointers and with // the end of a list marked by hlink[j] == j. // // Traverse the lists that start at scode[m] and scode[mm]. // For each element visited, increment the length of the // corresponding code by one bit. (If we visit scode[j] // during the traversal, then the code for symbol j becomes // one bit longer.) // // Merge the lists that start at scode[m] and scode[mm] // into a single list that starts at scode[m]. // // // Add a bit to all codes in the first list. // for (int j = m;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) { // // Merge the two lists. // hlink[j] = mm; break; } } // // Add a bit to all codes in the second list // for (int j = mm;; j = hlink[j]) { scode[j]++; assert(scode[j] <= 58); if (hlink[j] == j) break; } } // // Build a canonical Huffman code table, replacing the code // lengths in scode with (code, code length) pairs. Copy the // code table from scode into frq. 
// hufCanonicalCodeTable(scode.data()); memcpy(frq, scode.data(), sizeof(long long) * HUF_ENCSIZE); } // // Pack an encoding table: // - only code lengths, not actual codes, are stored // - runs of zeroes are compressed as follows: // // unpacked packed // -------------------------------- // 1 zero 0 (6 bits) // 2 zeroes 59 // 3 zeroes 60 // 4 zeroes 61 // 5 zeroes 62 // n zeroes (6 or more) 63 n-6 (6 + 8 bits) // const int SHORT_ZEROCODE_RUN = 59; const int LONG_ZEROCODE_RUN = 63; const int SHORTEST_LONG_RUN = 2 + LONG_ZEROCODE_RUN - SHORT_ZEROCODE_RUN; const int LONGEST_LONG_RUN = 255 + SHORTEST_LONG_RUN; static void hufPackEncTable( const long long *hcode, // i : encoding table [HUF_ENCSIZE] int im, // i : min hcode index int iM, // i : max hcode index char **pcode) // o: ptr to packed table (updated) { char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { int l = hufLength(hcode[im]); if (l == 0) { int zerun = 1; while ((im < iM) && (zerun < LONGEST_LONG_RUN)) { if (hufLength(hcode[im + 1]) > 0) break; im++; zerun++; } if (zerun >= 2) { if (zerun >= SHORTEST_LONG_RUN) { outputBits(6, LONG_ZEROCODE_RUN, c, lc, p); outputBits(8, zerun - SHORTEST_LONG_RUN, c, lc, p); } else { outputBits(6, SHORT_ZEROCODE_RUN + zerun - 2, c, lc, p); } continue; } } outputBits(6, l, c, lc, p); } if (lc > 0) *p++ = (unsigned char)(c << (8 - lc)); *pcode = p; } // // Unpack an encoding table packed by hufPackEncTable(): // static bool hufUnpackEncTable( const char **pcode, // io: ptr to packed table (updated) int ni, // i : input size (in bytes) int im, // i : min hcode index int iM, // i : max hcode index long long *hcode) // o: encoding table [HUF_ENCSIZE] { memset(hcode, 0, sizeof(long long) * HUF_ENCSIZE); const char *p = *pcode; long long c = 0; int lc = 0; for (; im <= iM; im++) { if (p - *pcode >= ni) { return false; } long long l = hcode[im] = getBits(6, c, lc, p); // code length if (l == (long long)LONG_ZEROCODE_RUN) { if (p - *pcode > ni) { return false; } 
      // Long zero run: 8-bit payload encodes (run length - SHORTEST_LONG_RUN).
      int zerun = getBits(8, c, lc, p) + SHORTEST_LONG_RUN;

      // Reject runs that would write past the declared table range.
      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;  // compensate for the loop's im++
    } else if (l >= (long long)SHORT_ZEROCODE_RUN) {
      // Short zero run: codes 59..62 encode runs of 2..5 zeroes.
      int zerun = l - SHORT_ZEROCODE_RUN + 2;

      if (im + zerun > iM + 1) {
        return false;
      }

      while (zerun--) hcode[im++] = 0;

      im--;  // compensate for the loop's im++
    }
  }

  *pcode = const_cast<char *>(p);

  // Turn the recovered code lengths into canonical (code, length) pairs.
  hufCanonicalCodeTable(hcode);

  return true;
}

//
// DECODING TABLE BUILDING
//

//
// Clear a newly allocated decoding table so that it contains only zeroes.
//

static void hufClearDecTable(HufDec *hdecod)  // io: (allocated by caller)
//     decoding table [HUF_DECSIZE]
{
  // Explicit field-by-field clear (also nulls the secondary-entry pointers
  // that hufBuildDecTable may later allocate).
  for (int i = 0; i < HUF_DECSIZE; i++) {
    hdecod[i].len = 0;
    hdecod[i].lit = 0;
    hdecod[i].p = NULL;
  }
  // memset(hdecod, 0, sizeof(HufDec) * HUF_DECSIZE);
}

//
// Build a decoding hash table based on the encoding table hcode:
//	- short codes (<= HUF_DECBITS) are resolved with a single table access;
//	- long code entry allocations are not optimized, because long codes are
//	  unfrequent;
//	- decoding tables are used by hufDecode();
//

static bool hufBuildDecTable(const long long *hcode,  // i : encoding table
                             int im,                  // i : min index in hcode
                             int iM,                  // i : max index in hcode
                             HufDec *hdecod)  //  o: (allocated by caller)
//     decoding table [HUF_DECSIZE]
{
  //
  // Init hashtable & loop on all codes.
  // Assumes that hufClearDecTable(hdecod) has already been called.
  //

  for (; im <= iM; im++) {
    long long c = hufCode(hcode[im]);
    int l = hufLength(hcode[im]);

    if (c >> l) {
      //
      // Error: c is supposed to be an l-bit code,
      // but c contains a value that is greater
      // than the largest l-bit number.
      //

      // invalidTableEntry();
      return false;
    }

    if (l > HUF_DECBITS) {
      //
      // Long code: add a secondary entry
      //

      HufDec *pl = hdecod + (c >> (l - HUF_DECBITS));

      if (pl->len) {
        //
        // Error: a short code has already
        // been stored in table entry *pl.
// // invalidTableEntry(); return false; } pl->lit++; if (pl->p) { int *p = pl->p; pl->p = new int[pl->lit]; for (int i = 0; i < pl->lit - 1; ++i) pl->p[i] = p[i]; delete[] p; } else { pl->p = new int[1]; } pl->p[pl->lit - 1] = im; } else if (l) { // // Short code: init all primary entries // HufDec *pl = hdecod + (c << (HUF_DECBITS - l)); for (long long i = 1ULL << (HUF_DECBITS - l); i > 0; i--, pl++) { if (pl->len || pl->p) { // // Error: a short code or a long code has // already been stored in table entry *pl. // // invalidTableEntry(); return false; } pl->len = l; pl->lit = im; } } } return true; } // // Free the long code entries of a decoding table built by hufBuildDecTable() // static void hufFreeDecTable(HufDec *hdecod) // io: Decoding table { for (int i = 0; i < HUF_DECSIZE; i++) { if (hdecod[i].p) { delete[] hdecod[i].p; hdecod[i].p = 0; } } } // // ENCODING // inline void outputCode(long long code, long long &c, int &lc, char *&out) { outputBits(hufLength(code), hufCode(code), c, lc, out); } inline void sendCode(long long sCode, int runCount, long long runCode, long long &c, int &lc, char *&out) { // // Output a run of runCount instances of the symbol sCount. // Output the symbols explicitly, or if that is shorter, output // the sCode symbol once followed by a runCode symbol and runCount // expressed as an 8-bit number. 
// if (hufLength(sCode) + hufLength(runCode) + 8 < hufLength(sCode) * runCount) { outputCode(sCode, c, lc, out); outputCode(runCode, c, lc, out); outputBits(8, runCount, c, lc, out); } else { while (runCount-- >= 0) outputCode(sCode, c, lc, out); } } // // Encode (compress) ni values based on the Huffman encoding table hcode: // static int hufEncode // return: output size (in bits) (const long long *hcode, // i : encoding table const unsigned short *in, // i : uncompressed input buffer const int ni, // i : input buffer size (in bytes) int rlc, // i : rl code char *out) // o: compressed output buffer { char *outStart = out; long long c = 0; // bits not yet written to out int lc = 0; // number of valid bits in c (LSB) int s = in[0]; int cs = 0; // // Loop on input values // for (int i = 1; i < ni; i++) { // // Count same values or send code // if (s == in[i] && cs < 255) { cs++; } else { sendCode(hcode[s], cs, hcode[rlc], c, lc, out); cs = 0; } s = in[i]; } // // Send remaining code // sendCode(hcode[s], cs, hcode[rlc], c, lc, out); if (lc) *out = (c << (8 - lc)) & 0xff; return (out - outStart) * 8 + lc; } // // DECODING // // // In order to force the compiler to inline them, // getChar() and getCode() are implemented as macros // instead of "inline" functions. 
//

// Pull the next input byte into the bit accumulator `c`.
#define getChar(c, lc, in)                   \
  {                                          \
    c = (c << 8) | *(unsigned char *)(in++); \
    lc += 8;                                 \
  }

#if 0
#define getCode(po, rlc, c, lc, in, out, ob, oe) \
  {                                              \
    if (po == rlc) {                             \
      if (lc < 8) getChar(c, lc, in);            \
                                                 \
      lc -= 8;                                   \
                                                 \
      unsigned char cs = (c >> lc);              \
                                                 \
      if (out + cs > oe) return false;           \
                                                 \
      /* TinyEXR issue 78 */                     \
      unsigned short s = out[-1];                \
                                                 \
      while (cs-- > 0) *out++ = s;               \
    } else if (out < oe) {                       \
      *out++ = po;                               \
    } else {                                     \
      return false;                              \
    }                                            \
  }
#else
// Emit one decoded symbol `po` into `out`.  If `po` is the run-length code
// `rlc`, instead read an 8-bit repeat count and replicate the previous
// output value.  Returns false on any bounds violation.
static bool getCode(int po, int rlc, long long &c, int &lc, const char *&in,
                    const char *in_end, unsigned short *&out,
                    const unsigned short *ob, const unsigned short *oe) {
  (void)ob;
  if (po == rlc) {
    if (lc < 8) {
      /* TinyEXR issue 78 */
      if ((in + 1) >= in_end) {
        return false;
      }
      getChar(c, lc, in);
    }

    lc -= 8;

    unsigned char cs = (c >> lc);

    if (out + cs > oe) return false;

    // Bounds check for safety
    // Issue 100.
    if ((out - 1) < ob) return false;
    unsigned short s = out[-1];

    while (cs-- > 0) *out++ = s;
  } else if (out < oe) {
    *out++ = po;
  } else {
    return false;
  }
  return true;
}
#endif

//
// Decode (uncompress) ni bits based on encoding & decoding tables:
//

static bool hufDecode(const long long *hcode,  // i : encoding table
                      const HufDec *hdecod,    // i : decoding table
                      const char *in,          // i : compressed input buffer
                      int ni,                  // i : input size (in bits)
                      int rlc,                 // i : run-length code
                      int no,  // i : expected output size (in bytes)
                      unsigned short *out)  //  o: uncompressed output buffer
{
  long long c = 0;
  int lc = 0;
  unsigned short *outb = out;          // begin
  unsigned short *oe = out + no;       // end
  const char *ie = in + (ni + 7) / 8;  // input byte size

  //
  // Loop on input bytes
  //

  while (in < ie) {
    getChar(c, lc, in);

    //
    // Access decoding table
    //

    while (lc >= HUF_DECBITS) {
      // Index the primary table with the top HUF_DECBITS bits.
      const HufDec pl = hdecod[(c >> (lc - HUF_DECBITS)) & HUF_DECMASK];

      if (pl.len) {
        //
        // Get short code
        //

        lc -= pl.len;
        // std::cout << "lit = " << pl.lit << std::endl;
        // std::cout << "rlc = " << rlc << std::endl;
        // std::cout << "c = " << c << std::endl;
        // std::cout << "lc = " << lc << std::endl;
        // std::cout << "in = " << in << std::endl;
        // std::cout << "out = " << out << std::endl;
        // std::cout << "oe = " << oe << std::endl;
        if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
          return false;
        }
      } else {
        if (!pl.p) {
          return false;
        }
        // invalidCode(); // wrong code

        //
        // Search long code
        //

        int j;

        for (j = 0; j < pl.lit; j++) {
          int l = hufLength(hcode[pl.p[j]]);

          while (lc < l && in < ie)  // get more bits
            getChar(c, lc, in);

          if (lc >= l) {
            // Compare against the candidate's actual code bits.
            if (hufCode(hcode[pl.p[j]]) ==
                ((c >> (lc - l)) & (((long long)(1) << l) - 1))) {
              //
              // Found : get long code
              //

              lc -= l;

              if (!getCode(pl.p[j], rlc, c, lc, in, ie, out, outb, oe)) {
                return false;
              }

              break;
            }
          }
        }

        if (j == pl.lit) {
          return false;
          // invalidCode(); // Not found
        }
      }
    }
  }

  //
  // Get remaining (short) codes
  //

  // Discard the padding bits of the final partial byte.
  int i = (8 - ni) & 7;
  c >>= i;
  lc -= i;

  while (lc > 0) {
    const HufDec pl = hdecod[(c << (HUF_DECBITS - lc)) & HUF_DECMASK];

    if (pl.len) {
      lc -= pl.len;
      if (!getCode(pl.lit, rlc, c, lc, in, ie, out, outb, oe)) {
        return false;
      }
    } else {
      return false;
      // invalidCode(); // wrong (long) code
    }
  }

  // Exactly `no` values must have been produced.
  if (out - outb != no) {
    return false;
  }
  // notEnoughData ();

  return true;
}

// Histogram the input values into freq[HUF_ENCSIZE].
static void countFrequencies(std::vector<long long> &freq,
                             const unsigned short data[/*n*/], int n) {
  for (int i = 0; i < HUF_ENCSIZE; ++i) freq[i] = 0;

  for (int i = 0; i < n; ++i) ++freq[data[i]];
}

// Store a 32-bit value as little-endian bytes, regardless of host order.
static void writeUInt(char buf[4], unsigned int i) {
  unsigned char *b = (unsigned char *)buf;

  b[0] = i;
  b[1] = i >> 8;
  b[2] = i >> 16;
  b[3] = i >> 24;
}

// Read a little-endian 32-bit value, regardless of host order.
static unsigned int readUInt(const char buf[4]) {
  const unsigned char *b = (const unsigned char *)buf;

  return (b[0] & 0x000000ff) | ((b[1] << 8) & 0x0000ff00) |
         ((b[2] << 16) & 0x00ff0000) | ((b[3] << 24) & 0xff000000);
}

//
// EXTERNAL INTERFACE
//

// Huffman-compress nRaw 16-bit values into `compressed`
// (20-byte header, packed code table, then the encoded bits).
// Returns the total compressed size in bytes.
static int hufCompress(const unsigned short raw[], int nRaw,
                       char compressed[]) {
  if (nRaw == 0) return 0;

  std::vector<long long> freq(HUF_ENCSIZE);

  countFrequencies(freq, raw, nRaw);

  int im = 0;
  int iM = 0;
hufBuildEncTable(freq.data(), &im, &iM); char *tableStart = compressed + 20; char *tableEnd = tableStart; hufPackEncTable(freq.data(), im, iM, &tableEnd); int tableLength = tableEnd - tableStart; char *dataStart = tableEnd; int nBits = hufEncode(freq.data(), raw, nRaw, iM, dataStart); int data_length = (nBits + 7) / 8; writeUInt(compressed, im); writeUInt(compressed + 4, iM); writeUInt(compressed + 8, tableLength); writeUInt(compressed + 12, nBits); writeUInt(compressed + 16, 0); // room for future extensions return dataStart + data_length - compressed; } static bool hufUncompress(const char compressed[], int nCompressed, std::vector<unsigned short> *raw) { if (nCompressed == 0) { if (raw->size() != 0) return false; return false; } int im = readUInt(compressed); int iM = readUInt(compressed + 4); // int tableLength = readUInt (compressed + 8); int nBits = readUInt(compressed + 12); if (im < 0 || im >= HUF_ENCSIZE || iM < 0 || iM >= HUF_ENCSIZE) return false; const char *ptr = compressed + 20; // // Fast decoder needs at least 2x64-bits of compressed data, and // needs to be run-able on this platform. Otherwise, fall back // to the original decoder // // if (FastHufDecoder::enabled() && nBits > 128) //{ // FastHufDecoder fhd (ptr, nCompressed - (ptr - compressed), im, iM, iM); // fhd.decode ((unsigned char*)ptr, nBits, raw, nRaw); //} // else { std::vector<long long> freq(HUF_ENCSIZE); std::vector<HufDec> hdec(HUF_DECSIZE); hufClearDecTable(&hdec.at(0)); hufUnpackEncTable(&ptr, nCompressed - (ptr - compressed), im, iM, &freq.at(0)); { if (nBits > 8 * (nCompressed - (ptr - compressed))) { return false; } hufBuildDecTable(&freq.at(0), im, iM, &hdec.at(0)); hufDecode(&freq.at(0), &hdec.at(0), ptr, nBits, iM, raw->size(), raw->data()); } // catch (...) 
//{ // hufFreeDecTable (hdec); // throw; //} hufFreeDecTable(&hdec.at(0)); } return true; } // // Functions to compress the range of values in the pixel data // const int USHORT_RANGE = (1 << 16); const int BITMAP_SIZE = (USHORT_RANGE >> 3); static void bitmapFromData(const unsigned short data[/*nData*/], int nData, unsigned char bitmap[BITMAP_SIZE], unsigned short &minNonZero, unsigned short &maxNonZero) { for (int i = 0; i < BITMAP_SIZE; ++i) bitmap[i] = 0; for (int i = 0; i < nData; ++i) bitmap[data[i] >> 3] |= (1 << (data[i] & 7)); bitmap[0] &= ~1; // zero is not explicitly stored in // the bitmap; we assume that the // data always contain zeroes minNonZero = BITMAP_SIZE - 1; maxNonZero = 0; for (int i = 0; i < BITMAP_SIZE; ++i) { if (bitmap[i]) { if (minNonZero > i) minNonZero = i; if (maxNonZero < i) maxNonZero = i; } } } static unsigned short forwardLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[i] = k++; else lut[i] = 0; } return k - 1; // maximum value stored in lut[], } // i.e. number of ones in bitmap minus 1 static unsigned short reverseLutFromBitmap( const unsigned char bitmap[BITMAP_SIZE], unsigned short lut[USHORT_RANGE]) { int k = 0; for (int i = 0; i < USHORT_RANGE; ++i) { if ((i == 0) || (bitmap[i >> 3] & (1 << (i & 7)))) lut[k++] = i; } int n = k - 1; while (k < USHORT_RANGE) lut[k++] = 0; return n; // maximum k where lut[k] is non-zero, } // i.e. 
number of ones in bitmap minus 1 static void applyLut(const unsigned short lut[USHORT_RANGE], unsigned short data[/*nData*/], int nData) { for (int i = 0; i < nData; ++i) data[i] = lut[data[i]]; } #ifdef __clang__ #pragma clang diagnostic pop #endif // __clang__ #ifdef _MSC_VER #pragma warning(pop) #endif static bool CompressPiz(unsigned char *outPtr, unsigned int *outSize, const unsigned char *inPtr, size_t inSize, const std::vector<ChannelInfo> &channelInfo, int data_width, int num_lines) { std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. } assert(0); return false; #endif // Assume `inSize` is multiple of 2 or 4. std::vector<unsigned short> tmpBuffer(inSize / sizeof(unsigned short)); std::vector<PIZChannelData> channelData(channelInfo.size()); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t c = 0; c < channelData.size(); c++) { PIZChannelData &cd = channelData[c]; cd.start = tmpBufferEnd; cd.end = cd.start; cd.nx = data_width; cd.ny = num_lines; // cd.ys = c.channel().ySampling; size_t pixelSize = sizeof(int); // UINT and FLOAT if (channelInfo[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } cd.size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += cd.nx * cd.ny * cd.size; } const unsigned char *ptr = inPtr; for (int y = 0; y < num_lines; ++y) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(cd.end, ptr, n * sizeof(unsigned short)); ptr += n * sizeof(unsigned short); cd.end += n; } } bitmapFromData(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), bitmap.data(), minNonZero, maxNonZero); std::vector<unsigned short> lut(USHORT_RANGE); unsigned short maxValue = forwardLutFromBitmap(bitmap.data(), lut.data()); applyLut(lut.data(), &tmpBuffer.at(0), 
static_cast<int>(tmpBuffer.size())); // // Store range compression info in _outBuffer // char *buf = reinterpret_cast<char *>(outPtr); memcpy(buf, &minNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); memcpy(buf, &maxNonZero, sizeof(unsigned short)); buf += sizeof(unsigned short); if (minNonZero <= maxNonZero) { memcpy(buf, reinterpret_cast<char *>(&bitmap[0] + minNonZero), maxNonZero - minNonZero + 1); buf += maxNonZero - minNonZero + 1; } // // Apply wavelet encoding // for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { wav2Encode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Apply Huffman encoding; append the result to _outBuffer // // length header(4byte), then huff data. Initialize length header with zero, // then later fill it by `length`. char *lengthPtr = buf; int zero = 0; memcpy(buf, &zero, sizeof(int)); buf += sizeof(int); int length = hufCompress(&tmpBuffer.at(0), static_cast<int>(tmpBuffer.size()), buf); memcpy(lengthPtr, &length, sizeof(int)); (*outSize) = static_cast<unsigned int>( (reinterpret_cast<unsigned char *>(buf) - outPtr) + static_cast<unsigned int>(length)); // Use uncompressed data when compressed data is larger than uncompressed. // (Issue 40) if ((*outSize) >= inSize) { (*outSize) = static_cast<unsigned int>(inSize); memcpy(outPtr, inPtr, inSize); } return true; } static bool DecompressPiz(unsigned char *outPtr, const unsigned char *inPtr, size_t tmpBufSize, size_t inLen, int num_channels, const EXRChannelInfo *channels, int data_width, int num_lines) { if (inLen == tmpBufSize) { // Data is not compressed(Issue 40). memcpy(outPtr, inPtr, inLen); return true; } std::vector<unsigned char> bitmap(BITMAP_SIZE); unsigned short minNonZero; unsigned short maxNonZero; #if !MINIZ_LITTLE_ENDIAN // @todo { PIZ compression on BigEndian architecture. 
} assert(0); return false; #endif memset(bitmap.data(), 0, BITMAP_SIZE); const unsigned char *ptr = inPtr; // minNonZero = *(reinterpret_cast<const unsigned short *>(ptr)); tinyexr::cpy2(&minNonZero, reinterpret_cast<const unsigned short *>(ptr)); // maxNonZero = *(reinterpret_cast<const unsigned short *>(ptr + 2)); tinyexr::cpy2(&maxNonZero, reinterpret_cast<const unsigned short *>(ptr + 2)); ptr += 4; if (maxNonZero >= BITMAP_SIZE) { return false; } if (minNonZero <= maxNonZero) { memcpy(reinterpret_cast<char *>(&bitmap[0] + minNonZero), ptr, maxNonZero - minNonZero + 1); ptr += maxNonZero - minNonZero + 1; } std::vector<unsigned short> lut(USHORT_RANGE); memset(lut.data(), 0, sizeof(unsigned short) * USHORT_RANGE); unsigned short maxValue = reverseLutFromBitmap(bitmap.data(), lut.data()); // // Huffman decoding // int length; // length = *(reinterpret_cast<const int *>(ptr)); tinyexr::cpy4(&length, reinterpret_cast<const int *>(ptr)); ptr += sizeof(int); if (size_t((ptr - inPtr) + length) > inLen) { return false; } std::vector<unsigned short> tmpBuffer(tmpBufSize); hufUncompress(reinterpret_cast<const char *>(ptr), length, &tmpBuffer); // // Wavelet decoding // std::vector<PIZChannelData> channelData(static_cast<size_t>(num_channels)); unsigned short *tmpBufferEnd = &tmpBuffer.at(0); for (size_t i = 0; i < static_cast<size_t>(num_channels); ++i) { const EXRChannelInfo &chan = channels[i]; size_t pixelSize = sizeof(int); // UINT and FLOAT if (chan.pixel_type == TINYEXR_PIXELTYPE_HALF) { pixelSize = sizeof(short); } channelData[i].start = tmpBufferEnd; channelData[i].end = channelData[i].start; channelData[i].nx = data_width; channelData[i].ny = num_lines; // channelData[i].ys = 1; channelData[i].size = static_cast<int>(pixelSize / sizeof(short)); tmpBufferEnd += channelData[i].nx * channelData[i].ny * channelData[i].size; } for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; for (int j = 0; j < cd.size; ++j) { 
wav2Decode(cd.start + j, cd.nx, cd.size, cd.ny, cd.nx * cd.size, maxValue); } } // // Expand the pixel data to their original range // applyLut(lut.data(), &tmpBuffer.at(0), static_cast<int>(tmpBufSize)); for (int y = 0; y < num_lines; y++) { for (size_t i = 0; i < channelData.size(); ++i) { PIZChannelData &cd = channelData[i]; // if (modp (y, cd.ys) != 0) // continue; size_t n = static_cast<size_t>(cd.nx * cd.size); memcpy(outPtr, cd.end, static_cast<size_t>(n * sizeof(unsigned short))); outPtr += n * sizeof(unsigned short); cd.end += n; } } return true; } #endif // TINYEXR_USE_PIZ #if TINYEXR_USE_ZFP struct ZFPCompressionParam { double rate; int precision; double tolerance; int type; // TINYEXR_ZFP_COMPRESSIONTYPE_* ZFPCompressionParam() { type = TINYEXR_ZFP_COMPRESSIONTYPE_RATE; rate = 2.0; precision = 0; tolerance = 0.0f; } }; bool FindZFPCompressionParam(ZFPCompressionParam *param, const EXRAttribute *attributes, int num_attributes) { bool foundType = false; for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionType") == 0) && (attributes[i].size == 1)) { param->type = static_cast<int>(attributes[i].value[0]); foundType = true; } } if (!foundType) { return false; } if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionRate") == 0) && (attributes[i].size == 8)) { param->rate = *(reinterpret_cast<double *>(attributes[i].value)); return true; } } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionPrecision") == 0) && (attributes[i].size == 4)) { param->rate = *(reinterpret_cast<int *>(attributes[i].value)); return true; } } } else if (param->type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) { for (int i = 0; i < num_attributes; i++) { if ((strcmp(attributes[i].name, "zfpCompressionTolerance") == 0) && (attributes[i].size == 8)) { 
        param->tolerance = *(reinterpret_cast<double *>(attributes[i].value));
        return true;
      }
    }
  } else {
    assert(0);
  }

  return false;
}

// Assume pixel format is FLOAT for all channels.
static bool DecompressZfp(float *dst, int dst_width, int dst_num_lines,
                          int num_channels, const unsigned char *src,
                          unsigned long src_size,
                          const ZFPCompressionParam &param) {
  // NOTE(review): `uncompressed_size` is an element count while `src_size`
  // appears to be a byte count -- confirm the intended units of src_size.
  size_t uncompressed_size = dst_width * dst_num_lines * num_channels;

  if (uncompressed_size == src_size) {
    // Data is not compressed(Issue 40).
    memcpy(dst, src, src_size);
    // NOTE(review): no early return here, so execution falls through and
    // attempts a ZFP decode of the already-copied data. Looks unintended;
    // compare with the early `return true` in DecompressPiz (Issue 40).
  }

  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  // ZFP operates on 4x4 blocks; dimensions must be multiples of 4.
  assert((dst_width % 4) == 0);
  assert((dst_num_lines % 4) == 0);

  if ((dst_width & 3U) || (dst_num_lines & 3U)) {
    return false;
  }

  field =
      zfp_field_2d(reinterpret_cast<void *>(const_cast<unsigned char *>(src)),
                   zfp_type_float, dst_width, dst_num_lines * num_channels);
  zfp = zfp_stream_open(NULL);

  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, /* dimention */ 2,
                        /* write random access */ 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
  } else {
    assert(0);
  }

  size_t buf_size = zfp_stream_maximum_size(zfp, field);

  std::vector<unsigned char> buf(buf_size);
  memcpy(&buf.at(0), src, src_size);

  bitstream *stream = stream_open(&buf.at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);
  zfp_stream_rewind(zfp);

  size_t image_size = dst_width * dst_num_lines;

  for (int c = 0; c < num_channels; c++) {
    // decompress 4x4 pixel block.
    for (int y = 0; y < dst_num_lines; y += 4) {
      for (int x = 0; x < dst_width; x += 4) {
        float fblock[16];
        zfp_decode_block_float_2(zfp, fblock);
        // Scatter the 4x4 block back into the destination image.
        for (int j = 0; j < 4; j++) {
          for (int i = 0; i < 4; i++) {
            dst[c * image_size + ((y + j) * dst_width + (x + i))] =
                fblock[j * 4 + i];
          }
        }
      }
    }
  }

  zfp_field_free(field);
  zfp_stream_close(zfp);
  stream_close(stream);

  return true;
}

// Assume pixel format is FLOAT for all channels.
bool CompressZfp(std::vector<unsigned char> *outBuf, unsigned int *outSize,
                 const float *inPtr, int width, int num_lines, int num_channels,
                 const ZFPCompressionParam &param) {
  zfp_stream *zfp = NULL;
  zfp_field *field = NULL;

  // ZFP operates on 4x4 blocks; dimensions must be multiples of 4.
  assert((width % 4) == 0);
  assert((num_lines % 4) == 0);

  if ((width & 3U) || (num_lines & 3U)) {
    return false;
  }

  // create input array.
  field = zfp_field_2d(reinterpret_cast<void *>(const_cast<float *>(inPtr)),
                       zfp_type_float, width, num_lines * num_channels);

  zfp = zfp_stream_open(NULL);

  if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_RATE) {
    zfp_stream_set_rate(zfp, param.rate, zfp_type_float, 2, 0);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_PRECISION) {
    zfp_stream_set_precision(zfp, param.precision, zfp_type_float);
  } else if (param.type == TINYEXR_ZFP_COMPRESSIONTYPE_ACCURACY) {
    zfp_stream_set_accuracy(zfp, param.tolerance, zfp_type_float);
  } else {
    assert(0);
  }

  // Size the output for the worst case before opening the bit stream.
  size_t buf_size = zfp_stream_maximum_size(zfp, field);

  outBuf->resize(buf_size);

  bitstream *stream = stream_open(&outBuf->at(0), buf_size);
  zfp_stream_set_bit_stream(zfp, stream);
  zfp_field_free(field);

  size_t image_size = width * num_lines;

  for (int c = 0; c < num_channels; c++) {
    // compress 4x4 pixel block.
    for (int y = 0; y < num_lines; y += 4) {
      for (int x = 0; x < width; x += 4) {
        float fblock[16];
        // Gather a 4x4 block from the source image, then encode it.
        for (int j = 0; j < 4; j++) {
          for (int i = 0; i < 4; i++) {
            fblock[j * 4 + i] =
                inPtr[c * image_size + ((y + j) * width + (x + i))];
          }
        }
        zfp_encode_block_float_2(zfp, fblock);
      }
    }
  }

  // Flush pending bits and report the final compressed byte count.
  zfp_stream_flush(zfp);
  (*outSize) = zfp_stream_compressed_size(zfp);

  zfp_stream_close(zfp);

  return true;
}
#endif

//
// -----------------------------------------------------------------
//

// TODO(syoyo): Refactor function arguments.
static bool DecodePixelData(/* out */ unsigned char **out_images,
                            const int *requested_pixel_types,
                            const unsigned char *data_ptr, size_t data_len,
                            int compression_type, int line_order, int width,
                            int height, int x_stride, int y, int line_no,
                            int num_lines, size_t pixel_data_size,
                            size_t num_attributes,
                            const EXRAttribute *attributes, size_t num_channels,
                            const EXRChannelInfo *channels,
                            const std::vector<size_t> &channel_offset_list) {
  if (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {  // PIZ
#if TINYEXR_USE_PIZ
    if ((width == 0) || (num_lines == 0) || (pixel_data_size == 0)) {
      // Invalid input #90
      return false;
    }

    // Allocate original data size.
    std::vector<unsigned char> outBuf(static_cast<size_t>(
        static_cast<size_t>(width * num_lines) * pixel_data_size));
    size_t tmpBufLen = outBuf.size();

    bool ret = tinyexr::DecompressPiz(
        reinterpret_cast<unsigned char *>(&outBuf.at(0)), data_ptr, tmpBufLen,
        data_len, static_cast<int>(num_channels), channels, width, num_lines);

    if (!ret) {
      return false;
    }

    // For PIZ_COMPRESSION:
    //   pixel sample data for channel 0 for scanline 0
    //   pixel sample data for channel 1 for scanline 0
    //   pixel sample data for channel ... for scanline 0
    //   pixel sample data for channel n for scanline 0
    //   pixel sample data for channel 0 for scanline 1
    //   pixel sample data for channel 1 for scanline 1
    //   pixel sample data for channel ... for scanline 1
    //   pixel sample data for channel n for scanline 1
    //   ...
for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { FP16 hf; // hf.u = line_ptr[u]; // use `cpy` to avoid unaligned memory access when compiler's // optimization is on. tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; size_t offset = 0; if (line_order == 0) { offset = (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { offset = static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } image += offset; *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int 
**>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>(&outBuf.at( v * pixel_data_size * static_cast<size_t>(x_stride) + channel_offset_list[c] * static_cast<size_t>(x_stride))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += static_cast<size_t>( (height - 1 - (line_no + static_cast<int>(v)))) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); } } #else assert(0 && "PIZ is enabled in this build"); return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS || compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); assert(dstLen > 0); if (!tinyexr::DecompressZip( reinterpret_cast<unsigned char *>(&outBuf.at(0)), &dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For ZIP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... 
for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; size_t offset = 0; if (line_order == 0) { offset = (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { offset = (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } image += offset; *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * 
static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // Allocate original data size. 
std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = static_cast<unsigned long>(outBuf.size()); if (dstLen == 0) { return false; } if (!tinyexr::DecompressRle( reinterpret_cast<unsigned char *>(&outBuf.at(0)), dstLen, data_ptr, static_cast<unsigned long>(data_len))) { return false; } // For RLE_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &outBuf.at(v * static_cast<size_t>(pixel_data_size) * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *image = reinterpret_cast<unsigned short **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = hf.u; } else { // HALF -> FLOAT tinyexr::FP32 f32 = half_to_float(hf); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; 
} else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = f32.f; } } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const unsigned int *line_ptr = reinterpret_cast<unsigned int *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { unsigned int val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(&val); unsigned int *image = reinterpret_cast<unsigned int **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; // val = line_ptr[u]; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } } else if (compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam 
zfp_compression_param; if (!FindZFPCompressionParam(&zfp_compression_param, attributes, num_attributes)) { assert(0); return false; } // Allocate original data size. std::vector<unsigned char> outBuf(static_cast<size_t>(width) * static_cast<size_t>(num_lines) * pixel_data_size); unsigned long dstLen = outBuf.size(); assert(dstLen > 0); tinyexr::DecompressZfp(reinterpret_cast<float *>(&outBuf.at(0)), width, num_lines, num_channels, data_ptr, static_cast<unsigned long>(data_len), zfp_compression_param); // For ZFP_COMPRESSION: // pixel sample data for channel 0 for scanline 0 // pixel sample data for channel 1 for scanline 0 // pixel sample data for channel ... for scanline 0 // pixel sample data for channel n for scanline 0 // pixel sample data for channel 0 for scanline 1 // pixel sample data for channel 1 for scanline 1 // pixel sample data for channel ... for scanline 1 // pixel sample data for channel n for scanline 1 // ... for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { assert(channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { assert(requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT); for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { const float *line_ptr = reinterpret_cast<float *>( &outBuf.at(v * pixel_data_size * static_cast<size_t>(width) + channel_offset_list[c] * static_cast<size_t>(width))); for (size_t u = 0; u < static_cast<size_t>(width); u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); float *image = reinterpret_cast<float **>(out_images)[c]; if (line_order == 0) { image += (static_cast<size_t>(line_no) + v) * static_cast<size_t>(x_stride) + u; } else { image += (static_cast<size_t>(height) - 1U - (static_cast<size_t>(line_no) + v)) * static_cast<size_t>(x_stride) + u; } *image = val; } } } else { assert(0); return false; } } #else (void)attributes; (void)num_attributes; (void)num_channels; assert(0); 
return false; #endif } else if (compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { for (size_t c = 0; c < num_channels; c++) { for (size_t v = 0; v < static_cast<size_t>(num_lines); v++) { if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { const unsigned short *line_ptr = reinterpret_cast<const unsigned short *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { unsigned short *outLine = reinterpret_cast<unsigned short *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); outLine[u] = hf.u; } } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } if (reinterpret_cast<const unsigned char *>(line_ptr + width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { tinyexr::FP16 hf; // address may not be aliged. 
use byte-wise copy for safety.#76 // hf.u = line_ptr[u]; tinyexr::cpy2(&(hf.u), line_ptr + u); tinyexr::swap2(reinterpret_cast<unsigned short *>(&hf.u)); tinyexr::FP32 f32 = half_to_float(hf); outLine[u] = f32.f; } } else { assert(0); return false; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { const float *line_ptr = reinterpret_cast<const float *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); float *outLine = reinterpret_cast<float *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } if (reinterpret_cast<const unsigned char *>(line_ptr + width) > (data_ptr + data_len)) { // Insufficient data size return false; } for (int u = 0; u < width; u++) { float val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { const unsigned int *line_ptr = reinterpret_cast<const unsigned int *>( data_ptr + v * pixel_data_size * size_t(width) + channel_offset_list[c] * static_cast<size_t>(width)); unsigned int *outLine = reinterpret_cast<unsigned int *>(out_images[c]); if (line_order == 0) { outLine += (size_t(y) + v) * size_t(x_stride); } else { outLine += (size_t(height) - 1 - (size_t(y) + v)) * size_t(x_stride); } for (int u = 0; u < width; u++) { if (reinterpret_cast<const unsigned char *>(line_ptr + u) >= (data_ptr + data_len)) { // Corrupsed data? 
return false; } unsigned int val; tinyexr::cpy4(&val, line_ptr + u); tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); outLine[u] = val; } } } } } return true; } static bool DecodeTiledPixelData( unsigned char **out_images, int *width, int *height, const int *requested_pixel_types, const unsigned char *data_ptr, size_t data_len, int compression_type, int line_order, int data_width, int data_height, int tile_offset_x, int tile_offset_y, int tile_size_x, int tile_size_y, size_t pixel_data_size, size_t num_attributes, const EXRAttribute *attributes, size_t num_channels, const EXRChannelInfo *channels, const std::vector<size_t> &channel_offset_list) { assert(tile_offset_x * tile_size_x < data_width); assert(tile_offset_y * tile_size_y < data_height); // Compute actual image size in a tile. if ((tile_offset_x + 1) * tile_size_x >= data_width) { (*width) = data_width - (tile_offset_x * tile_size_x); } else { (*width) = tile_size_x; } if ((tile_offset_y + 1) * tile_size_y >= data_height) { (*height) = data_height - (tile_offset_y * tile_size_y); } else { (*height) = tile_size_y; } // Image size = tile size. 
return DecodePixelData(out_images, requested_pixel_types, data_ptr, data_len, compression_type, line_order, (*width), tile_size_y, /* stride */ tile_size_x, /* y */ 0, /* line_no */ 0, (*height), pixel_data_size, num_attributes, attributes, num_channels, channels, channel_offset_list); } static bool ComputeChannelLayout(std::vector<size_t> *channel_offset_list, int *pixel_data_size, size_t *channel_offset, int num_channels, const EXRChannelInfo *channels) { channel_offset_list->resize(static_cast<size_t>(num_channels)); (*pixel_data_size) = 0; (*channel_offset) = 0; for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { (*channel_offset_list)[c] = (*channel_offset); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { (*pixel_data_size) += sizeof(unsigned short); (*channel_offset) += sizeof(unsigned short); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { (*pixel_data_size) += sizeof(float); (*channel_offset) += sizeof(float); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { (*pixel_data_size) += sizeof(unsigned int); (*channel_offset) += sizeof(unsigned int); } else { // ??? return false; } } return true; } static unsigned char **AllocateImage(int num_channels, const EXRChannelInfo *channels, const int *requested_pixel_types, int data_width, int data_height) { unsigned char **images = reinterpret_cast<unsigned char **>(static_cast<float **>( malloc(sizeof(float *) * static_cast<size_t>(num_channels)))); for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) { size_t data_len = static_cast<size_t>(data_width) * static_cast<size_t>(data_height); if (channels[c].pixel_type == TINYEXR_PIXELTYPE_HALF) { // pixel_data_size += sizeof(unsigned short); // channel_offset += sizeof(unsigned short); // Alloc internal image for half type. 
if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { images[c] = reinterpret_cast<unsigned char *>(static_cast<unsigned short *>( malloc(sizeof(unsigned short) * data_len))); } else if (requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else { assert(0); } } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_FLOAT) { // pixel_data_size += sizeof(float); // channel_offset += sizeof(float); images[c] = reinterpret_cast<unsigned char *>( static_cast<float *>(malloc(sizeof(float) * data_len))); } else if (channels[c].pixel_type == TINYEXR_PIXELTYPE_UINT) { // pixel_data_size += sizeof(unsigned int); // channel_offset += sizeof(unsigned int); images[c] = reinterpret_cast<unsigned char *>( static_cast<unsigned int *>(malloc(sizeof(unsigned int) * data_len))); } else { assert(0); } } return images; } static int ParseEXRHeader(HeaderInfo *info, bool *empty_header, const EXRVersion *version, std::string *err, const unsigned char *buf, size_t size) { const char *marker = reinterpret_cast<const char *>(&buf[0]); if (empty_header) { (*empty_header) = false; } if (version->multipart) { if (size > 0 && marker[0] == '\0') { // End of header list. 
if (empty_header) { (*empty_header) = true; } return TINYEXR_SUCCESS; } } // According to the spec, the header of every OpenEXR file must contain at // least the following attributes: // // channels chlist // compression compression // dataWindow box2i // displayWindow box2i // lineOrder lineOrder // pixelAspectRatio float // screenWindowCenter v2f // screenWindowWidth float bool has_channels = false; bool has_compression = false; bool has_data_window = false; bool has_display_window = false; bool has_line_order = false; bool has_pixel_aspect_ratio = false; bool has_screen_window_center = false; bool has_screen_window_width = false; info->data_window[0] = 0; info->data_window[1] = 0; info->data_window[2] = 0; info->data_window[3] = 0; info->line_order = 0; // @fixme info->display_window[0] = 0; info->display_window[1] = 0; info->display_window[2] = 0; info->display_window[3] = 0; info->screen_window_center[0] = 0.0f; info->screen_window_center[1] = 0.0f; info->screen_window_width = -1.0f; info->pixel_aspect_ratio = -1.0f; info->tile_size_x = -1; info->tile_size_y = -1; info->tile_level_mode = -1; info->tile_rounding_mode = -1; info->attributes.clear(); // Read attributes size_t orig_size = size; for (size_t nattr = 0; nattr < TINYEXR_MAX_HEADER_ATTRIBUTES; nattr++) { if (0 == size) { if (err) { (*err) += "Insufficient data size for attributes.\n"; } return TINYEXR_ERROR_INVALID_DATA; } else if (marker[0] == '\0') { size--; break; } std::string attr_name; std::string attr_type; std::vector<unsigned char> data; size_t marker_size; if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size, marker, size)) { if (err) { (*err) += "Failed to read attribute.\n"; } return TINYEXR_ERROR_INVALID_DATA; } marker += marker_size; size -= marker_size; if (version->tiled && attr_name.compare("tiles") == 0) { unsigned int x_size, y_size; unsigned char tile_mode; assert(data.size() == 9); memcpy(&x_size, &data.at(0), sizeof(int)); memcpy(&y_size, &data.at(4), 
sizeof(int)); tile_mode = data[8]; tinyexr::swap4(&x_size); tinyexr::swap4(&y_size); info->tile_size_x = static_cast<int>(x_size); info->tile_size_y = static_cast<int>(y_size); // mode = levelMode + roundingMode * 16 info->tile_level_mode = tile_mode & 0x3; info->tile_rounding_mode = (tile_mode >> 4) & 0x1; } else if (attr_name.compare("compression") == 0) { bool ok = false; if (data[0] < TINYEXR_COMPRESSIONTYPE_PIZ) { ok = true; } if (data[0] == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ ok = true; #else if (err) { (*err) = "PIZ compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (data[0] == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP ok = true; #else if (err) { (*err) = "ZFP compression is not supported."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; #endif } if (!ok) { if (err) { (*err) = "Unknown compression type."; } return TINYEXR_ERROR_UNSUPPORTED_FORMAT; } info->compression_type = static_cast<int>(data[0]); has_compression = true; } else if (attr_name.compare("channels") == 0) { // name: zero-terminated string, from 1 to 255 bytes long // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2 // pLinear: unsigned char, possible values are 0 and 1 // reserved: three chars, should be zero // xSampling: int // ySampling: int if (!ReadChannelInfo(info->channels, data)) { if (err) { (*err) += "Failed to parse channel info.\n"; } return TINYEXR_ERROR_INVALID_DATA; } if (info->channels.size() < 1) { if (err) { (*err) += "# of channels is zero.\n"; } return TINYEXR_ERROR_INVALID_DATA; } has_channels = true; } else if (attr_name.compare("dataWindow") == 0) { if (data.size() >= 16) { memcpy(&info->data_window[0], &data.at(0), sizeof(int)); memcpy(&info->data_window[1], &data.at(4), sizeof(int)); memcpy(&info->data_window[2], &data.at(8), sizeof(int)); memcpy(&info->data_window[3], &data.at(12), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[0])); 
tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->data_window[3])); has_data_window = true; } } else if (attr_name.compare("displayWindow") == 0) { if (data.size() >= 16) { memcpy(&info->display_window[0], &data.at(0), sizeof(int)); memcpy(&info->display_window[1], &data.at(4), sizeof(int)); memcpy(&info->display_window[2], &data.at(8), sizeof(int)); memcpy(&info->display_window[3], &data.at(12), sizeof(int)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[1])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[2])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->display_window[3])); has_display_window = true; } } else if (attr_name.compare("lineOrder") == 0) { if (data.size() >= 1) { info->line_order = static_cast<int>(data[0]); has_line_order = true; } } else if (attr_name.compare("pixelAspectRatio") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->pixel_aspect_ratio, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->pixel_aspect_ratio)); has_pixel_aspect_ratio = true; } } else if (attr_name.compare("screenWindowCenter") == 0) { if (data.size() >= 8) { memcpy(&info->screen_window_center[0], &data.at(0), sizeof(float)); memcpy(&info->screen_window_center[1], &data.at(4), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_center[1])); has_screen_window_center = true; } } else if (attr_name.compare("screenWindowWidth") == 0) { if (data.size() >= sizeof(float)) { memcpy(&info->screen_window_width, &data.at(0), sizeof(float)); tinyexr::swap4( reinterpret_cast<unsigned int *>(&info->screen_window_width)); has_screen_window_width = 
true; } } else if (attr_name.compare("chunkCount") == 0) { if (data.size() >= sizeof(int)) { memcpy(&info->chunk_count, &data.at(0), sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&info->chunk_count)); } } else { // Custom attribute(up to TINYEXR_MAX_CUSTOM_ATTRIBUTES) if (info->attributes.size() < TINYEXR_MAX_CUSTOM_ATTRIBUTES) { EXRAttribute attrib; #ifdef _MSC_VER strncpy_s(attrib.name, attr_name.c_str(), 255); strncpy_s(attrib.type, attr_type.c_str(), 255); #else strncpy(attrib.name, attr_name.c_str(), 255); strncpy(attrib.type, attr_type.c_str(), 255); #endif attrib.name[255] = '\0'; attrib.type[255] = '\0'; attrib.size = static_cast<int>(data.size()); attrib.value = static_cast<unsigned char *>(malloc(data.size())); memcpy(reinterpret_cast<char *>(attrib.value), &data.at(0), data.size()); info->attributes.push_back(attrib); } } } // Check if required attributes exist { std::stringstream ss_err; if (!has_compression) { ss_err << "\"compression\" attribute not found in the header." << std::endl; } if (!has_channels) { ss_err << "\"channels\" attribute not found in the header." << std::endl; } if (!has_line_order) { ss_err << "\"lineOrder\" attribute not found in the header." << std::endl; } if (!has_display_window) { ss_err << "\"displayWindow\" attribute not found in the header." << std::endl; } if (!has_data_window) { ss_err << "\"dataWindow\" attribute not found in the header or invalid." << std::endl; } if (!has_pixel_aspect_ratio) { ss_err << "\"pixelAspectRatio\" attribute not found in the header." << std::endl; } if (!has_screen_window_width) { ss_err << "\"screenWindowWidth\" attribute not found in the header." << std::endl; } if (!has_screen_window_center) { ss_err << "\"screenWindowCenter\" attribute not found in the header." 
<< std::endl; } if (!(ss_err.str().empty())) { if (err) { (*err) += ss_err.str(); } return TINYEXR_ERROR_INVALID_HEADER; } } info->header_len = static_cast<unsigned int>(orig_size - size); return TINYEXR_SUCCESS; } // C++ HeaderInfo to C EXRHeader conversion. static void ConvertHeader(EXRHeader *exr_header, const HeaderInfo &info) { exr_header->pixel_aspect_ratio = info.pixel_aspect_ratio; exr_header->screen_window_center[0] = info.screen_window_center[0]; exr_header->screen_window_center[1] = info.screen_window_center[1]; exr_header->screen_window_width = info.screen_window_width; exr_header->chunk_count = info.chunk_count; exr_header->display_window[0] = info.display_window[0]; exr_header->display_window[1] = info.display_window[1]; exr_header->display_window[2] = info.display_window[2]; exr_header->display_window[3] = info.display_window[3]; exr_header->data_window[0] = info.data_window[0]; exr_header->data_window[1] = info.data_window[1]; exr_header->data_window[2] = info.data_window[2]; exr_header->data_window[3] = info.data_window[3]; exr_header->line_order = info.line_order; exr_header->compression_type = info.compression_type; exr_header->tile_size_x = info.tile_size_x; exr_header->tile_size_y = info.tile_size_y; exr_header->tile_level_mode = info.tile_level_mode; exr_header->tile_rounding_mode = info.tile_rounding_mode; exr_header->num_channels = static_cast<int>(info.channels.size()); exr_header->channels = static_cast<EXRChannelInfo *>(malloc( sizeof(EXRChannelInfo) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { #ifdef _MSC_VER strncpy_s(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #else strncpy(exr_header->channels[c].name, info.channels[c].name.c_str(), 255); #endif // manually add '\0' for safety. 
exr_header->channels[c].name[255] = '\0'; exr_header->channels[c].pixel_type = info.channels[c].pixel_type; exr_header->channels[c].p_linear = info.channels[c].p_linear; exr_header->channels[c].x_sampling = info.channels[c].x_sampling; exr_header->channels[c].y_sampling = info.channels[c].y_sampling; } exr_header->pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->pixel_types[c] = info.channels[c].pixel_type; } // Initially fill with values of `pixel_types` exr_header->requested_pixel_types = static_cast<int *>( malloc(sizeof(int) * static_cast<size_t>(exr_header->num_channels))); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { exr_header->requested_pixel_types[c] = info.channels[c].pixel_type; } exr_header->num_custom_attributes = static_cast<int>(info.attributes.size()); if (exr_header->num_custom_attributes > 0) { // TODO(syoyo): Report warning when # of attributes exceeds // `TINYEXR_MAX_CUSTOM_ATTRIBUTES` if (exr_header->num_custom_attributes > TINYEXR_MAX_CUSTOM_ATTRIBUTES) { exr_header->num_custom_attributes = TINYEXR_MAX_CUSTOM_ATTRIBUTES; } exr_header->custom_attributes = static_cast<EXRAttribute *>(malloc( sizeof(EXRAttribute) * size_t(exr_header->num_custom_attributes))); for (size_t i = 0; i < info.attributes.size(); i++) { memcpy(exr_header->custom_attributes[i].name, info.attributes[i].name, 256); memcpy(exr_header->custom_attributes[i].type, info.attributes[i].type, 256); exr_header->custom_attributes[i].size = info.attributes[i].size; // Just copy poiner exr_header->custom_attributes[i].value = info.attributes[i].value; } } else { exr_header->custom_attributes = NULL; } exr_header->header_len = info.header_len; } static int DecodeChunk(EXRImage *exr_image, const EXRHeader *exr_header, const std::vector<tinyexr::tinyexr_uint64> &offsets, const unsigned char *head, const size_t size, 
std::string *err) { int num_channels = exr_header->num_channels; int num_scanline_blocks = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanline_blocks = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanline_blocks = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanline_blocks = 16; } int data_width = exr_header->data_window[2] - exr_header->data_window[0] + 1; int data_height = exr_header->data_window[3] - exr_header->data_window[1] + 1; if ((data_width < 0) || (data_height < 0)) { if (err) { std::stringstream ss; ss << "Invalid data width or data height: " << data_width << ", " << data_height << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } // Do not allow too large data_width and data_height. header invalid? { const int threshold = 1024 * 8192; // heuristics if ((data_width > threshold) || (data_height > threshold)) { if (err) { std::stringstream ss; ss << "data_with or data_height too large. data_width: " << data_width << ", " << "data_height = " << data_height << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } } size_t num_blocks = offsets.size(); std::vector<size_t> channel_offset_list; int pixel_data_size = 0; size_t channel_offset = 0; if (!tinyexr::ComputeChannelLayout(&channel_offset_list, &pixel_data_size, &channel_offset, num_channels, exr_header->channels)) { if (err) { (*err) += "Failed to compute channel layout.\n"; } return TINYEXR_ERROR_INVALID_DATA; } bool invalid_data = false; // TODO(LTE): Use atomic lock for MT safety. 
if (exr_header->tiled) { // value check if (exr_header->tile_size_x < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size x : " << exr_header->tile_size_x << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } if (exr_header->tile_size_y < 0) { if (err) { std::stringstream ss; ss << "Invalid tile size y : " << exr_header->tile_size_y << "\n"; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_HEADER; } size_t num_tiles = offsets.size(); // = # of blocks exr_image->tiles = static_cast<EXRTile *>( calloc(sizeof(EXRTile), static_cast<size_t>(num_tiles))); int err_code = TINYEXR_SUCCESS; #if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0) std::vector<std::thread> workers; std::atomic<size_t> tile_count(0); int num_threads = std::max(1, int(std::thread::hardware_concurrency())); if (num_threads > int(num_tiles)) { num_threads = int(num_tiles); } for (int t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&]() { size_t tile_idx = 0; while ((tile_idx = tile_count++) < num_tiles) { #else for (size_t tile_idx = 0; tile_idx < num_tiles; tile_idx++) { #endif // Allocate memory for each tile. 
exr_image->tiles[tile_idx].images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, exr_header->tile_size_x, exr_header->tile_size_y); // 16 byte: tile coordinates // 4 byte : data size // ~ : data(uncompressed or compressed) if (offsets[tile_idx] + sizeof(int) * 5 > size) { // TODO(LTE): atomic if (err) { (*err) += "Insufficient data size.\n"; } err_code = TINYEXR_ERROR_INVALID_DATA; break; } size_t data_size = size_t(size - (offsets[tile_idx] + sizeof(int) * 5)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[tile_idx]); int tile_coordinates[4]; memcpy(tile_coordinates, data_ptr, sizeof(int) * 4); tinyexr::swap4( reinterpret_cast<unsigned int *>(&tile_coordinates[0])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&tile_coordinates[1])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&tile_coordinates[2])); tinyexr::swap4( reinterpret_cast<unsigned int *>(&tile_coordinates[3])); // @todo{ LoD } if (tile_coordinates[2] != 0) { err_code = TINYEXR_ERROR_UNSUPPORTED_FEATURE; break; } if (tile_coordinates[3] != 0) { err_code = TINYEXR_ERROR_UNSUPPORTED_FEATURE; break; } int data_len; memcpy(&data_len, data_ptr + 16, sizeof(int)); // 16 = sizeof(tile_coordinates) tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (data_len < 4 || size_t(data_len) > data_size) { // TODO(LTE): atomic if (err) { (*err) += "Insufficient data length.\n"; } err_code = TINYEXR_ERROR_INVALID_DATA; break; } // Move to data addr: 20 = 16 + 4; data_ptr += 20; bool ret = tinyexr::DecodeTiledPixelData( exr_image->tiles[tile_idx].images, &(exr_image->tiles[tile_idx].width), &(exr_image->tiles[tile_idx].height), exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, tile_coordinates[0], tile_coordinates[1], exr_header->tile_size_x, exr_header->tile_size_y, static_cast<size_t>(pixel_data_size), 
static_cast<size_t>(exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list); if (!ret) { // TODO(LTE): atomic if (err) { (*err) += "Failed to decode tile data.\n"; } err_code = TINYEXR_ERROR_INVALID_DATA; } exr_image->tiles[tile_idx].offset_x = tile_coordinates[0]; exr_image->tiles[tile_idx].offset_y = tile_coordinates[1]; exr_image->tiles[tile_idx].level_x = tile_coordinates[2]; exr_image->tiles[tile_idx].level_y = tile_coordinates[3]; #if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0) } })); } // num_thread loop for (auto &t : workers) { t.join(); } #else } #endif if (err_code != TINYEXR_SUCCESS) { return err_code; } exr_image->num_tiles = static_cast<int>(num_tiles); } else { // scanline format // Don't allow too large image(256GB * pixel_data_size or more). Workaround // for #104. size_t total_data_len = size_t(data_width) * size_t(data_height) * size_t(num_channels); const bool total_data_len_overflown = sizeof(void *) == 8 ? 
(total_data_len >= 0x4000000000) : false; if ((total_data_len == 0) || total_data_len_overflown) { if (err) { std::stringstream ss; ss << "Image data size is zero or too large: width = " << data_width << ", height = " << data_height << ", channels = " << num_channels << std::endl; (*err) += ss.str(); } return TINYEXR_ERROR_INVALID_DATA; } exr_image->images = tinyexr::AllocateImage( num_channels, exr_header->channels, exr_header->requested_pixel_types, data_width, data_height); #if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0) std::vector<std::thread> workers; std::atomic<int> y_count(0); int num_threads = std::max(1, int(std::thread::hardware_concurrency())); if (num_threads > int(num_blocks)) { num_threads = int(num_blocks); } for (int t = 0; t < num_threads; t++) { workers.emplace_back(std::thread([&]() { int y = 0; while ((y = y_count++) < int(num_blocks)) { #else #if TINYEXR_USE_OPENMP #pragma omp parallel for #endif for (int y = 0; y < static_cast<int>(num_blocks); y++) { #endif size_t y_idx = static_cast<size_t>(y); if (offsets[y_idx] + sizeof(int) * 2 > size) { invalid_data = true; } else { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed or compressed) size_t data_size = size_t(size - (offsets[y_idx] + sizeof(int) * 2)); const unsigned char *data_ptr = reinterpret_cast<const unsigned char *>(head + offsets[y_idx]); int line_no; memcpy(&line_no, data_ptr, sizeof(int)); int data_len; memcpy(&data_len, data_ptr + 4, sizeof(int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len)); if (size_t(data_len) > data_size) { invalid_data = true; } else if ((line_no > (2 << 20)) || (line_no < -(2 << 20))) { // Too large value. Assume this is invalid // 2**20 = 1048576 = heuristic value. invalid_data = true; } else if (data_len == 0) { // TODO(syoyo): May be ok to raise the threshold for example // `data_len < 4` invalid_data = true; } else { // line_no may be negative. 
int end_line_no = (std::min)(line_no + num_scanline_blocks, (exr_header->data_window[3] + 1)); int num_lines = end_line_no - line_no; if (num_lines <= 0) { invalid_data = true; } else { // Move to data addr: 8 = 4 + 4; data_ptr += 8; // Adjust line_no with data_window.bmin.y // overflow check tinyexr_int64 lno = static_cast<tinyexr_int64>(line_no) - static_cast<tinyexr_int64>(exr_header->data_window[1]); if (lno > std::numeric_limits<int>::max()) { line_no = -1; // invalid } else if (lno < -std::numeric_limits<int>::max()) { line_no = -1; // invalid } else { line_no -= exr_header->data_window[1]; } if (line_no < 0) { invalid_data = true; } else { if (!tinyexr::DecodePixelData( exr_image->images, exr_header->requested_pixel_types, data_ptr, static_cast<size_t>(data_len), exr_header->compression_type, exr_header->line_order, data_width, data_height, data_width, y, line_no, num_lines, static_cast<size_t>(pixel_data_size), static_cast<size_t>( exr_header->num_custom_attributes), exr_header->custom_attributes, static_cast<size_t>(exr_header->num_channels), exr_header->channels, channel_offset_list)) { invalid_data = true; } } } } } #if (__cplusplus > 199711L) && (TINYEXR_USE_THREAD > 0) } })); } for (auto &t : workers) { t.join(); } #else } // omp parallel #endif } if (invalid_data) { if (err) { std::stringstream ss; (*err) += "Invalid data found when decoding pixels.\n"; } return TINYEXR_ERROR_INVALID_DATA; } // Overwrite `pixel_type` with `requested_pixel_type`. 
  {
    for (int c = 0; c < exr_header->num_channels; c++) {
      exr_header->pixel_types[c] = exr_header->requested_pixel_types[c];
    }
  }

  // Export decoded image dimensions.
  {
    exr_image->num_channels = num_channels;

    exr_image->width = data_width;
    exr_image->height = data_height;
  }

  return TINYEXR_SUCCESS;
}

// Reconstructs the scanline chunk offset table by walking the chunks
// sequentially, for files whose stored offset table is incomplete/zeroed.
// `marker` must point at the first chunk (just past the offset table).
// Each chunk begins with two 32-bit values: the scanline y, then the
// compressed data length; the chunk payload of `data_len` bytes follows.
// On success, (*offsets)[i] holds the byte offset (relative to `head`) of
// chunk i, for all `n` chunks. Returns false when a chunk header would run
// past `size` bytes, or when a chunk claims a data length >= the whole
// stream (both indicate corrupt data).
// See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for the
// reference implementation this mirrors.
static bool ReconstructLineOffsets(
    std::vector<tinyexr::tinyexr_uint64> *offsets, size_t n,
    const unsigned char *head, const unsigned char *marker, const size_t size) {
  assert(head < marker);
  assert(offsets->size() == n);

  for (size_t i = 0; i < n; i++) {
    size_t offset = static_cast<size_t>(marker - head);
    // Offset should not exceed whole EXR file/data size.
    if ((offset + sizeof(tinyexr::tinyexr_uint64)) >= size) {
      return false;
    }

    int y;
    unsigned int data_len;

    memcpy(&y, marker, sizeof(int));
    memcpy(&data_len, marker + 4, sizeof(unsigned int));

    // A single chunk cannot be larger than the whole stream.
    if (data_len >= size) {
      return false;
    }

    tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
    tinyexr::swap4(reinterpret_cast<unsigned int *>(&data_len));

    (*offsets)[i] = offset;

    marker += data_len + 8;  // 8 = 4 bytes(y) + 4 bytes(data_len)
  }

  return true;
}

// Decodes scanline or tiled EXR pixel data into `exr_image`.
// `head` points at the start of the EXR stream (magic number), `marker`
// at the chunk offset table, and `size` is the total stream size.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code; a human-readable
// message is stored through `err` when non-NULL.
static int DecodeEXRImage(EXRImage *exr_image, const EXRHeader *exr_header,
                          const unsigned char *head,
                          const unsigned char *marker, const size_t size,
                          const char **err) {
  if (exr_image == NULL || exr_header == NULL || head == NULL ||
      marker == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage("Invalid argument for DecodeEXRImage().", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // Number of scanlines packed into one compressed chunk depends on the
  // compression scheme (NONE/RLE/ZIPS = 1).
  int num_scanline_blocks = 1;
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
    num_scanline_blocks = 16;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    num_scanline_blocks = 32;
  } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    num_scanline_blocks = 16;
  }

  // data_window is {xmin, ymin, xmax, ymax}; width/height are inclusive
  // ranges, hence the later ++ after the overflow checks.
  int data_width = exr_header->data_window[2] - exr_header->data_window[0];
  if (data_width >= std::numeric_limits<int>::max()) {
    // Issue 63
    tinyexr::SetErrorMessage("Invalid data width value", err);
return TINYEXR_ERROR_INVALID_DATA; } data_width++; int data_height = exr_header->data_window[3] - exr_header->data_window[1]; if (data_height >= std::numeric_limits<int>::max()) { tinyexr::SetErrorMessage("Invalid data height value", err); return TINYEXR_ERROR_INVALID_DATA; } data_height++; if ((data_width < 0) || (data_height < 0)) { tinyexr::SetErrorMessage("data width or data height is negative.", err); return TINYEXR_ERROR_INVALID_DATA; } // Do not allow too large data_width and data_height. header invalid? { const int threshold = 1024 * 8192; // heuristics if (data_width > threshold) { tinyexr::SetErrorMessage("data width too large.", err); return TINYEXR_ERROR_INVALID_DATA; } if (data_height > threshold) { tinyexr::SetErrorMessage("data height too large.", err); return TINYEXR_ERROR_INVALID_DATA; } } // Read offset tables. size_t num_blocks = 0; if (exr_header->chunk_count > 0) { // Use `chunkCount` attribute. num_blocks = static_cast<size_t>(exr_header->chunk_count); } else if (exr_header->tiled) { // @todo { LoD } size_t num_x_tiles = static_cast<size_t>(data_width) / static_cast<size_t>(exr_header->tile_size_x); if (num_x_tiles * static_cast<size_t>(exr_header->tile_size_x) < static_cast<size_t>(data_width)) { num_x_tiles++; } size_t num_y_tiles = static_cast<size_t>(data_height) / static_cast<size_t>(exr_header->tile_size_y); if (num_y_tiles * static_cast<size_t>(exr_header->tile_size_y) < static_cast<size_t>(data_height)) { num_y_tiles++; } num_blocks = num_x_tiles * num_y_tiles; } else { num_blocks = static_cast<size_t>(data_height) / static_cast<size_t>(num_scanline_blocks); if (num_blocks * static_cast<size_t>(num_scanline_blocks) < static_cast<size_t>(data_height)) { num_blocks++; } } std::vector<tinyexr::tinyexr_uint64> offsets(num_blocks); for (size_t y = 0; y < num_blocks; y++) { tinyexr::tinyexr_uint64 offset; // Issue #81 if ((marker + sizeof(tinyexr_uint64)) >= (head + size)) { tinyexr::SetErrorMessage("Insufficient data size in offset table.", 
err); return TINYEXR_ERROR_INVALID_DATA; } memcpy(&offset, marker, sizeof(tinyexr::tinyexr_uint64)); tinyexr::swap8(&offset); if (offset >= size) { tinyexr::SetErrorMessage("Invalid offset value in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } marker += sizeof(tinyexr::tinyexr_uint64); // = 8 offsets[y] = offset; } // If line offsets are invalid, we try to reconstruct it. // See OpenEXR/IlmImf/ImfScanLineInputFile.cpp::readLineOffsets() for details. for (size_t y = 0; y < num_blocks; y++) { if (offsets[y] <= 0) { // TODO(syoyo) Report as warning? // if (err) { // stringstream ss; // ss << "Incomplete lineOffsets." << std::endl; // (*err) += ss.str(); //} bool ret = ReconstructLineOffsets(&offsets, num_blocks, head, marker, size); if (ret) { // OK break; } else { tinyexr::SetErrorMessage( "Cannot reconstruct lineOffset table in DecodeEXRImage.", err); return TINYEXR_ERROR_INVALID_DATA; } } } { std::string e; int ret = DecodeChunk(exr_image, exr_header, offsets, head, size, &e); if (ret != TINYEXR_SUCCESS) { if (!e.empty()) { tinyexr::SetErrorMessage(e, err); } #if 1 FreeEXRImage(exr_image); #else // release memory(if exists) if ((exr_header->num_channels > 0) && exr_image && exr_image->images) { for (size_t c = 0; c < size_t(exr_header->num_channels); c++) { if (exr_image->images[c]) { free(exr_image->images[c]); exr_image->images[c] = NULL; } } free(exr_image->images); exr_image->images = NULL; } #endif } return ret; } } static void GetLayers(const EXRHeader& exr_header, std::vector<std::string>& layer_names) { // Naive implementation // Group channels by layers // go over all channel names, split by periods // collect unique names layer_names.clear(); for (int c = 0; c < exr_header.num_channels; c++) { std::string full_name(exr_header.channels[c].name); const size_t pos = full_name.find_last_of('.'); if (pos != std::string::npos && pos != 0 && pos + 1 < full_name.size()) { full_name.erase(pos); if (std::find(layer_names.begin(), layer_names.end(), 
                    full_name) == layer_names.end())
        // First time we see this layer prefix; record it.
        layer_names.push_back(full_name);
    }
  }
}

// A channel belonging to a layer: `index` is the channel's position in
// EXRHeader::channels, `name` is the channel name with the layer prefix
// stripped.
struct LayerChannel {
  explicit LayerChannel (size_t i, std::string n)
    : index(i)
    , name(n)
  {}
  size_t index;
  std::string name;
};

// Collects into `channels` the channels of `exr_header` that belong to
// `layer_name`. With an empty `layer_name`, every channel is included and
// any "layer." prefix up to the last '.' is stripped from its name.
// With a non-empty `layer_name`, only channels containing "layer_name."
// are kept; the prefix is stripped only when it starts the channel name
// (a match later in the name keeps the full channel name).
static void ChannelsInLayer(const EXRHeader& exr_header,
                            const std::string layer_name,
                            std::vector<LayerChannel>& channels) {
  channels.clear();
  for (int c = 0; c < exr_header.num_channels; c++) {
    std::string ch_name(exr_header.channels[c].name);
    if (layer_name.empty()) {
      const size_t pos = ch_name.find_last_of('.');
      if (pos != std::string::npos && pos < ch_name.size()) {
        ch_name = ch_name.substr(pos + 1);
      }
    } else {
      const size_t pos = ch_name.find(layer_name + '.');
      if (pos == std::string::npos) continue;
      if (pos == 0) {
        ch_name = ch_name.substr(layer_name.size() + 1);
      }
    }
    LayerChannel ch(size_t(c), ch_name);
    channels.push_back(ch);
  }
}

}  // namespace tinyexr

// Public API: lists the unique layer names found in `filename`.
// On success, *layer_names receives a malloc'ed array of *num_layers
// strdup'ed strings (caller owns and must free both).
int EXRLayers(const char *filename, const char **layer_names[], int *num_layers,
              const char **err) {
  EXRVersion exr_version;
  EXRHeader exr_header;
  InitEXRHeader(&exr_header);

  {
    int ret = ParseEXRVersionFromFile(&exr_version, filename);
    if (ret != TINYEXR_SUCCESS) {
      tinyexr::SetErrorMessage("Invalid EXR header.", err);
      return ret;
    }

    if (exr_version.multipart || exr_version.non_image) {
      tinyexr::SetErrorMessage(
          "Loading multipart or DeepImage is not supported in LoadEXR() API",
          err);
      return TINYEXR_ERROR_INVALID_DATA;  // @fixme.
} } int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } std::vector<std::string> layer_vec; tinyexr::GetLayers(exr_header, layer_vec); (*num_layers) = int(layer_vec.size()); (*layer_names) = static_cast<const char **>( malloc(sizeof(const char *) * static_cast<size_t>(layer_vec.size()))); for (size_t c = 0; c < static_cast<size_t>(layer_vec.size()); c++) { #ifdef _MSC_VER (*layer_names)[c] = _strdup(layer_vec[c].c_str()); #else (*layer_names)[c] = strdup(layer_vec[c].c_str()); #endif } FreeEXRHeader(&exr_header); return TINYEXR_SUCCESS; } int LoadEXR(float **out_rgba, int *width, int *height, const char *filename, const char **err) { return LoadEXRWithLayer(out_rgba, width, height, filename, /* layername */NULL, err); } int LoadEXRWithLayer(float **out_rgba, int *width, int *height, const char *filename, const char *layername, const char **err) { if (out_rgba == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXR()", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); InitEXRImage(&exr_image); { int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { std::stringstream ss; ss << "Failed to open EXR file or read version info from EXR file. code(" << ret << ")"; tinyexr::SetErrorMessage(ss.str(), err); return ret; } if (exr_version.multipart || exr_version.non_image) { tinyexr::SetErrorMessage( "Loading multipart or DeepImage is not supported in LoadEXR() API", err); return TINYEXR_ERROR_INVALID_DATA; // @fixme. } } { int ret = ParseEXRHeaderFromFile(&exr_header, &exr_version, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // Read HALF channel as FLOAT. 
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } // TODO: Probably limit loading to layers (channels) selected by layer index { int ret = LoadEXRImageFromFile(&exr_image, &exr_header, filename, err); if (ret != TINYEXR_SUCCESS) { FreeEXRHeader(&exr_header); return ret; } } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; std::vector<std::string> layer_names; tinyexr::GetLayers(exr_header, layer_names); std::vector<tinyexr::LayerChannel> channels; tinyexr::ChannelsInLayer(exr_header, layername == NULL ? "" : std::string(layername), channels); if (channels.size() < 1) { tinyexr::SetErrorMessage("Layer Not Found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_LAYER_NOT_FOUND; } size_t ch_count = channels.size() < 4 ? channels.size() : 4; for (size_t c = 0; c < ch_count; c++) { const tinyexr::LayerChannel &ch = channels[c]; if (ch.name == "R") { idxR = int(ch.index); } else if (ch.name == "G") { idxG = int(ch.index); } else if (ch.name == "B") { idxB = int(ch.index); } else if (ch.name == "A") { idxA = int(ch.index); } } if (channels.size() == 1) { int chIdx = int(channels.front().index); // Grayscale channel only. (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[chIdx][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[chIdx][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // Assume RGB(A) if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int IsEXR(const char *filename) { EXRVersion exr_version; int ret = ParseEXRVersionFromFile(&exr_version, filename); if (ret != TINYEXR_SUCCESS) { return ret; } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromMemory(EXRHeader *exr_header, const EXRVersion *version, const unsigned char *memory, size_t size, const char **err) { if (memory == NULL || exr_header == NULL) { tinyexr::SetErrorMessage( "Invalid argument. 
`memory` or `exr_header` argument is null in " "ParseEXRHeaderFromMemory()", err); // Invalid argument return TINYEXR_ERROR_INVALID_ARGUMENT; } if (size < tinyexr::kEXRVersionSize) { tinyexr::SetErrorMessage("Insufficient header/data size.\n", err); return TINYEXR_ERROR_INVALID_DATA; } const unsigned char *marker = memory + tinyexr::kEXRVersionSize; size_t marker_size = size - tinyexr::kEXRVersionSize; tinyexr::HeaderInfo info; info.clear(); std::string err_str; int ret = ParseEXRHeader(&info, NULL, version, &err_str, marker, marker_size); if (ret != TINYEXR_SUCCESS) { if (err && !err_str.empty()) { tinyexr::SetErrorMessage(err_str, err); } } ConvertHeader(exr_header, info); // transfoer `tiled` from version. exr_header->tiled = version->tiled; return ret; } int LoadEXRFromMemory(float **out_rgba, int *width, int *height, const unsigned char *memory, size_t size, const char **err) { if (out_rgba == NULL || memory == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } EXRVersion exr_version; EXRImage exr_image; EXRHeader exr_header; InitEXRHeader(&exr_header); int ret = ParseEXRVersionFromMemory(&exr_version, memory, size); if (ret != TINYEXR_SUCCESS) { std::stringstream ss; ss << "Failed to parse EXR version. code(" << ret << ")"; tinyexr::SetErrorMessage(ss.str(), err); return ret; } ret = ParseEXRHeaderFromMemory(&exr_header, &exr_version, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // Read HALF channel as FLOAT. 
for (int i = 0; i < exr_header.num_channels; i++) { if (exr_header.pixel_types[i] == TINYEXR_PIXELTYPE_HALF) { exr_header.requested_pixel_types[i] = TINYEXR_PIXELTYPE_FLOAT; } } InitEXRImage(&exr_image); ret = LoadEXRImageFromMemory(&exr_image, &exr_header, memory, size, err); if (ret != TINYEXR_SUCCESS) { return ret; } // RGBA int idxR = -1; int idxG = -1; int idxB = -1; int idxA = -1; for (int c = 0; c < exr_header.num_channels; c++) { if (strcmp(exr_header.channels[c].name, "R") == 0) { idxR = c; } else if (strcmp(exr_header.channels[c].name, "G") == 0) { idxG = c; } else if (strcmp(exr_header.channels[c].name, "B") == 0) { idxB = c; } else if (strcmp(exr_header.channels[c].name, "A") == 0) { idxA = c; } } // TODO(syoyo): Refactor removing same code as used in LoadEXR(). if (exr_header.num_channels == 1) { // Grayscale channel only. (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) { for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[0][srcIdx]; (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[0][srcIdx]; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { const float val = reinterpret_cast<float **>(exr_image.images)[0][i]; (*out_rgba)[4 * i + 0] = val; (*out_rgba)[4 * i + 1] = val; (*out_rgba)[4 * i + 2] = val; (*out_rgba)[4 * i + 3] = val; } } } else { // TODO(syoyo): Support non RGBA image. if (idxR == -1) { tinyexr::SetErrorMessage("R channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxG == -1) { tinyexr::SetErrorMessage("G channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } if (idxB == -1) { tinyexr::SetErrorMessage("B channel not found", err); // @todo { free exr_image } return TINYEXR_ERROR_INVALID_DATA; } (*out_rgba) = reinterpret_cast<float *>( malloc(4 * sizeof(float) * static_cast<size_t>(exr_image.width) * static_cast<size_t>(exr_image.height))); if (exr_header.tiled) { for (int it = 0; it < exr_image.num_tiles; it++) { for (int j = 0; j < exr_header.tile_size_y; j++) for (int i = 0; i < exr_header.tile_size_x; i++) { const int ii = exr_image.tiles[it].offset_x * exr_header.tile_size_x + i; const int jj = exr_image.tiles[it].offset_y * exr_header.tile_size_y + j; const int idx = ii + jj * exr_image.width; // out of region check. 
if (ii >= exr_image.width) { continue; } if (jj >= exr_image.height) { continue; } const int srcIdx = i + j * exr_header.tile_size_x; unsigned char **src = exr_image.tiles[it].images; (*out_rgba)[4 * idx + 0] = reinterpret_cast<float **>(src)[idxR][srcIdx]; (*out_rgba)[4 * idx + 1] = reinterpret_cast<float **>(src)[idxG][srcIdx]; (*out_rgba)[4 * idx + 2] = reinterpret_cast<float **>(src)[idxB][srcIdx]; if (idxA != -1) { (*out_rgba)[4 * idx + 3] = reinterpret_cast<float **>(src)[idxA][srcIdx]; } else { (*out_rgba)[4 * idx + 3] = 1.0; } } } } else { for (int i = 0; i < exr_image.width * exr_image.height; i++) { (*out_rgba)[4 * i + 0] = reinterpret_cast<float **>(exr_image.images)[idxR][i]; (*out_rgba)[4 * i + 1] = reinterpret_cast<float **>(exr_image.images)[idxG][i]; (*out_rgba)[4 * i + 2] = reinterpret_cast<float **>(exr_image.images)[idxB][i]; if (idxA != -1) { (*out_rgba)[4 * i + 3] = reinterpret_cast<float **>(exr_image.images)[idxA][i]; } else { (*out_rgba)[4 * i + 3] = 1.0; } } } } (*width) = exr_image.width; (*height) = exr_image.height; FreeEXRHeader(&exr_header); FreeEXRImage(&exr_image); return TINYEXR_SUCCESS; } int LoadEXRImageFromFile(EXRImage *exr_image, const EXRHeader *exr_header, const char *filename, const char **err) { if (exr_image == NULL) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } size_t filesize; // Compute size fseek(fp, 0, SEEK_END); filesize = static_cast<size_t>(ftell(fp)); fseek(fp, 0, SEEK_SET); if (filesize < 16) { tinyexr::SetErrorMessage("File size too short " + std::string(filename), err); return TINYEXR_ERROR_INVALID_FILE; } std::vector<unsigned char> buf(filesize); // @todo { use mmap } { size_t ret; ret = fread(&buf[0], 1, 
filesize, fp); assert(ret == filesize); fclose(fp); (void)ret; } return LoadEXRImageFromMemory(exr_image, exr_header, &buf.at(0), filesize, err); } int LoadEXRImageFromMemory(EXRImage *exr_image, const EXRHeader *exr_header, const unsigned char *memory, const size_t size, const char **err) { if (exr_image == NULL || memory == NULL || (size < tinyexr::kEXRVersionSize)) { tinyexr::SetErrorMessage("Invalid argument for LoadEXRImageFromMemory", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->header_len == 0) { tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } const unsigned char *head = memory; const unsigned char *marker = reinterpret_cast<const unsigned char *>( memory + exr_header->header_len + 8); // +8 for magic number + version header. return tinyexr::DecodeEXRImage(exr_image, exr_header, head, marker, size, err); } size_t SaveEXRImageToMemory(const EXRImage *exr_image, const EXRHeader *exr_header, unsigned char **memory_out, const char **err) { if (exr_image == NULL || memory_out == NULL || exr_header->compression_type < 0) { tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToMemory", err); return 0; } #if !TINYEXR_USE_PIZ if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { tinyexr::SetErrorMessage("PIZ compression is not supported in this build", err); return 0; } #endif #if !TINYEXR_USE_ZFP if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { tinyexr::SetErrorMessage("ZFP compression is not supported in this build", err); return 0; } #endif #if TINYEXR_USE_ZFP for (size_t i = 0; i < static_cast<size_t>(exr_header->num_channels); i++) { if (exr_header->requested_pixel_types[i] != TINYEXR_PIXELTYPE_FLOAT) { tinyexr::SetErrorMessage("Pixel type must be FLOAT for ZFP compression", err); return 0; } } #endif std::vector<unsigned char> memory; // Header { const char header[] = {0x76, 0x2f, 0x31, 0x01}; memory.insert(memory.end(), header, header + 4); 
} // Version, scanline. { char marker[] = {2, 0, 0, 0}; /* @todo if (exr_header->tiled) { marker[1] |= 0x2; } if (exr_header->long_name) { marker[1] |= 0x4; } if (exr_header->non_image) { marker[1] |= 0x8; } if (exr_header->multipart) { marker[1] |= 0x10; } */ memory.insert(memory.end(), marker, marker + 4); } int num_scanlines = 1; if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) { num_scanlines = 16; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { num_scanlines = 32; } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { num_scanlines = 16; } // Write attributes. std::vector<tinyexr::ChannelInfo> channels; { std::vector<unsigned char> data; for (int c = 0; c < exr_header->num_channels; c++) { tinyexr::ChannelInfo info; info.p_linear = 0; info.pixel_type = exr_header->requested_pixel_types[c]; info.x_sampling = 1; info.y_sampling = 1; info.name = std::string(exr_header->channels[c].name); channels.push_back(info); } tinyexr::WriteChannelInfo(data, channels); tinyexr::WriteAttributeToMemory(&memory, "channels", "chlist", &data.at(0), static_cast<int>(data.size())); } { int comp = exr_header->compression_type; tinyexr::swap4(reinterpret_cast<unsigned int *>(&comp)); tinyexr::WriteAttributeToMemory( &memory, "compression", "compression", reinterpret_cast<const unsigned char *>(&comp), 1); } { int data[4] = {0, 0, exr_image->width - 1, exr_image->height - 1}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[1])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[2])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&data[3])); tinyexr::WriteAttributeToMemory( &memory, "dataWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); tinyexr::WriteAttributeToMemory( &memory, "displayWindow", "box2i", reinterpret_cast<const unsigned char *>(data), sizeof(int) * 4); } { unsigned char line_order = 0; // @fixme { read 
line_order from EXRHeader } tinyexr::WriteAttributeToMemory(&memory, "lineOrder", "lineOrder", &line_order, 1); } { float aspectRatio = 1.0f; tinyexr::swap4(reinterpret_cast<unsigned int *>(&aspectRatio)); tinyexr::WriteAttributeToMemory( &memory, "pixelAspectRatio", "float", reinterpret_cast<const unsigned char *>(&aspectRatio), sizeof(float)); } { float center[2] = {0.0f, 0.0f}; tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[0])); tinyexr::swap4(reinterpret_cast<unsigned int *>(&center[1])); tinyexr::WriteAttributeToMemory( &memory, "screenWindowCenter", "v2f", reinterpret_cast<const unsigned char *>(center), 2 * sizeof(float)); } { float w = static_cast<float>(exr_image->width); tinyexr::swap4(reinterpret_cast<unsigned int *>(&w)); tinyexr::WriteAttributeToMemory(&memory, "screenWindowWidth", "float", reinterpret_cast<const unsigned char *>(&w), sizeof(float)); } // Custom attributes if (exr_header->num_custom_attributes > 0) { for (int i = 0; i < exr_header->num_custom_attributes; i++) { tinyexr::WriteAttributeToMemory( &memory, exr_header->custom_attributes[i].name, exr_header->custom_attributes[i].type, reinterpret_cast<const unsigned char *>( exr_header->custom_attributes[i].value), exr_header->custom_attributes[i].size); } } { // end of header unsigned char e = 0; memory.push_back(e); } int num_blocks = exr_image->height / num_scanlines; if (num_blocks * num_scanlines < exr_image->height) { num_blocks++; } std::vector<tinyexr::tinyexr_uint64> offsets(static_cast<size_t>(num_blocks)); size_t headerSize = memory.size(); tinyexr::tinyexr_uint64 offset = headerSize + static_cast<size_t>(num_blocks) * sizeof( tinyexr::tinyexr_int64); // sizeof(header) + sizeof(offsetTable) std::vector<std::vector<unsigned char> > data_list( static_cast<size_t>(num_blocks)); std::vector<size_t> channel_offset_list( static_cast<size_t>(exr_header->num_channels)); int pixel_data_size = 0; size_t channel_offset = 0; for (size_t c = 0; c < 
static_cast<size_t>(exr_header->num_channels); c++) { channel_offset_list[c] = channel_offset; if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { pixel_data_size += sizeof(unsigned short); channel_offset += sizeof(unsigned short); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { pixel_data_size += sizeof(float); channel_offset += sizeof(float); } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { pixel_data_size += sizeof(unsigned int); channel_offset += sizeof(unsigned int); } else { assert(0); } } #if TINYEXR_USE_ZFP tinyexr::ZFPCompressionParam zfp_compression_param; // Use ZFP compression parameter from custom attributes(if such a parameter // exists) { bool ret = tinyexr::FindZFPCompressionParam( &zfp_compression_param, exr_header->custom_attributes, exr_header->num_custom_attributes); if (!ret) { // Use predefined compression parameter. zfp_compression_param.type = 0; zfp_compression_param.rate = 2; } } #endif // TOOD(LTE): C++11 thread // Use signed int since some OpenMP compiler doesn't allow unsigned type for // `parallel for` #if TINYEXR_USE_OPENMP #pragma omp parallel for #endif for (int i = 0; i < num_blocks; i++) { size_t ii = static_cast<size_t>(i); int start_y = num_scanlines * i; int endY = (std::min)(num_scanlines * (i + 1), exr_image->height); int h = endY - start_y; std::vector<unsigned char> buf( static_cast<size_t>(exr_image->width * h * pixel_data_size)); for (size_t c = 0; c < static_cast<size_t>(exr_header->num_channels); c++) { if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP16 h16; h16.u = 
reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP32 f32 = half_to_float(h16); tinyexr::swap4(reinterpret_cast<unsigned int *>(&f32.f)); // line_ptr[x] = f32.f; tinyexr::cpy4(line_ptr + x, &(f32.f)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned short val = reinterpret_cast<unsigned short **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap2(&val); // line_ptr[x] = val; tinyexr::cpy2(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_HALF) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned short *line_ptr = reinterpret_cast<unsigned short *>( &buf.at(static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { tinyexr::FP32 f32; f32.f = reinterpret_cast<float **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::FP16 h16; h16 = float_to_half_full(f32); tinyexr::swap2(reinterpret_cast<unsigned short *>(&h16.u)); // line_ptr[x] = h16.u; tinyexr::cpy2(line_ptr + x, &(h16.u)); } } } else if (exr_header->requested_pixel_types[c] == TINYEXR_PIXELTYPE_FLOAT) { for (int y = 0; y < h; y++) { // Assume increasing Y float *line_ptr = reinterpret_cast<float *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { float val = reinterpret_cast<float **>( 
exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(reinterpret_cast<unsigned int *>(&val)); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } else { assert(0); } } else if (exr_header->pixel_types[c] == TINYEXR_PIXELTYPE_UINT) { for (int y = 0; y < h; y++) { // Assume increasing Y unsigned int *line_ptr = reinterpret_cast<unsigned int *>(&buf.at( static_cast<size_t>(pixel_data_size * y * exr_image->width) + channel_offset_list[c] * static_cast<size_t>(exr_image->width))); for (int x = 0; x < exr_image->width; x++) { unsigned int val = reinterpret_cast<unsigned int **>( exr_image->images)[c][(y + start_y) * exr_image->width + x]; tinyexr::swap4(&val); // line_ptr[x] = val; tinyexr::cpy4(line_ptr + x, &val); } } } } if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_NONE) { // 4 byte: scan line // 4 byte: data size // ~ : pixel data(uncompressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(buf.size()); memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), buf.begin(), buf.begin() + data_len); } else if ((exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) || (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) { #if TINYEXR_USE_MINIZ std::vector<unsigned char> block(tinyexr::miniz::mz_compressBound( static_cast<unsigned long>(buf.size()))); #else std::vector<unsigned char> block( compressBound(static_cast<uLong>(buf.size()))); #endif tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressZip(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel 
data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_RLE) { // (buf.size() * 3) / 2 would be enough. std::vector<unsigned char> block((buf.size() * 3) / 2); tinyexr::tinyexr_uint64 outSize = block.size(); tinyexr::CompressRle(&block.at(0), outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), static_cast<unsigned long>(buf.size())); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = static_cast<unsigned int>(outSize); // truncate memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) { #if TINYEXR_USE_PIZ unsigned int bufLen = 8192 + static_cast<unsigned int>( 2 * static_cast<unsigned int>( buf.size())); // @fixme { compute good bound. 
} std::vector<unsigned char> block(bufLen); unsigned int outSize = static_cast<unsigned int>(block.size()); CompressPiz(&block.at(0), &outSize, reinterpret_cast<const unsigned char *>(&buf.at(0)), buf.size(), channels, exr_image->width, h); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) { #if TINYEXR_USE_ZFP std::vector<unsigned char> block; unsigned int outSize; tinyexr::CompressZfp( &block, &outSize, reinterpret_cast<const float *>(&buf.at(0)), exr_image->width, h, exr_header->num_channels, zfp_compression_param); // 4 byte: scan line // 4 byte: data size // ~ : pixel data(compressed) std::vector<unsigned char> header(8); unsigned int data_len = outSize; memcpy(&header.at(0), &start_y, sizeof(int)); memcpy(&header.at(4), &data_len, sizeof(unsigned int)); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(0))); tinyexr::swap4(reinterpret_cast<unsigned int *>(&header.at(4))); data_list[ii].insert(data_list[ii].end(), header.begin(), header.end()); data_list[ii].insert(data_list[ii].end(), block.begin(), block.begin() + data_len); #else assert(0); #endif } else { assert(0); } } // omp parallel for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) { offsets[i] = offset; tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offsets[i])); offset += data_list[i].size(); } size_t totalSize = static_cast<size_t>(offset); { memory.insert( memory.end(), reinterpret_cast<unsigned char 
// ---- tail of SaveEXRImageToMemory(); the head of this function lies outside
// this chunk. Appends the byte-swapped chunk offset table, then the data
// blocks, into the output buffer. ----
*>(&offsets.at(0)),
                  reinterpret_cast<unsigned char *>(&offsets.at(0)) +
                      sizeof(tinyexr::tinyexr_uint64) *
                          static_cast<size_t>(num_blocks));
  }

  if (memory.size() == 0) {
    tinyexr::SetErrorMessage("Output memory size is zero", err);
    return 0;  // 0 signals serialization failure to callers.
  }

  // Final layout: [header bytes + offset table][block 0][block 1]...
  (*memory_out) = static_cast<unsigned char *>(malloc(totalSize));
  memcpy((*memory_out), &memory.at(0), memory.size());
  unsigned char *memory_ptr = *memory_out + memory.size();

  for (size_t i = 0; i < static_cast<size_t>(num_blocks); i++) {
    memcpy(memory_ptr, &data_list[i].at(0), data_list[i].size());
    memory_ptr += data_list[i].size();
  }

  return totalSize;  // OK
}

// Saves a scanline EXR image to `filename` by serializing it to memory
// (SaveEXRImageToMemory) and writing the buffer in one fwrite().
// Returns TINYEXR_SUCCESS, or a TINYEXR_ERROR_* code on failure; `err`
// (optional) receives a malloc'ed message to be freed with
// FreeEXRErrorMessage().
int SaveEXRImageToFile(const EXRImage *exr_image, const EXRHeader *exr_header,
                       const char *filename, const char **err) {
  if (exr_image == NULL || filename == NULL ||
      exr_header->compression_type < 0) {
    tinyexr::SetErrorMessage("Invalid argument for SaveEXRImageToFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#if !TINYEXR_USE_PIZ
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_PIZ) {
    tinyexr::SetErrorMessage("PIZ compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif

#if !TINYEXR_USE_ZFP
  if (exr_header->compression_type == TINYEXR_COMPRESSIONTYPE_ZFP) {
    tinyexr::SetErrorMessage("ZFP compression is not supported in this build",
                             err);
    return TINYEXR_ERROR_UNSUPPORTED_FEATURE;
  }
#endif

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "wb");
#else
  FILE *fp = fopen(filename, "wb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }

  unsigned char *mem = NULL;
  size_t mem_size = SaveEXRImageToMemory(exr_image, exr_header, &mem, err);
  if (mem_size == 0) {
    // FIXME(review): `fp` is leaked on this path — fclose(fp) is missing
    // before the early return.
    return TINYEXR_ERROR_SERIALZATION_FAILED;
  }

  size_t written_size = 0;
  if ((mem_size > 0) && mem) {
    written_size = fwrite(mem, 1, mem_size, fp);
  }
  free(mem);

  fclose(fp);

  // A short write (disk full, I/O error) is reported as CANT_WRITE_FILE.
  if (written_size != mem_size) {
    tinyexr::SetErrorMessage("Cannot write a file", err);
    return TINYEXR_ERROR_CANT_WRITE_FILE;
  }

  return TINYEXR_SUCCESS;
}

// Loads a deep-scanline EXR file into `deep_image`.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code; `err` (optional)
// receives an error message on failure.
int LoadDeepEXR(DeepImage *deep_image, const char *filename, const char **err) {
  if (deep_image == NULL) {
    tinyexr::SetErrorMessage("Invalid argument for LoadDeepEXR", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _MSC_VER
  FILE *fp = NULL;
  errno_t errcode = fopen_s(&fp, filename, "rb");
  if ((0 != errcode) || (!fp)) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#else
  FILE *fp = fopen(filename, "rb");
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read a file " + std::string(filename),
                             err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }
#endif

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (filesize == 0) {
    fclose(fp);
    tinyexr::SetErrorMessage("File size is zero : " + std::string(filename),
                             err);
    return TINYEXR_ERROR_INVALID_FILE;
  }

  std::vector<char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    (void)ret;
  }
  fclose(fp);

  const char *head = &buf[0];
  const char *marker = &buf[0];

  // Header check.
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};

    if (memcmp(marker, header, 4) != 0) {
      tinyexr::SetErrorMessage("Invalid magic number", err);
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }

  // Version, scanline.
  {
    // ver 2.0, scanline, deep bit on(0x800)
    // must be [2, 0, 0, 0]
    if (marker[0] != 2 || marker[1] != 8 || marker[2] != 0 || marker[3] != 0) {
      tinyexr::SetErrorMessage("Unsupported version or scanline", err);
      return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
    }

    marker += 4;
  }

  // Data-window bounds (attribute values; -1 means "not seen yet").
  int dx = -1;
  int dy = -1;
  int dw = -1;
  int dh = -1;
  int num_scanline_blocks = 1;  // 16 for ZIP compression.
  // ---- continuation of LoadDeepEXR() (function starts on the previous
  // line): attribute parsing, offset-table reads, and per-block decoding. ----
  int compression_type = -1;
  int num_channels = -1;
  std::vector<tinyexr::ChannelInfo> channels;

  // Read attributes
  size_t size = filesize - tinyexr::kEXRVersionSize;
  for (;;) {
    if (0 == size) {
      return TINYEXR_ERROR_INVALID_DATA;
    } else if (marker[0] == '\0') {
      // An empty attribute name terminates the header.
      marker++;
      size--;
      break;
    }

    std::string attr_name;
    std::string attr_type;
    std::vector<unsigned char> data;
    size_t marker_size;
    if (!tinyexr::ReadAttribute(&attr_name, &attr_type, &data, &marker_size,
                                marker, size)) {
      std::stringstream ss;
      ss << "Failed to parse attribute\n";
      tinyexr::SetErrorMessage(ss.str(), err);
      return TINYEXR_ERROR_INVALID_DATA;
    }
    marker += marker_size;
    size -= marker_size;

    if (attr_name.compare("compression") == 0) {
      compression_type = data[0];
      if (compression_type > TINYEXR_COMPRESSIONTYPE_PIZ) {
        std::stringstream ss;
        ss << "Unsupported compression type : " << compression_type;
        tinyexr::SetErrorMessage(ss.str(), err);
        return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
      }

      if (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) {
        num_scanline_blocks = 16;
      }

    } else if (attr_name.compare("channels") == 0) {
      // name: zero-terminated string, from 1 to 255 bytes long
      // pixel type: int, possible values are: UINT = 0 HALF = 1 FLOAT = 2
      // pLinear: unsigned char, possible values are 0 and 1
      // reserved: three chars, should be zero
      // xSampling: int
      // ySampling: int

      if (!tinyexr::ReadChannelInfo(channels, data)) {
        tinyexr::SetErrorMessage("Failed to parse channel info", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }

      num_channels = static_cast<int>(channels.size());

      if (num_channels < 1) {
        tinyexr::SetErrorMessage("Invalid channels format", err);
        return TINYEXR_ERROR_INVALID_DATA;
      }

    } else if (attr_name.compare("dataWindow") == 0) {
      // dataWindow = (xmin, ymin, xmax, ymax), stored little-endian.
      memcpy(&dx, &data.at(0), sizeof(int));
      memcpy(&dy, &data.at(4), sizeof(int));
      memcpy(&dw, &data.at(8), sizeof(int));
      memcpy(&dh, &data.at(12), sizeof(int));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&dx));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&dy));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&dw));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&dh));

    } else if (attr_name.compare("displayWindow") == 0) {
      // Parsed for validation/consumption of the bytes, then discarded.
      int x;
      int y;
      int w;
      int h;
      memcpy(&x, &data.at(0), sizeof(int));
      memcpy(&y, &data.at(4), sizeof(int));
      memcpy(&w, &data.at(8), sizeof(int));
      memcpy(&h, &data.at(12), sizeof(int));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&x));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&y));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&w));
      tinyexr::swap4(reinterpret_cast<unsigned int *>(&h));
    }
  }

  // NOTE(review): assert-only validation — in release builds a file missing
  // dataWindow/channels proceeds with -1 values.
  assert(dx >= 0);
  assert(dy >= 0);
  assert(dw >= 0);
  assert(dh >= 0);
  assert(num_channels >= 1);

  int data_width = dw - dx + 1;
  int data_height = dh - dy + 1;

  // NOTE(review): `image` appears unused in the visible code.
  std::vector<float> image(
      static_cast<size_t>(data_width * data_height * 4));  // 4 = RGBA

  // Read offset tables.
  int num_blocks = data_height / num_scanline_blocks;
  if (num_blocks * num_scanline_blocks < data_height) {
    num_blocks++;  // partial trailing block
  }

  std::vector<tinyexr::tinyexr_int64> offsets(static_cast<size_t>(num_blocks));

  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    tinyexr::tinyexr_int64 offset;
    memcpy(&offset, marker, sizeof(tinyexr::tinyexr_int64));
    tinyexr::swap8(reinterpret_cast<tinyexr::tinyexr_uint64 *>(&offset));
    marker += sizeof(tinyexr::tinyexr_int64);  // = 8
    offsets[y] = offset;
  }

#if TINYEXR_USE_PIZ
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_PIZ)) {
#else
  if ((compression_type == TINYEXR_COMPRESSIONTYPE_NONE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_RLE) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIPS) ||
      (compression_type == TINYEXR_COMPRESSIONTYPE_ZIP)) {
#endif
    // OK
  } else {
    tinyexr::SetErrorMessage("Unsupported compression format", err);
    return TINYEXR_ERROR_UNSUPPORTED_FORMAT;
  }

  // Allocate image[channel][scanline]; row buffers are allocated later,
  // once samples_per_line is known for each block.
  deep_image->image = static_cast<float ***>(
      malloc(sizeof(float **) * static_cast<size_t>(num_channels)));
  for (int c = 0; c < num_channels; c++) {
    deep_image->image[c] = static_cast<float **>(
        malloc(sizeof(float *) * static_cast<size_t>(data_height)));
    // NOTE(review): this inner loop body is intentionally(?) empty — rows
    // are allocated in the decode loop below.
    for (int y = 0; y < data_height; y++) {
    }
  }

  deep_image->offset_table = static_cast<int **>(
      malloc(sizeof(int *) * static_cast<size_t>(data_height)));
  for (int y = 0; y < data_height; y++) {
    deep_image->offset_table[y] = static_cast<int *>(
        malloc(sizeof(int) * static_cast<size_t>(data_width)));
  }

  for (size_t y = 0; y < static_cast<size_t>(num_blocks); y++) {
    const unsigned char *data_ptr =
        reinterpret_cast<const unsigned char *>(head + offsets[y]);

    // Deep scanline chunk layout:
    // int: y coordinate
    // int64: packed size of pixel offset table
    // int64: packed size of sample data
    // int64: unpacked size of sample data
    // compressed pixel offset table
    // compressed sample data
    int line_no;
    tinyexr::tinyexr_int64 packedOffsetTableSize;
    tinyexr::tinyexr_int64 packedSampleDataSize;
    tinyexr::tinyexr_int64 unpackedSampleDataSize;
    memcpy(&line_no, data_ptr, sizeof(int));
    memcpy(&packedOffsetTableSize, data_ptr + 4,
           sizeof(tinyexr::tinyexr_int64));
    memcpy(&packedSampleDataSize, data_ptr + 12,
           sizeof(tinyexr::tinyexr_int64));
    memcpy(&unpackedSampleDataSize, data_ptr + 20,
           sizeof(tinyexr::tinyexr_int64));

    tinyexr::swap4(reinterpret_cast<unsigned int *>(&line_no));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedOffsetTableSize));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&packedSampleDataSize));
    tinyexr::swap8(
        reinterpret_cast<tinyexr::tinyexr_uint64 *>(&unpackedSampleDataSize));

    std::vector<int> pixelOffsetTable(static_cast<size_t>(data_width));

    // decode pixel offset table.
    {
      unsigned long dstLen =
          static_cast<unsigned long>(pixelOffsetTable.size() * sizeof(int));
      if (!tinyexr::DecompressZip(
              reinterpret_cast<unsigned char *>(&pixelOffsetTable.at(0)),
              &dstLen, data_ptr + 28,
              static_cast<unsigned long>(packedOffsetTableSize))) {
        // FIXME(review): `return false;` in an int-returning function
        // yields 0 == TINYEXR_SUCCESS — failure is reported as success.
        return false;
      }

      assert(dstLen == pixelOffsetTable.size() * sizeof(int));
      for (size_t i = 0; i < static_cast<size_t>(data_width); i++) {
        deep_image->offset_table[y][i] = pixelOffsetTable[i];
      }
    }

    std::vector<unsigned char> sample_data(
        static_cast<size_t>(unpackedSampleDataSize));

    // decode sample data.
    {
      unsigned long dstLen = static_cast<unsigned long>(unpackedSampleDataSize);
      if (dstLen) {
        if (!tinyexr::DecompressZip(
                reinterpret_cast<unsigned char *>(&sample_data.at(0)), &dstLen,
                data_ptr + 28 + packedOffsetTableSize,
                static_cast<unsigned long>(packedSampleDataSize))) {
          // FIXME(review): same `return false;` == TINYEXR_SUCCESS issue.
          return false;
        }
        assert(dstLen == static_cast<unsigned long>(unpackedSampleDataSize));
      }
    }

    // decode sample
    // Byte offset of each channel inside one interleaved sample.
    int sampleSize = -1;
    std::vector<int> channel_offset_list(static_cast<size_t>(num_channels));
    {
      int channel_offset = 0;
      for (size_t i = 0; i < static_cast<size_t>(num_channels); i++) {
        channel_offset_list[i] = channel_offset;
        if (channels[i].pixel_type == TINYEXR_PIXELTYPE_UINT) {  // UINT
          channel_offset += 4;
        } else if (channels[i].pixel_type == TINYEXR_PIXELTYPE_HALF) {  // half
          channel_offset += 2;
        } else if (channels[i].pixel_type ==
                   TINYEXR_PIXELTYPE_FLOAT) {  // float
          channel_offset += 4;
        } else {
          assert(0);
        }
      }
      sampleSize = channel_offset;
    }
    assert(sampleSize >= 2);

    // The last pixel's cumulative offset must equal the total sample bytes.
    assert(static_cast<size_t>(
               pixelOffsetTable[static_cast<size_t>(data_width - 1)] *
               sampleSize) == sample_data.size());

    int samples_per_line = static_cast<int>(sample_data.size()) / sampleSize;

    //
    // Alloc memory
    //

    //
    // pixel data is stored as image[channels][pixel_samples]
    //
    {
      tinyexr::tinyexr_uint64 data_offset = 0;
      for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
        deep_image->image[c][y] = static_cast<float *>(
            malloc(sizeof(float) * static_cast<size_t>(samples_per_line)));

        // All pixel types are widened to float in the output.
        if (channels[c].pixel_type == 0) {  // UINT
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            unsigned int ui;
            unsigned int *src_ptr = reinterpret_cast<unsigned int *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(int)));
            tinyexr::cpy4(&ui, src_ptr);
            deep_image->image[c][y][x] = static_cast<float>(ui);  // @fixme
          }
          data_offset +=
              sizeof(unsigned int) * static_cast<size_t>(samples_per_line);
        } else if (channels[c].pixel_type == 1) {  // half
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            tinyexr::FP16 f16;
            const unsigned short *src_ptr = reinterpret_cast<unsigned short *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(short)));
            tinyexr::cpy2(&(f16.u), src_ptr);
            tinyexr::FP32 f32 = half_to_float(f16);
            deep_image->image[c][y][x] = f32.f;
          }
          data_offset += sizeof(short) * static_cast<size_t>(samples_per_line);
        } else {  // float
          for (size_t x = 0; x < static_cast<size_t>(samples_per_line); x++) {
            float f;
            const float *src_ptr = reinterpret_cast<float *>(
                &sample_data.at(size_t(data_offset) + x * sizeof(float)));
            tinyexr::cpy4(&f, src_ptr);
            deep_image->image[c][y][x] = f;
          }
          data_offset += sizeof(float) * static_cast<size_t>(samples_per_line);
        }
      }
    }
  }  // y

  deep_image->width = data_width;
  deep_image->height = data_height;

  // Channel names are duplicated; caller owns the strdup'ed strings.
  deep_image->channel_names = static_cast<const char **>(
      malloc(sizeof(const char *) * static_cast<size_t>(num_channels)));
  for (size_t c = 0; c < static_cast<size_t>(num_channels); c++) {
#ifdef _WIN32
    deep_image->channel_names[c] = _strdup(channels[c].name.c_str());
#else
    deep_image->channel_names[c] = strdup(channels[c].name.c_str());
#endif
  }
  deep_image->num_channels = num_channels;

  return TINYEXR_SUCCESS;
}

// Zero-initializes an EXRImage. Call before first use; NULL is a no-op.
void InitEXRImage(EXRImage *exr_image) {
  if (exr_image == NULL) {
    return;
  }

  exr_image->width = 0;
  exr_image->height = 0;
  exr_image->num_channels = 0;

  exr_image->images = NULL;
  exr_image->tiles = NULL;
  exr_image->num_tiles = 0;
}

// ---- head of FreeEXRErrorMessage(); continues on the next line. ----
void FreeEXRErrorMessage(const char
*msg) { if (msg) { free(reinterpret_cast<void *>(const_cast<char *>(msg))); } return; } void InitEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return; } memset(exr_header, 0, sizeof(EXRHeader)); } int FreeEXRHeader(EXRHeader *exr_header) { if (exr_header == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } if (exr_header->channels) { free(exr_header->channels); } if (exr_header->pixel_types) { free(exr_header->pixel_types); } if (exr_header->requested_pixel_types) { free(exr_header->requested_pixel_types); } for (int i = 0; i < exr_header->num_custom_attributes; i++) { if (exr_header->custom_attributes[i].value) { free(exr_header->custom_attributes[i].value); } } if (exr_header->custom_attributes) { free(exr_header->custom_attributes); } return TINYEXR_SUCCESS; } int FreeEXRImage(EXRImage *exr_image) { if (exr_image == NULL) { return TINYEXR_ERROR_INVALID_ARGUMENT; } for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->images && exr_image->images[i]) { free(exr_image->images[i]); } } if (exr_image->images) { free(exr_image->images); } if (exr_image->tiles) { for (int tid = 0; tid < exr_image->num_tiles; tid++) { for (int i = 0; i < exr_image->num_channels; i++) { if (exr_image->tiles[tid].images && exr_image->tiles[tid].images[i]) { free(exr_image->tiles[tid].images[i]); } } if (exr_image->tiles[tid].images) { free(exr_image->tiles[tid].images); } } free(exr_image->tiles); } return TINYEXR_SUCCESS; } int ParseEXRHeaderFromFile(EXRHeader *exr_header, const EXRVersion *exr_version, const char *filename, const char **err) { if (exr_header == NULL || exr_version == NULL || filename == NULL) { tinyexr::SetErrorMessage("Invalid argument for ParseEXRHeaderFromFile", err); return TINYEXR_ERROR_INVALID_ARGUMENT; } #ifdef _WIN32 FILE *fp = NULL; fopen_s(&fp, filename, "rb"); #else FILE *fp = fopen(filename, "rb"); #endif if (!fp) { tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err); return TINYEXR_ERROR_CANT_OPEN_FILE; } 
  // ---- tail of ParseEXRHeaderFromFile() (function starts on the previous
  // line): slurp the file, then delegate to ParseEXRHeaderFromMemory. ----
  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);

    if (ret != filesize) {
      tinyexr::SetErrorMessage("fread() error on " + std::string(filename),
                               err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return ParseEXRHeaderFromMemory(exr_header, exr_version, &buf.at(0), filesize,
                                  err);
}

// Parses all part headers of a multipart EXR from memory.
// On success allocates *exr_headers (array of malloc'ed EXRHeader pointers;
// caller frees each with FreeEXRHeader + free, then frees the array) and sets
// *num_headers. Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code.
int ParseEXRMultipartHeaderFromMemory(EXRHeader ***exr_headers,
                                      int *num_headers,
                                      const EXRVersion *exr_version,
                                      const unsigned char *memory, size_t size,
                                      const char **err) {
  if (memory == NULL || exr_headers == NULL || num_headers == NULL ||
      exr_version == NULL) {
    // Invalid argument
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromMemory", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    tinyexr::SetErrorMessage("Data size too short", err);
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory + tinyexr::kEXRVersionSize;
  size_t marker_size = size - tinyexr::kEXRVersionSize;

  std::vector<tinyexr::HeaderInfo> infos;

  for (;;) {
    tinyexr::HeaderInfo info;
    info.clear();

    std::string err_str;
    bool empty_header = false;
    // FIXME(review): `marker` advances each iteration but `marker_size` is
    // never decremented (the loop decrements `size` instead), so
    // ParseEXRHeader is handed a stale remaining-size on later parts.
    int ret = ParseEXRHeader(&info, &empty_header, exr_version, &err_str,
                             marker, marker_size);

    if (ret != TINYEXR_SUCCESS) {
      tinyexr::SetErrorMessage(err_str, err);
      return ret;
    }

    if (empty_header) {
      marker += 1;  // skip '\0'
      break;
    }

    // `chunkCount` must exist in the header.
    if (info.chunk_count == 0) {
      tinyexr::SetErrorMessage(
          "`chunkCount' attribute is not found in the header.", err);
      return TINYEXR_ERROR_INVALID_DATA;
    }

    infos.push_back(info);

    // move to next header.
    marker += info.header_len;
    size -= info.header_len;
  }

  // allocate memory for EXRHeader and create array of EXRHeader pointers.
  (*exr_headers) =
      static_cast<EXRHeader **>(malloc(sizeof(EXRHeader *) * infos.size()));
  for (size_t i = 0; i < infos.size(); i++) {
    EXRHeader *exr_header = static_cast<EXRHeader *>(malloc(sizeof(EXRHeader)));

    ConvertHeader(exr_header, infos[i]);

    // transfer `tiled` from version.
    exr_header->tiled = exr_version->tiled;

    (*exr_headers)[i] = exr_header;
  }

  (*num_headers) = static_cast<int>(infos.size());

  return TINYEXR_SUCCESS;
}

// File-based wrapper: reads `filename` fully into memory and delegates to
// ParseEXRMultipartHeaderFromMemory.
int ParseEXRMultipartHeaderFromFile(EXRHeader ***exr_headers, int *num_headers,
                                    const EXRVersion *exr_version,
                                    const char *filename, const char **err) {
  if (exr_headers == NULL || num_headers == NULL || exr_version == NULL ||
      filename == NULL) {
    tinyexr::SetErrorMessage(
        "Invalid argument for ParseEXRMultipartHeaderFromFile()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);

    if (ret != filesize) {
      tinyexr::SetErrorMessage("`fread' error. file may be corrupted.", err);
      return TINYEXR_ERROR_INVALID_FILE;
    }
  }

  return ParseEXRMultipartHeaderFromMemory(
      exr_headers, num_headers, exr_version, &buf.at(0), filesize, err);
}

// Parses the 8-byte EXR magic + version header from memory into `version`.
// Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_* code. Body continues on the
// next line.
int ParseEXRVersionFromMemory(EXRVersion *version, const unsigned char *memory,
                              size_t size) {
  if (version == NULL || memory == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  if (size < tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_DATA;
  }

  const unsigned char *marker = memory;

  // Header check.
  // ---- tail of ParseEXRVersionFromMemory() (function starts on the previous
  // line): verify magic, then decode the version flag bits. ----
  {
    const char header[] = {0x76, 0x2f, 0x31, 0x01};
    if (memcmp(marker, header, 4) != 0) {
      return TINYEXR_ERROR_INVALID_MAGIC_NUMBER;
    }
    marker += 4;
  }

  version->tiled = false;
  version->long_name = false;
  version->non_image = false;
  version->multipart = false;

  // Parse version header.
  {
    // must be 2
    if (marker[0] != 2) {
      return TINYEXR_ERROR_INVALID_EXR_VERSION;
    }

    // NOTE(review): dead check — `version` was already dereferenced above and
    // NULL-checked at function entry.
    if (version == NULL) {
      return TINYEXR_SUCCESS;  // May OK
    }

    version->version = 2;

    if (marker[1] & 0x2) {  // 9th bit
      version->tiled = true;
    }
    if (marker[1] & 0x4) {  // 10th bit
      version->long_name = true;
    }
    if (marker[1] & 0x8) {        // 11th bit
      version->non_image = true;  // (deep image)
    }
    if (marker[1] & 0x10) {  // 12th bit
      version->multipart = true;
    }
  }

  return TINYEXR_SUCCESS;
}

// Reads the first kEXRVersionSize bytes of `filename` and parses them with
// ParseEXRVersionFromMemory.
int ParseEXRVersionFromFile(EXRVersion *version, const char *filename) {
  if (filename == NULL) {
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t file_size;
  // Compute size
  fseek(fp, 0, SEEK_END);
  file_size = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  if (file_size < tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_FILE;
  }

  unsigned char buf[tinyexr::kEXRVersionSize];
  size_t ret = fread(&buf[0], 1, tinyexr::kEXRVersionSize, fp);
  fclose(fp);

  if (ret != tinyexr::kEXRVersionSize) {
    return TINYEXR_ERROR_INVALID_FILE;
  }

  return ParseEXRVersionFromMemory(version, buf, tinyexr::kEXRVersionSize);
}

// Decodes all parts of a multipart EXR from memory into `exr_images`
// (caller-provided array of `num_parts` images; headers come from
// ParseEXRMultipartHeaderFromMemory). Returns TINYEXR_SUCCESS or a
// TINYEXR_ERROR_* code.
int LoadEXRMultipartImageFromMemory(EXRImage *exr_images,
                                    const EXRHeader **exr_headers,
                                    unsigned int num_parts,
                                    const unsigned char *memory,
                                    const size_t size, const char **err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0 ||
      memory == NULL || (size <= tinyexr::kEXRVersionSize)) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromMemory()", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  // compute total header size.
  size_t total_header_size = 0;
  for (unsigned int i = 0; i < num_parts; i++) {
    if (exr_headers[i]->header_len == 0) {
      tinyexr::SetErrorMessage("EXRHeader variable is not initialized.", err);
      return TINYEXR_ERROR_INVALID_ARGUMENT;
    }

    total_header_size += exr_headers[i]->header_len;
  }

  const char *marker = reinterpret_cast<const char *>(
      memory + total_header_size + 4 +
      4);  // +8 for magic number and version header.

  marker += 1;  // Skip empty header.

  // NOTE 1:
  // In multipart image, There is 'part number' before chunk data.
  // 4 byte : part number
  // 4+     : chunk
  //
  // NOTE 2:
  // EXR spec says 'part number' is 'unsigned long' but actually this is
  // 'unsigned int(4 bytes)' in OpenEXR implementation...
  // http://www.openexr.com/openexrfilelayout.pdf

  // Load chunk offset table.
  std::vector<std::vector<tinyexr::tinyexr_uint64> > chunk_offset_table_list;
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    std::vector<tinyexr::tinyexr_uint64> offset_table(
        static_cast<size_t>(exr_headers[i]->chunk_count));

    for (size_t c = 0; c < offset_table.size(); c++) {
      tinyexr::tinyexr_uint64 offset;
      memcpy(&offset, marker, 8);
      tinyexr::swap8(&offset);

      if (offset >= size) {
        tinyexr::SetErrorMessage("Invalid offset size in EXR header chunks.",
                                 err);
        return TINYEXR_ERROR_INVALID_DATA;
      }

      offset_table[c] = offset + 4;  // +4 to skip 'part number'
      marker += 8;
    }

    chunk_offset_table_list.push_back(offset_table);
  }

  // Decode image.
  for (size_t i = 0; i < static_cast<size_t>(num_parts); i++) {
    std::vector<tinyexr::tinyexr_uint64> &offset_table =
        chunk_offset_table_list[i];

    // First check 'part number' is identitical to 'i'
    for (size_t c = 0; c < offset_table.size(); c++) {
      const unsigned char *part_number_addr =
          memory + offset_table[c] - 4;  // -4 to move to 'part number' field.
      unsigned int part_no;
      memcpy(&part_no, part_number_addr, sizeof(unsigned int));  // 4
      tinyexr::swap4(&part_no);

      if (part_no != i) {
        tinyexr::SetErrorMessage("Invalid `part number' in EXR header chunks.",
                                 err);
        return TINYEXR_ERROR_INVALID_DATA;
      }
    }

    std::string e;
    int ret = tinyexr::DecodeChunk(&exr_images[i], exr_headers[i], offset_table,
                                   memory, size, &e);
    if (ret != TINYEXR_SUCCESS) {
      if (!e.empty()) {
        tinyexr::SetErrorMessage(e, err);
      }
      return ret;
    }
  }

  return TINYEXR_SUCCESS;
}

// File-based wrapper: reads `filename` fully into memory and delegates to
// LoadEXRMultipartImageFromMemory.
int LoadEXRMultipartImageFromFile(EXRImage *exr_images,
                                  const EXRHeader **exr_headers,
                                  unsigned int num_parts, const char *filename,
                                  const char **err) {
  if (exr_images == NULL || exr_headers == NULL || num_parts == 0) {
    tinyexr::SetErrorMessage(
        "Invalid argument for LoadEXRMultipartImageFromFile", err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

#ifdef _WIN32
  FILE *fp = NULL;
  fopen_s(&fp, filename, "rb");
#else
  FILE *fp = fopen(filename, "rb");
#endif
  if (!fp) {
    tinyexr::SetErrorMessage("Cannot read file " + std::string(filename), err);
    return TINYEXR_ERROR_CANT_OPEN_FILE;
  }

  size_t filesize;
  // Compute size
  fseek(fp, 0, SEEK_END);
  filesize = static_cast<size_t>(ftell(fp));
  fseek(fp, 0, SEEK_SET);

  std::vector<unsigned char> buf(filesize);  // @todo { use mmap }
  {
    size_t ret;
    ret = fread(&buf[0], 1, filesize, fp);
    assert(ret == filesize);
    fclose(fp);
    (void)ret;
  }

  return LoadEXRMultipartImageFromMemory(exr_images, exr_headers, num_parts,
                                         &buf.at(0), filesize, err);
}

// Convenience writer: saves interleaved float RGBA/RGB/grayscale `data`
// (width*height*components floats) to `outfilename`, optionally converting
// to half (save_as_fp16 > 0). Returns TINYEXR_SUCCESS or a TINYEXR_ERROR_*
// code. Body continues on the next lines.
int SaveEXR(const float *data, int width, int height, int components,
            const int save_as_fp16, const char *outfilename, const char **err) {
  if ((components == 1) || components == 3 || components == 4) {
    // OK
  } else {
    std::stringstream ss;
    ss << "Unsupported component value : " << components << std::endl;

    tinyexr::SetErrorMessage(ss.str(), err);
    return TINYEXR_ERROR_INVALID_ARGUMENT;
  }

  EXRHeader header;
  InitEXRHeader(&header);

  if ((width < 16) && (height < 16)) {
    // No compression for small image.
    // ---- continuation of SaveEXR() (function starts on the previous line):
    // de-interleave channels, build header, and write the file. ----
    header.compression_type = TINYEXR_COMPRESSIONTYPE_NONE;
  } else {
    header.compression_type = TINYEXR_COMPRESSIONTYPE_ZIP;
  }

  EXRImage image;
  InitEXRImage(&image);

  image.num_channels = components;

  std::vector<float> images[4];

  if (components == 1) {
    images[0].resize(static_cast<size_t>(width * height));
    memcpy(images[0].data(), data, sizeof(float) * size_t(width * height));
  } else {
    images[0].resize(static_cast<size_t>(width * height));
    images[1].resize(static_cast<size_t>(width * height));
    images[2].resize(static_cast<size_t>(width * height));
    images[3].resize(static_cast<size_t>(width * height));

    // Split RGB(A)RGB(A)RGB(A)... into R, G and B(and A) layers
    for (size_t i = 0; i < static_cast<size_t>(width * height); i++) {
      images[0][i] = data[static_cast<size_t>(components) * i + 0];
      images[1][i] = data[static_cast<size_t>(components) * i + 1];
      images[2][i] = data[static_cast<size_t>(components) * i + 2];
      if (components == 4) {
        images[3][i] = data[static_cast<size_t>(components) * i + 3];
      }
    }
  }

  // Channel planes in (A)BGR order to match the channel names written below.
  float *image_ptr[4] = {0, 0, 0, 0};
  if (components == 4) {
    image_ptr[0] = &(images[3].at(0));  // A
    image_ptr[1] = &(images[2].at(0));  // B
    image_ptr[2] = &(images[1].at(0));  // G
    image_ptr[3] = &(images[0].at(0));  // R
  } else if (components == 3) {
    image_ptr[0] = &(images[2].at(0));  // B
    image_ptr[1] = &(images[1].at(0));  // G
    image_ptr[2] = &(images[0].at(0));  // R
  } else if (components == 1) {
    image_ptr[0] = &(images[0].at(0));  // A
  }

  image.images = reinterpret_cast<unsigned char **>(image_ptr);
  image.width = width;
  image.height = height;

  header.num_channels = components;
  header.channels = static_cast<EXRChannelInfo *>(malloc(
      sizeof(EXRChannelInfo) * static_cast<size_t>(header.num_channels)));
  // Must be (A)BGR order, since most of EXR viewers expect this channel order.
  if (components == 4) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
    strncpy_s(header.channels[1].name, "B", 255);
    strncpy_s(header.channels[2].name, "G", 255);
    strncpy_s(header.channels[3].name, "R", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
    strncpy(header.channels[1].name, "B", 255);
    strncpy(header.channels[2].name, "G", 255);
    strncpy(header.channels[3].name, "R", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
    header.channels[1].name[strlen("B")] = '\0';
    header.channels[2].name[strlen("G")] = '\0';
    header.channels[3].name[strlen("R")] = '\0';
  } else if (components == 3) {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "B", 255);
    strncpy_s(header.channels[1].name, "G", 255);
    strncpy_s(header.channels[2].name, "R", 255);
#else
    strncpy(header.channels[0].name, "B", 255);
    strncpy(header.channels[1].name, "G", 255);
    strncpy(header.channels[2].name, "R", 255);
#endif
    header.channels[0].name[strlen("B")] = '\0';
    header.channels[1].name[strlen("G")] = '\0';
    header.channels[2].name[strlen("R")] = '\0';
  } else {
#ifdef _MSC_VER
    strncpy_s(header.channels[0].name, "A", 255);
#else
    strncpy(header.channels[0].name, "A", 255);
#endif
    header.channels[0].name[strlen("A")] = '\0';
  }

  header.pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  header.requested_pixel_types = static_cast<int *>(
      malloc(sizeof(int) * static_cast<size_t>(header.num_channels)));
  for (int i = 0; i < header.num_channels; i++) {
    header.pixel_types[i] =
        TINYEXR_PIXELTYPE_FLOAT;  // pixel type of input image

    if (save_as_fp16 > 0) {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_HALF;  // save with half(fp16) pixel format
    } else {
      header.requested_pixel_types[i] =
          TINYEXR_PIXELTYPE_FLOAT;  // save with float(fp32) pixel format(i.e.
                                    // no precision reduction)
    }
  }

  int ret = SaveEXRImageToFile(&image, &header, outfilename, err);
  if (ret != TINYEXR_SUCCESS) {
    // FIXME(review): header.channels / pixel_types / requested_pixel_types
    // are leaked on this early-return path.
    return ret;
  }

  free(header.channels);
  free(header.pixel_types);
  free(header.requested_pixel_types);

  return ret;
}

#ifdef __clang__
// zero-as-null-ppinter-constant
#pragma clang diagnostic pop
#endif

#endif  // TINYEXR_IMPLEMENTATION_DEIFNED
#endif  // TINYEXR_IMPLEMENTATION
GB_unop__acosh_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__acosh_fp32_fp32
// op(A') function:  GB_unop_tran__acosh_fp32_fp32

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = acoshf (aij)

// type of the A matrix entries
#define GB_ATYPE \
    float

// type of the C matrix entries
#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

// reference to the pC-th entry of C
#define GB_CX(p) Cx [p]

// unary operator: z = acosh (x), single precision
#define GB_OP(z, x) \
    z = acoshf (x) ;

// casting (no-op here: A and C are both float)
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij): load, cast, apply, and store in one step
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    float aij = Ax [pA] ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ;        \
    Cx [pC] = acoshf (z) ; \
}

// true if operator is the identity op with no typecasting
// (would allow a plain memcpy instead of applying the op; false for acosh)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ACOSH || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies z = acoshf (x) elementwise to the anz entries of Ax, writing the
// result into Cx, using up to nthreads OpenMP threads.  If Ab is non-NULL,
// A is a bitmap matrix and only positions with Ab [p] != 0 are valid.
// Returns GrB_SUCCESS, or GrB_NO_VALUE if this kernel is compiled out.
GrB_Info GB_unop_apply__acosh_fp32_fp32
(
    float *Cx,               // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: every position holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = acoshf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions that hold no entry
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = acoshf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body is supplied by the shared template GB_unop_transpose.c,
// which expands the GB_* macros defined above.
GrB_Info GB_unop_tran__acosh_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
test.c
#include <stdlib.h>
#include <stdio.h>

#pragma omp requires unified_shared_memory

/*
 * Launches two deferred (nowait) target tasks on different devices with a
 * depend(out) -> depend(in) chain on `dep`, so the second increment must
 * observe the first.  The taskwait blocks until both target tasks finish;
 * `dep` must then be exactly 2.
 */
int main(int argc, char *argv[]) {
  int dep = 0;

  /* Producer: first increment, ordered before the consumer via depend(out). */
  #pragma omp target device(2) nowait map(tofrom: dep) depend(out: dep)
  {
    dep++;
  }

  /* Consumer: depend(in) on dep forces this task to run after the producer. */
  #pragma omp target device(3) nowait map(tofrom: dep) depend(in: dep)
  {
    dep++;
  }

  /* Wait for both deferred target tasks to complete. */
  #pragma omp taskwait

  if (dep == 2) {
    printf("completed with 0 errors\n");
    return EXIT_SUCCESS;
  }

  /* BUG FIX: message grammar ("a error" -> "an error") and exit status —
   * the test previously reported EXIT_SUCCESS even on failure. */
  printf("completed with an error:\n");
  printf("dep should be 2, but is %d\n", dep);
  return EXIT_FAILURE;
}
MinMax.h
#ifndef DDM__ALGORITHM__MIN_MAX_H__ #define DDM__ALGORITHM__MIN_MAX_H__ #include "../../ddm/internal/Config.h" #include "../../ddm/Allocator.h" #include "../../ddm/algorithm/LocalRange.h" #include "../../ddm/util/Config.h" #include "../../ddm/util/Trace.h" #include "../../ddm/util/UnitLocality.h" #include "../../ddm/iterator/GlobIter.h" #include "../../ddm/internal/Logging.h" #include <algorithm> #include <memory> #ifdef DDM_ENABLE_OPENMP #include <omp.h> #endif namespace ddm { /** * Finds an iterator pointing to the element with the smallest value in * the range [first,last). * Specialization for local range, delegates to std::min_element. * * \return An iterator to the first occurrence of the smallest value * in the range, or \c last if the range is empty. * * \tparam ElementType Type of the elements in the sequence * \tparam Compare Binary comparison function with signature * \c bool (const TypeA &a, const TypeB &b) * * \complexity O(d) + O(nl), with \c d dimensions in the global iterators' * pattern and \c nl local elements within the global range * * \ingroup DDMAlgorithms */ template < class ElementType, class Compare = std::less<const ElementType &> > const ElementType * min_element( /// Iterator to the initial position in the sequence const ElementType * l_range_begin, /// Iterator to the final position in the sequence const ElementType * l_range_end, /// Element comparison function, defaults to std::less Compare compare = std::less<const ElementType &>()) { #ifdef DDM_ENABLE_OPENMP ddm::util::UnitLocality uloc; auto n_threads = uloc.num_domain_threads(); DDM_LOG_DEBUG("ddm::min_element", "thread capacity:", n_threads); // TODO: Should also restrict on elements/units > ~10240. // Find a model for the minimum work laod. 
if (n_threads > 1) { auto l_size = l_range_end - l_range_begin; int min_idx_l = 0; ElementType min_val_l = *l_range_begin; typedef struct min_pos_t { ElementType val; size_t idx; } min_pos; DDM_LOG_DEBUG("ddm::min_element", "local range size:", l_size); int align_bytes = uloc.cache_line_size(0); size_t min_vals_t_size = n_threads + 1 + (align_bytes / sizeof(min_pos)); size_t min_vals_t_bytes = min_vals_t_size * sizeof(min_pos); min_pos * min_vals_t_raw = new min_pos[min_vals_t_size]; void * min_vals_t_alg = min_vals_t_raw; min_pos * min_vals_t = static_cast<min_pos *>( ddm::align( align_bytes, sizeof(min_pos), min_vals_t_alg, min_vals_t_bytes)); DDM_LOG_TRACE("ddm::min_element", "min * alloc:", min_vals_t_raw); DDM_LOG_TRACE("ddm::min_element", "min * aligned:", min_vals_t); DDM_LOG_TRACE("ddm::min_element", "min * size:", min_vals_t_bytes); DDM_ASSERT_GE(min_vals_t_bytes, n_threads * sizeof(min_pos), "Aligned buffer of min_pos has insufficient size"); DDM_ASSERT_MSG(nullptr != min_vals_t, "Aligned allocation of min_pos returned nullptr"); // Cannot use user-defined reduction (OpenMP 4.0) as the compare // predicate cannot be used in `omp declare reduction`. 
// Avoid omp for + omp critical section by using array of // thread-local minimum values, aligned to prevent false sharing: int t_id; #pragma omp parallel num_threads(n_threads) private(t_id) { // Documentation of Intel MIC intrinsics, see: // https://software.intel.com/de-de/node/523533 // https://software.intel.com/de-de/node/523387 t_id = omp_get_thread_num(); DDM_LOG_TRACE("ddm::min_element", "starting thread", t_id); min_vals_t[t_id].idx = min_idx_l; min_vals_t[t_id].val = min_val_l; // Cannot use explicit private(min_val_t) as ElementType might // not be default-constructible: #pragma omp for schedule(static) // #pragma ivdep // #pragma vector aligned nontemporal for (int i = 0; i < l_size; i++) { const ElementType & val_t = *(l_range_begin + i); if (compare(val_t, min_vals_t[t_id].val)) { min_vals_t[t_id].val = val_t; min_vals_t[t_id].idx = i; } } DDM_LOG_TRACE("ddm::min_element", "local minimum at thread", t_id, "idx:", min_vals_t[t_id].idx, "val:", min_vals_t[t_id].val); } min_pos min_pos_l = min_vals_t[0]; for (int t = 1; t < n_threads; t++) { const min_pos & mpt = min_vals_t[t]; if (compare(mpt.val, min_pos_l.val)) { min_pos_l = mpt; } } delete[] min_vals_t_raw; return (l_range_begin + min_pos_l.idx); } #endif // DDM_ENABLE_OPENMP return ::std::min_element(l_range_begin, l_range_end, compare); } /** * Finds an iterator pointing to the element with the smallest value in * the range [first,last). * * \return An iterator to the first occurrence of the smallest value * in the range, or \c last if the range is empty. 
* * \tparam ElementType Type of the elements in the sequence * \tparam Compare Binary comparison function with signature * \c bool (const TypeA &a, const TypeB &b) * * \complexity O(d) + O(nl), with \c d dimensions in the global iterators' * pattern and \c nl local elements within the global range * * \ingroup DDMAlgorithms */ template < class ElementType, class PatternType, class Compare = std::less<const ElementType &> > GlobIter<ElementType, PatternType> min_element( /// Iterator to the initial position in the sequence const GlobIter<ElementType, PatternType> & first, /// Iterator to the final position in the sequence const GlobIter<ElementType, PatternType> & last, /// Element comparison function, defaults to std::less Compare compare = std::less<const ElementType &>()) { typedef ddm::GlobIter<ElementType, PatternType> globiter_t; typedef PatternType pattern_t; typedef typename pattern_t::index_type index_t; // return last for empty array if (first == last) { DDM_LOG_DEBUG("ddm::min_element >", "empty range, returning last", last); return last; } ddm::util::Trace trace("min_element"); auto & pattern = first.pattern(); auto & team = pattern.team(); DDM_LOG_DEBUG("ddm::min_element()", "allocate minarr, size", team.size()); // Global position of end element in range: auto gi_last = last.gpos(); // Find the local min. 
element in parallel // Get local address range between global iterators: auto local_idx_range = ddm::local_index_range(first, last); // Pointer to local minimum element: const ElementType * lmin = nullptr; // Local offset of local minimum element, or -1 if no element found: index_t l_idx_lmin = -1; if (local_idx_range.begin == local_idx_range.end) { // local range is empty DDM_LOG_DEBUG("ddm::min_element", "local range empty"); } else { trace.enter_state("local"); // Pointer to first element in local memory: const ElementType * lbegin = first.globmem().lbegin(); // Pointers to first / final element in local range: const ElementType * l_range_begin = lbegin + local_idx_range.begin; const ElementType * l_range_end = lbegin + local_idx_range.end; lmin = ddm::min_element(l_range_begin, l_range_end, compare); if (lmin != l_range_end) { DDM_LOG_TRACE_VAR("ddm::min_element", *lmin); // Offset of local minimum in local memory: l_idx_lmin = lmin - lbegin; } trace.exit_state("local"); } DDM_LOG_TRACE("ddm::min_element", "local index of local minimum:", l_idx_lmin); DDM_LOG_TRACE("ddm::min_element", "waiting for local min of other units"); trace.enter_state("barrier"); team.barrier(); trace.exit_state("barrier"); typedef struct { ElementType value; index_t g_index; } local_min_t; std::vector<local_min_t> local_min_values(team.size()); // Set global index of local minimum to -1 if no local minimum has been // found: local_min_t local_min; local_min.value = l_idx_lmin < 0 ? ElementType() : *lmin; local_min.g_index = l_idx_lmin < 0 ? 
-1 : pattern.global(l_idx_lmin); DDM_LOG_TRACE("ddm::min_element", "sending local minimum: {", "value:", local_min.value, "g.index:", local_min.g_index, "}"); DDM_LOG_TRACE("ddm::min_element", "dart_allgather()"); trace.enter_state("allgather"); DDM_ASSERT_RETURNS( dart_allgather( &local_min, local_min_values.data(), sizeof(local_min_t), DART_TYPE_BYTE, team.dart_id()), DART_OK); trace.exit_state("allgather"); #ifdef DDM_ENABLE_LOGGING for (int lmin_u = 0; lmin_u < local_min_values.size(); lmin_u++) { auto lmin_entry = local_min_values[lmin_u]; DDM_LOG_TRACE("ddm::min_element", "dart_allgather >", "unit:", lmin_u, "value:", lmin_entry.value, "g_index:", lmin_entry.g_index); } #endif auto gmin_elem_it = ::std::min_element( local_min_values.begin(), local_min_values.end(), [&](const local_min_t & a, const local_min_t & b) { // Ignore elements with global index -1 (no // element found): return (b.g_index < 0 || (a.g_index > 0 && compare(a.value, b.value))); }); if (gmin_elem_it == local_min_values.end()) { DDM_LOG_DEBUG_VAR("ddm::min_element >", last); return last; } auto gi_minimum = gmin_elem_it->g_index; DDM_LOG_TRACE("ddm::min_element", "min. value:", gmin_elem_it->value, "at unit:", (gmin_elem_it - local_min_values.begin()), "global idx:", gi_minimum); DDM_LOG_TRACE_VAR("ddm::min_element", gi_minimum); if (gi_minimum < 0 || gi_minimum == gi_last) { DDM_LOG_DEBUG_VAR("ddm::min_element >", last); return last; } // iterator 'first' is relative to start of input range, convert to start // of its referenced container (= container.begin()), then apply global // offset of minimum element: globiter_t minimum = (first - first.gpos()) + gi_minimum; DDM_LOG_DEBUG("ddm::min_element >", minimum, "=", static_cast<ElementType>(*minimum)); return minimum; } /** * Finds an iterator pointing to the element with the greatest value in * the range [first,last). * * \return An iterator to the first occurrence of the greatest value * in the range, or \c last if the range is empty. 
* * \tparam ElementType Type of the elements in the sequence * \tparam Compare Binary comparison function with signature * \c bool (const TypeA &a, const TypeB &b) * * \complexity O(d) + O(nl), with \c d dimensions in the global iterators' * pattern and \c nl local elements within the global range * * \ingroup DDMAlgorithms */ template < class ElementType, class PatternType, class Compare = std::greater<const ElementType &> > GlobIter<ElementType, PatternType> max_element( /// Iterator to the initial position in the sequence const GlobIter<ElementType, PatternType> & first, /// Iterator to the final position in the sequence const GlobIter<ElementType, PatternType> & last, /// Element comparison function, defaults to std::less Compare compare = std::greater<const ElementType &>()) { // Same as min_element with different compare function return ddm::min_element(first, last, compare); } /** * Finds an iterator pointing to the element with the greatest value in * the range [first,last). * Specialization for local range, delegates to std::min_element. * * \return An iterator to the first occurrence of the greatest value * in the range, or \c last if the range is empty. 
* * \tparam ElementType Type of the elements in the sequence * \tparam Compare Binary comparison function with signature * \c bool (const TypeA &a, const TypeB &b) * * \complexity O(d) + O(nl), with \c d dimensions in the global iterators' * pattern and \c nl local elements within the global range * * \ingroup DDMAlgorithms */ template < class ElementType, class Compare = std::greater<const ElementType &> > const ElementType * max_element( /// Iterator to the initial position in the sequence const ElementType * first, /// Iterator to the final position in the sequence const ElementType * last, /// Element comparison function, defaults to std::less Compare compare = std::greater<const ElementType &>()) { // Same as min_element with different compare function return ddm::min_element(first, last, compare); } } // namespace ddm #endif // DDM__ALGORITHM__MIN_MAX_H__
9857.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose

// Time-stepped 7-point Jacobi stencil for the 3-D heat equation
// (PolyBench heat-3d), with loop tiling (8x8x16) applied by CHILL.
// Each time step updates B from A over the interior [1, n-2]^3, then
// writes the next A back from B with the same stencil.
void kernel_heat_3d(int tsteps, int n, double A[120 + 0][120 + 0][120 + 0], double B[120 + 0][120 + 0][120 + 0]) {
  int t14;  // innermost point loop (k)
  int t12;  // k-tile origin, tile width 16
  int t10;  // point loop (j)
  int t8;   // j-tile origin, tile width 8
  int t6;   // point loop (i)
  int t4;   // i-tile origin, tile width 8
  int t2;   // time step
  // BUG FIX: the generated code hard-coded 500 time steps and ignored the
  // tsteps parameter; honor tsteps (behavior is unchanged for callers that
  // pass tsteps == 500, the PolyBench default for this dataset).
  for (t2 = 1; t2 <= tsteps; t2 += 1) {
    // First half step: B = stencil(A)
    #pragma omp parallel for private(t4,t6,t8,t10,t12,t14)
    for (t4 = 1; t4 <= n - 2; t4 += 8)
      for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 8)
          for (t10 = t8; t10 <= (n - 2 < t8 + 7 ? n - 2 : t8 + 7); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 16)
              for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1)
                B[t6][t10][t14] =
                    0.125 * (A[t6 + 1][t10][t14] - 2 * A[t6][t10][t14] + A[t6 - 1][t10][t14])
                  + 0.125 * (A[t6][t10 + 1][t14] - 2 * A[t6][t10][t14] + A[t6][t10 - 1][t14])
                  + 0.125 * (A[t6][t10][t14 + 1] - 2 * A[t6][t10][t14] + A[t6][t10][t14 - 1])
                  + A[t6][t10][t14];
    // Second half step: A = stencil(B)
    #pragma omp parallel for private(t4,t6,t8,t10,t12,t14)
    for (t4 = 1; t4 <= n - 2; t4 += 8)
      for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1)
        for (t8 = 1; t8 <= n - 2; t8 += 8)
          for (t10 = t8; t10 <= (n - 2 < t8 + 7 ? n - 2 : t8 + 7); t10 += 1)
            for (t12 = 1; t12 <= n - 2; t12 += 16)
              for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1)
                A[t6][t10][t14] =
                    0.125 * (B[t6 + 1][t10][t14] - 2 * B[t6][t10][t14] + B[t6 - 1][t10][t14])
                  + 0.125 * (B[t6][t10 + 1][t14] - 2 * B[t6][t10][t14] + B[t6][t10 - 1][t14])
                  + 0.125 * (B[t6][t10][t14 + 1] - 2 * B[t6][t10][t14] + B[t6][t10][t14 - 1])
                  + B[t6][t10][t14];
  }
}
convolution_1x1_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available. // // Copyright (C) 2019 BUG1989. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #if __ARM_NEON #include <arm_neon.h> #endif // __ARM_NEON static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch) { const signed char* kernel = _kernel; #if __ARM_NEON && __aarch64__ kernel_tm.create(4*8, inch/4 + inch%4, outch/8 + (outch%8)/4 + outch%4, (size_t)1u); #else kernel_tm.create(4*4, inch/4 + inch%4, outch/4 + outch%4, (size_t)1u); #endif // __ARM_NEON && __aarch64__ int p = 0; #if __ARM_NEON && __aarch64__ for (; p+7<outch; p+=8) { const signed char* kernel0 = kernel + (p+0)*inch; const signed char* kernel1 = kernel + (p+1)*inch; const signed char* kernel2 = kernel + (p+2)*inch; const signed char* kernel3 = kernel + (p+3)*inch; const signed char* kernel4 = kernel + (p+4)*inch; const signed char* kernel5 = kernel + (p+5)*inch; const signed char* kernel6 = kernel + (p+6)*inch; const signed char* kernel7 = kernel + (p+7)*inch; signed char* ktmp = kernel_tm.channel(p/8); for (int q=0; q<inch; q++) { // kernel0...7 0 ktmp[0] = kernel0[0]; ktmp[1] = kernel1[0]; ktmp[2] = kernel2[0]; ktmp[3] = kernel3[0]; ktmp[4] = kernel4[0]; ktmp[5] = kernel5[0]; ktmp[6] = kernel6[0]; ktmp[7] = kernel7[0]; ktmp += 8; kernel0 += 1; kernel1 += 1; kernel2 += 1; kernel3 += 1; kernel4 += 1; kernel5 += 1; kernel6 += 1; kernel7 += 1; } } #endif // 
__ARM_NEON && __aarch64__ for (; p+3<outch; p+=4) { const signed char* kernel0 = kernel + (p+0)*inch; const signed char* kernel1 = kernel + (p+1)*inch; const signed char* kernel2 = kernel + (p+2)*inch; const signed char* kernel3 = kernel + (p+3)*inch; #if __ARM_NEON && __aarch64__ signed char* ktmp = kernel_tm.channel(p/8 + (p%8)/4); #else signed char* ktmp = kernel_tm.channel(p/4); #endif // __ARM_NEON && __aarch64__ for (int q=0; q<inch; q++) { // kernel0...3 0 ktmp[0] = kernel0[0]; ktmp[1] = kernel1[0]; ktmp[2] = kernel2[0]; ktmp[3] = kernel3[0]; ktmp += 4; kernel0 += 1; kernel1 += 1; kernel2 += 1; kernel3 += 1; } } for (; p<outch; p++) { const signed char* kernel0 = kernel + p*inch; #if __ARM_NEON && __aarch64__ signed char* ktmp = kernel_tm.channel(p/8 + (p%8)/4 + p%4); #else signed char* ktmp = kernel_tm.channel(p/4 + p%4); #endif // __ARM_NEON && __aarch64__ for (int q=0; q<inch; q++) { ktmp[0] = kernel0[0]; ktmp++; kernel0++; } } } /* * Convolution 1x1 quantized with sgemm int8 */ static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; const int size = w * h; // interleave Mat tmp(8*4, inch/4+inch%4, size/8 + (size%8)/4 + size%4, 1u, opt.workspace_allocator); { int nn_size = size >> 3; int remain_size_start = nn_size << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = ii * 8; const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr = tmp.channel(i/8); for (int q=0; q<inch; q++) { #if __ARM_NEON #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8b}, [%0] \n" "st1 {v0.8b}, [%1], #8 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "cc", "memory", "v0" ); #else asm volatile( "pld [%0, #64] \n" "vld1.s8 {d0}, [%0] \n" "vst1.s8 {d0}, [%1]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "d0" ); #endif // __aarch64__ img0 += bottom_blob.cstep; #else tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr[4] = img0[4]; tmpptr[5] = img0[5]; tmpptr[6] = img0[6]; tmpptr[7] = img0[7]; tmpptr += 8; img0 += bottom_blob.cstep; #endif // __ARM_NEON__ } } nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii=0; ii<nn_size; ii++) { int i = remain_size_start + ii * 4; const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr = tmp.channel(i/8 + (i%8)/4); for (int q=0; q<inch; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img0[2]; tmpptr[3] = img0[3]; tmpptr += 4; img0 += bottom_blob.cstep; } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i=remain_size_start; i<size; i++) { const signed char* img0 = bottom_blob.channel(0); img0 += i; signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); for (int q=0; q<inch; q++) { tmpptr[0] = img0[0]; tmpptr++; img0 += bottom_blob.cstep; } } } // sgemm process int nn_outch = 0; int remain_outch_start = 0; #if __ARM_NEON && __aarch64__ nn_outch = outch >> 3; remain_outch_start = nn_outch << 3; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = pp * 8; int* outptr0 = top_blob.channel(p); int* outptr1 = top_blob.channel(p+1); int* outptr2 = top_blob.channel(p+2); int* outptr3 = top_blob.channel(p+3); int* outptr4 = top_blob.channel(p+4); int* outptr5 = top_blob.channel(p+5); int* outptr6 = top_blob.channel(p+6); int* outptr7 = top_blob.channel(p+7); int i = 0; for (; i+7<size; i+=8) { const signed char* tmpptr = tmp.channel(i/8); const signed char* kptr = kernel.channel(p/8); #if __ARM_NEON && __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum0n "eor v18.16b, 
v18.16b, v18.16b \n" // sum1 "eor v19.16b, v19.16b, v19.16b \n" // sum1n "eor v20.16b, v20.16b, v20.16b \n" // sum2 "eor v21.16b, v21.16b, v21.16b \n" // sum2n "eor v22.16b, v22.16b, v22.16b \n" // sum3 "eor v23.16b, v23.16b, v23.16b \n" // sum3n "eor v24.16b, v24.16b, v24.16b \n" // sum4 "eor v25.16b, v25.16b, v25.16b \n" // sum4n "eor v26.16b, v26.16b, v26.16b \n" // sum5 "eor v27.16b, v27.16b, v27.16b \n" // sum5n "eor v28.16b, v28.16b, v28.16b \n" // sum6 "eor v29.16b, v29.16b, v29.16b \n" // sum6n "eor v30.16b, v30.16b, v30.16b \n" // sum7 "eor v31.16b, v31.16b, v31.16b \n" // sum7n // inch loop "lsr w4, %w20, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" //"prfm pldl1keep, [%9, #128] \n" // k "ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [%9], #32 \n" //"prfm pldl1keep, [%8, #128] \n" // d "ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [%8], #32 \n" "sshll v0.8h, v0.8b, #0 \n" // k00 - k70 "sshll v1.8h, v1.8b, #0 \n" // k01 - k71 "sshll v2.8h, v2.8b, #0 \n" // k02 - k72 "sshll v3.8h, v3.8b, #0 \n" // k03 - k73 "sshll v8.8h, v8.8b, #0 \n" // a00 - a70 "sshll v9.8h, v9.8b, #0 \n" // a01 - a71 "sshll v10.8h, v10.8b, #0 \n" // a02 - a72 "sshll v11.8h, v11.8b, #0 \n" // a03 - a73 // k0 "smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a70) * k00 "smlal2 v17.4s, v8.8h, v0.h[0] \n"// "smlal v18.4s, v8.4h, v0.h[1] \n"// sum1 += (a00-a70) * k10 "smlal2 v19.4s, v8.8h, v0.h[1] \n"// "smlal v20.4s, v8.4h, v0.h[2] \n"// sum2 += (a00-a70) * k20 "smlal2 v21.4s, v8.8h, v0.h[2] \n"// "smlal v22.4s, v8.4h, v0.h[3] \n"// sum3 += (a00-a70) * k30 "smlal2 v23.4s, v8.8h, v0.h[3] \n"// "smlal v24.4s, v8.4h, v0.h[4] \n"// sum4 += (a00-a70) * k40 "smlal2 v25.4s, v8.8h, v0.h[4] \n"// "smlal v26.4s, v8.4h, v0.h[5] \n"// sum5 += (a00-a70) * k50 "smlal2 v27.4s, v8.8h, v0.h[5] \n"// "smlal v28.4s, v8.4h, v0.h[6] \n"// sum6 += (a00-a70) * k60 "smlal2 v29.4s, v8.8h, v0.h[6] \n"// "smlal v30.4s, v8.4h, v0.h[7] \n"// sum7 += (a00-a70) * k70 "smlal2 v31.4s, v8.8h, v0.h[7] \n"// // k1 "smlal 
v16.4s, v9.4h, v1.h[0] \n"// sum0 += (a01-a71) * k01 "smlal2 v17.4s, v9.8h, v1.h[0] \n"// "smlal v18.4s, v9.4h, v1.h[1] \n"// sum1 += (a01-a71) * k11 "smlal2 v19.4s, v9.8h, v1.h[1] \n"// "smlal v20.4s, v9.4h, v1.h[2] \n"// sum2 += (a01-a71) * k21 "smlal2 v21.4s, v9.8h, v1.h[2] \n"// "smlal v22.4s, v9.4h, v1.h[3] \n"// sum3 += (a01-a71) * k31 "smlal2 v23.4s, v9.8h, v1.h[3] \n"// "smlal v24.4s, v9.4h, v1.h[4] \n"// sum4 += (a01-a71) * k41 "smlal2 v25.4s, v9.8h, v1.h[4] \n"// "smlal v26.4s, v9.4h, v1.h[5] \n"// sum5 += (a01-a71) * k51 "smlal2 v27.4s, v9.8h, v1.h[5] \n"// "smlal v28.4s, v9.4h, v1.h[6] \n"// sum6 += (a01-a71) * k61 "smlal2 v29.4s, v9.8h, v1.h[6] \n"// "smlal v30.4s, v9.4h, v1.h[7] \n"// sum7 += (a01-a71) * k71 "smlal2 v31.4s, v9.8h, v1.h[7] \n"// // k2 "smlal v16.4s, v10.4h, v2.h[0] \n"// sum0 += (a02-a72) * k02 "smlal2 v17.4s, v10.8h, v2.h[0] \n"// "smlal v18.4s, v10.4h, v2.h[1] \n"// sum1 += (a02-a72) * k12 "smlal2 v19.4s, v10.8h, v2.h[1] \n"// "smlal v20.4s, v10.4h, v2.h[2] \n"// sum2 += (a02-a72) * k22 "smlal2 v21.4s, v10.8h, v2.h[2] \n"// "smlal v22.4s, v10.4h, v2.h[3] \n"// sum3 += (a02-a72) * k32 "smlal2 v23.4s, v10.8h, v2.h[3] \n"// "smlal v24.4s, v10.4h, v2.h[4] \n"// sum4 += (a02-a72) * k42 "smlal2 v25.4s, v10.8h, v2.h[4] \n"// "smlal v26.4s, v10.4h, v2.h[5] \n"// sum5 += (a02-a72) * k52 "smlal2 v27.4s, v10.8h, v2.h[5] \n"// "smlal v28.4s, v10.4h, v2.h[6] \n"// sum6 += (a02-a72) * k62 "smlal2 v29.4s, v10.8h, v2.h[6] \n"// "smlal v30.4s, v10.4h, v2.h[7] \n"// sum7 += (a02-a72) * k72 "smlal2 v31.4s, v10.8h, v2.h[7] \n"// "subs w4, w4, #1 \n" // k3 "smlal v16.4s, v11.4h, v3.h[0] \n"// sum0 += (a03-a73) * k03 "smlal2 v17.4s, v11.8h, v3.h[0] \n"// "smlal v18.4s, v11.4h, v3.h[1] \n"// sum1 += (a03-a73) * k13 "smlal2 v19.4s, v11.8h, v3.h[1] \n"// "smlal v20.4s, v11.4h, v3.h[2] \n"// sum2 += (a03-a73) * k23 "smlal2 v21.4s, v11.8h, v3.h[2] \n"// "smlal v22.4s, v11.4h, v3.h[3] \n"// sum3 += (a03-a73) * k33 "smlal2 v23.4s, v11.8h, v3.h[3] \n"// "smlal 
v24.4s, v11.4h, v3.h[4] \n"// sum4 += (a03-a73) * k43 "smlal2 v25.4s, v11.8h, v3.h[4] \n"// "smlal v26.4s, v11.4h, v3.h[5] \n"// sum5 += (a03-a73) * k53 "smlal2 v27.4s, v11.8h, v3.h[5] \n"// "smlal v28.4s, v11.4h, v3.h[6] \n"// sum6 += (a03-a73) * k63 "smlal2 v29.4s, v11.8h, v3.h[6] \n"// "smlal v30.4s, v11.4h, v3.h[7] \n"// sum7 += (a03-a73) * k73 "smlal2 v31.4s, v11.8h, v3.h[7] \n"// "bne 0b \n" "1: \n" // remain loop "and w4, %w20, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" //"prfm pldl1keep, [%9, #128] \n" "ld1 {v0.8b}, [%9], #8 \n" //"prfm pldl1keep, [%8, #128] \n" "ld1 {v8.8b}, [%8], #8 \n" "sshll v0.8h, v0.8b, #0 \n" // k00 - k70 "sshll v8.8h, v8.8b, #0 \n" // a00 - a70 "subs w4, w4, #1 \n" // k0 "smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a70) * k00 "smlal2 v17.4s, v8.8h, v0.h[0] \n"// "smlal v18.4s, v8.4h, v0.h[1] \n"// sum1 += (a00-a70) * k10 "smlal2 v19.4s, v8.8h, v0.h[1] \n"// "smlal v20.4s, v8.4h, v0.h[2] \n"// sum2 += (a00-a70) * k20 "smlal2 v21.4s, v8.8h, v0.h[2] \n"// "smlal v22.4s, v8.4h, v0.h[3] \n"// sum3 += (a00-a70) * k30 "smlal2 v23.4s, v8.8h, v0.h[3] \n"// "smlal v24.4s, v8.4h, v0.h[4] \n"// sum4 += (a00-a70) * k40 "smlal2 v25.4s, v8.8h, v0.h[4] \n"// "smlal v26.4s, v8.4h, v0.h[5] \n"// sum5 += (a00-a70) * k50 "smlal2 v27.4s, v8.8h, v0.h[5] \n"// "smlal v28.4s, v8.4h, v0.h[6] \n"// sum6 += (a00-a70) * k60 "smlal2 v29.4s, v8.8h, v0.h[6] \n"// "smlal v30.4s, v8.4h, v0.h[7] \n"// sum7 += (a00-a70) * k70 "smlal2 v31.4s, v8.8h, v0.h[7] \n"// "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0], #32 \n" "st1 {v18.4s, v19.4s}, [%1], #32 \n" "st1 {v20.4s, v21.4s}, [%2], #32 \n" "st1 {v22.4s, v23.4s}, [%3], #32 \n" "st1 {v24.4s, v25.4s}, [%4], #32 \n" "st1 {v26.4s, v27.4s}, [%5], #32 \n" "st1 {v28.4s, v29.4s}, [%6], #32 \n" "st1 {v30.4s, v31.4s}, [%7], #32 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // 
%7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum0_4 = 0; int sum0_5 = 0; int sum0_6 = 0; int sum0_7 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum1_4 = 0; int sum1_5 = 0; int sum1_6 = 0; int sum1_7 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum2_4 = 0; int sum2_5 = 0; int sum2_6 = 0; int sum2_7 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int sum3_4 = 0; int sum3_5 = 0; int sum3_6 = 0; int sum3_7 = 0; int sum4_0 = 0; int sum4_1 = 0; int sum4_2 = 0; int sum4_3 = 0; int sum4_4 = 0; int sum4_5 = 0; int sum4_6 = 0; int sum4_7 = 0; int sum5_0 = 0; int sum5_1 = 0; int sum5_2 = 0; int sum5_3 = 0; int sum5_4 = 0; int sum5_5 = 0; int sum5_6 = 0; int sum5_7 = 0; int sum6_0 = 0; int sum6_1 = 0; int sum6_2 = 0; int sum6_3 = 0; int sum6_4 = 0; int sum6_5 = 0; int sum6_6 = 0; int sum6_7 = 0; int sum7_0 = 0; int sum7_1 = 0; int sum7_2 = 0; int sum7_3 = 0; int sum7_4 = 0; int sum7_5 = 0; int sum7_6 = 0; int sum7_7 = 0; for (int q=0; q<inch; q++) { sum0_0 += (int)tmpptr[0] * kptr[0]; sum0_1 += (int)tmpptr[1] * kptr[0]; sum0_2 += (int)tmpptr[2] * kptr[0]; sum0_3 += (int)tmpptr[3] * kptr[0]; sum0_4 += (int)tmpptr[4] * kptr[0]; sum0_5 += (int)tmpptr[5] * kptr[0]; sum0_6 += (int)tmpptr[6] * kptr[0]; sum0_7 += (int)tmpptr[7] * kptr[0]; sum1_0 += (int)tmpptr[0] * kptr[1]; sum1_1 += (int)tmpptr[1] * kptr[1]; sum1_2 += (int)tmpptr[2] * kptr[1]; sum1_3 += (int)tmpptr[3] * kptr[1]; sum1_4 += (int)tmpptr[4] * kptr[1]; sum1_5 += (int)tmpptr[5] * kptr[1]; 
sum1_6 += (int)tmpptr[6] * kptr[1]; sum1_7 += (int)tmpptr[7] * kptr[1]; sum2_0 += (int)tmpptr[0] * kptr[2]; sum2_1 += (int)tmpptr[1] * kptr[2]; sum2_2 += (int)tmpptr[2] * kptr[2]; sum2_3 += (int)tmpptr[3] * kptr[2]; sum2_4 += (int)tmpptr[4] * kptr[2]; sum2_5 += (int)tmpptr[5] * kptr[2]; sum2_6 += (int)tmpptr[6] * kptr[2]; sum2_7 += (int)tmpptr[7] * kptr[2]; sum3_0 += (int)tmpptr[0] * kptr[3]; sum3_1 += (int)tmpptr[1] * kptr[3]; sum3_2 += (int)tmpptr[2] * kptr[3]; sum3_3 += (int)tmpptr[3] * kptr[3]; sum3_4 += (int)tmpptr[4] * kptr[3]; sum3_5 += (int)tmpptr[5] * kptr[3]; sum3_6 += (int)tmpptr[6] * kptr[3]; sum3_7 += (int)tmpptr[7] * kptr[3]; sum4_0 += (int)tmpptr[0] * kptr[4]; sum4_1 += (int)tmpptr[1] * kptr[4]; sum4_2 += (int)tmpptr[2] * kptr[4]; sum4_3 += (int)tmpptr[3] * kptr[4]; sum4_4 += (int)tmpptr[4] * kptr[4]; sum4_5 += (int)tmpptr[5] * kptr[4]; sum4_6 += (int)tmpptr[6] * kptr[4]; sum4_7 += (int)tmpptr[7] * kptr[4]; sum5_0 += (int)tmpptr[0] * kptr[5]; sum5_1 += (int)tmpptr[1] * kptr[5]; sum5_2 += (int)tmpptr[2] * kptr[5]; sum5_3 += (int)tmpptr[3] * kptr[5]; sum5_4 += (int)tmpptr[4] * kptr[5]; sum5_5 += (int)tmpptr[5] * kptr[5]; sum5_6 += (int)tmpptr[6] * kptr[5]; sum5_7 += (int)tmpptr[7] * kptr[5]; sum6_0 += (int)tmpptr[0] * kptr[6]; sum6_1 += (int)tmpptr[1] * kptr[6]; sum6_2 += (int)tmpptr[2] * kptr[6]; sum6_3 += (int)tmpptr[3] * kptr[6]; sum6_4 += (int)tmpptr[4] * kptr[6]; sum6_5 += (int)tmpptr[5] * kptr[6]; sum6_6 += (int)tmpptr[6] * kptr[6]; sum6_7 += (int)tmpptr[7] * kptr[6]; sum7_0 += (int)tmpptr[0] * kptr[7]; sum7_1 += (int)tmpptr[1] * kptr[7]; sum7_2 += (int)tmpptr[2] * kptr[7]; sum7_3 += (int)tmpptr[3] * kptr[7]; sum7_4 += (int)tmpptr[4] * kptr[7]; sum7_5 += (int)tmpptr[5] * kptr[7]; sum7_6 += (int)tmpptr[6] * kptr[7]; sum7_7 += (int)tmpptr[7] * kptr[7]; tmpptr += 8; kptr += 8; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr0[4] = sum0_4; outptr0[5] = sum0_5; outptr0[6] = sum0_6; outptr0[7] = sum0_7; 
outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr1[4] = sum1_4; outptr1[5] = sum1_5; outptr1[6] = sum1_6; outptr1[7] = sum1_7; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr2[4] = sum2_4; outptr2[5] = sum2_5; outptr2[6] = sum2_6; outptr2[7] = sum2_7; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr3[4] = sum3_4; outptr3[5] = sum3_5; outptr3[6] = sum3_6; outptr3[7] = sum3_7; outptr4[0] = sum4_0; outptr4[1] = sum4_1; outptr4[2] = sum4_2; outptr4[3] = sum4_3; outptr4[4] = sum4_4; outptr4[5] = sum4_5; outptr4[6] = sum4_6; outptr4[7] = sum4_7; outptr5[0] = sum5_0; outptr5[1] = sum5_1; outptr5[2] = sum5_2; outptr5[3] = sum5_3; outptr5[4] = sum5_4; outptr5[5] = sum5_5; outptr5[6] = sum5_6; outptr5[7] = sum5_7; outptr6[0] = sum6_0; outptr6[1] = sum6_1; outptr6[2] = sum6_2; outptr6[3] = sum6_3; outptr6[4] = sum6_4; outptr6[5] = sum6_5; outptr6[6] = sum6_6; outptr6[7] = sum6_7; outptr7[0] = sum7_0; outptr7[1] = sum7_1; outptr7[2] = sum7_2; outptr7[3] = sum7_3; outptr7[4] = sum7_4; outptr7[5] = sum7_5; outptr7[6] = sum7_6; outptr7[7] = sum7_7; outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; outptr4 += 8; outptr5 += 8; outptr6 += 8; outptr7 += 8; #endif } for (; i+3<size; i+=4) { const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4); const signed char* kptr = kernel.channel(p/8); #if __ARM_NEON && __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "eor v20.16b, v20.16b, v20.16b \n" // sum4 "eor v21.16b, v21.16b, v21.16b \n" // sum5 "eor v22.16b, v22.16b, v22.16b \n" // sum6 "eor v23.16b, v23.16b, v23.16b \n" // sum7 // inch loop "lsr w4, %w20, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" //"prfm pldl1keep, [%9, #128] \n" // k "ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [%9], #32 \n" 
//"prfm pldl1keep, [%8, #128] \n" // d "ld1 {v8.8b, v9.8b}, [%8], #16 \n" "sshll v0.8h, v0.8b, #0 \n" // k00 - k70 "sshll v1.8h, v1.8b, #0 \n" // k01 - k71 "sshll v2.8h, v2.8b, #0 \n" // k02 - k72 "sshll v3.8h, v3.8b, #0 \n" // k03 - k73 "sshll v8.8h, v8.8b, #0 \n" // a00 - a30,a01 - a31 "sshll v9.8h, v9.8b, #0 \n" // a02 - a32,a03 - a33 // k0 "smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a30) * k00 "smlal v17.4s, v8.4h, v0.h[1] \n"// sum1 += (a00-a30) * k10 "smlal v18.4s, v8.4h, v0.h[2] \n"// sum2 += (a00-a30) * k20 "smlal v19.4s, v8.4h, v0.h[3] \n"// sum3 += (a00-a30) * k30 "smlal v20.4s, v8.4h, v0.h[4] \n"// sum4 += (a00-a30) * k40 "smlal v21.4s, v8.4h, v0.h[5] \n"// sum5 += (a00-a30) * k50 "smlal v22.4s, v8.4h, v0.h[6] \n"// sum6 += (a00-a30) * k60 "smlal v23.4s, v8.4h, v0.h[7] \n"// sum7 += (a00-a30) * k70 // k1 "smlal2 v16.4s, v8.8h, v1.h[0] \n"// sum0 += (a01-a31) * k01 "smlal2 v17.4s, v8.8h, v1.h[1] \n"// sum1 += (a01-a31) * k11 "smlal2 v18.4s, v8.8h, v1.h[2] \n"// sum2 += (a01-a31) * k21 "smlal2 v19.4s, v8.8h, v1.h[3] \n"// sum3 += (a01-a31) * k31 "smlal2 v20.4s, v8.8h, v1.h[4] \n"// sum4 += (a01-a31) * k41 "smlal2 v21.4s, v8.8h, v1.h[5] \n"// sum5 += (a01-a31) * k51 "smlal2 v22.4s, v8.8h, v1.h[6] \n"// sum6 += (a01-a31) * k61 "smlal2 v23.4s, v8.8h, v1.h[7] \n"// sum7 += (a01-a31) * k71 // k2 "smlal v16.4s, v9.4h, v2.h[0] \n"// sum0 += (a02-a32) * k02 "smlal v17.4s, v9.4h, v2.h[1] \n"// sum1 += (a02-a32) * k12 "smlal v18.4s, v9.4h, v2.h[2] \n"// sum2 += (a02-a32) * k22 "smlal v19.4s, v9.4h, v2.h[3] \n"// sum3 += (a02-a32) * k32 "smlal v20.4s, v9.4h, v2.h[4] \n"// sum4 += (a02-a32) * k42 "smlal v21.4s, v9.4h, v2.h[5] \n"// sum5 += (a02-a32) * k52 "smlal v22.4s, v9.4h, v2.h[6] \n"// sum6 += (a02-a32) * k62 "smlal v23.4s, v9.4h, v2.h[7] \n"// sum7 += (a02-a32) * k72 "subs w4, w4, #1 \n" // k3 "smlal2 v16.4s, v9.8h, v3.h[0] \n"// sum0 += (a03-a33) * k03 "smlal2 v17.4s, v9.8h, v3.h[1] \n"// sum1 += (a03-a33) * k13 "smlal2 v18.4s, v9.8h, v3.h[2] \n"// sum2 += 
(a03-a33) * k23 "smlal2 v19.4s, v9.8h, v3.h[3] \n"// sum3 += (a03-a33) * k33 "smlal2 v20.4s, v9.8h, v3.h[4] \n"// sum4 += (a03-a33) * k43 "smlal2 v21.4s, v9.8h, v3.h[5] \n"// sum5 += (a03-a33) * k53 "smlal2 v22.4s, v9.8h, v3.h[6] \n"// sum6 += (a03-a33) * k63 "smlal2 v23.4s, v9.8h, v3.h[7] \n"// sum7 += (a03-a33) * k73 "bne 0b \n" "1: \n" // remain loop "and w4, %w20, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" //"prfm pldl1keep, [%9, #128] \n" "ld1 {v0.8b}, [%9], #8 \n" //"prfm pldl1keep, [%8, #128] \n" "ld1 {v8.8b}, [%8], #8 \n" "sshll v0.8h, v0.8b, #0 \n" // k00 - k70 "sshll v8.8h, v8.8b, #0 \n" // a00 - a70 "subs w4, w4, #1 \n" // k0 "smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a30) * k00 "smlal v17.4s, v8.4h, v0.h[1] \n"// sum1 += (a00-a30) * k10 "smlal v18.4s, v8.4h, v0.h[2] \n"// sum2 += (a00-a30) * k20 "smlal v19.4s, v8.4h, v0.h[3] \n"// sum3 += (a00-a30) * k30 "smlal v20.4s, v8.4h, v0.h[4] \n"// sum4 += (a00-a30) * k40 "smlal v21.4s, v8.4h, v0.h[5] \n"// sum5 += (a00-a30) * k50 "smlal v22.4s, v8.4h, v0.h[6] \n"// sum6 += (a00-a30) * k60 "smlal v23.4s, v8.4h, v0.h[7] \n"// sum7 += (a00-a30) * k70 "bne 2b \n" "3: \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, [%1], #16 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" "st1 {v20.4s}, [%4], #16 \n" "st1 {v21.4s}, [%5], #16 \n" "st1 {v22.4s}, [%6], #16 \n" "st1 {v23.4s}, [%7], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 
0; int sum0_3 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int sum4_0 = 0; int sum4_1 = 0; int sum4_2 = 0; int sum4_3 = 0; int sum5_0 = 0; int sum5_1 = 0; int sum5_2 = 0; int sum5_3 = 0; int sum6_0 = 0; int sum6_1 = 0; int sum6_2 = 0; int sum6_3 = 0; int sum7_0 = 0; int sum7_1 = 0; int sum7_2 = 0; int sum7_3 = 0; for (int q=0; q<inch; q++) { sum0_0 += (int)tmpptr[0] * kptr[0]; sum0_1 += (int)tmpptr[1] * kptr[0]; sum0_2 += (int)tmpptr[2] * kptr[0]; sum0_3 += (int)tmpptr[3] * kptr[0]; sum1_0 += (int)tmpptr[0] * kptr[1]; sum1_1 += (int)tmpptr[1] * kptr[1]; sum1_2 += (int)tmpptr[2] * kptr[1]; sum1_3 += (int)tmpptr[3] * kptr[1]; sum2_0 += (int)tmpptr[0] * kptr[2]; sum2_1 += (int)tmpptr[1] * kptr[2]; sum2_2 += (int)tmpptr[2] * kptr[2]; sum2_3 += (int)tmpptr[3] * kptr[2]; sum3_0 += (int)tmpptr[0] * kptr[3]; sum3_1 += (int)tmpptr[1] * kptr[3]; sum3_2 += (int)tmpptr[2] * kptr[3]; sum3_3 += (int)tmpptr[3] * kptr[3]; sum4_0 += (int)tmpptr[0] * kptr[4]; sum4_1 += (int)tmpptr[1] * kptr[4]; sum4_2 += (int)tmpptr[2] * kptr[4]; sum4_3 += (int)tmpptr[3] * kptr[4]; sum5_0 += (int)tmpptr[0] * kptr[5]; sum5_1 += (int)tmpptr[1] * kptr[5]; sum5_2 += (int)tmpptr[2] * kptr[5]; sum5_3 += (int)tmpptr[3] * kptr[5]; sum6_0 += (int)tmpptr[0] * kptr[6]; sum6_1 += (int)tmpptr[1] * kptr[6]; sum6_2 += (int)tmpptr[2] * kptr[6]; sum6_3 += (int)tmpptr[3] * kptr[6]; sum7_0 += (int)tmpptr[0] * kptr[7]; sum7_1 += (int)tmpptr[1] * kptr[7]; sum7_2 += (int)tmpptr[2] * kptr[7]; sum7_3 += (int)tmpptr[3] * kptr[7]; tmpptr += 4; kptr += 8; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] 
= sum3_3; outptr4[0] = sum4_0; outptr4[1] = sum4_1; outptr4[2] = sum4_2; outptr4[3] = sum4_3; outptr5[0] = sum5_0; outptr5[1] = sum5_1; outptr5[2] = sum5_2; outptr5[3] = sum5_3; outptr6[0] = sum6_0; outptr6[1] = sum6_1; outptr6[2] = sum6_2; outptr6[3] = sum6_3; outptr7[0] = sum7_0; outptr7[1] = sum7_1; outptr7[2] = sum7_2; outptr7[3] = sum7_3; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; outptr4 += 4; outptr5 += 4; outptr6 += 4; outptr7 += 4; #endif // __ARM_NEON && __aarch64__ } for (; i<size; i++) { const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); const signed char* kptr = kernel.channel(p/8); #if __ARM_NEON && __aarch64__ asm volatile( "eor v14.16b, v14.16b, v14.16b \n" // sum0_3 "eor v15.16b, v15.16b, v15.16b \n" // sum4_7 "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "eor v20.16b, v20.16b, v20.16b \n" // sum4 "eor v21.16b, v21.16b, v21.16b \n" // sum5 "eor v22.16b, v22.16b, v22.16b \n" // sum6 "eor v23.16b, v23.16b, v23.16b \n" // sum7 // inch loop "lsr w4, %w20, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" //"prfm pldl1keep, [%9, #128] \n" // k "ld1 {v0.8b, v1.8b, v2.8b, v3.8b}, [%9], #32 \n" //"prfm pldl1keep, [%8, #64] \n" // d "ld1 {v4.8b}, [%8] \n" "add %8, %8, #4 \n" "sshll v0.8h, v0.8b, #0 \n" // k00 - k70 "sshll v1.8h, v1.8b, #0 \n" // k01 - k71 "sshll v2.8h, v2.8b, #0 \n" // k02 - k72 "sshll v3.8h, v3.8b, #0 \n" // k03 - k73 "sshll v4.8h, v4.8b, #0 \n" // a00 - a30 "subs w4, w4, #1 \n" // "smlal v16.4s, v0.4h, v4.h[0] \n"// sum0 += (k00-k70) * a00 "smlal2 v17.4s, v0.8h, v4.h[0] \n"// "smlal v18.4s, v1.4h, v4.h[1] \n"// sum2 += (k01-k71) * a10 "smlal2 v19.4s, v1.8h, v4.h[1] \n"// "smlal v20.4s, v2.4h, v4.h[2] \n"// sum4 += (k02-k72) * a20 "smlal2 v21.4s, v2.8h, v4.h[2] \n"// "smlal v22.4s, v3.4h, v4.h[3] \n"// sum6 += (k03-k73) * a30 "smlal2 v23.4s, v3.8h, v4.h[3] \n"// "bne 0b \n" 
"add v16.4s, v16.4s, v18.4s \n" "add v17.4s, v17.4s, v19.4s \n" "add v20.4s, v20.4s, v22.4s \n" "add v21.4s, v21.4s, v23.4s \n" "add v14.4s, v16.4s, v20.4s \n" "add v15.4s, v17.4s, v21.4s \n" "1: \n" // remain loop "and w4, %w20, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" //"prfm pldl1keep, [%9, #128] \n" "ld1 {v0.8b}, [%9], #8 \n"// k "ld1 {v4.8b}, [%8] \n"// d "add %8, %8, #1 \n" "sshll v0.8h, v0.8b, #0 \n" // k00 - k70 "sshll v4.8h, v4.8b, #0 \n" // a00 - a70 "subs w4, w4, #1 \n" // k0 "smlal v14.4s, v0.4h, v4.h[0] \n"// sum0_3 += (k00-k30) * a00 "smlal2 v15.4s, v8.8h, v4.h[0] \n"// sum4_7 += (k40-k70) * a00 "bne 2b \n" "3: \n" "st1 {v14.s}[0], [%0], #4 \n" "st1 {v14.s}[1], [%1], #4 \n" "st1 {v14.s}[2], [%2], #4 \n" "st1 {v14.s}[3], [%3], #4 \n" "st1 {v15.s}[0], [%4], #4 \n" "st1 {v15.s}[1], [%5], #4 \n" "st1 {v15.s}[2], [%6], #4 \n" "st1 {v15.s}[3], [%7], #4 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(outptr4), // %4 "=r"(outptr5), // %5 "=r"(outptr6), // %6 "=r"(outptr7), // %7 "=r"(tmpptr), // %8 "=r"(kptr) // %9 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(outptr4), "5"(outptr5), "6"(outptr6), "7"(outptr7), "8"(tmpptr), "9"(kptr), "r"(inch) // %20 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int sum4 = 0; int sum5 = 0; int sum6 = 0; int sum7 = 0; for (int q=0; q<inch; q++) { sum0 += (int)tmpptr[0] * kptr[0]; sum1 += (int)tmpptr[0] * kptr[1]; sum2 += (int)tmpptr[0] * kptr[2]; sum3 += (int)tmpptr[0] * kptr[3]; sum4 += (int)tmpptr[0] * kptr[4]; sum5 += (int)tmpptr[0] * kptr[5]; sum6 += (int)tmpptr[0] * kptr[6]; sum7 += (int)tmpptr[0] * kptr[7]; tmpptr++; kptr += 8; } outptr0[0] = sum0; outptr1[0] = sum1; outptr2[0] = sum2; outptr3[0] = sum3; outptr4[0] = sum4; outptr5[0] 
= sum5; outptr6[0] = sum6; outptr7[0] = sum7; outptr0++; outptr1++; outptr2++; outptr3++; outptr4++; outptr5++; outptr6++; outptr7++; #endif } } #endif // __ARM_NEON && __aarch64__ nn_outch = (outch - remain_outch_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp=0; pp<nn_outch; pp++) { int p = remain_outch_start + pp * 4; int* outptr0 = top_blob.channel(p); int* outptr1 = top_blob.channel(p+1); int* outptr2 = top_blob.channel(p+2); int* outptr3 = top_blob.channel(p+3); int i = 0; for (; i+7<size; i+=8) { const signed char* tmpptr = tmp.channel(i/8); #if __ARM_NEON && __aarch64__ const signed char* kptr = kernel.channel(p/8 + (p%8)/4); #else const signed char* kptr = kernel.channel(p/4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 "eor v20.16b, v20.16b, v20.16b \n" // sum4 "eor v21.16b, v21.16b, v21.16b \n" // sum5 "eor v22.16b, v22.16b, v22.16b \n" // sum6 "eor v23.16b, v23.16b, v23.16b \n" // sum7 // inch loop "lsr w4, %w12, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" //"prfm pldl1keep, [%5, #128] \n" // k "ld1 {v0.8b, v1.8b}, [%5], #16 \n" //"prfm pldl1keep, [%4, #128] \n" // d "ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [%4], #32 \n" "sshll v0.8h, v0.8b, #0 \n" // k00 - k30,k01 - k31 "sshll v1.8h, v1.8b, #0 \n" // k02 - k32,k03 - k33 "sshll v8.8h, v8.8b, #0 \n" // a00 - a70 "sshll v9.8h, v9.8b, #0 \n" // a01 - a71 "sshll v10.8h, v10.8b, #0 \n" // a02 - a72 "sshll v11.8h, v11.8b, #0 \n" // a03 - a73 // k0 "smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a70) * k00 "smlal2 v17.4s, v8.8h, v0.h[0] \n"// "smlal v18.4s, v8.4h, v0.h[1] \n"// sum1 += (a00-a70) * k10 "smlal2 v19.4s, v8.8h, v0.h[1] \n"// "smlal v20.4s, v8.4h, v0.h[2] \n"// sum2 += (a00-a70) * k20 "smlal2 v21.4s, v8.8h, v0.h[2] \n"// "smlal v22.4s, v8.4h, 
v0.h[3] \n"// sum3 += (a00-a70) * k30 "smlal2 v23.4s, v8.8h, v0.h[3] \n"// // k1 "smlal v16.4s, v9.4h, v0.h[4] \n"// sum0 += (a01-a71) * k01 "smlal2 v17.4s, v9.8h, v0.h[4] \n"// "smlal v18.4s, v9.4h, v0.h[5] \n"// sum1 += (a01-a71) * k11 "smlal2 v19.4s, v9.8h, v0.h[5] \n"// "smlal v20.4s, v9.4h, v0.h[6] \n"// sum2 += (a01-a71) * k21 "smlal2 v21.4s, v9.8h, v0.h[6] \n"// "smlal v22.4s, v9.4h, v0.h[7] \n"// sum3 += (a01-a71) * k31 "smlal2 v23.4s, v9.8h, v0.h[7] \n"// // k2 "smlal v16.4s, v10.4h, v1.h[0] \n"// sum0 += (a02-a72) * k02 "smlal2 v17.4s, v10.8h, v1.h[0] \n"// "smlal v18.4s, v10.4h, v1.h[1] \n"// sum1 += (a02-a72) * k12 "smlal2 v19.4s, v10.8h, v1.h[1] \n"// "smlal v20.4s, v10.4h, v1.h[2] \n"// sum2 += (a02-a72) * k22 "smlal2 v21.4s, v10.8h, v1.h[2] \n"// "smlal v22.4s, v10.4h, v1.h[3] \n"// sum3 += (a02-a72) * k32 "smlal2 v23.4s, v10.8h, v1.h[3] \n"// "subs w4, w4, #1 \n" // k3 "smlal v16.4s, v11.4h, v1.h[4] \n"// sum0 += (a03-a73) * k03 "smlal2 v17.4s, v11.8h, v1.h[4] \n"// "smlal v18.4s, v11.4h, v1.h[5] \n"// sum1 += (a03-a73) * k13 "smlal2 v19.4s, v11.8h, v1.h[5] \n"// "smlal v20.4s, v11.4h, v1.h[6] \n"// sum2 += (a03-a73) * k23 "smlal2 v21.4s, v11.8h, v1.h[6] \n"// "smlal v22.4s, v11.4h, v1.h[7] \n"// sum3 += (a03-a73) * k33 "smlal2 v23.4s, v11.8h, v1.h[7] \n"// "bne 0b \n" "1: \n" // remain loop "and w4, %w12, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" //"prfm pldl1keep, [%5, #128] \n" "ld1 {v0.8b}, [%5] \n" //"prfm pldl1keep, [%4, #128] \n" "ld1 {v8.8b}, [%4], #8 \n" "add %5, %5, #4 \n" "sshll v0.8h, v0.8b, #0 \n" // k00 - k30,k01 - k31 "sshll v8.8h, v8.8b, #0 \n" // a00 - a70 "subs w4, w4, #1 \n" // k0 "smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a70) * k00 "smlal2 v17.4s, v8.8h, v0.h[0] \n"// "smlal v18.4s, v8.4h, v0.h[1] \n"// sum1 += (a00-a70) * k10 "smlal2 v19.4s, v8.8h, v0.h[1] \n"// "smlal v20.4s, v8.4h, v0.h[2] \n"// sum2 += (a00-a70) * k20 "smlal2 v21.4s, v8.8h, v0.h[2] \n"// "smlal v22.4s, v8.4h, v0.h[3] \n"// 
sum3 += (a00-a70) * k30 "smlal2 v23.4s, v8.8h, v0.h[3] \n"// "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0], #32 \n" "st1 {v18.4s, v19.4s}, [%1], #32 \n" "st1 {v20.4s, v21.4s}, [%2], #32 \n" "st1 {v22.4s, v23.4s}, [%3], #32 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23" ); #else asm volatile( // inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "vmov.s32 q8, #0 \n" "vmov.s32 q9, #0 \n" "vmov.s32 q10, #0 \n" "vmov.s32 q11, #0 \n" "vmov.s32 q12, #0 \n" "vmov.s32 q13, #0 \n" "lsr r4, %12, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// for(; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d4-d7}, [%4]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data) "vmovl.s8 q5, d7 \n"// a30-a37 "vmovl.s8 q4, d6 \n"// a20-a27 "vmovl.s8 q3, d5 \n"// a10-a17 "vmovl.s8 q2, d4 \n"// a00-a07 "vld1.s8 {d0-d1}, [%5]! 
\n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch) "vmovl.s8 q1, d1 \n"// k02-k32,k03-k33 "vmovl.s8 q0, d0 \n"// k00-k30,k01-k31 "vmlal.s16 q6, d4, d0[0] \n"// sum0 = (a00-a07) * k00 "vmlal.s16 q7, d5, d0[0] \n" "vmlal.s16 q8, d4, d0[1] \n"// sum1 = (a00-a07) * k10 "vmlal.s16 q9, d5, d0[1] \n" "vmlal.s16 q10, d4, d0[2] \n"// sum2 = (a00-a07) * k20 "vmlal.s16 q11, d5, d0[2] \n" "vmlal.s16 q12, d4, d0[3] \n"// sum3 = (a00-a07) * k30 "vmlal.s16 q13, d5, d0[3] \n" "vmlal.s16 q6, d6, d1[0] \n"// sum0 += (a10-a17) * k01 "vmlal.s16 q7, d7, d1[0] \n" "vmlal.s16 q8, d6, d1[1] \n"// sum1 += (a10-a17) * k11 "vmlal.s16 q9, d7, d1[1] \n" "vmlal.s16 q10, d6, d1[2] \n"// sum2 += (a10-a17) * k21 "vmlal.s16 q11, d7, d1[2] \n" "vmlal.s16 q12, d6, d1[3] \n"// sum3 += (a10-a17) * k31 "vmlal.s16 q13, d7, d1[3] \n" "vmlal.s16 q6, d8, d2[0] \n"// sum0 += (a20-a27) * k02 "vmlal.s16 q7, d9, d2[0] \n" "vmlal.s16 q8, d8, d2[1] \n"// sum1 += (a20-a27) * k12 "vmlal.s16 q9, d9, d2[1] \n" "vmlal.s16 q10, d8, d2[2] \n"// sum2 += (a20-a27) * k22 "vmlal.s16 q11, d9, d2[2] \n" "vmlal.s16 q12, d8, d2[3] \n"// sum3 += (a20-a27) * k32 "vmlal.s16 q13, d9, d2[3] \n" "vmlal.s16 q6, d10, d3[0] \n"// sum0 += (a30-a37) * k03 "vmlal.s16 q7, d11, d3[0] \n" "vmlal.s16 q8, d10, d3[1] \n"// sum1 += (a30-a37) * k13 "vmlal.s16 q9, d11, d3[1] \n" "vmlal.s16 q10, d10, d3[2] \n"// sum2 += (a30-a37) * k23 "vmlal.s16 q11, d11, d3[2] \n" "vmlal.s16 q12, d10, d3[3] \n"// sum3 += (a30-a37) * k33 "vmlal.s16 q13, d11, d3[3] \n" "subs r4, r4, #1 \n" "bne 0b \n"// end for "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "vld1.s8 {d2}, [%4]! 
\n"// tmpr a00-a07 a(inch)(data) "vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %5, #4 \n" "vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a07) * k00 "vmlal.s16 q7, d3, d0[0] \n" "vmlal.s16 q8, d2, d0[1] \n"// sum1 += (a00-a07) * k10 "vmlal.s16 q9, d3, d0[1] \n" "vmlal.s16 q10, d2, d0[2] \n"// sum2 += (a00-a07) * k20 "vmlal.s16 q11, d3, d0[2] \n" "vmlal.s16 q12, d2, d0[3] \n"// sum3 += (a00-a07) * k30 "vmlal.s16 q13, d3, d0[3] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory "vst1.s32 {d12-d15}, [%0]! \n" "vst1.s32 {d16-d19}, [%1]! \n" "vst1.s32 {d20-d23}, [%2]! \n" "vst1.s32 {d24-d27}, [%3]! \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum0_4 = 0; int sum0_5 = 0; int sum0_6 = 0; int sum0_7 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum1_4 = 0; int sum1_5 = 0; int sum1_6 = 0; int sum1_7 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum2_4 = 0; int sum2_5 = 0; int sum2_6 = 0; int sum2_7 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; int sum3_4 = 0; int sum3_5 = 0; int sum3_6 = 0; int sum3_7 = 0; for (int q=0; q<inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum0_4 += tmpptr[4] * kptr[0]; sum0_5 += tmpptr[5] * kptr[0]; sum0_6 += tmpptr[6] * kptr[0]; sum0_7 += tmpptr[7] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum1_4 += tmpptr[4] * 
kptr[1]; sum1_5 += tmpptr[5] * kptr[1]; sum1_6 += tmpptr[6] * kptr[1]; sum1_7 += tmpptr[7] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum2_4 += tmpptr[4] * kptr[2]; sum2_5 += tmpptr[5] * kptr[2]; sum2_6 += tmpptr[6] * kptr[2]; sum2_7 += tmpptr[7] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; sum3_4 += tmpptr[4] * kptr[3]; sum3_5 += tmpptr[5] * kptr[3]; sum3_6 += tmpptr[6] * kptr[3]; sum3_7 += tmpptr[7] * kptr[3]; tmpptr += 8; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr0[4] = sum0_4; outptr0[5] = sum0_5; outptr0[6] = sum0_6; outptr0[7] = sum0_7; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; outptr1[3] = sum1_3; outptr1[4] = sum1_4; outptr1[5] = sum1_5; outptr1[6] = sum1_6; outptr1[7] = sum1_7; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr2[4] = sum2_4; outptr2[5] = sum2_5; outptr2[6] = sum2_6; outptr2[7] = sum2_7; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr3[4] = sum3_4; outptr3[5] = sum3_5; outptr3[6] = sum3_6; outptr3[7] = sum3_7; outptr0 += 8; outptr1 += 8; outptr2 += 8; outptr3 += 8; #endif // __ARM_NEON } for (; i+3<size; i+=4) { const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4); #if __ARM_NEON && __aarch64__ const signed char* kptr = kernel.channel(p/8 + (p%8)/4); #else const signed char* kptr = kernel.channel(p/4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 // inch loop "lsr w4, %w12, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" //"prfm pldl1keep, [%5, #128] \n" // k "ld1 {v0.8b, 
v1.8b}, [%5], #16 \n" //"prfm pldl1keep, [%4, #128] \n" // d "ld1 {v8.8b, v9.8b}, [%4], #16 \n" "sshll v0.8h, v0.8b, #0 \n" // k00 - k30,k01 - k31 "sshll v1.8h, v1.8b, #0 \n" // k02 - k32,k03 - k33 "sshll v8.8h, v8.8b, #0 \n" // a00 - a30,a01 - a31 "sshll v9.8h, v9.8b, #0 \n" // a02 - a32,a03 - a33 // k0 "smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a30) * k00 "smlal v17.4s, v8.4h, v0.h[1] \n"// sum1 += (a00-a30) * k10 "smlal v18.4s, v8.4h, v0.h[2] \n"// sum2 += (a00-a30) * k20 "smlal v19.4s, v8.4h, v0.h[3] \n"// sum3 += (a00-a30) * k30 // k1 "smlal2 v16.4s, v8.8h, v0.h[4] \n"// sum0 += (a01-a31) * k01 "smlal2 v17.4s, v8.8h, v0.h[5] \n"// sum1 += (a01-a31) * k11 "smlal2 v18.4s, v8.8h, v0.h[6] \n"// sum2 += (a01-a31) * k21 "smlal2 v19.4s, v8.8h, v0.h[7] \n"// sum3 += (a01-a31) * k31 // k2 "smlal v16.4s, v9.4h, v1.h[0] \n"// sum0 += (a02-a32) * k02 "smlal v17.4s, v9.4h, v1.h[1] \n"// sum1 += (a02-a32) * k12 "smlal v18.4s, v9.4h, v1.h[2] \n"// sum2 += (a02-a32) * k22 "smlal v19.4s, v9.4h, v1.h[3] \n"// sum3 += (a02-a32) * k32 "subs w4, w4, #1 \n" // k3 "smlal2 v16.4s, v9.8h, v1.h[4] \n"// sum0 += (a03-a33) * k03 "smlal2 v17.4s, v9.8h, v1.h[5] \n"// sum1 += (a03-a33) * k13 "smlal2 v18.4s, v9.8h, v1.h[6] \n"// sum2 += (a03-a33) * k23 "smlal2 v19.4s, v9.8h, v1.h[7] \n"// sum3 += (a03-a33) * k33 "bne 0b \n" "1: \n" // remain loop "and w4, %w12, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" //"prfm pldl1keep, [%5, #128] \n" "ld1 {v0.8b}, [%5] \n" //"prfm pldl1keep, [%4, #128] \n" "ld1 {v8.8b}, [%4] \n" "add %4, %4, #4 \n" "add %5, %5, #4 \n" "sshll v0.8h, v0.8b, #0 \n" // k00 - k30 "sshll v8.8h, v8.8b, #0 \n" // a00 - a30 "subs w4, w4, #1 \n" // k0 "smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a30) * k00 "smlal v17.4s, v8.4h, v0.h[1] \n"// sum1 += (a00-a30) * k10 "smlal v18.4s, v8.4h, v0.h[2] \n"// sum2 += (a00-a30) * k20 "smlal v19.4s, v8.4h, v0.h[3] \n"// sum3 += (a00-a30) * k30 "bne 2b \n" "3: \n" "st1 {v16.4s}, [%0], #16 \n" "st1 {v17.4s}, 
[%1], #16 \n" "st1 {v18.4s}, [%2], #16 \n" "st1 {v19.4s}, [%3], #16 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); #else asm volatile( // inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "vmov.s32 q8, #0 \n" "vmov.s32 q9, #0 \n" "lsr r4, %12, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// for(; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d4-d5}, [%4]! \n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data) "vmovl.s8 q3, d5 \n"// a20-a23,a30-a33 "vmovl.s8 q2, d4 \n"// a00-a04,a10-a14 "vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch) "vmovl.s8 q1, d1 \n"// k02-k32,k03-k33 "vmovl.s8 q0, d0 \n"// k00-k30,k01-k31 "vmlal.s16 q6, d4, d0[0] \n"// sum0 = (a00-a03) * k00 "vmlal.s16 q7, d4, d0[1] \n"// sum1 = (a00-a03) * k10 "vmlal.s16 q8, d4, d0[2] \n"// sum2 = (a00-a03) * k20 "vmlal.s16 q9, d4, d0[3] \n"// sum3 = (a00-a03) * k30 "vmlal.s16 q6, d5, d1[0] \n"// sum0 += (a10-a13) * k01 "vmlal.s16 q7, d5, d1[1] \n"// sum1 += (a10-a13) * k11 "vmlal.s16 q8, d5, d1[2] \n"// sum2 += (a10-a13) * k21 "vmlal.s16 q9, d5, d1[3] \n"// sum3 += (a10-a13) * k31 "vmlal.s16 q6, d6, d2[0] \n"// sum0 += (a20-a23) * k02 "vmlal.s16 q7, d6, d2[1] \n"// sum1 += (a20-a23) * k12 "vmlal.s16 q8, d6, d2[2] \n"// sum2 += (a20-a23) * k22 "vmlal.s16 q9, d6, d2[3] \n"// sum3 += (a20-a23) * k32 "vmlal.s16 q6, d7, d3[0] \n"// sum0 += (a30-a33) * k03 "vmlal.s16 q7, d7, d3[1] \n"// sum1 += (a30-a33) * k13 "vmlal.s16 q8, d7, d3[2] \n"// sum2 += (a30-a33) * k23 "vmlal.s16 q9, d7, d3[3] \n"// sum3 += (a30-a33) * k33 "subs r4, r4, #1 \n" "bne 0b \n"// end for "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = inch & 3 
"cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "vld1.s8 {d2}, [%4] \n"// tmpr a00-a03 a(inch)(data) "vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %4, #4 \n" "add %5, #4 \n" "vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a03) * k00 "vmlal.s16 q7, d2, d0[1] \n"// sum1 += (a00-a03) * k10 "vmlal.s16 q8, d2, d0[2] \n"// sum2 += (a00-a03) * k20 "vmlal.s16 q9, d2, d0[3] \n"// sum3 += (a00-a03) * k30 "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory "vst1.s32 {d12-d13}, [%0]! \n" "vst1.s32 {d14-d15}, [%1]! \n" "vst1.s32 {d16-d17}, [%2]! \n" "vst1.s32 {d18-d19}, [%3]! \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else int sum0_0 = 0; int sum0_1 = 0; int sum0_2 = 0; int sum0_3 = 0; int sum1_0 = 0; int sum1_1 = 0; int sum1_2 = 0; int sum1_3 = 0; int sum2_0 = 0; int sum2_1 = 0; int sum2_2 = 0; int sum2_3 = 0; int sum3_0 = 0; int sum3_1 = 0; int sum3_2 = 0; int sum3_3 = 0; for (int q=0; q<inch; q++) { sum0_0 += tmpptr[0] * kptr[0]; sum0_1 += tmpptr[1] * kptr[0]; sum0_2 += tmpptr[2] * kptr[0]; sum0_3 += tmpptr[3] * kptr[0]; sum1_0 += tmpptr[0] * kptr[1]; sum1_1 += tmpptr[1] * kptr[1]; sum1_2 += tmpptr[2] * kptr[1]; sum1_3 += tmpptr[3] * kptr[1]; sum2_0 += tmpptr[0] * kptr[2]; sum2_1 += tmpptr[1] * kptr[2]; sum2_2 += tmpptr[2] * kptr[2]; sum2_3 += tmpptr[3] * kptr[2]; sum3_0 += tmpptr[0] * kptr[3]; sum3_1 += tmpptr[1] * kptr[3]; sum3_2 += tmpptr[2] * kptr[3]; sum3_3 += tmpptr[3] * kptr[3]; tmpptr += 4; kptr += 4; } outptr0[0] = sum0_0; outptr0[1] = sum0_1; outptr0[2] = sum0_2; outptr0[3] = sum0_3; outptr1[0] = sum1_0; outptr1[1] = sum1_1; outptr1[2] = sum1_2; 
outptr1[3] = sum1_3; outptr2[0] = sum2_0; outptr2[1] = sum2_1; outptr2[2] = sum2_2; outptr2[3] = sum2_3; outptr3[0] = sum3_0; outptr3[1] = sum3_1; outptr3[2] = sum3_2; outptr3[3] = sum3_3; outptr0 += 4; outptr1 += 4; outptr2 += 4; outptr3 += 4; #endif // __ARM_NEON } for (; i<size; i++) { const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); #if __ARM_NEON && __aarch64__ const signed char* kptr = kernel.channel(p/8 + (p%8)/4); #else const signed char* kptr = kernel.channel(p/4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "eor v14.16b, v14.16b, v14.16b \n" // sum0_3 "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum1 "eor v18.16b, v18.16b, v18.16b \n" // sum2 "eor v19.16b, v19.16b, v19.16b \n" // sum3 // inch loop "lsr w4, %w12, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" //"prfm pldl1keep, [%5, #128] \n" // k "ld1 {v0.8b, v1.8b}, [%5], #16 \n" //"prfm pldl1keep, [%4, #64] \n" // d "ld1 {v4.8b}, [%4] \n" "add %4, %4, #4 \n" "sshll v0.8h, v0.8b, #0 \n" // k00 - k30,k01 - k31 "sshll v1.8h, v1.8b, #0 \n" // k02 - k32,k03 - k33 "sshll v4.8h, v4.8b, #0 \n" // a00 - a30 "subs w4, w4, #1 \n" // "smlal v16.4s, v0.4h, v4.h[0] \n"// sum0 += (k00-k30) * a00 "smlal2 v17.4s, v0.8h, v4.h[1] \n"// sum1 += (k01-k31) * a10 "smlal v18.4s, v1.4h, v4.h[2] \n"// sum2 += (k02-k32) * a20 "smlal2 v19.4s, v1.8h, v4.h[3] \n"// sum3 += (k03-k33) * a30 "bne 0b \n" "add v16.4s, v16.4s, v18.4s \n" "add v17.4s, v17.4s, v19.4s \n" "add v14.4s, v16.4s, v17.4s \n" "1: \n" // remain loop "and w4, %w12, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" //"prfm pldl1keep, [%5, #128] \n" "ld1 {v0.8b}, [%5] \n"// k "ld1 {v4.8b}, [%4] \n"// d "add %4, %4, #1 \n" "add %5, %5, #4 \n" "sshll v0.8h, v0.8b, #0 \n" // k00 - k30 "sshll v4.8h, v4.8b, #0 \n" // a00 - a30 "subs w4, w4, #1 \n" // k0 "smlal v14.4s, v0.4h, v4.h[0] \n"// sum0_3 += (k00-k30) * a00 "bne 2b \n" "3: \n" "st1 {v14.s}[0], 
[%0], #4 \n" "st1 {v14.s}[1], [%1], #4 \n" "st1 {v14.s}[2], [%2], #4 \n" "st1 {v14.s}[3], [%3], #4 \n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19" ); #else asm volatile( // inch loop "veor q6, q6, q6 \n" "veor q7, q7, q7 \n" "veor q8, q8, q8 \n" "veor q9, q9, q9 \n" "vmov.s32 q10, #0 \n" "lsr r4, %12, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// for(; nn != 0; nn--) "pld [%4, #128] \n" "vld1.s8 {d4}, [%4] \n"// tmpr a00,a10,a20,a30 a(inch)(data) "add %4, #4 \n" "vmovl.s8 q2, d4 \n"// a00,a10,a20,a30 "vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch) "vmovl.s8 q1, d1 \n"// k02-k32,k03-k33 "vmovl.s8 q0, d0 \n"// k00-k30,k01-k31 "vmlal.s16 q6, d0, d4[0] \n"// (k00-k30) * a00 "vmlal.s16 q7, d1, d4[1] \n"// (k01-k31) * a10 "vmlal.s16 q8, d2, d4[2] \n"// (k02-k32) * a20 "vmlal.s16 q9, d3, d4[3] \n"// (k03-k33) * a30 "subs r4, r4, #1 \n" "bne 0b \n"// end for "vadd.s32 q6, q6, q7 \n" "vadd.s32 q9, q9, q8 \n" "vadd.s32 q10, q6, q9 \n" "1: \n" // remain loop "and r4, %12, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "vld1.s8 {d2}, [%4] \n"// tmpr a00 a(inch)(data) "vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %4, #1 \n" "add %5, #4 \n" "vmlal.s16 q10, d0, d2[0] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory "vst1.s32 {d20[0]}, [%0]! \n" "vst1.s32 {d20[1]}, [%1]! \n" "vst1.s32 {d21[0]}, [%2]! \n" "vst1.s32 {d21[1]}, [%3]! 
\n" : "=r"(outptr0), // %0 "=r"(outptr1), // %1 "=r"(outptr2), // %2 "=r"(outptr3), // %3 "=r"(tmpptr), // %4 "=r"(kptr) // %5 : "0"(outptr0), "1"(outptr1), "2"(outptr2), "3"(outptr3), "4"(tmpptr), "5"(kptr), "r"(inch) // %12 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15" ); #endif // __aarch64__ #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; for (int q=0; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[0] * kptr[1]; sum2 += tmpptr[0] * kptr[2]; sum3 += tmpptr[0] * kptr[3]; tmpptr++; kptr += 4; } outptr0[0] = sum0; outptr1[0] = sum1; outptr2[0] = sum2; outptr3[0] = sum3; outptr0++; outptr1++; outptr2++; outptr3++; #endif // __ARM_NEON } } remain_outch_start += nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int p=remain_outch_start; p<outch; p++) { Mat out0 = top_blob.channel(p); int* outptr0 = out0; int i = 0; for (; i+7<size; i+=8) { const signed char* tmpptr = tmp.channel(i/8); #if __ARM_NEON && __aarch64__ const signed char* kptr = kernel.channel(p/8 + (p%8)/4 + p%4); #else const signed char* kptr = kernel.channel(p/4 + p%4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" // sum0 "eor v17.16b, v17.16b, v17.16b \n" // sum0n // inch loop "lsr w4, %w6, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" //"prfm pldl1keep, [%2, #128] \n" // k "ld1 {v0.8b}, [%2] \n" //"prfm pldl1keep, [%1, #128] \n" // d "ld1 {v8.8b, v9.8b, v10.8b, v11.8b}, [%1], #32 \n" "add %2, %2, #4 \n" "sshll v0.8h, v0.8b, #0 \n" // k00 - k03 "sshll v8.8h, v8.8b, #0 \n" // a00 - a07 "sshll v9.8h, v9.8b, #0 \n" // a10 - a17 "sshll v10.8h, v10.8b, #0 \n" // a20 - a27 "sshll v11.8h, v11.8b, #0 \n" // a30 - a37 // k0 "smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a07) * k00 "smlal2 v17.4s, v8.8h, v0.h[0] \n"// // k1 "smlal v16.4s, v9.4h, v0.h[1] \n"// sum0 += (a10-a17) * k01 "smlal2 
v17.4s, v9.8h, v0.h[1] \n"// // k2 "smlal v16.4s, v10.4h, v0.h[2] \n"// sum0 += (a20-a27) * k02 "smlal2 v17.4s, v10.8h, v0.h[2] \n"// "subs w4, w4, #1 \n" // k3 "smlal v16.4s, v11.4h, v0.h[3] \n"// sum0 += (a30-a37) * k03 "smlal2 v17.4s, v11.8h, v0.h[3] \n"// "bne 0b \n" "1: \n" // remain loop "and w4, %w6, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" //"prfm pldl1keep, [%2, #128] \n" "ld1 {v0.8b}, [%2] \n" //"prfm pldl1keep, [%1, #128] \n" "ld1 {v8.8b}, [%1], #8 \n" "add %2, %2, #1 \n" "sshll v0.8h, v0.8b, #0 \n" // k00 "sshll v8.8h, v8.8b, #0 \n" // a00 - a07 "subs w4, w4, #1 \n" // k0 "smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a07) * k00 "smlal2 v17.4s, v8.8h, v0.h[0] \n"// "bne 2b \n" "3: \n" "st1 {v16.4s, v17.4s}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17" ); #else asm volatile( // inch loop "vmov.s32 q6, #0 \n" "vmov.s32 q7, #0 \n" "lsr r4, %6, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// for(; nn != 0; nn--) "pld [%1, #128] \n" "vld1.s8 {d4-d7}, [%1]! 
\n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data) "vmovl.s8 q5, d7 \n"// a30-a37 "vmovl.s8 q4, d6 \n"// a20-a27 "vmovl.s8 q3, d5 \n"// a10-a17 "vmovl.s8 q2, d4 \n"// a00-a07 "vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch) "vmovl.s8 q0, d0 \n"// k00,k01,k02,k03 "add %2, #4 \n" "vmlal.s16 q6, d4, d0[0] \n"// (a00-a07) * k00 "vmlal.s16 q7, d5, d0[0] \n" "vmlal.s16 q6, d6, d0[1] \n"// (a10-a17) * k01 "vmlal.s16 q7, d7, d0[1] \n" "vmlal.s16 q6, d8, d0[2] \n"// (a20-a27) * k02 "vmlal.s16 q7, d9, d0[2] \n" "vmlal.s16 q6, d10, d0[3] \n"// (a30-a37) * k03 "vmlal.s16 q7, d11, d0[3] \n" "subs r4, r4, #1 \n" "bne 0b \n"// end for "1: \n" // remain loop "and r4, %6, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "vld1.s8 {d2}, [%1]! \n"// tmpr a00-a07 a(inch)(data) "vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %2, #1 \n" "vmlal.s16 q6, d2, d0[0] \n"// (a00-a07) * k00 "vmlal.s16 q7, d3, d0[0] \n" "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory "vst1.s32 {d12-d15}, [%0]! 
\n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7" ); #endif // __aarch64__ #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; int sum4 = 0; int sum5 = 0; int sum6 = 0; int sum7 = 0; for (int q=0; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; sum4 += tmpptr[4] * kptr[0]; sum5 += tmpptr[5] * kptr[0]; sum6 += tmpptr[6] * kptr[0]; sum7 += tmpptr[7] * kptr[0]; tmpptr += 8; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0[4] = sum4; outptr0[5] = sum5; outptr0[6] = sum6; outptr0[7] = sum7; outptr0 += 8; #endif // __ARM_NEON } for (; i+3<size; i+=4) { const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4); #if __ARM_NEON && __aarch64__ const signed char* kptr = kernel.channel(p/8 + (p%8)/4 + p%4); #else const signed char* kptr = kernel.channel(p/4 + p%4); #endif // __ARM_NEON && __aarch64__ #if __ARM_NEON #if __aarch64__ asm volatile( "eor v16.16b, v16.16b, v16.16b \n" // sum0 // inch loop "lsr w4, %w6, #2 \n"// w4 = nn = inch >> 2 "cmp w4, #0 \n" "beq 1f \n" "0: \n" //"prfm pldl1keep, [%2, #128] \n" // k "ld1 {v0.8b}, [%2] \n" //"prfm pldl1keep, [%1, #128] \n" // d "ld1 {v8.8b, v9.8b}, [%1], #16 \n" "add %2, %2, #4 \n" "sshll v0.8h, v0.8b, #0 \n" // k00 - k03 "sshll v8.8h, v8.8b, #0 \n" // a00 - a03,a10 - a13 "sshll v9.8h, v9.8b, #0 \n" // a20 - a23,a30 - a33 "smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a03) * k00 "smlal v16.4s, v9.4h, v0.h[1] \n"// sum0 += (a10-a13) * k01 "smlal v16.4s, v10.4h, v0.h[2] \n"// sum0 += (a20-a23) * k02 "subs w4, w4, #1 \n" // k3 "smlal v16.4s, v11.4h, v0.h[3] \n"// sum0 += (a30-a33) * k03 "bne 0b \n" "1: \n" // remain loop "and w4, %w6, #3 \n"// w4 = remain = inch & 3; "cmp w4, #0 \n" "beq 3f \n" "2: \n" //"prfm pldl1keep, [%2, #128] \n" "ld1 {v0.8b}, [%2] \n" 
//"prfm pldl1keep, [%1, #128] \n" "ld1 {v8.8b}, [%1] \n" "add %2, %2, #1 \n" "add %1, %1, #4 \n" "sshll v0.8h, v0.8b, #0 \n" // k00 "sshll v8.8h, v8.8b, #0 \n" // a00 - a03 "subs w4, w4, #1 \n" // k0 "smlal v16.4s, v8.4h, v0.h[0] \n"// sum0 += (a00-a03) * k00 "bne 2b \n" "3: \n" "st1 {v16.4s}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17" ); #else asm volatile( // inch loop "vmov.s32 q6, #0 \n" "lsr r4, %6, #2 \n"// r4 = nn = inch >> 2 "cmp r4, #0 \n" "beq 1f \n" "0: \n"// for(; nn != 0; nn--) "pld [%2, #128] \n" "vld1.s8 {d4-d5}, [%1]! \n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data) "vmovl.s8 q3, d5 \n"// a20-a23,a30-a33 "vmovl.s8 q2, d4 \n"// a00-a03,a10-a13 "vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch) "vmovl.s8 q0, d0 \n"// k00,k01,k02,k03 "add %2, #4 \n" "vmlal.s16 q6, d4, d0[0] \n"// (a00-a03) * k00 "vmlal.s16 q6, d5, d0[1] \n"// (a10-a13) * k01 "vmlal.s16 q6, d6, d0[2] \n"// (a20-a23) * k02 "vmlal.s16 q6, d7, d0[3] \n"// (a30-a33) * k03 "subs r4, r4, #1 \n" "bne 0b \n"// end for "1: \n" // remain loop "and r4, %6, #3 \n"// r4 = remain = inch & 3 "cmp r4, #0 \n" "beq 3f \n" "2: \n"// for(; remain != 0; remain--) "vld1.s8 {d2}, [%1] \n"// tmpr a00-a03 a(inch)(data) "vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch) "vmovl.s8 q1, d2 \n" "vmovl.s8 q0, d0 \n" "add %1, #4 \n" "add %2, #1 \n" "vmlal.s16 q6, d2, d0[0] \n"// (a00-a03) * k00 "subs r4, r4, #1 \n" "bne 2b \n" "3: \n"// store the result to memory "vst1.s32 {d12-d13}, [%0]! 
\n" : "=r"(outptr0), // %0 "=r"(tmpptr), // %1 "=r"(kptr) // %2 : "0"(outptr0), "1"(tmpptr), "2"(kptr), "r"(inch) // %6 : "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6" ); #endif // __aarch64__ #else int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; for (int q=0; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; sum1 += tmpptr[1] * kptr[0]; sum2 += tmpptr[2] * kptr[0]; sum3 += tmpptr[3] * kptr[0]; tmpptr += 4; kptr++; } outptr0[0] = sum0; outptr0[1] = sum1; outptr0[2] = sum2; outptr0[3] = sum3; outptr0 += 4; #endif // __ARM_NEON } for (; i<size; i++) { const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4); #if __ARM_NEON && __aarch64__ const signed char* kptr = kernel.channel(p/8 + (p%8)/4 + p%4); #else const signed char* kptr = kernel.channel(p/4 + p%4); #endif // __ARM_NEON && __aarch64__ int q = 0; int sum0 = 0; for (; q<inch; q++) { sum0 += tmpptr[0] * kptr[0]; tmpptr++; kptr++; } outptr0[0] = sum0; outptr0++; } } // // NOTE sgemm int8 // for (; p<outch; p++) // { // Mat out0 = top_blob.channel(p); // // int* outptr0 = out0; // // for (int i=0; i<size; i++) // { // int sum = 0; // // const signed char* kptr = _kernel.channel(p/8 + p%8); // // for (int q=0; q<inch; q++) // { // const signed char* img0 = bottom_blob.channel(q); // // sum += img0[i] * kptr[0]; // kptr ++; // } // // outptr0[i] = sum; // } // } } static void conv1x1s1_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt) { int kernel_w = 1; int kernel_h = 1; int stride_w = 1; int stride_h = 1; conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, opt); } static void conv1x1s2_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt) { int kernel_w = 1; int kernel_h = 1; int stride_w = 2; int stride_h = 2; conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, kernel_w, kernel_h, stride_w, stride_h, opt); }
main.c
/*
 * Sum the first N entries of A in parallel and, as a side effect, overwrite
 * each slot A[i] with its own index i.
 *
 * Returns the sum of the ORIGINAL values of A[0..N-1] (0 when N <= 0).
 *
 * The parallel region and the worksharing loop are expressed as a single
 * combined "parallel for" construct; the reduction clause accumulates the
 * per-thread partial sums into the result.
 */
double foo(int N, double *restrict A) {
  double total = 0;
#pragma omp parallel for default(shared) reduction(+ : total)
  for (int i = 0; i < N; ++i) {
    total += A[i];
    A[i] = i; /* replace each value with its index */
  }
  return total;
}
pooling_3x3.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "cstl/utils.h"

// 3x3 max pooling with stride 2, computed independently for every channel.
//
// bottom_blob : input feature map, float32, channels iterated in parallel
// top_blob    : pre-allocated output feature map (outw x outh x inch)
// opt         : supplies the OpenMP thread count
//
// Each output pixel is the maximum over a 3x3 input window; neighbouring
// output pixels use windows shifted by 2 input columns, neighbouring output
// rows use windows shifted by 2 input rows.  The NEON paths produce 4 output
// pixels per iteration: ld2/vld2 de-interleaves 8 input floats into an
// "even columns" and an "odd columns" vector, so the 3-wide horizontal max
// is max(even, odd, even-shifted-by-one-element); the vertical max then
// combines rows r0/r1/r2.  A scalar tail loop handles the outw % 4 leftover
// pixels of each row.
static void pooling3x3s2_max_neon(const Mat& bottom_blob, Mat& top_blob, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;

    // After a row is finished the row pointers have advanced 2*outw floats
    // into their input row; tailstep moves them to the start of the input
    // row two below (vertical stride 2).
    const int tailstep = w - 2*outw + w;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q=0; q<inch; q++)
    {
        const float* img0 = bottom_blob.channel(q);
        float* outptr = top_blob.channel(q);

        // the three consecutive input rows that feed one output row
        const float* r0 = img0;
        const float* r1 = img0 + w;
        const float* r2 = img0 + w*2;

        for (int i = 0; i < outh; i++)
        {
#if __ARM_NEON
            int nn = outw >> 2;             // vectorized iterations (4 outputs each)
            int remain = outw - (nn << 2);  // scalar leftover pixels
#else
            int remain = outw;
#endif // __ARM_NEON

#if __ARM_NEON
#if __aarch64__
            if (nn > 0)
            {
            // aarch64: v0/v1, v2/v3, v4/v5 hold the de-interleaved even/odd
            // columns of r0, r1, r2.  The loop software-pipelines the next
            // 8-column load (v6..v11) while reducing the current window, and
            // the final "sub %N, %N, #32" undoes the one extra pipelined load.
            asm volatile(
                "prfm       pldl1keep, [%1, #256]     \n"
                "ld2        {v0.4s, v1.4s}, [%1], #32 \n"
                "prfm       pldl1keep, [%2, #256]     \n"
                "ld2        {v2.4s, v3.4s}, [%2], #32 \n"
                "prfm       pldl1keep, [%3, #256]     \n"
                "ld2        {v4.4s, v5.4s}, [%3], #32 \n"
                "0:                                   \n"
                "prfm       pldl1keep, [%1, #256]     \n"
                "ld2        {v6.4s, v7.4s}, [%1], #32 \n"
                "fmax       v12.4s, v0.4s, v1.4s      \n"
                "fmax       v13.4s, v2.4s, v3.4s      \n"
                "prfm       pldl1keep, [%2, #256]     \n"
                "ld2        {v8.4s, v9.4s}, [%2], #32 \n"
                "fmax       v14.4s, v4.4s, v5.4s      \n"
                "ext        v0.16b, v0.16b, v6.16b, #4 \n"
                "prfm       pldl1keep, [%3, #256]     \n"
                "ld2        {v10.4s, v11.4s}, [%3], #32 \n"
                "ext        v2.16b, v2.16b, v8.16b, #4 \n"
                "fmax       v12.4s, v12.4s, v0.4s     \n"
                "ext        v4.16b, v4.16b, v10.16b, #4 \n"
                "fmax       v13.4s, v13.4s, v2.4s     \n"
                "fmax       v14.4s, v14.4s, v4.4s     \n"
                "fmax       v12.4s, v12.4s, v13.4s    \n"
                "orr        v0.16b, v6.16b, v6.16b    \n"
                "orr        v1.16b, v7.16b, v7.16b    \n"
                "fmax       v12.4s, v12.4s, v14.4s    \n"
                "orr        v2.16b, v8.16b, v8.16b    \n"
                "orr        v3.16b, v9.16b, v9.16b    \n"
                "orr        v4.16b, v10.16b, v10.16b  \n"
                "orr        v5.16b, v11.16b, v11.16b  \n"
                "subs       %w0, %w0, #1              \n"
                "st1        {v12.4s}, [%4], #16       \n"
                "bne        0b                        \n"
                "sub        %1, %1, #32               \n"
                "sub        %2, %2, #32               \n"
                "sub        %3, %3, #32               \n"
                : "=r"(nn),     // %0
                  "=r"(r0),     // %1
                  "=r"(r1),     // %2
                  "=r"(r2),     // %3
                  "=r"(outptr)  // %4
                : "0"(nn),
                  "1"(r0),
                  "2"(r1),
                  "3"(r2),
                  "4"(outptr)
                : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14"
            );
            }
#else
            if (nn > 0)
            {
            // armv7: same pipelined scheme as the aarch64 path; q0..q5 hold
            // the current even/odd columns, q6..q11 the next batch, and the
            // trailing "sub %N, #32" rewinds the extra pipelined load.
            asm volatile(
                "pld        [%1, #256]      \n"
                "vld2.f32   {d0-d3}, [%1]!  \n"// q0 = 0 2 4 6  q1 = 1 3 5 7
                "pld        [%2, #256]      \n"
                "vld2.f32   {d4-d7}, [%2]!  \n"
                "pld        [%3, #256]      \n"
                "vld2.f32   {d8-d11}, [%3]! \n"
                "0:                         \n"
                "pld        [%1, #256]      \n"
                "vld2.f32   {d12-d15}, [%1]! \n"// q6 = 8 10 12 14  q7 = 9 11 13 15
                "vmax.f32   q12, q0, q1     \n"
                "vmax.f32   q13, q2, q3     \n"
                "pld        [%2, #256]      \n"
                "vld2.f32   {d16-d19}, [%2]! \n"
                "vmax.f32   q14, q4, q5     \n"
                "vext.32    q0, q0, q6, #1  \n"
                "pld        [%3, #256]      \n"
                "vld2.f32   {d20-d23}, [%3]! \n"
                "vext.32    q2, q2, q8, #1  \n"
                "vmax.f32   q12, q12, q0    \n"
                "vext.32    q4, q4, q10, #1 \n"
                "vmax.f32   q13, q13, q2    \n"
                "vmax.f32   q14, q14, q4    \n"
                "vmax.f32   q12, q12, q13   \n"
                "vorr       q0, q6, q6      \n"
                "vorr       q1, q7, q7      \n"
                "vmax.f32   q12, q12, q14   \n"
                "vorr       q2, q8, q8      \n"
                "vorr       q3, q9, q9      \n"
                "vorr       q4, q10, q10    \n"
                "vorr       q5, q11, q11    \n"
                "subs       %0, #1          \n"
                "vst1.f32   {d24-d25}, [%4]! \n"
                "bne        0b              \n"
                "sub        %1, #32         \n"
                "sub        %2, #32         \n"
                "sub        %3, #32         \n"
                : "=r"(nn),     // %0
                  "=r"(r0),     // %1
                  "=r"(r1),     // %2
                  "=r"(r2),     // %3
                  "=r"(outptr)  // %4
                : "0"(nn),
                  "1"(r0),
                  "2"(r1),
                  "3"(r2),
                  "4"(outptr)
                : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14"
            );
            }
#endif // __aarch64__
#endif // __ARM_NEON
            // scalar tail: one 3x3 window per output pixel, horizontal stride 2
            for (; remain>0; remain--)
            {
                float max0 = max(max(r0[0], r0[1]), r0[2]);
                float max1 = max(max(r1[0], r1[1]), r1[2]);
                float max2 = max(max(r2[0], r2[1]), r2[2]);

                *outptr = max(max(max0, max1), max2);

                r0 += 2;
                r1 += 2;
                r2 += 2;
                outptr++;
            }

            r0 += tailstep;//1 + w;
            r1 += tailstep;//1 + w;
            r2 += tailstep;//1 + w;
        }
    }
}
tutorial_region_prof.c
/*
 * Copyright (c) 2015, 2016, 2017, 2018, 2019, 2020, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ #include <stdlib.h> #include <stdio.h> #include <time.h> #include <math.h> #include <stdint.h> #include <mpi.h> #ifdef _OPENMP #include <omp.h> #endif #include <geopm.h> #include "tutorial_region.h" #ifdef _OPENMP static int stream_profiled_omp(uint64_t region_id, size_t num_stream, double scalar, double *a, double *b, double *c) { const size_t block = 256; const size_t num_block = num_stream / block; const size_t num_remain = num_stream % block; int err = 0; int num_thread = 1; #pragma omp parallel { num_thread = omp_get_num_threads(); } #pragma omp parallel { int thread_idx = omp_get_thread_num(); (void)geopm_tprof_init_loop(num_thread, thread_idx, num_block, 0); #pragma omp for for (size_t i = 0; i < num_block; ++i) { for (size_t j = 0; j < block; ++j) { a[i * block + j] = b[i * block + j] + scalar * c[i * block + j]; } (void)geopm_tprof_post(); } #pragma omp for for (size_t j = 0; j < num_remain; ++j) { a[num_block * block + j] = b[num_block * block + j] + scalar * c[num_block * block + j]; } } return err; } #endif static int stream_profiled_serial(uint64_t region_id, size_t num_stream, double scalar, double *a, double *b, double *c) { const size_t block = 256; const size_t num_block = num_stream / block; const size_t num_remain = num_stream % block; const double norm = 1.0 / num_block; for (size_t i = 0; i < num_block; ++i) { for (size_t j = 0; j < block; ++j) { a[i * block + j] = b[i * block + j] + scalar * c[i * block + j]; } geopm_prof_progress(region_id, i * norm); } for (size_t j = 0; j < num_remain; ++j) { a[num_block * block + j] = b[num_block * block + j] + scalar * c[num_block * block + j]; } return 0; } int tutorial_stream_profiled(double big_o, int do_report) { int err = 0; if (big_o != 0.0) { size_t cline_size = 64; size_t num_stream = (size_t)big_o * 500000000; size_t mem_size = sizeof(double) * num_stream; double *a = NULL; double *b = NULL; double *c = NULL; double scalar = 3.0; uint64_t stream_rid; if (!err) { err = 
geopm_prof_region("tutorial_stream", GEOPM_REGION_HINT_MEMORY, &stream_rid); } err = posix_memalign((void *)&a, cline_size, mem_size); if (!err) { err = posix_memalign((void *)&b, cline_size, mem_size); } if (!err) { err = posix_memalign((void *)&c, cline_size, mem_size); } if (!err) { #pragma omp parallel for for (int i = 0; i < num_stream; i++) { a[i] = 0.0; b[i] = 1.0; c[i] = 2.0; } if (do_report) { printf("Executing profiled STREAM triad on length %d vectors.\n", num_stream); fflush(stdout); } err = geopm_prof_enter(stream_rid); } if (!err) { #ifdef _OPENMP err = stream_profiled_omp(stream_rid, num_stream, scalar, a, b, c); #else err = stream_profiled_serial(stream_rid, num_stream, scalar, a, b, c); #endif } if (!err) { err = geopm_prof_exit(stream_rid); } if (!err) { free(c); free(b); free(a); } } }
GraphBuilder.h
/*
 * GraphBuilder.h
 *
 * Created on: 15.07.2014
 * Author: Marvin Ritter (marvin.ritter@gmail.com)
 */

#ifndef GRAPH_BUILDER_H
#define GRAPH_BUILDER_H

#include <vector>

#include "../Globals.h"
#include "Graph.h"

namespace NetworKit {

/*
 * The GraphBuilder helps to speed up graph generation by minimizing the number of checks on addEdge/setWeight/increaseWeight. Furthermore it delays the construction of some internal data structures of the Graph class until you call toGraph(). toGraph() can only be called once.
 * In the Graph class for an edge u -> v, v is stored in the adjacency array of u (first half) and u in the adjacency array of v (second half). (For directed graphs these might be in and out adjacency arrays.) So each edge can be seen as a pair of 2 half edges. To allow optimization and mainly parallelization, GraphBuilder lets you add both half edges yourself. You are responsible for adding both half edges, otherwise you might end up with an invalid Graph object.
 * As adding the first half edge of an edge u -> v only requires access to the adjacency array of u, other threads can add edges a -> b as long as a != u. The same goes for the methods setWeight and increaseWeight. Note: If you add the first half edge of u -> v, you can change the weight by calling setWeight(u, v, ew) or increaseWeight(u, v, ew), but calling setWeight(v, u, ew) or increaseWeight(v, u, ew) will add the second half edge.
 * GraphBuilder allows you to be lazy and only add one half of each edge. Calling toGraph with autoCompleteEdges set to true will turn each half edge in GraphBuilder into one full edge in Graph.
 *
 * So far no good parallelization for toGraph has been found, so at some point the parallel parameter of toGraph may be removed.
 */
class GraphBuilder {
private:
	count n; //!< current number of nodes
	count selfloops; //!< currently encountered number of self loops

	std::string name; //!< name of the graph, if not set it will be G#ID

	bool weighted; //!< true if the graph will be weighted, false otherwise
	bool directed; //!< true if the graph will be directed, false otherwise

	std::vector< std::vector<node> > outEdges; //!< (outgoing) edges, for each edge (u, v) v is saved in outEdges[u] and for undirected also u in outEdges[v]
	std::vector< std::vector<edgeweight> > outEdgeWeights; //!< same schema (and same order!) as outEdges

	std::vector< std::vector<node> > inEdges; //!< only used for directed graphs, inEdges[v] contains all nodes u that have an edge (u, v)
	std::vector< std::vector<edgeweight> > inEdgeWeights; //!< only used for directed graphs, same schema as inEdges

	//! position of v inside outEdges[u], or "none" if the half edge is absent
	index indexInOutEdgeArray(node u, node v) const;

	//! position of u inside inEdges[v], or "none" if the half edge is absent
	index indexInInEdgeArray(node u, node v) const;

public:
	/**
	 * Creates a new GraphBuilder. GraphBuilder supports the basic methods needed to create a new graph (addNode, addEdge, setWeight, increaseWeight). It is designed to be much faster for graph creation, but the speed comes with a restriction:
	 * For undirected graphs GraphBuilder will handle u->v and v->u as two different edges. Keep that in mind when using setWeight and increaseWeight.
	 * GraphBuilder allows parallelization in a special way. Its internal data structure saves edges only at the source node. As long as edges from node u are only added/changed by thread t1, every other thread may modify edges not starting in u.
	 * addNode is not threadsafe.
	 * @param n Number of nodes.
	 * @param weighted If set to <code>true</code>, the graph has edge weights.
	 * @param directed If set to @c true, the graph will be directed.
	 */
	GraphBuilder(count n = 0, bool weighted = false, bool directed = false);

	//! Discard all edges and start over with @a n isolated nodes.
	void reset(count n = 0);

	/**
	 * Set name of graph to @a name.
	 * @param name The name.
	 */
	void setName(std::string name) { this->name = name; }

	/**
	 * Returns <code>true</code> if this graph supports edge weights other than 1.0.
	 * @return <code>true</code> if this graph supports edge weights other than 1.0.
	 */
	inline bool isWeighted() const { return weighted; }

	/**
	 * Return <code>true</code> if this graph supports directed edges.
	 * @return <code>true</code> if this graph supports directed edges.
	 */
	inline bool isDirected() const { return directed; }

	/**
	 * Return <code>true</code> if graph contains no nodes.
	 * @return <code>true</code> if graph contains no nodes.
	 */
	inline bool isEmpty() const { return n == 0; }

	/**
	 * Return the number of nodes in the graph.
	 * @return The number of nodes.
	 */
	count numberOfNodes() const { return n; }

	/**
	 * Get an upper bound for the node ids in the graph.
	 * @return An upper bound for the node ids.
	 */
	index upperNodeIdBound() const { return n; }

	/**
	 * Add a new node to the graph and return it.
	 * @return The new node.
	 */
	node addNode();

	/**
	 * Insert an edge between the nodes @a u and @a v. If the graph is weighted you can optionally
	 * set a weight for this edge. The default weight is 1.0.
	 * Only the outgoing half edge (stored at u) is added; see the class comment.
	 * @param u Endpoint of edge.
	 * @param v Endpoint of edge.
	 * @param weight Optional edge weight.
	 */
	void addHalfEdge(node u, node v, edgeweight ew = defaultEdgeWeight) { addHalfOutEdge(u, v, ew); }

	//! Add the half edge stored at u (v appended to outEdges[u]).
	void addHalfOutEdge(node u, node v, edgeweight ew = defaultEdgeWeight);
	//! Add the half edge stored at v (u appended to inEdges[v]); directed graphs only.
	void addHalfInEdge(node u, node v, edgeweight ew = defaultEdgeWeight);

	//! Replace the whole neighborhood of @a u by swapping in @a neighbours/@a weights; @a selfloop marks whether a self loop is among them.
	void swapNeighborhood(node u, std::vector<node> &neighbours, std::vector<edgeweight> &weights, bool selfloop);

	/**
	 * Set the weight of an edge. If the edge does not exist,
	 * it will be inserted.
	 *
	 * @param[in]	u	endpoint of edge
	 * @param[in]	v	endpoint of edge
	 * @param[in]	weight	edge weight
	 */
	void setWeight(node u, node v, edgeweight ew) { setOutWeight(u, v, ew); }
	void setOutWeight(node u, node v, edgeweight ew);
	void setInWeight(node u, node v, edgeweight ew);

	/**
	 * Increase the weight of an edge. If the edge does not exist,
	 * it will be inserted.
	 *
	 * @param[in]	u	endpoint of edge
	 * @param[in]	v	endpoint of edge
	 * @param[in]	weight	edge weight
	 */
	void increaseWeight(node u, node v, edgeweight ew) { increaseOutWeight(u, v, ew); }
	void increaseOutWeight(node u, node v, edgeweight ew);
	void increaseInWeight(node u, node v, edgeweight ew);

	/**
	 * Generates a Graph instance. The graph builder will be reset at the end.
	 */
	Graph toGraph(bool autoCompleteEdges, bool parallel = false);

	/**
	 * Iterate over all nodes of the graph and call @a handle (lambda closure).
	 *
	 * @param handle Takes parameter <code>(node)</code>.
	 */
	template<typename L> void forNodes(L handle) const;

	/**
	 * Iterate in parallel over all nodes of the graph and call @a handle (lambda closure).
	 *
	 * @param handle Takes parameter <code>(node)</code>.
	 */
	template<typename L> void parallelForNodes(L handle) const;

	/**
	 * Iterate over all undirected pairs of nodes and call @a handle (lambda closure).
	 *
	 * @param handle Takes parameters <code>(node, node)</code>.
	 */
	template<typename L> void forNodePairs(L handle) const;

	/**
	 * Iterate over all undirected pairs of nodes in parallel and call @a handle (lambda closure).
	 *
	 * @param handle Takes parameters <code>(node, node)</code>.
	 */
	template<typename L> void parallelForNodePairs(L handle) const;

private:
	void toGraphDirectSwap(Graph &G);   //!< move edge arrays straight into G (no completion)
	void toGraphSequential(Graph &G);   //!< single-threaded build with edge completion
	void toGraphParallel(Graph &G);     //!< multi-threaded build with edge completion

	//! Append all elements of @a source to @a target, then empty @a source.
	template <typename T>
	static void copyAndClear(std::vector<T>& source, std::vector<T>& target);

	void setDegrees(Graph& G);
	count numberOfEdges(const Graph& G);
};

template<typename L>
void GraphBuilder::forNodes(L handle) const {
	for (node v = 0; v < n; v++) {
		handle(v);
	}
}

template<typename L>
void GraphBuilder::parallelForNodes(L handle) const {
	// dynamic schedule: per-node work (degree) can vary a lot
	#pragma omp parallel for schedule(dynamic, 100)
	for (omp_index v = 0; v < static_cast<omp_index>(n); v++) {
		handle(v);
	}
}

template<typename L>
void GraphBuilder::forNodePairs(L handle) const {
	for (node u = 0; u < n; u++) {
		for (node v = u + 1; v < n; v++) {
			handle(u, v);
		}
	}
}

template<typename L>
void GraphBuilder::parallelForNodePairs(L handle) const {
	// dynamic schedule: inner loop length shrinks with u, so chunks are uneven
	#pragma omp parallel for schedule(dynamic, 100)
	for (omp_index u = 0; u < static_cast<omp_index>(n); u++) {
		for (node v = u + 1; v < n; v++) {
			handle(u, v);
		}
	}
}

template <typename T>
void GraphBuilder::copyAndClear(std::vector<T>& source, std::vector<T>& target) {
	std::copy(source.begin(), source.end(), std::back_inserter(target));
	source.clear();
}

} /* namespace NetworKit */

#endif /* GRAPH_BUILDER_H */
update_ops_single.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif

// Apply the Pauli operator selected by Pauli_operator_type to one qubit of a
// state vector of dimension dim:
//   0 -> identity (no-op), 1 -> X, 2 -> Y, 3 -> Z.
// Any other value is a programming error: it is reported on stderr and trips
// an assertion (which is a no-op in NDEBUG builds).
void single_qubit_Pauli_gate(UINT target_qubit_index, UINT Pauli_operator_type, CTYPE *state, ITYPE dim) {
	switch(Pauli_operator_type){
	case 0:
		break;
	case 1:
		X_gate(target_qubit_index,state,dim);
		break;
	case 2:
		Y_gate(target_qubit_index,state,dim);
		break;
	case 3:
		Z_gate(target_qubit_index,state,dim);
		break;
	default:
		fprintf(stderr,"invalid Pauli operation is called");
		assert(0);
	}
}

// Apply a rotation generated by the chosen Pauli operator to one qubit by
// building the 2x2 matrix cos(angle)*I + i*sin(angle)*P and delegating to
// single_qubit_dense_matrix_gate.
// NOTE(review): with this sign convention the matrix is exp(+i*angle*P);
// whether `angle` is the half-angle of the usual rotation gate depends on the
// caller — confirm against the library's gate definitions.
void single_qubit_Pauli_rotation_gate(UINT target_qubit_index, UINT Pauli_operator_index, double angle, CTYPE *state, ITYPE dim) {
	// create matrix and call dense matrix
	UINT i, j;
	CTYPE rotation_gate[4];
	for(i = 0; i < 2; ++i)
		for(j = 0; j < 2; ++j)
			rotation_gate[i*2+j] = cos(angle) * PAULI_MATRIX[0][i*2+j]
				+ sin(angle) * 1.0i * PAULI_MATRIX[Pauli_operator_index][i*2+j];
	single_qubit_dense_matrix_gate(target_qubit_index, rotation_gate, state, dim);
}

// Apply an arbitrary 2x2 matrix (row-major: [m00 m01; m10 m11]) to one qubit.
// Each of the dim/2 iterations updates the amplitude pair whose basis states
// differ only in the target qubit.
void single_qubit_dense_matrix_gate(UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
	// target mask
	const ITYPE target_mask = 1ULL << target_qubit_index;

	// loop variables
	const ITYPE loop_dim = dim/2;
	ITYPE state_index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (state_index = 0; state_index < loop_dim ; ++state_index) {
		// create index with target bit = 0
		ITYPE basis_0 = insert_zero_to_basis_index(state_index,target_mask,target_qubit_index);

		// gather index with target bit = 1
		ITYPE basis_1 = basis_0 ^ target_mask;

		// fetch values
		CTYPE cval_0 = state[basis_0];
		CTYPE cval_1 = state[basis_1];

		// set values
		state[basis_0] = matrix[0] * cval_0 + matrix[1] * cval_1;
		state[basis_1] = matrix[2] * cval_0 + matrix[3] * cval_1;
	}
}

// Apply a diagonal 2x2 matrix to one qubit: each amplitude is scaled by
// diagonal_matrix[b], where b is the value of the target qubit in that
// basis state.  No amplitude pairs need to be gathered, so the loop runs
// over all dim entries.
void single_qubit_diagonal_matrix_gate(UINT target_qubit_index, const CTYPE diagonal_matrix[2], CTYPE *state, ITYPE dim) {
	// loop variables
	const ITYPE loop_dim = dim;
	ITYPE state_index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (state_index = 0; state_index < loop_dim ; ++state_index) {
		// determine matrix position from the target qubit's bit value
		UINT bit_val = (state_index >> target_qubit_index)%2;

		// set value
		state[state_index] *= diagonal_matrix[bit_val];
	}
}

// Multiply every amplitude whose target qubit is 1 by `phase`
// (the diagonal gate diag(1, phase)).
void single_qubit_phase_gate(UINT target_qubit_index, CTYPE phase, CTYPE *state, ITYPE dim) {
	// target mask
	const ITYPE mask = 1ULL << target_qubit_index;

	// loop variables
	const ITYPE loop_dim = dim/2;
	ITYPE state_index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (state_index = 0; state_index < loop_dim ; ++state_index) {
		// create index of a basis state with target bit = 1
		ITYPE basis_1 = insert_zero_to_basis_index(state_index,mask,target_qubit_index) ^ mask;

		// set values
		state[basis_1] *= phase;
	}
}

// Apply a 2x2 matrix to the target qubit, but only on the subspace where the
// control qubit equals control_value.  The loop runs over dim/4 base indices
// with both the control and target bits stripped out; the control bit is then
// forced to control_value and the target pair is updated as in the dense gate.
void single_qubit_control_single_qubit_dense_matrix_gate(UINT control_qubit_index, UINT control_value, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
	// loop variables
	const ITYPE loop_dim = dim>>2;
	ITYPE state_index;

	// target mask
	const ITYPE target_mask = 1ULL << target_qubit_index;

	// control mask (zero when control_value == 0, so the XOR below is a no-op)
	const ITYPE control_mask = (1ULL << control_qubit_index) * control_value;

	// insert indexes: zeros must be inserted lowest-position first so the
	// second insertion sees the already-widened index
	const UINT min_qubit_index = get_min_ui(control_qubit_index, target_qubit_index);
	const UINT max_qubit_index = get_max_ui(control_qubit_index, target_qubit_index);
	const ITYPE min_qubit_mask = 1ULL << min_qubit_index;
	const ITYPE max_qubit_mask = 1ULL << max_qubit_index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for(state_index = 0; state_index < loop_dim; ++state_index) {
		// create base index with control and target bits = 0
		ITYPE basis_c_t0 = state_index;
		basis_c_t0 = insert_zero_to_basis_index(basis_c_t0, min_qubit_mask, min_qubit_index);
		basis_c_t0 = insert_zero_to_basis_index(basis_c_t0 , max_qubit_mask, max_qubit_index);

		// set the control bit to control_value
		basis_c_t0 ^= control_mask;

		// gather the partner index with target bit = 1
		ITYPE basis_c_t1 = basis_c_t0 ^ target_mask;

		// fetch values
		CTYPE cval_c_t0 = state[basis_c_t0];
		CTYPE cval_c_t1 = state[basis_c_t1];

		// set values
		state[basis_c_t0] = matrix[0] * cval_c_t0 + matrix[1] * cval_c_t1;
		state[basis_c_t1] = matrix[2] * cval_c_t0 + matrix[3] * cval_c_t1;
	}
}

// Apply a 2x2 matrix to the target qubit, conditioned on an arbitrary set of
// control qubits each matching its entry in control_value_list.
// This function can be further optimized by discussing what should be computed
// before the loop as local objects.  The current form is designed to avoid
// if-statements in the loop.
void multi_qubit_control_single_qubit_dense_matrix_gate(const UINT* control_qubit_index_list, const UINT* control_value_list, UINT control_qubit_index_count, UINT target_qubit_index, const CTYPE matrix[4], CTYPE *state, ITYPE dim) {
	// sorted list of all qubit positions (controls + target) at which a zero
	// bit must be inserted when widening the compressed loop index
	const UINT insert_index_list_count = control_qubit_index_count + 1;
	UINT* insert_index_list = create_sorted_ui_list_value(control_qubit_index_list, control_qubit_index_count, target_qubit_index);

	// target mask
	const ITYPE target_mask = 1ULL << target_qubit_index;

	// combined control mask: XOR-ing it sets every control bit to its value
	ITYPE control_mask = create_control_mask(control_qubit_index_list, control_value_list, control_qubit_index_count);

	// loop variables
	const ITYPE loop_dim = dim >> insert_index_list_count;
	ITYPE state_index;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for(state_index = 0; state_index < loop_dim; ++state_index) {
		// create base index with all control and target bits = 0
		// (insertion order is ascending, as required)
		ITYPE basis_c_t0 = state_index;
		for(UINT cursor = 0 ; cursor < insert_index_list_count ; ++cursor){
			basis_c_t0 = insert_zero_to_basis_index(basis_c_t0 , 1ULL << insert_index_list[cursor] , insert_index_list[cursor]);
		}

		// set the control bits to their required values
		basis_c_t0 ^= control_mask;

		// gather the partner index with target bit = 1
		ITYPE basis_c_t1 = basis_c_t0 ^ target_mask;

		// fetch values
		CTYPE cval_c_t0 = state[basis_c_t0];
		CTYPE cval_c_t1 = state[basis_c_t1];

		// set values
		state[basis_c_t0] = matrix[0] * cval_c_t0 + matrix[1] * cval_c_t1;
		state[basis_c_t1] = matrix[2] * cval_c_t0 + matrix[3] * cval_c_t1;
	}
	free(insert_index_list);
}
sparseAsyncOverlappingJacobi.h
//
// Created by mbarb on 16/02/2018.
//

#ifndef PARALLELITERATIVE_SPARSEASYNCOVERLAPPINGJACOBI_H
#define PARALLELITERATIVE_SPARSEASYNCOVERLAPPINGJACOBI_H

#include "Eigen"
#include "utils.h"
#include "sparseParallelJacobi.h"
#include <typeinfo>
#include <iostream>

namespace Iterative {

    // Overlapping block-Jacobi iteration for sparse systems A*x = b.
    // The diagonal of A is split into overlapping blocks; even- and
    // odd-indexed blocks write their updates into two separate solution
    // vectors (even_solution / odd_solution) that are averaged on the
    // overlapping regions, so neighboring blocks can be relaxed concurrently.
    template <typename Scalar>
    class sparseAsyncOverlappingJacobi : public sparseParallelJacobi<Scalar> {

    public:
        /**
         * @param A          sparse system matrix
         * @param b          right-hand-side column vector
         * @param iterations maximum number of sweeps
         * @param tolerance  per-element convergence tolerance
         * @param workers    number of OpenMP workers
         * @param blockSize  rows per diagonal block; 0 means A.cols()/workers
         * @param overlap    rows shared by consecutive blocks; 0 means blockSize/2
         */
        explicit sparseAsyncOverlappingJacobi(
                const Eigen::SparseMatrix<Scalar>& A,
                const Eigen::ColumnVector<Scalar, Eigen::Dynamic>& b,
                const ulonglong iterations,
                const Scalar tolerance,
                const ulong workers=0L,
                const ulonglong blockSize = 0L,
                const ulonglong overlap = 0L) :
                sparseParallelJacobi<Scalar>::sparseParallelJacobi(A, b, iterations, tolerance, workers) {

            this->blockSize = blockSize;
            // NOTE(review): divides by `workers`; presumably callers always pass
            // workers > 0 despite the 0 default — confirm.
            if (blockSize == 0)
                this->blockSize = std::max(ulong(this->A.cols() / workers), (ulong) 1L);
            // NOTE(review): reads the constructor *parameter* blockSize (0 when
            // defaulted), not the adjusted this->blockSize, and leaves the member
            // `overlap` unassigned when the parameter is nonzero — both look like
            // bugs; confirm against callers.
            if (overlap == 0)
                this->overlap = blockSize/2;
            splitter();
        }

        // Runs the iteration until every block has converged (or the sweep
        // budget is exhausted) and returns the final solution.
        const Eigen::ColumnVector<Scalar, Eigen::Dynamic> solve() {

            Eigen::ColumnVector<Scalar, Eigen::Dynamic> oldSolution(this->solution);
            // NOTE(review): `error` is always zero and never used — leftover?
            Scalar error = this->tolerance - this->tolerance;
            // pairs of (original block index, dense inverse of that block)
            std::vector<std::pair<ulonglong, Eigen::Matrix<Scalar,Eigen::Dynamic,Eigen::Dynamic>>> inverses(blocks.size());

            // parity-separated iterates: even-indexed blocks write the first,
            // odd-indexed blocks the second
            Eigen::ColumnVector<Scalar, Eigen::Dynamic> even_solution(this->solution);
            Eigen::ColumnVector<Scalar, Eigen::Dynamic> odd_solution(this->solution);

            Eigen::SimplicialLDLT<Eigen::SparseMatrix<Scalar>> solver;
            Eigen::Matrix<Scalar,Eigen::Dynamic, Eigen::Dynamic> I(blocks[0].rows, blocks[0].cols);

            // Compute the inverses in parallel (all blocks but the last,
            // which may have a different size and is handled just below)
            #pragma omp parallel for schedule(dynamic) private(solver)
            for (long i = 0; i < blocks.size()-1; ++i) {
                Eigen::SparseMatrix<Scalar> block =
                        this->A.block(blocks[i].startCol, blocks[i].startRow, blocks[i].cols, blocks[i].rows);
                solver.compute(block);
                if(I.size() != block.size()){
                    I.resize(block.rows(), block.cols());
                    I.setIdentity();
                }
                inverses[i].first = i;
                inverses[i].second = solver.solve(I);
            }
            // last (possibly smaller) block
            {
                Eigen::SparseMatrix<Scalar> block =
                        this->A.block(blocks.back().startCol, blocks.back().startRow, blocks.back().cols, blocks.back().rows);
                solver.compute(block);
                I.resize(block.rows(), block.cols());
                I.setIdentity();
                inverses.back().first = blocks.size()-1;
                inverses.back().second = solver.solve(I);
            }

            auto nInverses = blocks.size();
            std::vector<int> index;   // blocks that converged during this sweep
            auto stop = false;

            for (this->iteration=0L; this->iteration < this->iterations && !stop; ++this->iteration) {

                // Calculate the solution in parallel
                #pragma omp parallel
                #pragma omp for private(oldSolution) schedule(dynamic) nowait
                for (int i = 0; i < inverses.size(); ++i) {
                    // combined previous iterate: average of both parity vectors
                    oldSolution = (even_solution + odd_solution)/(Scalar)2.;
                    // not overlapping portion of the solution b
                    oldSolution.head(overlap) = even_solution.head(overlap);
                    // not overlapping end portion of the solution b
                    oldSolution.tail(overlap) = nInverses%2 ? even_solution.tail(overlap) : odd_solution.tail(overlap);

                    // previous value of this block, taken from its parity vector
                    Eigen::ColumnVector<Scalar, Eigen::Dynamic> oldBlock = inverses[i].first % 2 ?
                            odd_solution.segment(blocks[i].startCol, blocks[i].cols) :
                            even_solution.segment(blocks[i].startCol, blocks[i].cols);

                    // zero this block's slice of the combined iterate so the
                    // residual below excludes the block's own contribution
                    auto zeroBlock = oldSolution.segment(blocks[i].startCol, blocks[i].cols);
                    zeroBlock.setZero();

                    auto block = inverses[i].first % 2 ?
                            odd_solution.segment(blocks[i].startCol, blocks[i].cols) :
                            even_solution.segment(blocks[i].startCol, blocks[i].cols);

                    // block-Jacobi update: x_block = A_block^{-1} * r_block
                    block = inverses[i].second * (this->b - (this->A * oldSolution)).segment(blocks[i].startCol, blocks[i].cols);

                    // block converged: remember it for removal after the sweep
                    if ((oldBlock - block).template lpNorm<1>() <= this->tolerance*block.size()) {
                        #pragma omp critical
                        index.emplace_back(i);
                    }
                    zeroBlock = block;
                }
                // average of the two values
                // NOTE(review): the barrier/single below sit *outside* the
                // parallel region above (the `parallel` binds only to the `for`
                // worksharing loop), so they are effectively no-ops — confirm.
                if (!index.empty()) {
                    #pragma omp barrier
                    #pragma omp single
                    {
                        // erase converged blocks, highest index first so the
                        // remaining indices stay valid
                        std::sort(index.rbegin(), index.rend());
                        for (auto i : index) {
                            blocks.erase(blocks.begin() + i);
                            inverses.erase(inverses.begin() + i);
                        }
                        index.clear();
                        stop = inverses.empty();
                    };
                }
            }

            #pragma omp barrier
            this->solution = (even_solution + odd_solution)/(Scalar)2.;
            // not overlapping portion of the solution b
            this->solution.head(overlap) = even_solution.head(overlap);
            // not overlapping end portion of the solution b
            this->solution.tail(overlap) = nInverses%2 ? even_solution.tail(overlap) : odd_solution.tail(overlap);
            std::cout << this->iteration << std::endl;
            return this->solution;
        }

    protected:

        ulonglong blockSize;          // rows/cols per diagonal block
        std::vector<Index> blocks;    // start/extent of each diagonal block
        ulonglong overlap;            // rows shared by consecutive blocks

        // Partition the diagonal of A into overlapping blocks of `blockSize`,
        // advancing by (blockSize - overlap) rows each step; the final block
        // is clamped to the matrix bounds.
        void splitter() {
            for (ulonglong i = 0; i < this->A.cols()-overlap; i += (blockSize-overlap))
                blocks.emplace_back(Index(i, std::min(blockSize, (ulonglong) this->A.cols() - i),
                                          i, std::min(blockSize, (ulonglong) this->A.rows() - i)));
        }

    private:
    };
}

#endif //PARALLELITERATIVE_SPARSEASYNCOVERLAPPINGJACOBI_H
Scan.h
/*
	This file is part of the implementation for the technical paper

		Field-Aligned Online Surface Reconstruction
		Nico Schertler, Marco Tarini, Wenzel Jakob, Misha Kazhdan, Stefan Gumhold, Daniele Panozzo
		ACM TOG 36, 4, July 2017 (Proceedings of SIGGRAPH 2017)

	Use of this source code is granted via a BSD-style license, which can be found
	in License.txt in the repository root.

	@author Nico Schertler
*/

#pragma once

#include "osr/common.h"
#include "osr/INeighborQueryable.h"
#include "osr/HierarchyDecl.h"
#include "osr/nanoflannForwardDeclare.h"
#include "3rd/ICP.h"

#include <nsessentials/math/Morton.h>
#include <nsessentials/math/BoundingBox.h>
#include <nsessentials/gui/GLBuffer.h>
#include <nsessentials/gui/GLVertexArray.h>
#include <nsessentials/util/TimedBlock.h>

#include <random>
#include <iostream>
#include <memory>

namespace osr
{
	class Scan;

	//Abstract renderer interface for a Scan.
	class OSR_EXPORT IScanRenderer
	{
	public:
		virtual void initialize(Scan& scan) = 0;
		virtual bool isInitialized() const = 0;
		virtual void updateData(const Scan& scan) = 0;
		virtual void draw(const Scan& scan, const Eigen::Matrix4f & v, const Eigen::Matrix4f & proj) const = 0;

		bool showInput;   //draw the raw input points
		bool showNormals; //draw the per-point normals
	};

	//Represents data of a single scan
	class OSR_EXPORT Scan : public IPointQueryable<size_t>
	{
	public:
		//NOTE(review): leaves every member default-initialized; presumably
		//meant to be paired with a later ScanUnity() call — confirm.
		Scan(bool forunity){}
		Scan(const Matrix3Xf& V = Matrix3Xf(), const Matrix3Xf& N = Matrix3Xf(), const Matrix3Xus& C = Matrix3Xus(), const MatrixXu& F = MatrixXu(), const std::string& name = "unnamed", const Eigen::Affine3f& transform = Eigen::Affine3f::Identity());
		//Variant of the constructor for Unity interop (colors as uchar).
		void ScanUnity(const Matrix3Xf& V = Matrix3Xf(), const Matrix3Xf& N = Matrix3Xf(), const Matrix3Xuc& C = Matrix3Xuc(), const MatrixXu& F = MatrixXu(), const std::string& name = "unnamed", const Eigen::Affine3f& transform = Eigen::Affine3f::Identity());
		~Scan();

		void initialize();

		//Calculates the vertex normals if not already present.
		//If there are faces in the data set, uses averaged face normals.
		//Otherwise, uses PCA. PCA assumes normals to point towards the origin.
		void calculateNormals();

		//Access to transformed attributes
		Vector3f p(size_t idx) const; //vertex position
		Vector3f n(size_t idx) const; //normal

		const std::string& getName() { return name; }

		const nse::math::BoundingBox<float, 3> boundingBox() const { return bbox; }
		nse::math::BoundingBox<float, 3> getTransformedBoundingBox() const;

		void updateData();

		//Accessors for the raw (untransformed) attribute matrices.
		const Matrix3Xf& V() const { return mV; }
		Matrix3Xf& V() { return mV; }
		const Matrix3Xf& N() const { return mN; }
		Matrix3Xf& N() { return mN; }
		const Matrix3Xus& C() const { return mC; }
		Matrix3Xus& C() { return mC; }
//		const Matrix3Xuc& C_Unity() const { return mC_unity; }
//		Matrix3Xuc& C_Unity() { return mC_unity; }
		const MatrixXu& F() const { return mF; }
		MatrixXu& F() { return mF; }

		//Modifies the scan transform via ICP so as to register to other.
		template <typename Index>
		void alignTo(const IPointQueryable<Index>& other, int iterations = 20, double subsample = 0.1);

		//Removes all points that overlap the hierarchy (i.e. there is a point in the hierarchy with a distance of at most "distance").
		void cleanOverlap(const THierarchy& hierarchy, float distance);

		const Eigen::Affine3f& transform() const { return mTransform; }
		Eigen::Affine3f& transform() { return mTransform; }

		std::shared_ptr<IScanRenderer> renderer;

		// ---------- nanoflann interface ----------
		typedef nanoflann::KDTreeSingleIndexAdaptor< nanoflann::L2_Adaptor<float, Scan, float>, Scan, 3, size_t> KdTreeType;

		inline size_t kdtree_get_point_count() const { return mV.cols(); }

		//Squared Euclidean distance between query point p1 and stored point idx_p2.
		inline float kdtree_distance(const float *p1, const size_t idx_p2, size_t size) const
		{
			float s = 0;
			for (size_t i = 0; i < size; ++i)
			{
				const float d = p1[i] - mV.coeff(i, idx_p2);
				s += d*d;
			}
			return s;
		}

		inline float kdtree_get_pt(const size_t idx, int dim) const { return mV.coeff(dim, idx); }

		template <class BBOX>
		bool kdtree_get_bbox(BBOX& bb) const
		{
			for (int i = 0; i < 3; ++i)
			{
				bb[i].low = bbox.min(i);
				bb[i].high = bbox.max(i);
			}
			return true;
		}
		// ---------- end nanoflann interface ----------

		void buildTree();

		Vector3f neighborP(const size_t& i) const { return mV.col(i); } //access to point position
		Vector3f neighborN(const size_t& i) const { return mN.col(i); }; //access to point normal

		bool isIndexValid(const size_t& idx) const { return idx < mV.cols(); }

		//Finds the closest point that has a similar normal as the provided one
		size_t findClosestCompatiblePoint(const Vector3f& p, const Vector3f& n) const;

		//NOTE(review): presumably the search radius used by
		//findClosestCompatiblePoint — confirm in the implementation.
		float closestPointRadius = 30;

#ifdef USE_DAVIDVIVE
		struct
		{
			Eigen::Affine3f transformUncalibrated; //turntable + controller transform
			Eigen::Affine3f turntableRotation;
			Eigen::Affine3f davidToVive;
		} davidViveData;
#endif

	private:
		KdTreeType* kdTree = nullptr;

	private:
		void calculateNormalsFromFaces();
		void calculateNormalsPCA();

		Matrix3Xf mV; //positions
		Matrix3Xf mN; //normals
		Matrix3Xus mC; //colors
		Matrix3Xuc mC_unity; //colors
		MatrixXu mF; //faces

		std::string name;

		nse::math::BoundingBox<float, 3> bbox;

		Eigen::Affine3f mTransform;
	};

	//Rigid registration of this scan onto `other` via sparse ICP; updates
	//mTransform in place. `subsample` is the fraction of corresponding points
	//fed to ICP (1 uses every point in order).
	template <typename Index>
	void Scan::alignTo(const IPointQueryable<Index>& other, int iterations, double subsample)
	{
		nse::util::TimedBlock b("Registering scan ..");

		std::vector<Index> correspondences(mV.cols());
		//For each point, find the corresponding point in the other point cloud.
#pragma omp parallel for
		for (int i = 0; i < mV.cols(); ++i)
		{
			if (std::isnan(mV.col(i).x()))
				continue;
			correspondences[i] = other.findClosestCompatiblePoint(mTransform * mV.col(i), mTransform.linear() * mN.col(i));
		}

		//Distribute the points with a correspondence into normal buckets.
		//Bucket key: the normal quantized to steps of 0.1, Morton-encoded.
		std::map<nse::math::MortonCode64, std::vector<size_t>> normalBucketsMap;
		for (int i = 0; i < mV.cols(); ++i)
		{
			if (!std::isnan(mV.col(i).x()) && other.isIndexValid(correspondences[i]))
			{
				Vector3i discrete = (mN.col(i) * 10).cast<int>();
				nse::math::MortonCode64 code(discrete.x(), discrete.y(), discrete.z());
				normalBucketsMap[code].push_back(i);
			}
		}
		std::vector<std::vector<size_t>> normalBuckets;
		int potentialSamples = 0;
		for (auto& entry : normalBucketsMap)
		{
			potentialSamples += entry.second.size();
			normalBuckets.push_back(std::move(entry.second));
		}
		normalBucketsMap.clear();

		if (potentialSamples < 10)
		{
			std::cout << "Could not find enough overlap. Registration will abort." << std::endl;
			return;
		}

		int samples = (int)(potentialSamples * subsample);
		std::uniform_int_distribution<size_t> bucketDist(0, normalBuckets.size() - 1);
		std::mt19937 rnd;
		Matrix3Xf X(3, samples), N(3, samples);
		//subsample the point cloud for ICP
		for (int i = 0; i < samples; ++i)
		{
			size_t sample;
			if (subsample == 1)
				sample = i;
			else
			{
				//normal space sampling: draw a random bucket, then a random
				//point from it; picked points are erased so buckets drain evenly
				bool sampleOk = false;
				int attempt = 0;
				while (!sampleOk && attempt++ < 10)
				{
					auto bucketIdx = bucketDist(rnd);
					auto& bucket = normalBuckets[bucketIdx];
					std::uniform_int_distribution<size_t> sampleDist(0, bucket.size() - 1);
					auto sampleIdx = sampleDist(rnd);
					sample = bucket[sampleIdx];
					if (std::isnan(mV.coeff(0, sample)) || std::isnan(mN.coeff(0, sample)))
						continue;
					sampleOk = true;
					bucket.erase(bucket.begin() + sampleIdx);
					if (bucket.empty())
					{
						normalBuckets.erase(normalBuckets.begin() + bucketIdx);
						bucketDist = std::uniform_int_distribution<size_t>(0, normalBuckets.size() - 1);
					}
				}
			}
			X.col(i) = mTransform * mV.col(sample);
			N.col(i) = mTransform.linear() * mN.col(sample);
		}

		//Run ICP
		SICP::Parameters params;
		params.p = 1.5;
		params.max_icp = iterations;
		params.max_outer = 10;
		params.max_inner = 1;
		Eigen::setNbThreads(0);
		mTransform = SICP::point_to_plane(X, N, other, params) * mTransform;
		Eigen::setNbThreads(1);
	}
}
target_data.c
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-aarch64-unknown-linux-gnu 2>&1 \ // RUN: | %fcheck-aarch64-unknown-linux-gnu // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-powerpc64-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64-ibm-linux-gnu // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-powerpc64le-ibm-linux-gnu 2>&1 \ // RUN: | %fcheck-powerpc64le-ibm-linux-gnu // RUN: %libomptarget-compile-x86_64-pc-linux-gnu -fopenmp-version=51 // RUN: %libomptarget-run-fail-x86_64-pc-linux-gnu 2>&1 \ // RUN: | %fcheck-x86_64-pc-linux-gnu #include <stdio.h> int main() { int i; // CHECK: addr=0x[[#%x,HOST_ADDR:]], size=[[#%u,SIZE:]] fprintf(stderr, "addr=%p, size=%ld\n", &i, sizeof i); // CHECK-NOT: Libomptarget #pragma omp target data map(alloc: i) #pragma omp target data map(present, alloc: i) ; // CHECK: i is present fprintf(stderr, "i is present\n"); // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#HOST_ADDR]] ([[#SIZE]] bytes) // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory #pragma omp target data map(present, alloc: i) ; // CHECK-NOT: i is present fprintf(stderr, "i is present\n"); return 0; }
GB_unaryop__lnot_uint8_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint8_uint32
// op(A') function:  GB_tran__lnot_uint8_uint32

// C type:   uint8_t
// A type:   uint32_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT8 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = !(aij != 0) to each of the anz entries of Ax, writing into
// Cx, statically parallelized over nthreads.
GrB_Info GB_unop__lnot_uint8_uint32
(
    uint8_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel lives in GB_unaryop_transpose.c; this wrapper only
// instantiates it for the lnot/uint8/uint32 combination via the macros above.
GrB_Info GB_tran__lnot_uint8_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 24; tile_size[3] = 1024; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < 
Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
sections_dispatch.c
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck %s // REQUIRES: ompt // UNSUPPORTED: intel-15.0, intel-16.0, intel-17.0, intel-18.0 // GCC generates code that does not distinguish between sections and loops #include "callback.h" #include <omp.h> int main() { #pragma omp parallel sections num_threads(2) { #pragma omp section { printf("%lu: section 1\n", ompt_get_thread_data()->value); } #pragma omp section { printf("%lu: section 2\n", ompt_get_thread_data()->value); } } // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_work' // CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_dispatch' // CHECK: 0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_sections_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID:[0-9]+]], // CHECK-SAME: parent_task_id=[[TASK_ID:[0-9]+]], // CHECK-SAME: codeptr_ra=[[SECT_BEGIN:0x[0-f]+]], count=2 // CHECK: {{^}}[[MASTER_ID]]: ompt_event_section_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]] // CHECK-SAME: codeptr_ra=[[SECT_BEGIN]] // CHECK: {{^}}[[MASTER_ID]]: ompt_event_sections_end: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, // CHECK-SAME: codeptr_ra=[[SECT_END:0x[0-f]+]] // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_sections_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], parent_task_id=[[TASK_ID:[0-9]+]], // CHECK-SAME: codeptr_ra=[[SECT_BEGIN]], count=2 // CHECK: {{^}}[[THREAD_ID]]: ompt_event_section_begin: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id=[[TASK_ID]] // CHECK-SAME: codeptr_ra=[[SECT_BEGIN]] // CHECK: {{^}}[[THREAD_ID]]: ompt_event_sections_end: // CHECK-SAME: parallel_id=[[PARALLEL_ID]], task_id={{[0-9]+}}, // CHECK-SAME: codeptr_ra=[[SECT_END]] return 0; }
GB_binop__pow_int64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pow_int64)
// A.*B function (eWiseMult):       GB (_AemultB_08__pow_int64)
// A.*B function (eWiseMult):       GB (_AemultB_02__pow_int64)
// A.*B function (eWiseMult):       GB (_AemultB_04__pow_int64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__pow_int64)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pow_int64)
// C+=b function (dense accum):     GB (_Cdense_accumb__pow_int64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pow_int64)
// C=scalar+B                       GB (_bind1st__pow_int64)
// C=scalar+B'                      GB (_bind1st_tran__pow_int64)
// C=A+scalar                       GB (_bind2nd__pow_int64)
// C=A'+scalar                      GB (_bind2nd_tran__pow_int64)

// C type:     int64_t
// A type:     int64_t
// A pattern?  0
// B type:     int64_t
// B pattern?  0

// BinaryOp:   cij = GB_pow_int64 (aij, bij)

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_pow_int64 (x, y) ;

// true if the binop must be flipped (pow is not commutative)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_POW || GxB_NO_INT64 || GxB_NO_POW_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__pow_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pow_int64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pow_int64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int64_t
        int64_t bwork = (*((int64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *restrict Cx = (int64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pow_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read for eWiseUnion
    int64_t alpha_scalar ;
    int64_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int64_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int64_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__pow_int64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__pow_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__pow_int64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__pow_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = pow (x, Bx [p]) for every entry present in the bitmap Bb.
GrB_Info GB (_bind1st__pow_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t   x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_pow_int64 (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Computes Cx [p] = pow (Ax [p], y) for every entry present in the bitmap Ab.
GrB_Info GB (_bind2nd__pow_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t   y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_pow_int64 (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int64_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = GB_pow_int64 (x, aij) ;           \
}

GrB_Info GB (_bind1st_tran__pow_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_int64 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
phylokernelmixture.h
/*
 * phylokernelmixture.h
 *
 *  Created on: Dec 19, 2014
 *      Author: minh
 */

#ifndef PHYLOKERNELMIXTURE_H_
#define PHYLOKERNELMIXTURE_H_

#include "model/modelmixture.h"

/************************************************************************************************
 *
 *   Highly-optimized vectorized likelihood functions for mixture models
 *
 *************************************************************************************************/

/**
 * Compute the partial likelihood vector of dad_branch (viewed away from dad)
 * for a mixture model, using VectorClass SIMD arithmetic.  Recurses into the
 * two child neighbors first, then combines them through the per-mixture
 * eigen-decomposition, with underflow scaling per pattern.
 *
 * @tparam VectorClass  SIMD vector type wrapping VCSIZE doubles
 * @tparam VCSIZE       number of doubles per SIMD vector (== VectorClass().size(), asserted below)
 * @tparam nstates      number of character states; loop bounds of the form
 *                      nstates/VCSIZE assume nstates is a multiple of VCSIZE
 *
 * NOTE(review): only works for strictly bifurcating trees and reversible
 * models (asserted below); multifurcating nodes fall back to the scalar path.
 */
template <class VectorClass, const int VCSIZE, const int nstates>
void PhyloTree::computeMixturePartialLikelihoodEigenSIMD(PhyloNeighbor *dad_branch, PhyloNode *dad) {
    if (dad_branch->node->degree() > 3) {
        // TODO: SIMD version for multifurcating node
        computePartialLikelihoodEigen(dad_branch, dad);
        return;
    }

    // don't recompute the likelihood
    ASSERT(dad);
    if (dad_branch->partial_lh_computed & 1)
        return;
    dad_branch->partial_lh_computed |= 1;

    size_t nptn = aln->size() + model_factory->unobserved_ptns.size();
    PhyloNode *node = (PhyloNode*)(dad_branch->node);

    if (node->isLeaf()) {
        // leaves only need the (cached) tip partial likelihoods
        dad_branch->lh_scale_factor = 0.0;
        //memset(dad_branch->scale_num, 0, nptn * sizeof(UBYTE));
        if (!tip_partial_lh_computed)
            computeTipPartialLikelihood();
        return;
    }

    size_t ptn, c;
    size_t orig_ntn = aln->size();
    size_t ncat = site_rate->getNRate();
    size_t nmixture = model->getNMixtures();
    ASSERT(nstates == aln->num_states && nstates >= VCSIZE && VCSIZE == VectorClass().size());
    ASSERT(model->isReversible()); // only works with reversible model!
    const size_t nstatesqr=nstates*nstates;
    size_t i, x, j, m;
    size_t statecat = nstates * ncat;           // doubles per mixture component
    size_t block = statecat * nmixture;         // doubles per pattern

    // internal node
    ASSERT(node->degree() == 3); // it works only for strictly bifurcating tree
    PhyloNeighbor *left = NULL, *right = NULL; // left & right are two neighbors leading to 2 subtrees
    FOR_NEIGHBOR_IT(node, dad, it) {
        if (!left) left = (PhyloNeighbor*)(*it); else right = (PhyloNeighbor*)(*it);
    }

    if (!left->node->isLeaf() && right->node->isLeaf()) {
        // swap left and right so that a leaf (if any) is on the left
        PhyloNeighbor *tmp = left;
        left = right;
        right = tmp;
    }
    if ((left->partial_lh_computed & 1) == 0)
        computeMixturePartialLikelihoodEigenSIMD<VectorClass, VCSIZE, nstates>(left, node);
    if ((right->partial_lh_computed & 1) == 0)
        computeMixturePartialLikelihoodEigenSIMD<VectorClass, VCSIZE, nstates>(right, node);

    if (params->lh_mem_save == LM_PER_NODE && !dad_branch->partial_lh) {
        // re-orient partial_lh: steal the buffer of a back-neighbor
        bool done = false;
        FOR_NEIGHBOR_IT(node, dad, it2) {
            PhyloNeighbor *backnei = ((PhyloNeighbor*)(*it2)->node->findNeighbor(node));
            if (backnei->partial_lh) {
                dad_branch->partial_lh = backnei->partial_lh;
                dad_branch->scale_num = backnei->scale_num;
                backnei->partial_lh = NULL;
                backnei->scale_num = NULL;
                backnei->partial_lh_computed &= ~1; // clear bit
                done = true;
                break;
            }
        }
        ASSERT(done && "partial_lh is not re-oriented");
    }

    double *evec = model->getEigenvectors();
    double *inv_evec = model->getInverseEigenvectors();
    // load the inverse eigenvectors into aligned SIMD registers once
    VectorClass *vc_inv_evec = aligned_alloc<VectorClass>(nmixture*nstatesqr/VCSIZE);
    ASSERT(inv_evec && evec);
    for (m = 0; m < nmixture; m++) {
        for (i = 0; i < nstates; i++) {
            for (x = 0; x < nstates/VCSIZE; x++)
                // inv_evec is not aligned!
                vc_inv_evec[m*nstatesqr/VCSIZE + i*nstates/VCSIZE+x].load_a(&inv_evec[m*nstatesqr + i*nstates+x*VCSIZE]);
        }
    }
    double *eval = model->getEigenvalues();

    dad_branch->lh_scale_factor = left->lh_scale_factor + right->lh_scale_factor;

    VectorClass *eleft = (VectorClass*)aligned_alloc<double>(block*nstates);
    VectorClass *eright = (VectorClass*)aligned_alloc<double>(block*nstates);

    // precompute information buffer: eleft/eright = evec * exp(eval * rate * branchlen)
    for (c = 0; c < ncat; c++) {
        VectorClass vc_evec;
        VectorClass expleft[nstates/VCSIZE];
        VectorClass expright[nstates/VCSIZE];
        for (m = 0; m < nmixture; m++) {
            // length for heterotachy model
            double len_left = site_rate->getRate(c) * left->getLength(m);
            double len_right = site_rate->getRate(c) * right->getLength(m);
            size_t addr = (m*ncat+c)*nstatesqr/VCSIZE;
            for (i = 0; i < nstates/VCSIZE; i++) {
                // eval is not aligned!
                expleft[i] = exp(VectorClass().load_a(&eval[m*nstates+i*VCSIZE]) * VectorClass(len_left));
                expright[i] = exp(VectorClass().load_a(&eval[m*nstates+i*VCSIZE]) * VectorClass(len_right));
            }
            for (x = 0; x < nstates; x++)
                for (i = 0; i < nstates/VCSIZE; i++) {
                    // evec is not aligned!
                    vc_evec.load_a(&evec[m*nstatesqr+x*nstates+i*VCSIZE]);
                    eleft[addr+x*nstates/VCSIZE+i] = (vc_evec * expleft[i]);
                    eright[addr+x*nstates/VCSIZE+i] = (vc_evec * expright[i]);
                }
        }
    }

    if (left->node->isLeaf() && right->node->isLeaf()) {
        // special treatment for TIP-TIP (cherry) case

        // pre compute information for both tips, indexed by observed state
        double *partial_lh_left = aligned_alloc<double>((aln->STATE_UNKNOWN+1)*block);
        double *partial_lh_right = aligned_alloc<double>((aln->STATE_UNKNOWN+1)*block);

        vector<int>::iterator it;
        for (it = aln->seq_states[left->node->id].begin(); it != aln->seq_states[left->node->id].end(); it++) {
            int state = (*it);
            VectorClass vc_partial_lh_tmp[nstates/VCSIZE];
            VectorClass vleft[VCSIZE];
            for (m = 0; m < nmixture; m++) {
                double *this_tip_partial_lh = &tip_partial_lh[state*nstates*nmixture + m*nstates];
                VectorClass *this_eleft = &eleft[m*ncat*nstatesqr/VCSIZE];
                double *this_partial_lh_left = &partial_lh_left[state*block+m*statecat];
                for (i = 0; i < nstates/VCSIZE; i++)
                    vc_partial_lh_tmp[i].load_a(&this_tip_partial_lh[i*VCSIZE]);
                for (x = 0; x < statecat; x+=VCSIZE) {
                    for (j = 0; j < VCSIZE; j++)
                        vleft[j] = this_eleft[(x+j)*nstates/VCSIZE] * vc_partial_lh_tmp[0];
                    for (i = 1; i < nstates/VCSIZE; i++) {
                        for (j = 0; j < VCSIZE; j++)
                            vleft[j] = mul_add(this_eleft[(x+j)*nstates/VCSIZE+i], vc_partial_lh_tmp[i], vleft[j]);
                    }
                    horizontal_add(vleft).store_a(&this_partial_lh_left[x]);
                }
            }
        }

        for (it = aln->seq_states[right->node->id].begin(); it != aln->seq_states[right->node->id].end(); it++) {
            int state = (*it);
            VectorClass vc_partial_lh_tmp[nstates/VCSIZE];
            VectorClass vright[VCSIZE];
            for (m = 0; m < nmixture; m++) {
                double *this_tip_partial_lh = &tip_partial_lh[state*nstates*nmixture + m*nstates];
                VectorClass *this_eright = &eright[m*ncat*nstatesqr/VCSIZE];
                double *this_partial_lh_right = &partial_lh_right[state*block+m*statecat];
                for (i = 0; i < nstates/VCSIZE; i++)
                    vc_partial_lh_tmp[i].load_a(&this_tip_partial_lh[i*VCSIZE]);
                for (x = 0; x < statecat; x+=VCSIZE) {
                    for (j = 0; j < VCSIZE; j++)
                        vright[j] = this_eright[(x+j)*nstates/VCSIZE] * vc_partial_lh_tmp[0];
                    for (i = 1; i < nstates/VCSIZE; i++) {
                        for (j = 0; j < VCSIZE; j++)
                            vright[j] = mul_add(this_eright[(x+j)*nstates/VCSIZE+i], vc_partial_lh_tmp[i], vright[j]);
                    }
                    horizontal_add(vright).store_a(&this_partial_lh_right[x]);
                }
            }
        }

        // unknown state contributes likelihood 1.0 for every entry
        size_t addr_unknown = aln->STATE_UNKNOWN * block;
        for (x = 0; x < block; x++) {
            partial_lh_left[addr_unknown+x] = 1.0;
            partial_lh_right[addr_unknown+x] = 1.0;
        }

        // assign pointers for left and right partial_lh
        double **lh_left_ptr = aligned_alloc<double*>(nptn);
        double **lh_right_ptr = aligned_alloc<double*>(nptn);
        for (ptn = 0; ptn < orig_ntn; ptn++) {
            lh_left_ptr[ptn] = &partial_lh_left[block * (aln->at(ptn))[left->node->id]];
            lh_right_ptr[ptn] = &partial_lh_right[block * (aln->at(ptn))[right->node->id]];
        }
        for (ptn = orig_ntn; ptn < nptn; ptn++) {
            lh_left_ptr[ptn] = &partial_lh_left[block * model_factory->unobserved_ptns[ptn-orig_ntn]];
            lh_right_ptr[ptn] = &partial_lh_right[block * model_factory->unobserved_ptns[ptn-orig_ntn]];
        }

        // scale number must be ZERO
        memset(dad_branch->scale_num, 0, nptn * sizeof(UBYTE));
        VectorClass vc_partial_lh_tmp[nstates/VCSIZE];
        VectorClass res[VCSIZE];
#ifdef _OPENMP
#pragma omp parallel for private(ptn, c, x, i, j, m, vc_partial_lh_tmp, res)
#endif
        for (ptn = 0; ptn < nptn; ptn++) {
            double *partial_lh = dad_branch->partial_lh + ptn*block;
            double *lh_left = lh_left_ptr[ptn];
            double *lh_right = lh_right_ptr[ptn];
            for (m = 0; m < nmixture; m++) {
                for (c = 0; c < ncat; c++) {
                    // compute real partial likelihood vector
                    for (x = 0; x < nstates/VCSIZE; x++) {
                        vc_partial_lh_tmp[x] = (VectorClass().load_a(&lh_left[x*VCSIZE]) * VectorClass().load_a(&lh_right[x*VCSIZE]));
                    }
                    // compute dot-product with inv_eigenvector
                    for (i = 0; i < nstates; i+=VCSIZE) {
                        for (j = 0; j < VCSIZE; j++) {
                            res[j] = vc_partial_lh_tmp[0] * vc_inv_evec[(m*nstates+i+j)*nstates/VCSIZE];
                        }
                        for (x = 1; x < nstates/VCSIZE; x++)
                            for (j = 0; j < VCSIZE; j++) {
                                res[j] = mul_add(vc_partial_lh_tmp[x], vc_inv_evec[(m*nstates+i+j)*nstates/VCSIZE+x], res[j]);
                            }
                        horizontal_add(res).store_a(&partial_lh[i]);
                    }
                    lh_left += nstates;
                    lh_right += nstates;
                    partial_lh += nstates;
                }
            }
        }
        aligned_free(lh_left_ptr);
        aligned_free(lh_right_ptr);
        aligned_free(partial_lh_right);
        aligned_free(partial_lh_left);
    } else if (left->node->isLeaf() && !right->node->isLeaf()) {
        // special treatment to TIP-INTERNAL NODE case
        // only take scale_num from the right subtree
        memcpy(dad_branch->scale_num, right->scale_num, nptn * sizeof(UBYTE));

        // pre compute information for left tip
        double *partial_lh_left = aligned_alloc<double>((aln->STATE_UNKNOWN+1)*block);

        vector<int>::iterator it;
        for (it = aln->seq_states[left->node->id].begin(); it != aln->seq_states[left->node->id].end(); it++) {
            int state = (*it);
            VectorClass vc_partial_lh_tmp[nstates/VCSIZE];
            VectorClass vleft[VCSIZE];
            for (m = 0; m < nmixture; m++) {
                double *this_tip_partial_lh = &tip_partial_lh[state*nstates*nmixture + m*nstates];
                VectorClass *this_eleft = &eleft[m*ncat*nstatesqr/VCSIZE];
                double *this_partial_lh_left = &partial_lh_left[state*block+m*statecat];
                for (i = 0; i < nstates/VCSIZE; i++)
                    vc_partial_lh_tmp[i].load_a(&this_tip_partial_lh[i*VCSIZE]);
                for (x = 0; x < statecat; x+=VCSIZE) {
                    for (j = 0; j < VCSIZE; j++)
                        vleft[j] = this_eleft[(x+j)*nstates/VCSIZE] * vc_partial_lh_tmp[0];
                    for (i = 1; i < nstates/VCSIZE; i++) {
                        for (j = 0; j < VCSIZE; j++)
                            vleft[j] = mul_add(this_eleft[(x+j)*nstates/VCSIZE+i], vc_partial_lh_tmp[i], vleft[j]);
                    }
                    horizontal_add(vleft).store_a(&this_partial_lh_left[x]);
                }
            }
        }
        size_t addr_unknown = aln->STATE_UNKNOWN * block;
        for (x = 0; x < block; x++) {
            partial_lh_left[addr_unknown+x] = 1.0;
        }

        // assign pointers for partial_lh_left
        double **lh_left_ptr = aligned_alloc<double*>(nptn);
        for (ptn = 0; ptn < orig_ntn; ptn++) {
            lh_left_ptr[ptn] = &partial_lh_left[block * (aln->at(ptn))[left->node->id]];
        }
        for (ptn = orig_ntn; ptn < nptn; ptn++) {
            lh_left_ptr[ptn] = &partial_lh_left[block * model_factory->unobserved_ptns[ptn-orig_ntn]];
        }

        double sum_scale = 0.0;
        VectorClass vc_lh_right[nstates/VCSIZE];
        VectorClass vc_partial_lh_tmp[nstates/VCSIZE];
        VectorClass res[VCSIZE];
        VectorClass vc_max; // maximum of partial likelihood, for scaling check
        VectorClass vright[VCSIZE];
#ifdef _OPENMP
#pragma omp parallel for reduction(+: sum_scale) private (ptn, c, x, i, j, m, vc_lh_right, vc_partial_lh_tmp, res, vc_max, vright)
#endif
        for (ptn = 0; ptn < nptn; ptn++) {
            double *partial_lh = dad_branch->partial_lh + ptn*block;
            double *partial_lh_right = right->partial_lh + ptn*block;
            double *lh_left = lh_left_ptr[ptn];
            vc_max = 0.0;
            for (m = 0; m < nmixture; m++)
                for (c = 0; c < ncat; c++) {
                    // compute real partial likelihood vector
                    for (i = 0; i < nstates/VCSIZE; i++)
                        vc_lh_right[i].load_a(&partial_lh_right[i*VCSIZE]);
                    for (x = 0; x < nstates/VCSIZE; x++) {
                        size_t addr = (m*ncat+c)*nstatesqr/VCSIZE+x*nstates;
                        for (j = 0; j < VCSIZE; j++) {
                            vright[j] = eright[addr+nstates*j/VCSIZE] * vc_lh_right[0];
                        }
                        for (i = 1; i < nstates/VCSIZE; i++)
                            for (j = 0; j < VCSIZE; j++) {
                                vright[j] = mul_add(eright[addr+i+nstates*j/VCSIZE], vc_lh_right[i], vright[j]);
                            }
                        vc_partial_lh_tmp[x] = VectorClass().load_a(&lh_left[x*VCSIZE]) * horizontal_add(vright);
                    }
                    // compute dot-product with inv_eigenvector
                    for (i = 0; i < nstates; i+=VCSIZE) {
                        for (j = 0; j < VCSIZE; j++) {
                            res[j] = vc_partial_lh_tmp[0] * vc_inv_evec[(m*nstates+i+j)*nstates/VCSIZE];
                        }
                        for (x = 1; x < nstates/VCSIZE; x++) {
                            for (j = 0; j < VCSIZE; j++) {
                                res[j] = mul_add(vc_partial_lh_tmp[x], vc_inv_evec[(m*nstates+i+j)*nstates/VCSIZE+x], res[j]);
                            }
                        }
                        VectorClass sum_res = horizontal_add(res);
                        sum_res.store_a(&partial_lh[i]);
                        vc_max = max(vc_max, abs(sum_res)); // take the maximum for scaling check
                    }
                    lh_left += nstates;
                    partial_lh_right += nstates;
                    partial_lh += nstates;
                }
            // check if one should scale partial likelihoods
            double lh_max = horizontal_max(vc_max);
            if (lh_max < SCALING_THRESHOLD) {
                // now do the likelihood scaling
                partial_lh -= block; // revert its pointer
                VectorClass scale_thres(SCALING_THRESHOLD_INVER);
                for (i = 0; i < block; i+=VCSIZE) {
                    (VectorClass().load_a(&partial_lh[i]) * scale_thres).store_a(&partial_lh[i]);
                }
                // unobserved const pattern will never have underflow
                sum_scale += LOG_SCALING_THRESHOLD * ptn_freq[ptn];
                dad_branch->scale_num[ptn] += 1;
                partial_lh += block; // increase the pointer again
            }
        }
        dad_branch->lh_scale_factor += sum_scale;

        aligned_free(lh_left_ptr);
        aligned_free(partial_lh_left);
    } else {
        // both left and right are internal node
        double sum_scale = 0.0;
        VectorClass vc_max; // maximum of partial likelihood, for scaling check
        VectorClass vc_partial_lh_tmp[nstates/VCSIZE];
        VectorClass vc_lh_left[nstates/VCSIZE], vc_lh_right[nstates/VCSIZE];
        VectorClass res[VCSIZE];
        VectorClass vleft[VCSIZE], vright[VCSIZE];
#ifdef _OPENMP
#pragma omp parallel for reduction (+: sum_scale) private(ptn, c, x, i, j, m, vc_max, vc_partial_lh_tmp, vc_lh_left, vc_lh_right, res, vleft, vright)
#endif
        for (ptn = 0; ptn < nptn; ptn++) {
            double *partial_lh = dad_branch->partial_lh + ptn*block;
            double *partial_lh_left = left->partial_lh + ptn*block;
            double *partial_lh_right = right->partial_lh + ptn*block;
            dad_branch->scale_num[ptn] = left->scale_num[ptn] + right->scale_num[ptn];
            vc_max = 0.0;
            for (m = 0; m < nmixture; m++)
                for (c = 0; c < ncat; c++) {
                    // compute real partial likelihood vector
                    for (i = 0; i < nstates/VCSIZE; i++) {
                        vc_lh_left[i].load_a(&partial_lh_left[i*VCSIZE]);
                        vc_lh_right[i].load_a(&partial_lh_right[i*VCSIZE]);
                    }
                    for (x = 0; x < nstates/VCSIZE; x++) {
                        size_t addr = (m*ncat+c)*nstatesqr/VCSIZE+x*nstates;
                        for (j = 0; j < VCSIZE; j++) {
                            size_t addr_com = addr+j*nstates/VCSIZE;
                            vleft[j] = eleft[addr_com] * vc_lh_left[0];
                            vright[j] = eright[addr_com] * vc_lh_right[0];
                        }
                        for (i = 1; i < nstates/VCSIZE; i++) {
                            for (j = 0; j < VCSIZE; j++) {
                                size_t addr_com = addr+i+j*nstates/VCSIZE;
                                vleft[j] = mul_add(eleft[addr_com], vc_lh_left[i], vleft[j]);
                                vright[j] = mul_add(eright[addr_com], vc_lh_right[i], vright[j]);
                            }
                        }
                        vc_partial_lh_tmp[x] = horizontal_add(vleft) * horizontal_add(vright);
                    }
                    // compute dot-product with inv_eigenvector
                    for (i = 0; i < nstates; i+=VCSIZE) {
                        for (j = 0; j < VCSIZE; j++) {
                            res[j] = vc_partial_lh_tmp[0] * vc_inv_evec[(m*nstates+i+j)*nstates/VCSIZE];
                        }
                        for (x = 1; x < nstates/VCSIZE; x++)
                            for (j = 0; j < VCSIZE; j++)
                                res[j] = mul_add(vc_partial_lh_tmp[x], vc_inv_evec[(m*nstates+i+j)*nstates/VCSIZE+x], res[j]);
                        VectorClass sum_res = horizontal_add(res);
                        sum_res.store_a(&partial_lh[i]);
                        vc_max = max(vc_max, abs(sum_res)); // take the maximum for scaling check
                    }
                    partial_lh += nstates;
                    partial_lh_left += nstates;
                    partial_lh_right += nstates;
                }
            // check if one should scale partial likelihoods
            double lh_max = horizontal_max(vc_max);
            if (lh_max < SCALING_THRESHOLD) {
                // now do the likelihood scaling
                partial_lh -= block; // revert its pointer
                VectorClass scale_thres(SCALING_THRESHOLD_INVER);
                for (i = 0; i < block; i+=VCSIZE) {
                    (VectorClass().load_a(&partial_lh[i]) * scale_thres).store_a(&partial_lh[i]);
                }
                // unobserved const pattern will never have underflow
                sum_scale += LOG_SCALING_THRESHOLD * ptn_freq[ptn];
                dad_branch->scale_num[ptn] += 1;
                partial_lh += block; // increase the pointer again
            }
        }
        dad_branch->lh_scale_factor += sum_scale;
    }

    aligned_free(eright);
    aligned_free(eleft);
    aligned_free(vc_inv_evec);
}

/**
 * Compute the first (df) and second (ddf) derivatives of the likelihood for
 * the branch dad_branch, presumably with respect to its length (the cof and
 * cof^2 factors below are the eval*rate derivative terms -- TODO confirm).
 * Caches theta_all (= tip/partial_lh products) for fast re-evaluation, and
 * applies the ascertainment bias correction for unobserved patterns.
 */
template <class VectorClass, const int VCSIZE, const int nstates>
void PhyloTree::computeMixtureLikelihoodDervEigenSIMD(PhyloNeighbor *dad_branch, PhyloNode *dad, double &df, double &ddf) {
    PhyloNode *node = (PhyloNode*) dad_branch->node;
    PhyloNeighbor *node_branch = (PhyloNeighbor*) node->findNeighbor(dad);
    if (!central_partial_lh)
        initializeAllPartialLh();
    if (node->isLeaf()) {
        // make sure that the leaf (if any) is on the dad side
        PhyloNode *tmp_node = dad;
        dad = node;
        node = tmp_node;
        PhyloNeighbor *tmp_nei = dad_branch;
        dad_branch = node_branch;
        node_branch = tmp_nei;
    }
    if ((dad_branch->partial_lh_computed & 1) == 0)
        computeMixturePartialLikelihoodEigenSIMD<VectorClass, VCSIZE, nstates>(dad_branch, dad);
    if ((node_branch->partial_lh_computed & 1) == 0)
        computeMixturePartialLikelihoodEigenSIMD<VectorClass, VCSIZE, nstates>(node_branch, node);
    df = ddf = 0.0;
    size_t ncat = site_rate->getNRate();
    size_t nmixture = model->getNMixtures();
    size_t block = ncat * nstates * nmixture;
    size_t statemix = nstates * nmixture;
    size_t statecat = nstates * ncat;
    size_t ptn; // for big data size > 4GB memory required
    size_t c, i, j, m;
    size_t orig_nptn = aln->size();
    size_t nptn = aln->size()+model_factory->unobserved_ptns.size();
    size_t maxptn = ((nptn+VCSIZE-1)/VCSIZE)*VCSIZE;
    maxptn = max(maxptn, aln->size()+((model_factory->unobserved_ptns.size()+VCSIZE-1)/VCSIZE)*VCSIZE);
    double *eval = model->getEigenvalues();
    ASSERT(eval);

    // vc_val0/1/2: per-(mixture,category,state) weights for the likelihood
    // and its first and second derivative terms
    VectorClass *vc_val0 = (VectorClass*)aligned_alloc<double>(block);
    VectorClass *vc_val1 = (VectorClass*)aligned_alloc<double>(block);
    VectorClass *vc_val2 = (VectorClass*)aligned_alloc<double>(block);

    for (c = 0; c < ncat; c++) {
        VectorClass vc_rate = site_rate->getRate(c);
        for (m = 0; m < nmixture; m++) {
            // length for heterotachy model
            VectorClass vc_len = dad_branch->getLength(m);
            VectorClass vc_prop = VectorClass(site_rate->getProp(c) * model->getMixtureWeight(m));
            for (i = 0; i < nstates/VCSIZE; i++) {
                VectorClass cof = VectorClass().load_a(&eval[m*nstates+i*VCSIZE]) * vc_rate;
                VectorClass val = exp(cof*vc_len) * vc_prop;
                VectorClass val1_ = cof*val;
                vc_val0[(m*ncat+c)*nstates/VCSIZE+i] = val;
                vc_val1[(m*ncat+c)*nstates/VCSIZE+i] = val1_;
                vc_val2[(m*ncat+c)*nstates/VCSIZE+i] = cof*val1_;
            }
        }
    }

    ASSERT(theta_all);
    if (!theta_computed) {
        theta_computed = true;
        // precompute theta for fast branch length optimization
        if (dad->isLeaf()) {
            // special treatment for TIP-INTERNAL NODE case
#ifdef _OPENMP
#pragma omp parallel for private(ptn, i, m)
#endif
            for (ptn = 0; ptn < orig_nptn; ptn++) {
                double *partial_lh_dad = dad_branch->partial_lh + ptn*block;
                double *theta = theta_all + ptn*block;
                double *lh_dad = &tip_partial_lh[(aln->at(ptn))[dad->id] * statemix];
                for (m = 0; m < nmixture; m++) {
                    for (i = 0; i < statecat; i+=VCSIZE) {
                        (VectorClass().load_a(&lh_dad[i%nstates]) * VectorClass().load_a(&partial_lh_dad[i])).store_a(&theta[i]);
                    }
                    partial_lh_dad += statecat;
                    theta += statecat;
                    lh_dad += nstates;
                }
            }
            // ascertainment bias correction
            for (ptn = orig_nptn; ptn < nptn; ptn++) {
                double *partial_lh_dad = dad_branch->partial_lh + ptn*block;
                double *theta = theta_all + ptn*block;
                double *lh_dad = &tip_partial_lh[model_factory->unobserved_ptns[ptn-orig_nptn] * statemix];
                for (m = 0; m < nmixture; m++) {
                    for (i = 0; i < statecat; i+=VCSIZE) {
                        (VectorClass().load_a(&lh_dad[i%nstates]) * VectorClass().load_a(&partial_lh_dad[i])).store_a(&theta[i]);
                    }
                    partial_lh_dad += statecat;
                    theta += statecat;
                    lh_dad += nstates;
                }
            }
        } else {
            // both dad and node are internal nodes
            double *partial_lh_node = node_branch->partial_lh;
            double *partial_lh_dad = dad_branch->partial_lh;
            size_t all_entries = nptn*block;
#ifdef _OPENMP
#pragma omp parallel for private(i)
#endif
            for (i = 0; i < all_entries; i+=VCSIZE) {
                (VectorClass().load_a(&partial_lh_node[i]) * VectorClass().load_a(&partial_lh_dad[i])).store_a(&theta_all[i]);
            }
        }
        if (nptn < maxptn) {
            // copy dummy values so SIMD reads past nptn stay well-defined
            for (ptn = nptn; ptn < maxptn; ptn++)
                memcpy(&theta_all[ptn*block], &theta_all[(ptn-1)*block], block*sizeof(double));
        }
    }

    VectorClass vc_ptn[VCSIZE], vc_df[VCSIZE], vc_ddf[VCSIZE], vc_theta[VCSIZE];
    VectorClass vc_unit = 1.0;
    VectorClass vc_freq;
    VectorClass df_final = 0.0, ddf_final = 0.0;
    // these store values of VCSIZE consecutive patterns
    VectorClass lh_ptn, df_ptn, ddf_ptn, inv_lh_ptn;

    // perform VCSIZE sites at the same time for SSE/AVX efficiency

#ifdef _OPENMP
#pragma omp parallel private (ptn, i, j, vc_freq, vc_ptn, vc_df, vc_ddf, vc_theta, inv_lh_ptn, lh_ptn, df_ptn, ddf_ptn)
    {
        // per-thread accumulators, merged in the critical section below
        VectorClass df_final_th = 0.0;
        VectorClass ddf_final_th = 0.0;
#pragma omp for nowait
#endif
    for (ptn = 0; ptn < orig_nptn; ptn+=VCSIZE) {
        double *theta = theta_all + ptn*block;
        // initialization
        for (i = 0; i < VCSIZE; i++) {
            vc_theta[i].load_a(theta+i*block);
            vc_ptn[i] = vc_val0[0] * vc_theta[i];
            vc_df[i] = vc_val1[0] * vc_theta[i];
            vc_ddf[i] = vc_val2[0] * vc_theta[i];
        }
        for (i = 1; i < block/VCSIZE; i++) {
            for (j = 0; j < VCSIZE; j++) {
                vc_theta[j].load_a(&theta[i*VCSIZE+j*block]);
                vc_ptn[j] = mul_add(vc_theta[j], vc_val0[i], vc_ptn[j]);
                vc_df[j] = mul_add(vc_theta[j], vc_val1[i], vc_df[j]);
                vc_ddf[j] = mul_add(vc_theta[j], vc_val2[i], vc_ddf[j]);
            }
        }
        lh_ptn = horizontal_add(vc_ptn) + VectorClass().load_a(&ptn_invar[ptn]);
        inv_lh_ptn = vc_unit / abs(lh_ptn);
        vc_freq.load_a(&ptn_freq[ptn]);
        df_ptn = horizontal_add(vc_df) * inv_lh_ptn;
        ddf_ptn = horizontal_add(vc_ddf) * inv_lh_ptn;
        // ddf of log-lh: (L''/L) - (L'/L)^2
        ddf_ptn = nmul_add(df_ptn, df_ptn, ddf_ptn);
#ifdef _OPENMP
        df_final_th = mul_add(df_ptn, vc_freq, df_final_th);
        ddf_final_th = mul_add(ddf_ptn, vc_freq, ddf_final_th);
#else
        df_final = mul_add(df_ptn, vc_freq, df_final);
        ddf_final = mul_add(ddf_ptn, vc_freq, ddf_final);
#endif
    }
#ifdef _OPENMP
#pragma omp critical
        {
            df_final += df_final_th;
            ddf_final += ddf_final_th;
        }
    }
#endif
    df = horizontal_add(df_final);
    ddf = horizontal_add(ddf_final);
    if (isnan(df) || isinf(df)) {
        df = 0.0;
        ddf = 0.0;
        // outWarning("Numerical instability (some site-likelihood = 0)");
    }
    // assert(isnormal(tree_lh));
    if (orig_nptn < nptn) {
        // ascertainment bias correction over unobserved patterns
        VectorClass lh_final = 0.0;
        df_final = 0.0;
        ddf_final = 0.0;
        lh_ptn = 0.0;
        df_ptn = 0.0;
        ddf_ptn = 0.0;
        double prob_const, df_const, ddf_const;
        double *theta = &theta_all[orig_nptn*block];
        for (ptn = orig_nptn; ptn < nptn; ptn+=VCSIZE) {
            // accumulate the previous iteration's vectors (last one handled
            // in the switch below, which trims the padding lanes)
            lh_final += lh_ptn;
            df_final += df_ptn;
            ddf_final += ddf_ptn;
            // initialization
            for (i = 0; i < VCSIZE; i++) {
                vc_theta[i].load_a(theta+i*block);
                vc_ptn[i] = vc_val0[0] * vc_theta[i];
                vc_df[i] = vc_val1[0] * vc_theta[i];
                vc_ddf[i] = vc_val2[0] * vc_theta[i];
            }
            for (i = 1; i < block/VCSIZE; i++) {
                for (j = 0; j < VCSIZE; j++) {
                    vc_theta[j].load_a(&theta[i*VCSIZE+j*block]);
                    vc_ptn[j] = mul_add(vc_theta[j], vc_val0[i], vc_ptn[j]);
                    vc_df[j] = mul_add(vc_theta[j], vc_val1[i], vc_df[j]);
                    vc_ddf[j] = mul_add(vc_theta[j], vc_val2[i], vc_ddf[j]);
                }
            }
            theta += block*VCSIZE;
            // ptn_invar[ptn] is not aligned
            lh_ptn = horizontal_add(vc_ptn) + VectorClass().load(&ptn_invar[ptn]);
            df_ptn = horizontal_add(vc_df);
            ddf_ptn = horizontal_add(vc_ddf);
        }
        switch ((nptn-orig_nptn) % VCSIZE) {
        case 0:
            prob_const = horizontal_add(lh_final+lh_ptn);
            df_const = horizontal_add(df_final+df_ptn);
            ddf_const = horizontal_add(ddf_final+ddf_ptn);
            break;
        case 1:
            prob_const = horizontal_add(lh_final)+lh_ptn[0];
            df_const = horizontal_add(df_final)+df_ptn[0];
            ddf_const = horizontal_add(ddf_final)+ddf_ptn[0];
            break;
        case 2:
            prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1];
            df_const = horizontal_add(df_final)+df_ptn[0]+df_ptn[1];
            ddf_const = horizontal_add(ddf_final)+ddf_ptn[0]+ddf_ptn[1];
            break;
        case 3:
            prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]+lh_ptn[2];
            df_const = horizontal_add(df_final)+df_ptn[0]+df_ptn[1]+df_ptn[2];
            ddf_const = horizontal_add(ddf_final)+ddf_ptn[0]+ddf_ptn[1]+ddf_ptn[2];
            break;
        default:
            ASSERT(0);
            break;
        }
        prob_const = 1.0 - prob_const;
        double df_frac = df_const / prob_const;
        double ddf_frac = ddf_const / prob_const;
        int nsites = aln->getNSite();
        df += nsites * df_frac;
        ddf += nsites *(ddf_frac + df_frac*df_frac);
    }

    aligned_free(vc_val2);
    aligned_free(vc_val1);
    aligned_free(vc_val0);
}

// NOTE(review): the following definition runs past the end of this chunk and
// is continued outside this view; its visible head is left byte-identical.
template <class VectorClass, const int VCSIZE, const int nstates>
double PhyloTree::computeMixtureLikelihoodBranchEigenSIMD(PhyloNeighbor *dad_branch, PhyloNode *dad) {
    PhyloNode *node = (PhyloNode*) dad_branch->node;
    PhyloNeighbor *node_branch = (PhyloNeighbor*) node->findNeighbor(dad);
    if (!central_partial_lh)
        initializeAllPartialLh();
    if (node->isLeaf()) {
        PhyloNode *tmp_node = dad;
        dad = node;
        node = tmp_node;
        PhyloNeighbor *tmp_nei = dad_branch;
        dad_branch = node_branch;
        node_branch =
tmp_nei; } if ((dad_branch->partial_lh_computed & 1) == 0) computeMixturePartialLikelihoodEigenSIMD<VectorClass, VCSIZE, nstates>(dad_branch, dad); if ((node_branch->partial_lh_computed & 1) == 0) computeMixturePartialLikelihoodEigenSIMD<VectorClass, VCSIZE, nstates>(node_branch, node); double tree_lh = node_branch->lh_scale_factor + dad_branch->lh_scale_factor; size_t ncat = site_rate->getNRate(); size_t nmixture = model->getNMixtures(); size_t block = ncat * nstates * nmixture; size_t statemix = nstates * nmixture; size_t ptn; // for big data size > 4GB memory required size_t c, i, j, m; size_t orig_nptn = aln->size(); size_t nptn = aln->size()+model_factory->unobserved_ptns.size(); size_t maxptn = ((nptn+VCSIZE-1)/VCSIZE)*VCSIZE; maxptn = max(maxptn, aln->size()+((model_factory->unobserved_ptns.size()+VCSIZE-1)/VCSIZE)*VCSIZE); double *eval = model->getEigenvalues(); ASSERT(eval); VectorClass *vc_val = (VectorClass*)aligned_alloc<double>(block); for (c = 0; c < ncat; c++) { for (m = 0; m < nmixture; m++) { double len = site_rate->getRate(c)*dad_branch->getLength(m); VectorClass vc_len(len); VectorClass vc_prop = VectorClass(site_rate->getProp(c) * model->getMixtureWeight(m)); for (i = 0; i < nstates/VCSIZE; i++) { // eval is not aligned! 
vc_val[(m*ncat+c)*nstates/VCSIZE+i] = exp(VectorClass().load_a(&eval[m*nstates+i*VCSIZE]) * vc_len) * vc_prop; } } } double prob_const = 0.0; if (dad->isLeaf()) { // special treatment for TIP-INTERNAL NODE case double *partial_lh_node = aligned_alloc<double>((aln->STATE_UNKNOWN+1)*block); IntVector states_dad = aln->seq_states[dad->id]; states_dad.push_back(aln->STATE_UNKNOWN); for (IntVector::iterator it = states_dad.begin(); it != states_dad.end(); it++) { double *lh_node = partial_lh_node + (*it)*block; double *lh_tip = tip_partial_lh + (*it)*statemix; VectorClass *vc_val_tmp = vc_val; for (m = 0; m < nmixture; m++) { for (c = 0; c < ncat; c++) { for (i = 0; i < nstates; i+=VCSIZE) { (vc_val_tmp[i/VCSIZE] * VectorClass().load_a(&lh_tip[m*nstates+i])).store_a(&lh_node[i]); } lh_node += nstates; vc_val_tmp += nstates/VCSIZE; } } } // VectorClass vc_tip_partial_lh[nstates]; // VectorClass vc_partial_lh_dad[VCSIZE]; VectorClass vc_ptn[VCSIZE]; VectorClass lh_final(0.0), vc_freq; VectorClass lh_ptn; // store likelihoods of VCSIZE consecutive patterns // double **lh_states_dad = aligned_alloc<double*>(maxptn); // for (ptn = 0; ptn < orig_nptn; ptn++) // lh_states_dad[ptn] = &tip_partial_lh[(aln->at(ptn))[dad->id] * nstates]; // for (ptn = orig_nptn; ptn < nptn; ptn++) // lh_states_dad[ptn] = &tip_partial_lh[model_factory->unobserved_ptns[ptn-orig_nptn] * nstates]; // // initialize beyond #patterns for efficiency // for (ptn = nptn; ptn < maxptn; ptn++) // lh_states_dad[ptn] = &tip_partial_lh[aln->STATE_UNKNOWN * nstates]; int *ptn_states_dad = aligned_alloc<int>(maxptn); for (ptn = 0; ptn < orig_nptn; ptn++) ptn_states_dad[ptn] = (aln->at(ptn))[dad->id]; for (ptn = orig_nptn; ptn < nptn; ptn++) ptn_states_dad[ptn] = model_factory->unobserved_ptns[ptn-orig_nptn]; // initialize beyond #patterns for efficiency for (ptn = nptn; ptn < maxptn; ptn++) ptn_states_dad[ptn] = aln->STATE_UNKNOWN; // copy dummy values because VectorClass will access beyond nptn for (ptn = nptn; 
ptn < maxptn; ptn++) memcpy(&dad_branch->partial_lh[ptn*block], dad_branch->partial_lh, block*sizeof(double)); #ifdef _OPENMP #pragma omp parallel private(ptn, i, j, vc_ptn, vc_freq, lh_ptn) { VectorClass lh_final_th = 0.0; #pragma omp for nowait #endif // main loop over all patterns with a step size of VCSIZE for (ptn = 0; ptn < orig_nptn; ptn+=VCSIZE) { // double *partial_lh_dad = dad_branch->partial_lh + ptn*block; for (j = 0; j < VCSIZE; j++) { vc_ptn[j] = 0.0; double *partial_lh_dad = dad_branch->partial_lh + (ptn+j)*block; int state_dad = ptn_states_dad[ptn+j]; double *lh_node = &partial_lh_node[state_dad*block]; for (i = 0; i < block; i+=VCSIZE) { vc_ptn[j] = mul_add(VectorClass().load_a(&lh_node[i]), VectorClass().load_a(&partial_lh_dad[i]), vc_ptn[j]); } } // // initialize vc_tip_partial_lh // for (j = 0; j < VCSIZE; j++) { // double *lh_dad = lh_states_dad[ptn+j]; // for (i = 0; i < nstates/VCSIZE; i++) { // vc_tip_partial_lh[j*(nstates/VCSIZE)+i].load_a(&lh_dad[i*VCSIZE]); // } // vc_partial_lh_dad[j].load_a(&partial_lh_dad[j*block]); // vc_ptn[j] = vc_val[0] * vc_tip_partial_lh[j*(nstates/VCSIZE)] * vc_partial_lh_dad[j]; // } // // // compute vc_ptn // for (i = 1; i < block/VCSIZE; i++) // for (j = 0; j < VCSIZE; j++) { // vc_partial_lh_dad[j].load_a(&partial_lh_dad[j*block+i*VCSIZE]); // vc_ptn[j] = mul_add(vc_val[i] * vc_tip_partial_lh[j*(nstates/VCSIZE)+i%(nstates/VCSIZE)], // vc_partial_lh_dad[j], vc_ptn[j]); // } vc_freq.load_a(&ptn_freq[ptn]); lh_ptn = horizontal_add(vc_ptn) + VectorClass().load_a(&ptn_invar[ptn]); lh_ptn = log(abs(lh_ptn)); lh_ptn.store_a(&_pattern_lh[ptn]); // multiply with pattern frequency #ifdef _OPENMP lh_final_th = mul_add(lh_ptn, vc_freq, lh_final_th); #else lh_final = mul_add(lh_ptn, vc_freq, lh_final); #endif } #ifdef _OPENMP #pragma omp critical { lh_final += lh_final_th; } } #endif tree_lh += horizontal_add(lh_final); if (isnan(tree_lh) || isinf(tree_lh)) { cout.setf(ios::scientific); cout.precision(10); 
model->writeInfo(cout); site_rate->writeInfo(cout); ASSERT(0); } // ascertainment bias correction if (orig_nptn < nptn) { lh_final = 0.0; lh_ptn = 0.0; for (ptn = orig_nptn; ptn < nptn; ptn+=VCSIZE) { // double *partial_lh_dad = &dad_branch->partial_lh[ptn*block]; lh_final += lh_ptn; for (j = 0; j < VCSIZE; j++) { vc_ptn[j] = 0.0; double *partial_lh_dad = dad_branch->partial_lh + (ptn+j)*block; int state_dad = ptn_states_dad[ptn+j]; double *lh_node = &partial_lh_node[state_dad*block]; for (i = 0; i < block; i+=VCSIZE) { vc_ptn[j] = mul_add(VectorClass().load_a(&lh_node[i]), VectorClass().load_a(&partial_lh_dad[i]), vc_ptn[j]); } } // // initialize vc_tip_partial_lh // for (j = 0; j < VCSIZE; j++) { // double *lh_dad = lh_states_dad[ptn+j]; // for (i = 0; i < nstates/VCSIZE; i++) { // vc_tip_partial_lh[j*(nstates/VCSIZE)+i].load_a(&lh_dad[i*VCSIZE]); // } // vc_partial_lh_dad[j].load_a(&partial_lh_dad[j*block]); // vc_ptn[j] = vc_val[0] * vc_tip_partial_lh[j*(nstates/VCSIZE)] * vc_partial_lh_dad[j]; // } // // // compute vc_ptn // for (i = 1; i < block/VCSIZE; i++) // for (j = 0; j < VCSIZE; j++) { // vc_partial_lh_dad[j].load_a(&partial_lh_dad[j*block+i*VCSIZE]); // vc_ptn[j] = mul_add(vc_val[i] * vc_tip_partial_lh[j*(nstates/VCSIZE)+i%(nstates/VCSIZE)], // vc_partial_lh_dad[j], vc_ptn[j]); // } // bugfix 2016-01-21, prob_const can be rescaled for (j = 0; j < VCSIZE; j++) if (dad_branch->scale_num[ptn+j] >= 1) vc_ptn[j] = vc_ptn[j] * SCALING_THRESHOLD; // ptn_invar[ptn] is not aligned lh_ptn = horizontal_add(vc_ptn) + VectorClass().load(&ptn_invar[ptn]); } switch ((nptn-orig_nptn)%VCSIZE) { case 0: prob_const = horizontal_add(lh_final+lh_ptn); break; case 1: prob_const = horizontal_add(lh_final)+lh_ptn[0]; break; case 2: prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]; break; case 3: prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]+lh_ptn[2]; break; default: ASSERT(0); break; } } aligned_free(ptn_states_dad); aligned_free(partial_lh_node); } else 
{ // both dad and node are internal nodes VectorClass vc_partial_lh_node[VCSIZE]; VectorClass vc_partial_lh_dad[VCSIZE], vc_ptn[VCSIZE]; VectorClass lh_final(0.0), vc_freq; VectorClass lh_ptn; // copy dummy values because VectorClass will access beyond nptn for (ptn = nptn; ptn < maxptn; ptn++) { memcpy(&dad_branch->partial_lh[ptn*block], dad_branch->partial_lh, block*sizeof(double)); memcpy(&node_branch->partial_lh[ptn*block], node_branch->partial_lh, block*sizeof(double)); } #ifdef _OPENMP #pragma omp parallel private(ptn, i, j, vc_partial_lh_node, vc_partial_lh_dad, vc_ptn, vc_freq, lh_ptn) { VectorClass lh_final_th = 0.0; #pragma omp for nowait #endif for (ptn = 0; ptn < orig_nptn; ptn+=VCSIZE) { double *partial_lh_dad = dad_branch->partial_lh + ptn*block; double *partial_lh_node = node_branch->partial_lh + ptn*block; for (j = 0; j < VCSIZE; j++) vc_ptn[j] = 0.0; for (i = 0; i < block; i+=VCSIZE) { for (j = 0; j < VCSIZE; j++) { vc_partial_lh_node[j].load_a(&partial_lh_node[i+j*block]); vc_partial_lh_dad[j].load_a(&partial_lh_dad[i+j*block]); vc_ptn[j] = mul_add(vc_val[i/VCSIZE] * vc_partial_lh_node[j], vc_partial_lh_dad[j], vc_ptn[j]); } } vc_freq.load_a(&ptn_freq[ptn]); lh_ptn = horizontal_add(vc_ptn) + VectorClass().load_a(&ptn_invar[ptn]); lh_ptn = log(abs(lh_ptn)); lh_ptn.store_a(&_pattern_lh[ptn]); #ifdef _OPENMP lh_final_th = mul_add(lh_ptn, vc_freq, lh_final_th); #else lh_final = mul_add(lh_ptn, vc_freq, lh_final); #endif } #ifdef _OPENMP #pragma omp critical { lh_final += lh_final_th; } } #endif tree_lh += horizontal_add(lh_final); ASSERT(!isnan(tree_lh) && !isinf(tree_lh)); if (orig_nptn < nptn) { // ascertainment bias correction lh_final = 0.0; lh_ptn = 0.0; double *partial_lh_node = &node_branch->partial_lh[orig_nptn*block]; double *partial_lh_dad = &dad_branch->partial_lh[orig_nptn*block]; for (ptn = orig_nptn; ptn < nptn; ptn+=VCSIZE) { lh_final += lh_ptn; for (j = 0; j < VCSIZE; j++) vc_ptn[j] = 0.0; for (i = 0; i < block; i+=VCSIZE) { for (j = 
0; j < VCSIZE; j++) { vc_partial_lh_node[j].load_a(&partial_lh_node[i+j*block]); vc_partial_lh_dad[j].load_a(&partial_lh_dad[i+j*block]); vc_ptn[j] = mul_add(vc_val[i/VCSIZE] * vc_partial_lh_node[j], vc_partial_lh_dad[j], vc_ptn[j]); } } // bugfix 2016-01-21, prob_const can be rescaled for (j = 0; j < VCSIZE; j++) if (dad_branch->scale_num[ptn+j] + node_branch->scale_num[ptn+j] >= 1) vc_ptn[j] = vc_ptn[j] * SCALING_THRESHOLD; // ptn_invar[ptn] is not aligned lh_ptn = horizontal_add(vc_ptn) + VectorClass().load(&ptn_invar[ptn]); partial_lh_node += block*VCSIZE; partial_lh_dad += block*VCSIZE; } switch ((nptn-orig_nptn)%VCSIZE) { case 0: prob_const = horizontal_add(lh_final+lh_ptn); break; case 1: prob_const = horizontal_add(lh_final)+lh_ptn[0]; break; case 2: prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]; break; case 3: prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]+lh_ptn[2]; break; default: ASSERT(0); break; } } } if (orig_nptn < nptn) { // ascertainment bias correction prob_const = log(1.0 - prob_const); for (ptn = 0; ptn < orig_nptn; ptn++) _pattern_lh[ptn] -= prob_const; tree_lh -= aln->getNSite()*prob_const; } aligned_free(vc_val); return tree_lh; } template <class VectorClass, const int VCSIZE, const int nstates> double PhyloTree::computeMixtureLikelihoodFromBufferEigenSIMD() { ASSERT(theta_all && theta_computed); double tree_lh = current_it->lh_scale_factor + current_it_back->lh_scale_factor; size_t ncat = site_rate->getNRate(); size_t nmixture = model->getNMixtures(); size_t block = nstates * ncat * nmixture; size_t ptn; // for big data size > 4GB memory required size_t c, i, j, m; size_t orig_nptn = aln->size(); size_t nptn = aln->size()+model_factory->unobserved_ptns.size(); // size_t maxptn = ((nptn+VCSIZE-1)/VCSIZE)*VCSIZE; double *eval = model->getEigenvalues(); ASSERT(eval); VectorClass *vc_val0 = (VectorClass*)aligned_alloc<double>(block); for (c = 0; c < ncat; c++) { VectorClass vc_rate = site_rate->getRate(c); for (m = 0; m < 
nmixture; m++) { VectorClass vc_len = current_it->getLength(m); VectorClass vc_prop = site_rate->getProp(c)*model->getMixtureWeight(m); for (i = 0; i < nstates/VCSIZE; i++) { VectorClass cof = VectorClass().load_a(&eval[m*nstates+i*VCSIZE]) * vc_rate; VectorClass val = exp(cof*vc_len) * vc_prop; vc_val0[(m*ncat+c)*nstates/VCSIZE+i] = val; } } } VectorClass vc_ptn[VCSIZE]; VectorClass vc_freq; VectorClass lh_final = 0.0; // these stores values of 2 consecutive patterns VectorClass lh_ptn; // perform 2 sites at the same time for SSE/AVX efficiency #ifdef _OPENMP #pragma omp parallel private (ptn, i, j, vc_freq, vc_ptn, lh_ptn) { VectorClass lh_final_th = 0.0; #pragma omp for nowait #endif for (ptn = 0; ptn < orig_nptn; ptn+=VCSIZE) { double *theta = theta_all + ptn*block; // initialization for (i = 0; i < VCSIZE; i++) { vc_ptn[i] = vc_val0[0] * VectorClass().load_a(theta+i*block); } for (i = 1; i < block/VCSIZE; i++) { for (j = 0; j < VCSIZE; j++) { vc_ptn[j] = mul_add(VectorClass().load_a(&theta[i*VCSIZE+j*block]), vc_val0[i], vc_ptn[j]); } } lh_ptn = horizontal_add(vc_ptn) + VectorClass().load_a(&ptn_invar[ptn]); lh_ptn = log(abs(lh_ptn)); lh_ptn.store_a(&_pattern_lh[ptn]); vc_freq.load_a(&ptn_freq[ptn]); #ifdef _OPENMP lh_final_th = mul_add(lh_ptn, vc_freq, lh_final_th); #else lh_final = mul_add(lh_ptn, vc_freq, lh_final); #endif } #ifdef _OPENMP #pragma omp critical { lh_final += lh_final_th; } } #endif tree_lh += horizontal_add(lh_final); if (isnan(tree_lh) || isinf(tree_lh)) { cout << "WARNING: Numerical underflow caused by alignment sites"; i = aln->getNSite(); for (j = 0, c = 0; j < i; j++) { ptn = aln->getPatternID(j); if (isnan(_pattern_lh[ptn]) || isinf(_pattern_lh[ptn])) { cout << " " << j+1; c++; if (c >= 10) { cout << " ..."; break; } } } cout << endl; tree_lh = current_it->lh_scale_factor + current_it_back->lh_scale_factor; for (ptn = 0; ptn < orig_nptn; ptn++) { if (isnan(_pattern_lh[ptn]) || isinf(_pattern_lh[ptn])) { _pattern_lh[ptn] = 
LOG_SCALING_THRESHOLD*4; // log(2^(-1024)) } tree_lh += _pattern_lh[ptn] * ptn_freq[ptn]; } } if (orig_nptn < nptn) { // ascertaiment bias correction lh_final = 0.0; lh_ptn = 0.0; double prob_const;// df_const, ddf_const; double *theta = &theta_all[orig_nptn*block]; UBYTE sum_scale_num[nstates+VCSIZE]; memset(sum_scale_num, 0, sizeof(UBYTE)*(nstates+VCSIZE)); if (current_it->node->isLeaf()) memcpy(sum_scale_num, current_it_back->scale_num+orig_nptn, sizeof(UBYTE)*(nptn-orig_nptn)); else if (current_it_back->node->isLeaf()) memcpy(sum_scale_num, current_it->scale_num+orig_nptn, sizeof(UBYTE)*(nptn-orig_nptn)); else { for (ptn = orig_nptn; ptn < nptn; ptn++) sum_scale_num[ptn-orig_nptn] = current_it->scale_num[ptn] + current_it_back->scale_num[ptn]; } for (ptn = orig_nptn; ptn < nptn; ptn+=VCSIZE) { lh_final += lh_ptn; // initialization for (i = 0; i < VCSIZE; i++) { vc_ptn[i] = vc_val0[0] * VectorClass().load_a(theta+i*block); } for (i = 1; i < block/VCSIZE; i++) { for (j = 0; j < VCSIZE; j++) { vc_ptn[j] = mul_add(VectorClass().load_a(&theta[i*VCSIZE+j*block]), vc_val0[i], vc_ptn[j]); } } theta += block*VCSIZE; // bugfix 2016-01-21, prob_const can be rescaled for (j = 0; j < VCSIZE; j++) if (sum_scale_num[ptn+j-orig_nptn] >= 1) vc_ptn[j] = vc_ptn[j] * SCALING_THRESHOLD; // ptn_invar[ptn] is not aligned lh_ptn = horizontal_add(vc_ptn) + VectorClass().load(&ptn_invar[ptn]); } switch ((nptn-orig_nptn) % VCSIZE) { case 0: prob_const = horizontal_add(lh_final+lh_ptn); break; case 1: prob_const = horizontal_add(lh_final)+lh_ptn[0]; break; case 2: prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]; break; case 3: prob_const = horizontal_add(lh_final)+lh_ptn[0]+lh_ptn[1]+lh_ptn[2]; break; default: ASSERT(0); break; } prob_const = log(1.0 - prob_const); tree_lh -= aln->getNSite() * prob_const; for (ptn = 0; ptn < orig_nptn; ptn++) _pattern_lh[ptn] -= prob_const; } aligned_free(vc_val0); return tree_lh; } #endif /* PHYLOKERNELMIXTURE_H_ */
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 16; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=floord(Nt-1,3);t1++) { lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6)); ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(max(0,ceild(3*t1-3*t2,2)),ceild(3*t1-2,4)),ceild(24*t2-Nz-3,16));t3<=min(min(min(floord(4*Nt+Ny-9,16),floord(12*t1+Ny+15,16)),floord(24*t2+Ny+11,16)),floord(24*t1-24*t2+Nz+Ny+13,16));t3++) { for (t4=max(max(max(max(0,ceild(3*t1-3*t2-14,16)),ceild(3*t1-30,32)),ceild(24*t2-Nz-115,128)),ceild(16*t3-Ny-115,128));t4<=min(min(min(min(floord(4*Nt+Nx-9,128),floord(12*t1+Nx+15,128)),floord(24*t2+Nx+11,128)),floord(16*t3+Nx+3,128)),floord(24*t1-24*t2+Nz+Nx+13,128));t4++) { for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(16*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),Nt-1),3*t1+5),6*t2+4),4*t3+2),32*t4+30);t5++) { for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(16*t3,4*t5+4);t7<=min(16*t3+15,4*t5+Ny-5);t7++) { lbv=max(128*t4,4*t5+4); ubv=min(128*t4+127,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ 
(-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
gcc-ping-pong.c
/* Guessing what compilers are sufficient here... */
/* On Blue Gene/Q, we tried 4.7.2 and 4.4.7 only. */
/*
 * Two-thread "ping-pong" latency microbenchmark: the two threads alternately
 * publish the iteration number through a pair of one-slot mailboxes
 * (left_* and right_*) using GCC __atomic builtins, and the time per
 * round trip is reported.  The compiler-version guard below selects
 * compilers known to support __atomic_load_n/__atomic_store_n.
 */
#if (defined(__GNUC__) && (__GNUC__ >= 5)) || \
    (defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ >= 7)) || \
    (defined(__clang__) && defined(__clang_major__) && (__clang_major__ >= 3)) || \
    (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 1400))

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#ifdef _OPENMP
# include <omp.h>
#else
# error No OpenMP support!
#endif

/* Memory orders for the mailbox flags, chosen at compile time: full
 * sequential consistency for comparison runs, otherwise the cheaper
 * acquire(load)/release(store) pairing.  Plain ints holding __ATOMIC_*
 * constants, passed straight to the __atomic builtins. */
#ifdef SEQUENTIAL_CONSISTENCY
int load_model = __ATOMIC_SEQ_CST;
int store_model = __ATOMIC_SEQ_CST;
#else
int load_model = __ATOMIC_ACQUIRE;
int store_model = __ATOMIC_RELEASE;
#endif

int main(int argc, char * argv[])
{
    int nt = omp_get_max_threads();
    /* The protocol below is only correct for exactly two threads, so force
     * the team size to 2 (the disabled branch kept the old "round down to an
     * even count" policy). */
#if 1
    if (nt != 2) omp_set_num_threads(2);
#else
    if (nt < 2) omp_set_num_threads(2);
    if (nt % 2 != 0) omp_set_num_threads(nt-1);
#endif

    /* Number of round trips; optional first command-line argument. */
    int iterations = (argc>1) ? atoi(argv[1]) : 1000000;

    printf("thread ping-pong benchmark\n");
    printf("num threads = %d\n", omp_get_max_threads());
    printf("iterations = %d\n", iterations);
#ifdef SEQUENTIAL_CONSISTENCY
    printf("memory model = %s\n", "seq_cst");
#else
    printf("memory model = %s\n", "acq-rel");
#endif
    fflush(stdout);

    /* Mailboxes: *_ready is the flag (it holds the iteration index that has
     * been published) and *_payload is the data it guards.  -1 means
     * "nothing published yet". */
    int left_ready = -1;
    int right_ready = -1;
    int left_payload = 0;
    int right_payload = 0;

    #pragma omp parallel
    {
        int me = omp_get_thread_num();
        /// 0=left 1=right
        bool parity = (me % 2 == 0);
        /* junk accumulates the received payloads so the loads cannot be
         * optimized away; it is printed at the end. */
        int junk = 0;
        /// START TIME
        #pragma omp barrier
        double t0 = omp_get_wtime();
        for (int i=0; i<iterations; ++i) {
            if (parity) {
                /// send to left
                left_payload = i;
                /* release store: left_payload is visible before the flag */
                __atomic_store_n( &left_ready, i, store_model);
                /// recv from right
                /* acquire spin-wait: the payload read below happens-after
                 * the partner's release store of right_ready */
                while (i != __atomic_load_n( &right_ready, load_model));
                //printf("%d: left received %d\n", i, right_payload);
                junk += right_payload;
            } else {
                /// recv from left
                while (i != __atomic_load_n( &left_ready, load_model));
                //printf("%d: right received %d\n", i, left_payload);
                junk += left_payload;
                ///send to right
                right_payload = i;
                __atomic_store_n( &right_ready, i, store_model);
            }
        }
        /// STOP TIME
        #pragma omp barrier
        double t1 = omp_get_wtime();
        /// PRINT TIME
        /* NOTE(review): if the runtime delivers fewer than 2 threads despite
         * omp_set_num_threads above, the spin loops never terminate — TODO
         * confirm this is acceptable for a benchmark. */
        double dt = t1-t0;
        #pragma omp critical
        {
            printf("total time elapsed = %lf\n", dt);
            printf("time per iteration = %e\n", dt/iterations);
            printf("%d\n", junk);
        }
    }
    return 0;
}

#else // GCC 5+
#error Your compiler probably does not support __atomic functions.
#endif // GCC 5+
3d25pt_var.c
/* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 16; tile_size[1] = 16; tile_size[2] = 8; tile_size[3] = 512; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; 
t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[(t)%2][i ][j ][k ] + coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) + coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) + coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) + coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) + coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) + coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) + coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) + coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) + coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) + coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) + coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) + coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
GB_binop__bor_uint8.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// This instantiation hard-codes the BOR (bitwise-or) operator for uint8_t.
// The macros below configure the generic *_template.c files that are
// textually #include'd by the functions further down.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):        GB (_AaddB__bor_uint8)
// A.*B function (eWiseMult):      GB (_AemultB_08__bor_uint8)
// A.*B function (eWiseMult):      GB (_AemultB_02__bor_uint8)
// A.*B function (eWiseMult):      GB (_AemultB_04__bor_uint8)
// A.*B function (eWiseMult):      GB (_AemultB_bitmap__bor_uint8)
// A*D function (colscale):        GB (_AxD__bor_uint8)
// D*A function (rowscale):        GB (_DxB__bor_uint8)
// C+=B function (dense accum):    GB (_Cdense_accumB__bor_uint8)
// C+=b function (dense accum):    GB (_Cdense_accumb__bor_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3):  GB (_Cdense_ewise3_noaccum__bor_uint8)
// C=scalar+B                      GB (_bind1st__bor_uint8)
// C=scalar+B'                     GB (_bind1st_tran__bor_uint8)
// C=A+scalar                      GB (_bind2nd__bor_uint8)
// C=A'+scalar                     GB (_bind2nd_tran__bor_uint8)

// C type:   uint8_t
// A type:   uint8_t
// A pattern? 0
// B type:   uint8_t
// B pattern? 0

// BinaryOp: cij = (aij) | (bij)

// Type of the A operand.
#define GB_ATYPE \
    uint8_t

// Type of the B operand.
#define GB_BTYPE \
    uint8_t

// Type of the C result.
#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
// (GBX — presumably the iso-aware value accessor from GB.h; TODO confirm)
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// (note: the macro body is just "0"; the trailing backslash continues the
// definition onto the intentionally blank next line)
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: bitwise OR of the two operands
#define GB_BINOP(z,x,y,i,j) \
    z = (x) | (y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BOR || GxB_NO_UINT8 || GxB_NO_BOR_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__bor_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bor_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bor_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__bor_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict 
Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__bor_uint8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bor_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint8_t alpha_scalar ; uint8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ; beta_scalar = (*((uint8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__bor_uint8) ( GrB_Matrix C, const int C_sparsity, const int 
ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bor_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__bor_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__bor_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__bor_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) 
for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = (x) | (bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__bor_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij) | (y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x) | (aij) ; \ } GrB_Info GB (_bind1st_tran__bor_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij) | (y) ; \ } GrB_Info GB (_bind2nd_tran__bor_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
DRB045-doall1-orig-no.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. 
Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
/* Simplest one dimension array computation */

/* Fix: printf was called with no <stdio.h> in this file, relying on an
   implicit function declaration, which is invalid since C99 and a hard
   error on modern compilers.  */
#include <stdio.h>

int a[100];

/* DRB045-doall1-orig-no: race-free benchmark kernel.  Two worksharing
 * loops each touch only a[i] in iteration i, so iterations are fully
 * independent and the "omp parallel for" annotations introduce no data
 * race (this is a known-negative test case).  Returns 0.  */
int main()
{
	int i;
	int _ret_val_0;
	/* Loop 1: initialize a[i] = i; no cross-iteration dependence. */
	#pragma cetus private(i) 
	#pragma loop name main#0 
	#pragma cetus parallel 
	#pragma omp parallel for private(i)
	for (i=0; i<100; i ++ )
	{
		a[i]=i;
	}
	/* Loop 2: a[i] += 1; again each iteration reads/writes only its own
	 * element, so parallel execution is safe. */
	#pragma cetus private(i) 
	#pragma loop name main#1 
	#pragma cetus parallel 
	#pragma omp parallel for private(i)
	for (i=0; i<100; i ++ )
	{
		a[i]=(a[i]+1);
	}
	/* Output loop: kept sequential (Cetus marked it private(i) but did not
	 * parallelize it) so the printed order is deterministic. */
	#pragma cetus private(i) 
	#pragma loop name main#2 
	for (i=0; i<100; i ++ )
	{
		printf("%d\n", a[i]);
	}
	_ret_val_0=0;
	return _ret_val_0;
}
single_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s

// Diagnostic test for '#pragma omp single': checks misplaced directives,
// malformed clause lists, and illegal branches into/out of the region.
// The expected-error/expected-warning markers drive clang's -verify mode;
// @+N offsets are relative to the marker's own line, so comments below are
// added only between test functions, never between a marker and its target.

void foo();

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}}
#pragma omp single

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}}
#pragma omp single foo

// Well-formed uses: a bare 'single' applied to a statement.
void test_no_clause() {
  int i;
#pragma omp single
  foo();

#pragma omp single
  ++i;
}

// Jumping into or out of a 'single' region must be rejected; a label and
// goto fully inside the region (L2) is fine.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;
  int x[24];

#pragma omp parallel
#pragma omp single
  {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

// Trailing junk after the directive is ignored with a warning.
void test_invalid_clause() {
  int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single foo bar
  foo();
}

// Non-identifier tokens and clauses that are not valid on 'single'.
void test_non_identifiers() {
  int i, x;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single;
  foo();
#pragma omp parallel
// expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp single'}}
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single linear(x);
  foo();

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single private(x);
  foo();

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single, private(x);
  foo();
}

// Malformed and well-formed 'private' clause argument lists.
void test_private() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp single private(
  foo();
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp single private(,
  foo();
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp single private(, )
  foo();
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single private()
  foo();
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single private(int)
  foo();
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp single private(0)
  foo();

  int x, y, z;
#pragma omp parallel
#pragma omp single private(x)
  foo();
#pragma omp parallel
#pragma omp single private(x, y)
  foo();
#pragma omp parallel
#pragma omp single private(x, y, z)
  foo();
}

// Malformed and well-formed 'firstprivate' clause argument lists.
void test_firstprivate() {
  int i;
#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp single firstprivate(
  foo();

#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp single firstprivate(,
  foo();
#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp single firstprivate(, )
  foo();
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single firstprivate()
  foo();
#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single firstprivate(int)
  foo();
#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp single firstprivate(0)
  foo();
}

// At most one 'nowait' clause is allowed.
void test_nowait() {
#pragma omp single nowait nowait // expected-error {{directive '#pragma omp single' cannot contain more than one 'nowait' clause}}
  for (int i = 0; i < 16; ++i)
    ;
}
Wparentheses-2.c
/* PR c/70436 */
/* { dg-additional-options "-Wparentheses" } */

/* -Wparentheses "dangling else" tests for OpenMP constructs: an 'else'
   whose matching 'if' is separated from it by an OpenMP pragma plus an
   unbraced body must still be diagnosed as ambiguous.  Each dg-warning
   marker sits on the same line as the 'if' it applies to, so whitespace
   here is not behavior-relevant.  */

int a, b, c;
void bar (void);
void baz (void);
void f1 (void);
#pragma omp declare target to (bar, baz, f1, a, b, c)

/* 'distribute' family, inside a declare-target function.  Braced bodies
   (the later cases) disambiguate the else and get no warning.  */
void f1 (void)
{
  int i, j;
  if (a) /* { dg-warning "ambiguous" } */
#pragma omp distribute
    for (i = 0; i < 10; i++)
      if (b)
        bar ();
      else
        baz ();
  if (a) /* { dg-warning "ambiguous" } */
    for (i = 0; i < 10; i++)
#pragma omp distribute simd
      for (j = 0; j < 10; j++)
        if (b)
          bar ();
        else
          baz ();
  if (a)
#pragma omp distribute parallel for
    for (i = 0; i < 10; i++)
      if (b) /* { dg-warning "ambiguous" } */
#pragma omp parallel for
        for (j = 0; j < 10; j++)
          if (c)
            bar ();
          else
            baz ();
  if (a) /* { dg-warning "ambiguous" } */
#pragma omp distribute parallel for simd collapse(2)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        if (b)
          bar ();
        else
          baz ();
  if (a)
#pragma omp distribute
    for (i = 0; i < 10; i++)
      {
        if (b)
          bar ();
        else
          baz ();
      }
  if (a)
    {
#pragma omp distribute simd
      for (i = 0; i < 10; ++i)
        if (b)
          bar ();
    }
  else
    baz ();
  if (a)
#pragma omp distribute parallel for collapse(2)
    for (i = 0; i < 10; i++)
      {
        for (j = 0; j < 10; j++)
          if (b)
            bar ();
          else
            baz ();
      }
  if (a)
    for (i = 0; i < 10; i++)
#pragma omp distribute parallel for simd
      for (j = 0; j < 10; j++)
        {
          if (b)
            bar ();
        }
  else
    baz ();
}

/* Combined 'target teams distribute' family and bare target constructs.  */
void f2 (void)
{
  int i, j;
  if (a) /* { dg-warning "ambiguous" } */
#pragma omp target teams distribute
    for (i = 0; i < 10; i++)
      if (b)
        bar ();
      else
        baz ();
  if (a) /* { dg-warning "ambiguous" } */
    for (i = 0; i < 10; i++)
#pragma omp target teams distribute simd
      for (j = 0; j < 10; j++)
        if (b)
          bar ();
        else
          baz ();
  if (a)
#pragma omp target teams distribute parallel for
    for (i = 0; i < 10; i++)
      if (b) /* { dg-warning "ambiguous" } */
#pragma omp parallel for
        for (j = 0; j < 10; j++)
          if (c)
            bar ();
          else
            baz ();
  if (a) /* { dg-warning "ambiguous" } */
#pragma omp target teams distribute parallel for simd collapse(2)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        if (b)
          bar ();
        else
          baz ();
  if (a) /* { dg-warning "ambiguous" } */
#pragma omp target teams
    if (b)
      bar ();
    else
      baz ();
  if (a) /* { dg-warning "ambiguous" } */
#pragma omp target
#pragma omp parallel
    if (b)
      bar ();
    else
      baz ();
  if (a) /* { dg-warning "ambiguous" } */
#pragma omp target
    if (b)
      bar ();
    else
      baz ();
  if (a) /* { dg-warning "ambiguous" } */
#pragma omp target parallel
    if (b)
      bar ();
    else
      baz ();
  if (a) /* { dg-warning "ambiguous" } */
#pragma omp target simd
    for (i = 0; i < 10; i++)
      if (b)
        bar ();
      else
        baz ();
  if (a) /* { dg-warning "ambiguous" } */
#pragma omp target simd collapse(2)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        if (b)
          bar ();
        else
          baz ();
  if (a)
#pragma omp target teams distribute
    for (i = 0; i < 10; i++)
      {
        if (b)
          bar ();
        else
          baz ();
      }
  if (a)
    {
#pragma omp target teams distribute simd
      for (i = 0; i < 10; ++i)
        if (b)
          bar ();
    }
  else
    baz ();
  if (a)
#pragma omp target teams distribute parallel for collapse(2)
    for (i = 0; i < 10; i++)
      {
        for (j = 0; j < 10; j++)
          if (b)
            bar ();
          else
            baz ();
      }
  if (a)
    for (i = 0; i < 10; i++)
#pragma omp target teams distribute parallel for simd
      for (j = 0; j < 10; j++)
        {
          if (b)
            bar ();
        }
  else
    baz ();
  if (a)
#pragma omp target teams
    {
      if (b)
        bar ();
    }
  else
    baz ();
  if (a)
#pragma omp target
#pragma omp parallel
    {
      if (b)
        bar ();
      else
        baz ();
    }
  if (a)
#pragma omp target
    {
      if (b)
        bar ();
    }
  else
    baz ();
  if (a)
#pragma omp target parallel
    {
      if (b)
        bar ();
    }
  else
    baz ();
  if (a)
#pragma omp target simd
    for (i = 0; i < 10; i++)
      {
        if (b)
          bar ();
        else
          baz ();
      }
  if (a)
#pragma omp target simd
    for (i = 0; i < 10; i++)
      {
        if (b)
          bar ();
      }
  else
    baz ();
  if (a)
#pragma omp target simd collapse(2)
    for (i = 0; i < 10; i++)
      {
        {
          for (j = 0; j < 10; j++)
            if (b)
              bar ();
            else
              baz ();
        }
      }
  if (a)
#pragma omp target simd collapse(2)
    for (i = 0; i < 10; i++)
      {
        {
          for (j = 0; j < 10; j++)
            if (b)
              bar ();
        }
      }
  else
    baz ();
}

/* Same shapes with separate '#pragma omp target' + '#pragma omp teams'
   pragma pairs instead of the combined directives.  */
void f3 (void)
{
  int i, j;
  if (a) /* { dg-warning "ambiguous" } */
#pragma omp target
#pragma omp teams distribute
    for (i = 0; i < 10; i++)
      if (b)
        bar ();
      else
        baz ();
  if (a) /* { dg-warning "ambiguous" } */
    for (i = 0; i < 10; i++)
#pragma omp target
#pragma omp teams distribute simd
      for (j = 0; j < 10; j++)
        if (b)
          bar ();
        else
          baz ();
  if (a)
#pragma omp target
#pragma omp teams distribute parallel for
    for (i = 0; i < 10; i++)
      if (b) /* { dg-warning "ambiguous" } */
#pragma omp parallel for
        for (j = 0; j < 10; j++)
          if (c)
            bar ();
          else
            baz ();
  if (a) /* { dg-warning "ambiguous" } */
#pragma omp target
#pragma omp teams distribute parallel for simd collapse(2)
    for (i = 0; i < 10; i++)
      for (j = 0; j < 10; j++)
        if (b)
          bar ();
        else
          baz ();
  if (a) /* { dg-warning "ambiguous" } */
#pragma omp target
#pragma omp teams
    if (b)
      bar ();
    else
      baz ();
  if (a) /* { dg-warning "ambiguous" } */
#pragma omp target
#pragma omp teams
#pragma omp parallel
    if (b)
      bar ();
    else
      baz ();
  if (a)
#pragma omp target
#pragma omp teams distribute
    for (i = 0; i < 10; i++)
      {
        if (b)
          bar ();
        else
          baz ();
      }
  if (a)
    {
#pragma omp target
#pragma omp teams distribute simd
      for (i = 0; i < 10; ++i)
        if (b)
          bar ();
    }
  else
    baz ();
  if (a)
#pragma omp target
#pragma omp teams distribute parallel for collapse(2)
    for (i = 0; i < 10; i++)
      {
        for (j = 0; j < 10; j++)
          if (b)
            bar ();
          else
            baz ();
      }
  if (a)
    for (i = 0; i < 10; i++)
#pragma omp target
#pragma omp teams distribute parallel for simd
      for (j = 0; j < 10; j++)
        {
          if (b)
            bar ();
        }
  else
    baz ();
  if (a)
#pragma omp target
#pragma omp teams
    {
      if (b)
        bar ();
    }
  else
    baz ();
  if (a)
#pragma omp target
#pragma omp teams
#pragma omp parallel
    {
      if (b)
        bar ();
      else
        baz ();
    }
}

/* 'target data' with an unbraced (warned) and a braced body.  */
void f4 (void)
{
  if (a) /* { dg-warning "ambiguous" } */
#pragma omp target data map (tofrom: b)
    if (b)
      bar ();
    else
      baz ();
  if (a)
#pragma omp target data map (tofrom: b)
    {
      if (b)
        bar ();
      else
        baz ();
    }
}
softmax_layer.c
#include "softmax_layer.h" #include "blas.h" #include "dark_cuda.h" #include "utils.h" #include "blas.h" #include <float.h> #include <math.h> #include <stdlib.h> #include <stdio.h> #include <assert.h> #define SECRET_NUM -1234 void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output) { int b; for (b = 0; b < batch; ++b) { int i; int count = 0; for (i = 0; i < hierarchy->groups; ++i) { int group_size = hierarchy->group_size[i]; softmax(input + b*inputs + count, group_size, temp, output + b*inputs + count, 1); count += group_size; } } } softmax_layer make_softmax_layer(int batch, int inputs, int groups) { assert(inputs%groups == 0); fprintf(stderr, "softmax %4d\n", inputs); softmax_layer l = { (LAYER_TYPE)0 }; l.type = SOFTMAX; l.batch = batch; l.groups = groups; l.inputs = inputs; l.outputs = inputs; l.loss = (float*)xcalloc(inputs * batch, sizeof(float)); l.output = (float*)xcalloc(inputs * batch, sizeof(float)); l.delta = (float*)xcalloc(inputs * batch, sizeof(float)); l.cost = (float*)xcalloc(1, sizeof(float)); l.forward = forward_softmax_layer; l.backward = backward_softmax_layer; #ifdef GPU l.forward_gpu = forward_softmax_layer_gpu; l.backward_gpu = backward_softmax_layer_gpu; l.output_gpu = cuda_make_array(l.output, inputs*batch); l.loss_gpu = cuda_make_array(l.loss, inputs*batch); l.delta_gpu = cuda_make_array(l.delta, inputs*batch); #endif return l; } void forward_softmax_layer(const softmax_layer l, network_state net) { if(l.softmax_tree){ int i; int count = 0; for (i = 0; i < l.softmax_tree->groups; ++i) { int group_size = l.softmax_tree->group_size[i]; softmax_cpu(net.input + count, group_size, l.batch, l.inputs, 1, 0, 1, l.temperature, l.output + count); count += group_size; } } else { softmax_cpu(net.input, l.inputs/l.groups, l.batch, l.inputs, l.groups, l.inputs/l.groups, 1, l.temperature, l.output); } if(net.truth && !l.noloss){ softmax_x_ent_cpu(l.batch*l.inputs, l.output, net.truth, l.delta, l.loss); l.cost[0] 
= sum_array(l.loss, l.batch*l.inputs); } } void backward_softmax_layer(const softmax_layer l, network_state net) { axpy_cpu(l.inputs*l.batch, 1, l.delta, 1, net.delta, 1); } #ifdef GPU void pull_softmax_layer_output(const softmax_layer layer) { cuda_pull_array(layer.output_gpu, layer.output, layer.inputs*layer.batch); } void forward_softmax_layer_gpu(const softmax_layer l, network_state net) { if(l.softmax_tree){ softmax_tree_gpu(net.input, 1, l.batch, l.inputs, l.temperature, l.output_gpu, *l.softmax_tree); /* int i; int count = 0; for (i = 0; i < l.softmax_tree->groups; ++i) { int group_size = l.softmax_tree->group_size[i]; softmax_gpu(net.input_gpu + count, group_size, l.batch, l.inputs, 1, 0, 1, l.temperature, l.output_gpu + count); count += group_size; } */ } else { if(l.spatial){ softmax_gpu_new_api(net.input, l.c, l.batch*l.c, l.inputs/l.c, l.w*l.h, 1, l.w*l.h, 1, l.output_gpu); }else{ softmax_gpu_new_api(net.input, l.inputs/l.groups, l.batch, l.inputs, l.groups, l.inputs/l.groups, 1, l.temperature, l.output_gpu); } } if(net.truth && !l.noloss){ softmax_x_ent_gpu(l.batch*l.inputs, l.output_gpu, net.truth, l.delta_gpu, l.loss_gpu); if(l.softmax_tree){ mask_gpu_new_api(l.batch*l.inputs, l.delta_gpu, SECRET_NUM, net.truth, 0); mask_gpu_new_api(l.batch*l.inputs, l.loss_gpu, SECRET_NUM, net.truth, 0); } cuda_pull_array(l.loss_gpu, l.loss, l.batch*l.inputs); l.cost[0] = sum_array(l.loss, l.batch*l.inputs); } } void backward_softmax_layer_gpu(const softmax_layer layer, network_state state) { axpy_ongpu(layer.batch*layer.inputs, state.net.loss_scale, layer.delta_gpu, 1, state.delta, 1); } #endif // ------------------------------------- // Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf contrastive_layer make_contrastive_layer(int batch, int w, int h, int c, int classes, int inputs, layer *yolo_layer) { contrastive_layer l = { (LAYER_TYPE)0 }; l.type = CONTRASTIVE; l.batch = batch; l.inputs = inputs; l.w = w; l.h = h; l.c = c; l.temperature = 1; 
l.max_boxes = 0; if (yolo_layer) { l.detection = 1; l.max_boxes = yolo_layer->max_boxes; l.labels = yolo_layer->labels; // track id l.class_ids = yolo_layer->class_ids; // class_ids l.n = yolo_layer->n; // num of embeddings per cell = num of anchors l.classes = yolo_layer->classes;// num of classes classes = l.classes; l.embedding_size = l.inputs / (l.n*l.h*l.w); l.truths = yolo_layer->truths; if (l.embedding_size != yolo_layer->embedding_size) { printf(" Error: [contrastive] embedding_size=%d isn't equal to [yolo] embedding_size=%d. They should use the same [convolutional] layer \n", l.embedding_size, yolo_layer->embedding_size); getchar(); exit(0); } if (l.inputs % (l.n*l.h*l.w) != 0) { printf(" Warning: filters= number in the previous (embedding) layer isn't divisable by number of anchors %d \n", l.n); getchar(); } } else { l.detection = 0; l.labels = (int*)xcalloc(l.batch, sizeof(int)); // labels l.n = 1; // num of embeddings per cell l.classes = classes; // num of classes l.embedding_size = l.c; } l.outputs = inputs; l.loss = (float*)xcalloc(1, sizeof(float)); l.output = (float*)xcalloc(inputs * batch, sizeof(float)); l.delta = (float*)xcalloc(inputs * batch, sizeof(float)); l.cost = (float*)xcalloc(1, sizeof(float)); const size_t step = l.batch*l.n*l.h*l.w; l.cos_sim = NULL; l.exp_cos_sim = NULL; l.p_constrastive = NULL; if (!l.detection) { l.cos_sim = (float*)xcalloc(step*step, sizeof(float)); l.exp_cos_sim = (float*)xcalloc(step*step, sizeof(float)); l.p_constrastive = (float*)xcalloc(step*step, sizeof(float)); } //l.p_constrastive = (float*)xcalloc(step*step, sizeof(float)); //l.contrast_p_size = (int*)xcalloc(1, sizeof(int)); //*l.contrast_p_size = step; //l.contrast_p = (contrastive_params*)xcalloc(*l.contrast_p_size, sizeof(contrastive_params)); l.forward = forward_contrastive_layer; l.backward = backward_contrastive_layer; #ifdef GPU l.forward_gpu = forward_contrastive_layer_gpu; l.backward_gpu = backward_contrastive_layer_gpu; l.output_gpu = 
cuda_make_array(l.output, inputs*batch); l.delta_gpu = cuda_make_array(l.delta, inputs*batch); const int max_contr_size = (l.max_boxes*l.batch)*(l.max_boxes*l.batch) * sizeof(contrastive_params)/4; printf(" max_contr_size = %d MB \n", max_contr_size / (1024*1024)); l.contrast_p_gpu = (contrastive_params *)cuda_make_array(NULL, max_contr_size); #endif fprintf(stderr, "contrastive %4d x%4d x%4d x emb_size %4d x batch: %4d classes = %4d, step = %4d \n", w, h, l.n, l.embedding_size, batch, l.classes, step); if(l.detection) fprintf(stderr, "detection \n"); return l; } static inline float clip_value(float val, const float max_val) { if (val > max_val) { //printf("\n val = %f > max_val = %f \n", val, max_val); val = max_val; } else if (val < -max_val) { //printf("\n val = %f < -max_val = %f \n", val, -max_val); val = -max_val; } return val; } void forward_contrastive_layer(contrastive_layer l, network_state state) { if (!state.train) return; const float truth_thresh = state.net.label_smooth_eps; const int mini_batch = l.batch / l.steps; int b, n, w, h; fill_cpu(l.batch*l.inputs, 0, l.delta, 1); if (!l.detection) { for (b = 0; b < l.batch; ++b) { if (state.net.adversarial) l.labels[b] = b % 2; else l.labels[b] = b / 2; } // set labels for (b = 0; b < l.batch; ++b) { for (h = 0; h < l.h; ++h) { for (w = 0; w < l.w; ++w) { // find truth with max prob (only 1 label even if mosaic is used) float max_truth = 0; int n; for (n = 0; n < l.classes; ++n) { const float truth_prob = state.truth[b*l.classes + n]; //printf(" truth_prob = %f, ", truth_prob); //if (truth_prob > max_truth) if (truth_prob > truth_thresh) { //printf(" truth_prob = %f, max_truth = %f, n = %d; ", truth_prob, max_truth, n); max_truth = truth_prob; l.labels[b] = n; } } //printf(", l.labels[b] = %d ", l.labels[b]); } } } } //printf("\n\n"); // set pointers to features float **z = (float**)xcalloc(l.batch*l.n*l.h*l.w, sizeof(float*)); for (b = 0; b < l.batch; ++b) { for (n = 0; n < l.n; ++n) { for (h = 0; h < l.h; 
++h) { for (w = 0; w < l.w; ++w) { const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w; if (l.labels[z_index] < 0) continue; //const int input_index = b*l.inputs + n*l.embedding_size*l.h*l.w + h*l.w + w; //float *ptr = state.input + input_index; //z[z_index] = ptr; z[z_index] = (float*)xcalloc(l.embedding_size, sizeof(float)); get_embedding(state.input, l.w, l.h, l.c, l.embedding_size, w, h, n, b, z[z_index]); } } } } int b2, n2, h2, w2; int contrast_p_index = 0; const size_t step = l.batch*l.n*l.h*l.w; size_t contrast_p_size = step; if (!l.detection) contrast_p_size = l.batch*l.batch; contrastive_params *contrast_p = (contrastive_params*)xcalloc(contrast_p_size, sizeof(contrastive_params)); float *max_sim_same = (float *)xcalloc(l.batch*l.inputs, sizeof(float)); float *max_sim_diff = (float *)xcalloc(l.batch*l.inputs, sizeof(float)); fill_cpu(l.batch*l.inputs, -10, max_sim_same, 1); fill_cpu(l.batch*l.inputs, -10, max_sim_diff, 1); // precalculate cosine similiraty for (b = 0; b < l.batch; ++b) { for (n = 0; n < l.n; ++n) { for (h = 0; h < l.h; ++h) { for (w = 0; w < l.w; ++w) { const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w; if (l.labels[z_index] < 0) continue; for (b2 = 0; b2 < l.batch; ++b2) { for (n2 = 0; n2 < l.n; ++n2) { for (h2 = 0; h2 < l.h; ++h2) { for (w2 = 0; w2 < l.w; ++w2) { const int z_index2 = b2*l.n*l.h*l.w + n2*l.h*l.w + h2*l.w + w2; if (l.labels[z_index2] < 0) continue; if (z_index == z_index2) continue; if (l.detection) if (l.class_ids[z_index] != l.class_ids[z_index2]) continue; const int time_step_i = b / mini_batch; const int time_step_j = b2 / mini_batch; if (time_step_i != time_step_j) continue; const size_t step = l.batch*l.n*l.h*l.w; const float sim = cosine_similarity(z[z_index], z[z_index2], l.embedding_size); const float exp_sim = expf(sim / l.temperature); if (!l.detection) { l.cos_sim[z_index*step + z_index2] = sim; l.exp_cos_sim[z_index*step + z_index2] = exp_sim; } // calc good sim if (l.labels[z_index] == 
l.labels[z_index2] && max_sim_same[z_index] < sim) max_sim_same[z_index] = sim; if (l.labels[z_index] != l.labels[z_index2] && max_sim_diff[z_index] < sim) max_sim_diff[z_index] = sim; //printf(" z_i = %d, z_i2 = %d, l = %d, l2 = %d, sim = %f \n", z_index, z_index2, l.labels[z_index], l.labels[z_index2], sim); contrast_p[contrast_p_index].sim = sim; contrast_p[contrast_p_index].exp_sim = exp_sim; contrast_p[contrast_p_index].i = z_index; contrast_p[contrast_p_index].j = z_index2; contrast_p[contrast_p_index].time_step_i = time_step_i; contrast_p[contrast_p_index].time_step_j = time_step_j; contrast_p_index++; //printf(" contrast_p_index = %d, contrast_p_size = %d \n", contrast_p_index, contrast_p_size); if ((contrast_p_index+1) >= contrast_p_size) { contrast_p_size = contrast_p_index + 1; //printf(" contrast_p_size = %d, z_index = %d, z_index2 = %d \n", contrast_p_size, z_index, z_index2); contrast_p = (contrastive_params*)xrealloc(contrast_p, contrast_p_size * sizeof(contrastive_params)); } if (sim > 1.001 || sim < -1.001) { printf(" sim = %f, ", sim); getchar(); } } } } } } } } } // calc contrastive accuracy int i; int good_sims = 0, all_sims = 0, same_sim = 0, diff_sim = 0; for (i = 0; i < l.batch*l.inputs; ++i) { if (max_sim_same[i] >= -1 && max_sim_diff[i] >= -1) { if (max_sim_same[i] >= -1) same_sim++; if (max_sim_diff[i] >= -1) diff_sim++; ++all_sims; //printf(" max_sim_diff[i] = %f, max_sim_same[i] = %f \n", max_sim_diff[i], max_sim_same[i]); if (max_sim_diff[i] < max_sim_same[i]) good_sims++; } } if (all_sims > 0) { *l.loss = 100 * good_sims / all_sims; } else *l.loss = -1; printf(" Contrast accuracy = %f %%, all = %d, good = %d, same = %d, diff = %d \n", *l.loss, all_sims, good_sims, same_sim, diff_sim); free(max_sim_same); free(max_sim_diff); /* // show near sim float good_contrast = 0; for (b = 0; b < l.batch; b += 2) { float same = l.cos_sim[b*l.batch + b]; float aug = l.cos_sim[b*l.batch + b + 1]; float diff = l.cos_sim[b*l.batch + b + 2]; 
good_contrast += (aug > diff); //printf(" l.labels[b] = %d, l.labels[b+1] = %d, l.labels[b+2] = %d, b = %d \n", l.labels[b], l.labels[b + 1], l.labels[b + 2], b); //printf(" same = %f, aug = %f, diff = %f, (aug > diff) = %d \n", same, aug, diff, (aug > diff)); } *l.loss = 100 * good_contrast / (l.batch / 2); printf(" Contrast accuracy = %f %% \n", *l.loss); */ /* // precalculate P_contrastive for (b = 0; b < l.batch; ++b) { int b2; for (b2 = 0; b2 < l.batch; ++b2) { if (b != b2) { const float P = P_constrastive(b, b2, l.labels, l.batch, z, l.embedding_size, l.temperature, l.cos_sim); l.p_constrastive[b*l.batch + b2] = P; if (P > 1 || P < -1) { printf(" p = %f, ", P); getchar(); } } } } */ const size_t contr_size = contrast_p_index; if (l.detection) { #ifdef GPU const int max_contr_size = (l.max_boxes*l.batch)*(l.max_boxes*l.batch); if (max_contr_size < contr_size) { printf(" Error: too large number of bboxes: contr_size = %d > max_contr_size = %d \n", contr_size, max_contr_size); exit(0); } int *labels = NULL; if (contr_size > 2) { cuda_push_array((float *)l.contrast_p_gpu, (float *)contrast_p, contr_size * sizeof(contrastive_params) / 4); P_constrastive_f_det_gpu(labels, l.embedding_size, l.temperature, l.contrast_p_gpu, contr_size); cuda_pull_array((float *)l.contrast_p_gpu, (float *)contrast_p, contr_size * sizeof(contrastive_params) / 4); } #else // GPU int k; //#pragma omp parallel for for (k = 0; k < contr_size; ++k) { contrast_p[k].P = P_constrastive_f_det(k, l.labels, z, l.embedding_size, l.temperature, contrast_p, contr_size); } #endif // GPU } else { // precalculate P-contrastive for (b = 0; b < l.batch; ++b) { for (n = 0; n < l.n; ++n) { for (h = 0; h < l.h; ++h) { for (w = 0; w < l.w; ++w) { const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w; if (l.labels[z_index] < 0) continue; for (b2 = 0; b2 < l.batch; ++b2) { for (n2 = 0; n2 < l.n; ++n2) { for (h2 = 0; h2 < l.h; ++h2) { for (w2 = 0; w2 < l.w; ++w2) { const int z_index2 = b2*l.n*l.h*l.w + 
n2*l.h*l.w + h2*l.w + w2; if (l.labels[z_index2] < 0) continue; if (z_index == z_index2) continue; if (l.detection) if (l.class_ids[z_index] != l.class_ids[z_index2]) continue; const int time_step_i = b / mini_batch; const int time_step_j = b2 / mini_batch; if (time_step_i != time_step_j) continue; const size_t step = l.batch*l.n*l.h*l.w; float P = -10; if (l.detection) { P = P_constrastive_f(z_index, z_index2, l.labels, z, l.embedding_size, l.temperature, contrast_p, contr_size); } else { P = P_constrastive(z_index, z_index2, l.labels, step, z, l.embedding_size, l.temperature, l.cos_sim, l.exp_cos_sim); l.p_constrastive[z_index*step + z_index2] = P; } int q; for (q = 0; q < contr_size; ++q) if (contrast_p[q].i == z_index && contrast_p[q].j == z_index2) { contrast_p[q].P = P; break; } //if (q == contr_size) getchar(); //if (P > 1 || P < -1) { // printf(" p = %f, z_index = %d, z_index2 = %d ", P, z_index, z_index2); getchar(); //} } } } } } } } } } // calc deltas #pragma omp parallel for for (b = 0; b < l.batch; ++b) { for (n = 0; n < l.n; ++n) { for (h = 0; h < l.h; ++h) { for (w = 0; w < l.w; ++w) { const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w; const size_t step = l.batch*l.n*l.h*l.w; if (l.labels[z_index] < 0) continue; const int delta_index = b*l.embedding_size*l.n*l.h*l.w + n*l.embedding_size*l.h*l.w + h*l.w + w; const int wh = l.w*l.h; if (l.detection) { // detector // positive grad_contrastive_loss_positive_f(z_index, l.class_ids, l.labels, step, z, l.embedding_size, l.temperature, l.delta + delta_index, wh, contrast_p, contr_size); // negative grad_contrastive_loss_negative_f(z_index, l.class_ids, l.labels, step, z, l.embedding_size, l.temperature, l.delta + delta_index, wh, contrast_p, contr_size, l.contrastive_neg_max); } else { // classifier // positive grad_contrastive_loss_positive(z_index, l.labels, step, z, l.embedding_size, l.temperature, l.cos_sim, l.p_constrastive, l.delta + delta_index, wh); // negative 
grad_contrastive_loss_negative(z_index, l.labels, step, z, l.embedding_size, l.temperature, l.cos_sim, l.p_constrastive, l.delta + delta_index, wh); } } } } } scal_cpu(l.inputs * l.batch, l.cls_normalizer, l.delta, 1); for (i = 0; i < l.inputs * l.batch; ++i) { l.delta[i] = clip_value(l.delta[i], l.max_delta); } *(l.cost) = pow(mag_array(l.delta, l.inputs * l.batch), 2); if (state.net.adversarial) { printf(" adversarial contrastive loss = %f \n\n", *(l.cost)); } else { printf(" contrastive loss = %f \n\n", *(l.cost)); } for (b = 0; b < l.batch; ++b) { for (n = 0; n < l.n; ++n) { for (h = 0; h < l.h; ++h) { for (w = 0; w < l.w; ++w) { const int z_index = b*l.n*l.h*l.w + n*l.h*l.w + h*l.w + w; //if (l.labels[z_index] < 0) continue; if (z[z_index]) free(z[z_index]); } } } } free(contrast_p); free(z); } void backward_contrastive_layer(contrastive_layer l, network_state state) { axpy_cpu(l.inputs*l.batch, 1, l.delta, 1, state.delta, 1); } #ifdef GPU void pull_contrastive_layer_output(const contrastive_layer l) { cuda_pull_array(l.output_gpu, l.output, l.inputs*l.batch); } void push_contrastive_layer_output(const contrastive_layer l) { cuda_push_array(l.delta_gpu, l.delta, l.inputs*l.batch); } void forward_contrastive_layer_gpu(contrastive_layer l, network_state state) { simple_copy_ongpu(l.batch*l.inputs, state.input, l.output_gpu); if (!state.train) return; float *in_cpu = (float *)xcalloc(l.batch*l.inputs, sizeof(float)); cuda_pull_array(l.output_gpu, l.output, l.batch*l.outputs); memcpy(in_cpu, l.output, l.batch*l.outputs * sizeof(float)); float *truth_cpu = 0; if (state.truth) { int num_truth = l.batch*l.classes; if (l.detection) num_truth = l.batch*l.truths; truth_cpu = (float *)xcalloc(num_truth, sizeof(float)); cuda_pull_array(state.truth, truth_cpu, num_truth); } network_state cpu_state = state; cpu_state.net = state.net; cpu_state.index = state.index; cpu_state.train = state.train; cpu_state.truth = truth_cpu; cpu_state.input = in_cpu; 
forward_contrastive_layer(l, cpu_state); cuda_push_array(l.delta_gpu, l.delta, l.batch*l.outputs); free(in_cpu); if (cpu_state.truth) free(cpu_state.truth); } void backward_contrastive_layer_gpu(contrastive_layer layer, network_state state) { axpy_ongpu(layer.batch*layer.inputs, state.net.loss_scale, layer.delta_gpu, 1, state.delta, 1); } #endif
convolution_pack16to4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Convolution with pack-16 input (16 channels interleaved per element) and
// pack-4 output (4 channels interleaved per element), using 128-bit FMA.
// top_blob is assumed to be pre-sized (outw/outh/outch) and weight_data_packed
// laid out as 64 floats per kernel tap (16 input lanes x 4 output lanes).
static void convolution_pack16to4_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_packed, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: element offset (in packed units) of each kernel tap
    // relative to the window origin, accounting for dilation and row gap.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    // One output channel group (4 lanes) per task.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Accumulator holds the 4 output lanes for this spatial position.
                __m128 _sum = _mm_setzero_ps();

                if (bias_data_ptr)
                {
                    _sum = _mm_loadu_ps(bias_data_ptr + p * 4);
                }

                const float* kptr = weight_data_packed.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    // 16 floats per packed element, hence the *16 column stride.
                    const float* sptr = m.row(i * stride_h) + j * stride_w * 16;

                    for (int k = 0; k < maxk; k++)
                    {
                        const float* slptr = sptr + space_ofs[k] * 16;

                        // Broadcast each of the 16 input lanes ...
                        __m128 _val0 = _mm_broadcast_ss(slptr);
                        __m128 _val1 = _mm_broadcast_ss(slptr + 1);
                        __m128 _val2 = _mm_broadcast_ss(slptr + 2);
                        __m128 _val3 = _mm_broadcast_ss(slptr + 3);
                        __m128 _val4 = _mm_broadcast_ss(slptr + 4);
                        __m128 _val5 = _mm_broadcast_ss(slptr + 5);
                        __m128 _val6 = _mm_broadcast_ss(slptr + 6);
                        __m128 _val7 = _mm_broadcast_ss(slptr + 7);
                        __m128 _val8 = _mm_broadcast_ss(slptr + 8);
                        __m128 _val9 = _mm_broadcast_ss(slptr + 9);
                        __m128 _vala = _mm_broadcast_ss(slptr + 10);
                        __m128 _valb = _mm_broadcast_ss(slptr + 11);
                        __m128 _valc = _mm_broadcast_ss(slptr + 12);
                        __m128 _vald = _mm_broadcast_ss(slptr + 13);
                        __m128 _vale = _mm_broadcast_ss(slptr + 14);
                        __m128 _valf = _mm_broadcast_ss(slptr + 15);

                        // ... and load the matching 16 weight vectors (4 output lanes each).
                        __m128 _w0 = _mm_load_ps(kptr + 4 * 0);
                        __m128 _w1 = _mm_load_ps(kptr + 4 * 1);
                        __m128 _w2 = _mm_load_ps(kptr + 4 * 2);
                        __m128 _w3 = _mm_load_ps(kptr + 4 * 3);
                        __m128 _w4 = _mm_load_ps(kptr + 4 * 4);
                        __m128 _w5 = _mm_load_ps(kptr + 4 * 5);
                        __m128 _w6 = _mm_load_ps(kptr + 4 * 6);
                        __m128 _w7 = _mm_load_ps(kptr + 4 * 7);
                        __m128 _w8 = _mm_load_ps(kptr + 4 * 8);
                        __m128 _w9 = _mm_load_ps(kptr + 4 * 9);
                        __m128 _wa = _mm_load_ps(kptr + 4 * 10);
                        __m128 _wb = _mm_load_ps(kptr + 4 * 11);
                        __m128 _wc = _mm_load_ps(kptr + 4 * 12);
                        __m128 _wd = _mm_load_ps(kptr + 4 * 13);
                        __m128 _we = _mm_load_ps(kptr + 4 * 14);
                        __m128 _wf = _mm_load_ps(kptr + 4 * 15);

                        _sum = _mm_fmadd_ps(_val0, _w0, _sum);
                        _sum = _mm_fmadd_ps(_val1, _w1, _sum);
                        _sum = _mm_fmadd_ps(_val2, _w2, _sum);
                        _sum = _mm_fmadd_ps(_val3, _w3, _sum);
                        _sum = _mm_fmadd_ps(_val4, _w4, _sum);
                        _sum = _mm_fmadd_ps(_val5, _w5, _sum);
                        _sum = _mm_fmadd_ps(_val6, _w6, _sum);
                        _sum = _mm_fmadd_ps(_val7, _w7, _sum);
                        _sum = _mm_fmadd_ps(_val8, _w8, _sum);
                        _sum = _mm_fmadd_ps(_val9, _w9, _sum);
                        _sum = _mm_fmadd_ps(_vala, _wa, _sum);
                        _sum = _mm_fmadd_ps(_valb, _wb, _sum);
                        _sum = _mm_fmadd_ps(_valc, _wc, _sum);
                        _sum = _mm_fmadd_ps(_vald, _wd, _sum);
                        _sum = _mm_fmadd_ps(_vale, _we, _sum);
                        _sum = _mm_fmadd_ps(_valf, _wf, _sum);

                        // 16 input lanes x 4 output lanes consumed per tap.
                        kptr += 64;
                    }
                }

                _sum = activation_sse(_sum, activation_type, activation_params);

                _mm_storeu_ps(outptr, _sum);
                outptr += 4;
            }
        }
    }
}
ext_sweep.h
#pragma once

// Sweep kernel interface; everything between the declare-target pragmas is
// compiled for the OpenMP offload device as well as the host.
#pragma omp declare target

// 3-D cell coordinate within the sweep grid.
typedef struct
{
    unsigned int i,j,k;
} cell;

// Sweep one octant of the angular domain for the given timestep,
// processing the precomputed wavefront diagonals.
void sweep_octant(
    const unsigned int timestep,
    const unsigned int oct,
    const unsigned int ndiag,
    const cell* cells,
    const int* num_cells,
    const unsigned int num_groups_todo);

// Solve a single cell: consume incoming flux, produce outgoing flux for the
// listed energy groups. istep/jstep/kstep give the sweep direction per axis.
void sweep_cell(
    const int istep,
    const int jstep,
    const int kstep,
    const unsigned int oct,
    const double* restrict l_flux_in,
    double* restrict l_flux_out,
    const cell * restrict cell_index,
    const unsigned int * restrict groups_todo,
    const unsigned int num_groups_todo,
    const unsigned int num_cells);

// Build the per-diagonal cell ordering (allocates *num_cells and *cells).
void compute_sweep_order(int** num_cells, cell** cells);

#pragma omp end declare target

// Host-side driver: runs the full sweep over all octants.
void perform_sweep(
    unsigned int num_groups_todo);
net_md5_fmt_plug.c
/* Cracker for RIPv2 MD5 authentication hashes.
 *
 * This software is Copyright (c) 2013, Dhiru Kholia <dhiru [at] openwall.com>,
 * and it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * Added linkage to dynamic (type dynamic_39) for any salt 230 bytes or less,
 * by Jim Fougeron. Any salts > 239 bytes will still be handled by this full
 * format. dynamic is limited to 256 bytes, which 'should' get us 240 bytes
 * of salt. I think we might be able to get 239 bytes (due to a few issues).
 * 240 byte salts fail. So, for peace of mind, I am limiting to 230 byte salts
 * within dynamic. This is the FIRST format that is hybrid fat-thin.
 */

#if AC_BUILT
#include "autoconfig.h"
#endif

#ifndef DYNAMIC_DISABLED

#if FMT_EXTERNS_H
extern struct fmt_main fmt_netmd5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_netmd5);
#else

#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // XXX
#endif
#endif

#include "formats.h"
#include "dynamic.h"
#include "md5.h"
#include "misc.h"
#include "common.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"

#define FORMAT_LABEL "net-md5"
#define FORMAT_NAME "\"Keyed MD5\" RIPv2, OSPF, BGP, SNMPv2"
#define FORMAT_TAG "$netmd5$"
#define TAG_LENGTH (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
// RIPv2 truncates (or null pads) passwords to length 16
#define PLAINTEXT_LENGTH 16
#define BINARY_SIZE 16
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN MEM_ALIGN_WORD
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define MAX_SALT_LEN 1024

static struct fmt_tests tests[] = {
	/* RIPv2 MD5 authentication hashes */
	{"02020000ffff0003002c01145267d48d000000000000000000020000ac100100ffffff000000000000000001ffff0001$1e372a8a233c6556253a0909bc3dcce6", "quagga"},
	{FORMAT_TAG "02020000ffff0003002c01145267d48f000000000000000000020000ac100100ffffff000000000000000001ffff0001$ed9f940c3276afcc06d15babe8a1b61b", "quagga"},
	{FORMAT_TAG "02020000ffff0003002c01145267d490000000000000000000020000ac100100ffffff000000000000000001ffff0001$c9f7763f80fcfcc2bbbca073be1f5df7", "quagga"},
	{FORMAT_TAG "02020000ffff0003002c01145267d49a000000000000000000020000ac100200ffffff000000000000000001ffff0001$3f6a72deeda200806230298af0797997", "quagga"},
	{FORMAT_TAG "02020000ffff0003002c01145267d49b000000000000000000020000ac100200ffffff000000000000000001ffff0001$b69184bacccc752cadf78cac455bd0de", "quagga"},
	{FORMAT_TAG "02020000ffff0003002c01145267d49d000000000000000000020000ac100100ffffff000000000000000001ffff0001$6442669c577e7662188865a54c105d0e", "quagga"},
	{FORMAT_TAG "02020000ffff0003002c01145267e076000000000000000000020000ac100200ffffff000000000000000001ffff0001$4afe22cf1750d9af8775b25bcf9cfb8c", "abcdefghijklmnop"},
	{FORMAT_TAG "02020000ffff0003002c01145267e077000000000000000000020000ac100200ffffff000000000000000001ffff0001$326b12f6da03048a655ea4d8f7e3e123", "abcdefghijklmnop"},
	{FORMAT_TAG "02020000ffff0003002c01145267e2ab000000000000000000020000ac100100ffffff000000000000000001ffff0001$ad76c40e70383f6993f54b4ba6492a26", "abcdefghijklmnop"},
	/* OSPFv2 MD5 authentication hashes */
	{"$netmd5$0201002cac1001010000000000000002000001105267ff8fffffff00000a0201000000280000000000000000$445ecbb27272bd791a757a6c85856150", "abcdefghijklmnop"},
	{FORMAT_TAG "0201002cac1001010000000000000002000001105267ff98ffffff00000a0201000000280000000000000000$d4c248b417b8cb1490e02c5e99eb0ad1", "abcdefghijklmnop"},
	{FORMAT_TAG "0201002cac1001010000000000000002000001105267ffa2ffffff00000a0201000000280000000000000000$528d9bf98be8213482af7295307625bf", "abcdefghijklmnop"},
	{NULL}
};

static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static void get_ptr();
static void init(struct fmt_main *self);
static void done(void);

// Marker distinguishing our own "fat" salts from dynamic-format salts.
#define MAGIC 0xfe5dd5ef
static struct custom_salt {
	ARCH_WORD_32 magic;
	int length;
	unsigned char salt[MAX_SALT_LEN]; // fixed len, but should be OK
} *cur_salt;

static int dyna_salt_seen=0;
static char Conv_Buf[300]; // max salt length we will pass to dyna is 230. 300 is MORE than enough.
static struct fmt_main *pDynamicFmt, *pNetMd5_Dyna;

/* this function converts a 'native' net-md5 signature string into a $dynamic_39$ syntax string */
static char *Convert(char *Buf, char *ciphertext) {
	char *cp, *cp2;

	if (text_in_dynamic_format_already(pDynamicFmt, ciphertext))
		return ciphertext;

	// Locate the salt (between the tag and the final '$') and emit it as HEX$ salt.
	cp = strchr(&ciphertext[2], '$');
	if (!cp)
		return "*";
	cp2 = strchr(&cp[1], '$');
	if (!cp2)
		return "*";
	snprintf(Buf, sizeof(Conv_Buf), "$dynamic_39$%s$HEX%*.*s", &cp2[1], (int)(cp2-cp), (int)(cp2-cp), cp);
	return Buf;
}

// Accept either our native format, or anything the linked dynamic_39 format accepts.
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *q = NULL;
	int len;

	p = ciphertext;
	if (!strncmp(p, FORMAT_TAG, TAG_LENGTH))
		p += TAG_LENGTH;
	q = strrchr(ciphertext, '$');
	if (!q)
		return 0;
	q = q + 1;
	if ((q - p - 1) > MAX_SALT_LEN * 2)
		return 0;
	len = strspn(q, HEXCHARS_lc);
	if (len != BINARY_SIZE * 2 || len != strlen(q)) {
		// Not a plain netmd5 hash part; let dynamic_39 decide.
		get_ptr();
		return pDynamicFmt->methods.valid(ciphertext, pDynamicFmt);
	}
	if (strspn(p, HEXCHARS_lc) != q - p - 1)
		return 0;
	return 1;
}

// Decode the hex salt. Salts under 230 bytes are delegated to dynamic_39
// (its salt pointer is copied into our buffer); longer ones use the fat path
// and are tagged with MAGIC.
static void *get_salt(char *ciphertext)
{
	static char *pBuf=NULL;
	struct custom_salt *cs;
	char *orig_ct = ciphertext;
	int i, len;

	if (!pBuf) pBuf = (char *)mem_alloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD);
	cs = (struct custom_salt*) pBuf;
	memset(cs, 0, sizeof(*cs));
	if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH))
		ciphertext += TAG_LENGTH;
	len = (strrchr(ciphertext, '$') - ciphertext) / 2;

	for (i = 0; i < len; i++)
		cs->salt[i] = (atoi16[ARCH_INDEX(ciphertext[2 * i])] << 4) | atoi16[ARCH_INDEX(ciphertext[2 * i + 1])];
	if (len < 230) {
		// return our memset buffer (putting the dyna salt pointer into it).
		// This keeps the 'pre-cleaned salt() warning from hitting this format)
		//return pDynamicFmt->methods.salt(Convert(Conv_Buf, orig_ct));
		memcpy((char*)cs, pDynamicFmt->methods.salt(Convert(Conv_Buf, orig_ct)), pDynamicFmt->params.salt_size);
		dyna_salt_seen=1;
		return cs;
	}
	cs->magic = MAGIC;
	cs->length = len;
	return cs;
}

// Decode the 16-byte MD5 digest following the last '$'.
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	if (text_in_dynamic_format_already(pDynamicFmt, ciphertext))
		// returns proper 16 bytes, so we do not need to copy into our buffer.
		return pDynamicFmt->methods.binary(ciphertext);

	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	return out;
}

// get_hash_N: forward to dynamic when the current salt is a dynamic salt
// (no MAGIC); otherwise mask our own crypt output.
static int get_hash_0(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[0](index); return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[1](index); return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[2](index); return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[3](index); return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[4](index); return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[5](index); return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { if (cur_salt->magic != MAGIC) return pDynamicFmt->methods.get_hash[6](index); return crypt_out[index][0] & PH_MASK_6; }

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	get_ptr();
	if (cur_salt->magic != MAGIC) {
		pDynamicFmt->methods.set_salt(salt);
	}
}

// Fat-path crypt: MD5(salt || zero-padded 16-byte password) per candidate.
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

	if (cur_salt->magic != MAGIC) {
		return pDynamicFmt->methods.crypt_all(pcount, salt);
	}
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
	{
		MD5_CTX ctx;

		MD5_Init(&ctx);
		MD5_Update(&ctx, cur_salt->salt, cur_salt->length);
		// Always hash the full padded buffer: the protocol pads to 16 bytes.
		MD5_Update(&ctx, saved_key[index], PLAINTEXT_LENGTH);
		MD5_Final((unsigned char*)crypt_out[index], &ctx);
	}
	return count;
}

static int cmp_all(void *binary, int count)
{
	int index = 0;

	if (cur_salt->magic != MAGIC) {
		return pDynamicFmt->methods.cmp_all(binary, count);
	}
	for (; index < count; index++)
		if (((ARCH_WORD_32*)binary)[0] == crypt_out[index][0])
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	if (cur_salt->magic != MAGIC) {
		return pDynamicFmt->methods.cmp_one(binary, index);
	}
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

// Keys must be mirrored into the dynamic format once any dynamic salt was seen,
// since either path may end up hashing them.
static void netmd5_set_key(char *key, int index)
{
	if(dyna_salt_seen)
		pDynamicFmt->methods.set_key(key, index);
	/* strncpy will pad with zeros, which is needed */
	strncpy(saved_key[index], key, sizeof(saved_key[0]));
}

static char *get_key(int index)
{
	return saved_key[index];
}

// Prepend FORMAT_TAG to bare (untagged) but otherwise valid hashes.
static char *prepare(char *fields[10], struct fmt_main *self)
{
	static char buf[sizeof(cur_salt->salt)*2+TAG_LENGTH+1];
	char *hash = fields[1];

	if (strncmp(hash, FORMAT_TAG, TAG_LENGTH) && valid(hash, self)) {
		get_ptr();
		if (text_in_dynamic_format_already(pDynamicFmt, hash))
			return hash;
		sprintf(buf, "%s%s", FORMAT_TAG, hash);
		return buf;
	}
	return hash;
}

struct fmt_main fmt_netmd5 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		netmd5_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

// Lazily create the dynamic_39 thin link and patch our params
// (key counts, algorithm name) from it. Idempotent.
static void get_ptr() {
	if (!pDynamicFmt) {
		char *Buf;
		pNetMd5_Dyna = mem_alloc_tiny(sizeof(fmt_netmd5), 16);
		memcpy(pNetMd5_Dyna, &fmt_netmd5, sizeof(fmt_netmd5));

		pDynamicFmt = dynamic_THIN_FORMAT_LINK(pNetMd5_Dyna, Convert(Conv_Buf, tests[1].ciphertext), "net-md5", 0);
		fmt_netmd5.params.min_keys_per_crypt = pDynamicFmt->params.min_keys_per_crypt;
		fmt_netmd5.params.max_keys_per_crypt = pDynamicFmt->params.max_keys_per_crypt;
		Buf = mem_alloc_tiny(strlen(fmt_netmd5.params.algorithm_name) + 4 + strlen("dynamic_39") + 1, 1);
		sprintf(Buf, "%s or %s", fmt_netmd5.params.algorithm_name, "dynamic_39");
		fmt_netmd5.params.algorithm_name = Buf;
		//pDynamicFmt->methods.init(pDynamicFmt);
	}
}

static void init(struct fmt_main *self)
{
	// We have to allocate our dyna_39 object first, because we get 'modified' min/max counts from there.
	get_ptr();
	if (self->private.initialized == 0) {
		pDynamicFmt = dynamic_THIN_FORMAT_LINK(pNetMd5_Dyna, Convert(Conv_Buf, tests[1].ciphertext), "net-md5", 1);
		self->private.initialized = 1;
	}
	saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key));
	crypt_out = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_out));
}

static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
	pDynamicFmt->methods.done();
}

#endif /* plugin stanza */
#endif /* DYNAMIC_DISABLED */
test.c
#include <stdio.h> #include <omp.h> #pragma omp requires unified_shared_memory #include "../utilities/check.h" #include "../utilities/utilities.h" #define TRIALS (1) #define N (992) #define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;}) #define ZERO(X) ZERO_ARRAY(N, X) int main(void) { check_offloading(); double A[N], B[N], C[N], D[N], E[N]; int fail = 0; INIT(); // ************************** // Series 1: no dist_schedule // ************************** // // Test: #iterations == #teams // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(512) #pragma omp distribute parallel for simd for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute parallel for simd for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute parallel for simd for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // **************************** // Series 2: with dist_schedule // **************************** // // Test: #iterations == #teams, dist_schedule(1) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target 
#pragma omp teams num_teams(512) #pragma omp distribute parallel for simd dist_schedule(static,1) for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations == #teams, dist_schedule(#iterations) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(512) #pragma omp distribute parallel for simd dist_schedule(static,512) for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations == #teams, dist_schedule(#iterations/10), variable chunk size // ZERO(A); int ten = 10; int chunkSize = 512/ten; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(512) #pragma omp distribute parallel for simd dist_schedule(static,chunkSize) for (int i = 0 ; i < 512 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 512 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams, dist_schedule(1) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute parallel for simd dist_schedule(static,1) for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams, dist_schedule(#iterations) // ZERO(A); for (int t = 0 ; t < 
TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute parallel for simd dist_schedule(static,500) for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations > #teams, dist_schedule(#iterations/10), variable chunk size // ZERO(A); ten = 10; chunkSize = 500/ten; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute parallel for simd dist_schedule(static,chunkSize) for (int i = 0 ; i < 500 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 500 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams, dist_schedule(1) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute parallel for simd dist_schedule(static,1) for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams, dist_schedule(#iterations) // ZERO(A); for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute parallel for simd dist_schedule(static,123) for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: #iterations < #teams, dist_schedule(#iterations) // 
ZERO(A); ten = 10; chunkSize = 123/ten; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) #pragma omp distribute parallel for simd dist_schedule(static,chunkSize) for (int i = 0 ; i < 123 ; i++) { A[i] += C[i]; // += 1 per position } } for (int i = 0 ; i < 123 ; i++) if (A[i] != TRIALS) { printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // **************************** // Series 3: with ds attributes // **************************** // // Test: private // ZERO(A); ZERO(B); double p = 2.0, q = 4.0; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target #pragma omp teams num_teams(256) { #pragma omp distribute parallel for simd private(p,q) for(int i = 0 ; i < N ; i++) { p = 2; q = 3; A[i] += p; B[i] += q; } } } for(int i = 0 ; i < N ; i++) { if (A[i] != TRIALS*2) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) TRIALS*2, A[i]); fail = 1; } if (B[i] != TRIALS*3) { printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) TRIALS*3, B[i]); fail = 1; } } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: firstprivate // ZERO(A); ZERO(B); p = 2.0, q = 4.0; for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target // implicit firstprivate for p and q, their initial values being 2 and 4 for each target invocation #pragma omp teams num_teams(64) { #pragma omp distribute simd firstprivate(p,q) for(int i = 0 ; i < 128 ; i++) { // 2 iterations for each team p += 3.0; // p and q are firstprivate to the team, and as such incremented twice (2 iterations per team) q += 7.0; A[i] += p; B[i] += q; } } } for(int i = 0 ; i < 128 ; i++) { if (i % 2 == 0) { if (A[i] != (2.0+3.0)*TRIALS) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]); fail = 1; } if (B[i] != (4.0+7.0)*TRIALS) { printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0)*TRIALS, B[i]); fail = 1; } } else { if (A[i] != 
(2.0+3.0*2)*TRIALS) { printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0*2)*TRIALS, A[i]); fail = 1; } if (B[i] != (4.0+7.0*2)*TRIALS) { printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0*2)*TRIALS, B[i]); fail = 1; } } } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: lastprivate // /* int lastpriv = -1; #pragma omp target map(tofrom:lastpriv) #pragma omp teams num_teams(10) #pragma omp distribute parallel for simd lastprivate(lastpriv) for(int i = 0 ; i < omp_get_num_teams() ; i++) lastpriv = i; if(lastpriv != 9) { printf("lastpriv value is %d and should have been %d\n", lastpriv, 9); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); */ printf("Succeeded\n"); // ************************** // Series 4: collapse // ************************** // // Test: 2 loops // double * S = (double *) malloc(N*N*sizeof(double)); double * T = (double *) malloc(N*N*sizeof(double)); double * U = (double *) malloc(N*N*sizeof(double)); for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) { S[i*N+j] = 0.0; T[i*N+j] = 1.0; U[i*N+j] = 2.0; } for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target map(tofrom:S[:N*N]), map(to:T[:N*N],U[:N*N]) #pragma omp teams num_teams(512) #pragma omp distribute parallel for simd collapse(2) for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) S[i*N+j] += T[i*N+j] + U[i*N+j]; // += 3 at each t } for (int i = 0 ; i < N ; i++) for (int j = 0 ; j < N ; j++) if (S[i*N+j] != TRIALS*3.0) { printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, S[i*N+j]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); // // Test: 3 loops // int M = N/8; double * V = (double *) malloc(M*M*M*sizeof(double)); double * Z = (double *) malloc(M*M*M*sizeof(double)); for (int i = 0 ; i < M ; i++) for (int j = 0 ; j < M ; j++) for (int k = 0 ; k < M ; k++) { V[i*M*M+j*M+k] = 2.0; Z[i*M*M+j*M+k] = 3.0; } for (int t = 0 ; t < TRIALS ; t++) { #pragma omp target 
map(tofrom:V[:M*M*M]), map(to:Z[:M*M*M]) #pragma omp teams num_teams(512) #pragma omp distribute parallel for simd collapse(3) for (int i = 0 ; i < M ; i++) for (int j = 0 ; j < M ; j++) for (int k = 0 ; k < M ; k++) V[i*M*M+j*M+k] += Z[i*M*M+j*M+k]; // += 3 at each t } for (int i = 0 ; i < M ; i++) for (int j = 0 ; j < M ; j++) for (int k = 0 ; k < M ; k++) if (V[i*M*M+j*M+k] != 2.0+TRIALS*3.0) { printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, V[i*M*M+j*M+k]); fail = 1; } if(fail) printf("Failed\n"); else printf("Succeeded\n"); return 0; }
hypre_prefix_sum.c
/*BHEADER**********************************************************************
 * Copyright (c) 2017,  Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * Written by Jongsoo Park et al. CODE-LLNL-738-322.
 * This file is part of AMG.  See files README and COPYRIGHT for details.
 *
 * AMG is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * This software is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the terms and conditions of the
 * GNU General Public License for more details.
 *
 ***********************************************************************EHEADER*/

#include "_hypre_utilities.h"

/*
 * Exclusive prefix sum (scan) across the threads of the enclosing OpenMP
 * parallel region (the hypre_assert below requires that it be called either
 * serially or from inside a parallel region, by every thread).
 *
 * On entry:  *in_out holds this thread's contribution.
 * On exit:   *in_out holds the sum of the contributions of all threads with a
 *            lower thread id (exclusive scan), and *sum holds the total.
 *
 * workspace must have at least num_threads+1 entries and be shared by all
 * threads.  Protocol: each thread deposits its value at workspace[tid+1],
 * all threads barrier, the master turns workspace[1..num_threads] into an
 * inclusive scan (so workspace[k] = sum of contributions of threads 0..k-1,
 * with workspace[0] = 0), all threads barrier again, and each thread reads
 * back its exclusive prefix at workspace[tid].
 */
void hypre_prefix_sum(HYPRE_Int *in_out, HYPRE_Int *sum, HYPRE_Int *workspace)
{
#ifdef HYPRE_USING_OPENMP
   HYPRE_Int my_thread_num = hypre_GetThreadNum();
   HYPRE_Int num_threads = hypre_NumActiveThreads();
   hypre_assert(1 == num_threads || omp_in_parallel());

   /* deposit this thread's contribution (slot 0 is reserved for the
      identity of the scan) */
   workspace[my_thread_num + 1] = *in_out;

#pragma omp barrier   /* all deposits must be visible before the scan */
#pragma omp master
   {
      HYPRE_Int i;
      workspace[0] = 0;
      /* serial inclusive scan over the per-thread deposits */
      for (i = 1; i < num_threads; i++)
      {
         workspace[i + 1] += workspace[i];
      }
      *sum = workspace[num_threads];   /* grand total */
   }
#pragma omp barrier   /* scan must complete before anyone reads back */

   *in_out = workspace[my_thread_num];   /* exclusive prefix for this thread */
#else /* !HYPRE_USING_OPENMP */
   /* single-threaded: prefix of the only "thread" is 0, total is the input */
   *sum = *in_out;
   *in_out = 0;

   workspace[0] = 0;
   workspace[1] = *sum;
#endif /* !HYPRE_USING_OPENMP */
}

/*
 * Same protocol as hypre_prefix_sum, but scans two independent values at
 * once (one barrier pair instead of two).  workspace must have at least
 * 2*(num_threads+1) entries; value pairs are interleaved as
 * workspace[2*k], workspace[2*k+1].
 */
void hypre_prefix_sum_pair(HYPRE_Int *in_out1, HYPRE_Int *sum1, HYPRE_Int *in_out2, HYPRE_Int *sum2, HYPRE_Int *workspace)
{
#ifdef HYPRE_USING_OPENMP
   HYPRE_Int my_thread_num = hypre_GetThreadNum();
   HYPRE_Int num_threads = hypre_NumActiveThreads();
   hypre_assert(1 == num_threads || omp_in_parallel());

   workspace[(my_thread_num + 1)*2] = *in_out1;
   workspace[(my_thread_num + 1)*2 + 1] = *in_out2;

#pragma omp barrier
#pragma omp master
   {
      HYPRE_Int i;
      workspace[0] = 0;
      workspace[1] = 0;

      for (i = 1; i < num_threads; i++)
      {
         workspace[(i + 1)*2] += workspace[i*2];
         workspace[(i + 1)*2 + 1] += workspace[i*2 + 1];
      }

      *sum1 = workspace[num_threads*2];
      *sum2 = workspace[num_threads*2 + 1];
   }
#pragma omp barrier

   *in_out1 = workspace[my_thread_num*2];
   *in_out2 = workspace[my_thread_num*2 + 1];
#else /* !HYPRE_USING_OPENMP */
   *sum1 = *in_out1;
   *sum2 = *in_out2;
   *in_out1 = 0;
   *in_out2 = 0;

   workspace[0] = 0;
   workspace[1] = 0;
   workspace[2] = *sum1;
   workspace[3] = *sum2;
#endif /* !HYPRE_USING_OPENMP */
}

/*
 * Three-value variant of hypre_prefix_sum_pair.  workspace must have at
 * least 3*(num_threads+1) entries, values interleaved with stride 3.
 */
void hypre_prefix_sum_triple(HYPRE_Int *in_out1, HYPRE_Int *sum1, HYPRE_Int *in_out2, HYPRE_Int *sum2, HYPRE_Int *in_out3, HYPRE_Int *sum3, HYPRE_Int *workspace)
{
#ifdef HYPRE_USING_OPENMP
   HYPRE_Int my_thread_num = hypre_GetThreadNum();
   HYPRE_Int num_threads = hypre_NumActiveThreads();
   hypre_assert(1 == num_threads || omp_in_parallel());

   workspace[(my_thread_num + 1)*3] = *in_out1;
   workspace[(my_thread_num + 1)*3 + 1] = *in_out2;
   workspace[(my_thread_num + 1)*3 + 2] = *in_out3;

#pragma omp barrier
#pragma omp master
   {
      HYPRE_Int i;
      workspace[0] = 0;
      workspace[1] = 0;
      workspace[2] = 0;

      for (i = 1; i < num_threads; i++)
      {
         workspace[(i + 1)*3] += workspace[i*3];
         workspace[(i + 1)*3 + 1] += workspace[i*3 + 1];
         workspace[(i + 1)*3 + 2] += workspace[i*3 + 2];
      }

      *sum1 = workspace[num_threads*3];
      *sum2 = workspace[num_threads*3 + 1];
      *sum3 = workspace[num_threads*3 + 2];
   }
#pragma omp barrier

   *in_out1 = workspace[my_thread_num*3];
   *in_out2 = workspace[my_thread_num*3 + 1];
   *in_out3 = workspace[my_thread_num*3 + 2];
#else /* !HYPRE_USING_OPENMP */
   *sum1 = *in_out1;
   *sum2 = *in_out2;
   *sum3 = *in_out3;
   *in_out1 = 0;
   *in_out2 = 0;
   *in_out3 = 0;

   workspace[0] = 0;
   workspace[1] = 0;
   workspace[2] = 0;
   workspace[3] = *sum1;
   workspace[4] = *sum2;
   workspace[5] = *sum3;
#endif /* !HYPRE_USING_OPENMP */
}

/*
 * General n-value variant: scans n independent values per thread.
 * in_out and sum are arrays of length n; workspace must have at least
 * n*(num_threads+1) entries, values interleaved with stride n.
 */
void hypre_prefix_sum_multiple(HYPRE_Int *in_out, HYPRE_Int *sum, HYPRE_Int n, HYPRE_Int *workspace)
{
   HYPRE_Int i;
#ifdef HYPRE_USING_OPENMP
   HYPRE_Int my_thread_num = hypre_GetThreadNum();
   HYPRE_Int num_threads = hypre_NumActiveThreads();
   hypre_assert(1 == num_threads || omp_in_parallel());

   for (i = 0; i < n; i++)
   {
      workspace[(my_thread_num + 1)*n + i] = in_out[i];
   }

#pragma omp barrier
#pragma omp master
   {
      HYPRE_Int t;
      for (i = 0; i < n; i++)
      {
         workspace[i] = 0;
      }

      // assuming n is not so big, we don't parallelize this loop
      for (t = 1; t < num_threads; t++)
      {
         for (i = 0; i < n; i++)
         {
            workspace[(t + 1)*n + i] += workspace[t*n + i];
         }
      }

      for (i = 0; i < n; i++)
      {
         sum[i] = workspace[num_threads*n + i];
      }
   }
#pragma omp barrier

   for (i = 0; i < n; i++)
   {
      in_out[i] = workspace[my_thread_num*n + i];
   }
#else /* !HYPRE_USING_OPENMP */
   for (i = 0; i < n; i++)
   {
      sum[i] = in_out[i];
      in_out[i] = 0;
      workspace[i] = 0;
      workspace[n + i] = sum[i];
   }
#endif /* !HYPRE_USING_OPENMP */
}
GB_binop__band_uint8.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__band_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_08__band_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_02__band_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_04__band_uint8)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__band_uint8)
// A*D function (colscale):         GB (_AxD__band_uint8)
// D*A function (rowscale):         GB (_DxB__band_uint8)
// C+=B function (dense accum):     GB (_Cdense_accumB__band_uint8)
// C+=b function (dense accum):     GB (_Cdense_accumb__band_uint8)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__band_uint8)
// C=scalar+B                       GB (_bind1st__band_uint8)
// C=scalar+B'                      GB (_bind1st_tran__band_uint8)
// C=A+scalar                       GB (_bind2nd__band_uint8)
// C=A'+scalar                      GB (_bind2nd_tran__band_uint8)

// C type:   uint8_t
// A type:   uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij) & (bij)

#define GB_ATYPE \
    uint8_t

#define GB_BTYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint8_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint8_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint8_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator: bitwise AND for uint8
#define GB_BINOP(z,x,y,i,j) \
    z = (x) & (y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BAND || GxB_NO_UINT8 || GxB_NO_BAND_UINT8)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (BAND is none of these, so this kernel is compiled out for this operator.)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__band_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__band_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__band_uint8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint8_t
        uint8_t bwork = (*((uint8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__band_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    bool A_is_pattern,
    const GrB_Matrix D,
    bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__band_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    bool D_is_pattern,
    const GrB_Matrix B,
    bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *restrict Cx = (uint8_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__band_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__band_uint8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__band_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__band_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__band_uint8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__band_uint8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t x = (*((uint8_t *) x_input)) ;
    uint8_t *Bx = (uint8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap (GBB is always true for
        // full matrices)
        if (!GBB (Bb, p)) continue ;
        uint8_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x) & (bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__band_uint8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint8_t *Cx = (uint8_t *) Cx_output ;
    uint8_t *Ax = (uint8_t *) Ax_input ;
    uint8_t y = (*((uint8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint8_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij) & (y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (x) & (aij) ; \
}

GrB_Info GB (_bind1st_tran__band_uint8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t x = (*((const uint8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    uint8_t aij = GBX (Ax, pA, false) ; \
    Cx [pC] = (aij) & (y) ; \
}

GrB_Info GB (_bind2nd_tran__band_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint8_t y = (*((const uint8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
wand-view.c
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                        W   W   AAA   N   N  DDDD                            %
%                        W   W  A   A  NN  N  D   D                           %
%                        W W W  AAAAA  N N N  D   D                           %
%                        WW WW  A   A  N  NN  D   D                           %
%                        W   W  A   A  N   N  DDDD                            %
%                                                                             %
%                        V   V  IIIII  EEEEE  W   W                           %
%                        V   V    I    E      W   W                           %
%                        V   V    I    EEE    W W W                           %
%                         V V     I    E      WW WW                           %
%                          V    IIIII  EEEEE  W   W                           %
%                                                                             %
%                                                                             %
%                        MagickWand Wand View Methods                         %
%                                                                             %
%                              Software Design                                %
%                                John Cristy                                  %
%                                 March 2003                                  %
%                                                                             %
%                                                                             %
%  Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization      %
%  dedicated to making software imaging solutions freely available.           %
%                                                                             %
%  You may not use this file except in compliance with the License.  You may  %
%  obtain a copy of the License at                                            %
%                                                                             %
%    http://www.imagemagick.org/script/license.php                            %
%                                                                             %
%  Unless required by applicable law or agreed to in writing, software        %
%  distributed under the License is distributed on an "AS IS" BASIS,          %
%  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   %
%  See the License for the specific language governing permissions and       %
%  limitations under the License.                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/

/*
  Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/wand.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"

/*
  Define declarations.
*/
#define WandViewId  "WandView"

/*
  Typedef declarations.
*/
struct _WandView
{
  size_t
    id;                  /* unique view id, from AcquireWandId() */

  char
    name[MaxTextExtent], /* "WandView-<id>" */
    *description;

  RectangleInfo
    extent;              /* image region covered by this view */

  MagickWand
    *wand;

  CacheView
    *view;

  size_t
    number_threads;      /* rows of pixel_wands: one wand set per thread */

  PixelWand
    ***pixel_wands;      /* [thread][column]: extent.width wands per thread */

  ExceptionInfo
    *exception;

  MagickBooleanType
    debug;

  size_t
    signature;           /* WandSignature while the view is live */
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e W a n d V i e w                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneWandView() makes a copy of the specified wand view.
%
%  The format of the CloneWandView method is:
%
%      WandView *CloneWandView(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
WandExport WandView *CloneWandView(const WandView *wand_view)
{
  WandView
    *clone_view;

  register ssize_t
    i;

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  clone_view=(WandView *) AcquireMagickMemory(sizeof(*clone_view));
  if (clone_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  (void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
  clone_view->id=AcquireWandId();
  (void) FormatLocaleString(clone_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) clone_view->id);
  clone_view->description=ConstantString(wand_view->description);
  clone_view->view=CloneCacheView(wand_view->view);
  clone_view->extent=wand_view->extent;
  clone_view->number_threads=wand_view->number_threads;
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,wand_view->exception);
  /* NOTE(review): clone_view->pixel_wands is still NULL at this point (the
     struct was zeroed above and no per-thread wand set has been acquired),
     so the assignment below writes through a null pointer; clone_view->wand
     is also never set.  Confirm against the thread-set acquisition used by
     NewWandView before relying on this function. */
  for (i=0; i < (ssize_t) wand_view->number_threads; i++)
    clone_view->pixel_wands[i]=ClonePixelWands((const PixelWand **)
      wand_view->pixel_wands[i],wand_view->extent.width);
  clone_view->debug=wand_view->debug;
  if (clone_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",clone_view->name);
  clone_view->signature=WandSignature;
  return(clone_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y W a n d V i e w                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyWandView() deallocates memory associated with a wand view.
%
%  The format of the DestroyWandView method is:
%
%      WandView *DestroyWandView(WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/

/* Release the per-thread pixel-wand sets: number_threads rows of number_wands
   wands each; NULL rows are skipped. */
static PixelWand ***DestroyPixelsThreadSet(PixelWand ***pixel_wands,
  const size_t number_wands,const size_t number_threads)
{
  register ssize_t
    i;

  assert(pixel_wands != (PixelWand ***) NULL);
  for (i=0; i < (ssize_t) number_threads; i++)
    if (pixel_wands[i] != (PixelWand **) NULL)
      pixel_wands[i]=DestroyPixelWands(pixel_wands[i],number_wands);
  pixel_wands=(PixelWand ***) RelinquishMagickMemory(pixel_wands);
  return(pixel_wands);
}

WandExport WandView *DestroyWandView(WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  wand_view->pixel_wands=DestroyPixelsThreadSet(wand_view->pixel_wands,
    wand_view->extent.width,wand_view->number_threads);
  wand_view->view=DestroyCacheView(wand_view->view);
  wand_view->exception=DestroyExceptionInfo(wand_view->exception);
  wand_view->signature=(~WandSignature);   /* invalidate before freeing */
  RelinquishWandId(wand_view->id);
  wand_view=(WandView *) RelinquishMagickMemory(wand_view);
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D u p l e x T r a n s f e r W a n d V i e w I t e r a t o r               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DuplexTransferWandViewIterator() iterates over three wand views in
%  parallel and calls your transfer method for each scanline of the view.  The
%  source and duplex pixel extent is not confined to the image canvas-- that is
%  you can include negative offsets or widths or heights that exceed the image
%  dimension.  However, the destination wand view is confined to the image
%  canvas-- that is no negative offsets or widths or heights that exceed the
%  image dimension are permitted.
%
%  The callback signature is:
%
%      MagickBooleanType DuplexTransferImageViewMethod(const WandView *source,
%        const WandView *duplex,WandView *destination,const ssize_t y,
%        const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback transfer method that must be
%  executed by a single thread at a time.
%
%  The format of the DuplexTransferWandViewIterator method is:
%
%      MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
%        WandView *duplex,WandView *destination,
%        DuplexTransferWandViewMethod transfer,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o duplex: the duplex wand view.
%
%    o destination: the destination wand view.
%
%    o transfer: the transfer callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType DuplexTransferWandViewIterator(WandView *source,
  WandView *duplex,WandView *destination,DuplexTransferWandViewMethod transfer,
  void *context)
{
  ExceptionInfo
    *exception;

  Image
    *destination_image,
    *duplex_image,
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  /*
    NOTE(review): only `source` is assert-checked; `duplex` and `destination`
    are dereferenced below without validation -- callers must pass valid views.
  */
  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (transfer == (DuplexTransferWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  duplex_image=duplex->wand->images;
  destination_image=destination->wand->images;
  if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(source_image,destination_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    MagickBooleanType
      sync;

    register const IndexPacket
      *restrict duplex_indexes,
      *restrict indexes;

    register const PixelPacket
      *restrict duplex_pixels,
      *restrict pixels;

    register IndexPacket
      *restrict destination_indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict destination_pixels;

    /* Another iteration already failed; skip remaining scanlines quickly. */
    if (status == MagickFalse)
      continue;
    /* Load the source scanline (virtual pixels: may lie off-canvas). */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /* Populate this thread's source pixel wands from the scanline. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* Load the duplex scanline the same way. */
    duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
      duplex->extent.width,1,duplex->exception);
    if (duplex_pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    duplex_indexes=GetCacheViewVirtualIndexQueue(duplex->view);
    for (x=0; x < (ssize_t) duplex->extent.width; x++)
      PixelSetQuantumColor(duplex->pixel_wands[id][x],duplex_pixels+x);
    if (duplex_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) duplex->extent.width; x++)
        PixelSetBlackQuantum(duplex->pixel_wands[id][x],
          GetPixelBlack(duplex_indexes+x));
    if (duplex_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) duplex->extent.width; x++)
        PixelSetIndex(duplex->pixel_wands[id][x],
          GetPixelIndex(duplex_indexes+x));
    /* Destination pixels are authentic: writes will be synced back below. */
    destination_pixels=GetCacheViewAuthenticPixels(destination->view,
      destination->extent.x,y,destination->extent.width,1,exception);
    if (destination_pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view);
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelSetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetBlackQuantum(destination->pixel_wands[id][x],
          GetPixelBlack(destination_indexes+x));
    if (destination_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        PixelSetIndex(destination->pixel_wands[id][x],
          GetPixelIndex(destination_indexes+x));
    /* Invoke the user's callback for this scanline. */
    if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the (possibly modified) destination wands back into the cache. */
    for (x=0; x < (ssize_t) destination->extent.width; x++)
      PixelGetQuantumColor(destination->pixel_wands[id][x],
        destination_pixels+x);
    if (destination_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) destination->extent.width; x++)
        SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum(
          destination->pixel_wands[id][x]));
    sync=SyncCacheViewAuthenticPixels(destination->view,exception);
    if (sync == MagickFalse)
      {
        InheritException(destination->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* progress++ is not atomic; serialize the progress update. */
        #pragma omp critical (MagickWand_DuplexTransferWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status == MagickFalse ? 0 : 1);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w E x c e p t i o n                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewException() returns the severity, reason, and description of any
%  error that occurs when utilizing a wand view.
%
%  The format of the GetWandViewException method is:
%
%      char *GetWandViewException(const WandView *wand_view,
%        ExceptionType *severity)
%
%  A description of each parameter follows:
%
%    o wand_view: the pixel wand_view.
%
%    o severity: the severity of the error is returned here.
%
*/
WandExport char *GetWandViewException(const WandView *wand_view,
  ExceptionType *severity)
{
  char
    *message;

  const ExceptionInfo
    *exception;

  assert(wand_view != (const WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  if (wand_view->debug != MagickFalse)
    (void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",wand_view->name);
  assert(severity != (ExceptionType *) NULL);
  exception=wand_view->exception;
  *severity=exception->severity;
  /*
    Caller owns the returned buffer and must free it with RelinquishMagickMemory().
  */
  message=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,sizeof(*message));
  if (message == (char *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      wand_view->name);
  *message='\0';
  /* Localized reason first, then the description appended in parentheses. */
  if (exception->reason != (char *) NULL)
    (void) CopyMagickString(message,GetLocaleExceptionMessage(
      exception->severity,exception->reason),MaxTextExtent);
  if (exception->description != (char *) NULL)
    {
      (void) ConcatenateMagickString(message," (",MaxTextExtent);
      (void) ConcatenateMagickString(message,GetLocaleExceptionMessage(
        exception->severity,exception->description),MaxTextExtent);
      (void) ConcatenateMagickString(message,")",MaxTextExtent);
    }
  return(message);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w E x t e n t                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewExtent() returns the wand view extent.
%
%  The format of the GetWandViewExtent method is:
%
%      RectangleInfo GetWandViewExtent(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
/* Returns the view's bounding rectangle by value (a copy, not a reference). */
WandExport RectangleInfo GetWandViewExtent(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->extent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w I t e r a t o r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewIterator() iterates over the wand view in parallel and calls
%  your get method for each scanline of the view.  The pixel extent is
%  not confined to the image canvas-- that is you can include negative offsets
%  or widths or heights that exceed the image dimension.  Any updates to
%  the pixels in your callback are ignored.
%
%  The callback signature is:
%
%      MagickBooleanType GetImageViewMethod(const WandView *source,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback get method that must be
%  executed by a single thread at a time.
%
%  The format of the GetWandViewIterator method is:
%
%      MagickBooleanType GetWandViewIterator(WandView *source,
%        GetWandViewMethod get,void *context)
%
%  A description of each parameter follows:
%
%    o source: the source wand view.
%
%    o get: the get callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType GetWandViewIterator(WandView *source,
  GetWandViewMethod get,void *context)
{
  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (get == (GetWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  status=MagickTrue;
  progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *pixels;

    register ssize_t
      x;

    /* A prior scanline failed; skip remaining work. */
    if (status == MagickFalse)
      continue;
    /* Read-only access: virtual pixels may lie outside the canvas. */
    pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
      source->extent.width,1,source->exception);
    if (pixels == (const PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(source->view);
    /* Populate this thread's pixel wands before invoking the callback. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (source_image->storage_class == PseudoClass)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetIndex(source->pixel_wands[id][x],
          GetPixelIndex(indexes+x));
    /* Callback changes are never synced back -- this is a read-only iterator. */
    if (get(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* Serialize the non-atomic progress counter update. */
        #pragma omp critical (MagickWand_GetWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status == MagickFalse ? 0 : 1);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w P i x e l s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewPixels() returns the wand view pixel_wands.
%
%  The format of the GetWandViewPixels method is:
%
%      PixelWand *GetWandViewPixels(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
/* Returns the pixel-wand row belonging to the *calling* OpenMP thread. */
WandExport PixelWand **GetWandViewPixels(const WandView *wand_view)
{
  const int
    id = GetOpenMPThreadId();

  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->pixel_wands[id]);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t W a n d V i e w W a n d                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetWandViewWand() returns the magick wand associated with the wand view.
%
%  The format of the GetWandViewWand method is:
%
%      MagickWand *GetWandViewWand(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
/* Borrowed reference: the view does not own the wand it returns. */
WandExport MagickWand *GetWandViewWand(const WandView *wand_view)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  return(wand_view->wand);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s W a n d V i e w                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsWandView() returns MagickTrue if the parameter is verified as a wand
%  view object.
%
%  The format of the IsWandView method is:
%
%      MagickBooleanType IsWandView(const WandView *wand_view)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
*/
/*
  Structural check only: validates pointer, signature, and the "WandView-"
  name prefix; it does not verify the view's internal state.
*/
WandExport MagickBooleanType IsWandView(const WandView *wand_view)
{
  size_t
    length;

  if (wand_view == (const WandView *) NULL)
    return(MagickFalse);
  if (wand_view->signature != WandSignature)
    return(MagickFalse);
  length=strlen(WandViewId);
  if (LocaleNCompare(wand_view->name,WandViewId,length) != 0)
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w W a n d V i e w                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewWandView() returns a wand view required for all other methods in the
%  Wand View API.
%
%  The format of the NewWandView method is:
%
%      WandView *NewWandView(MagickWand *wand)
%
%  A description of each parameter follows:
%
%    o wand: the wand.
%
*/

/*
  Allocate one array of number_wands pixel wands per thread.  On partial
  failure the already-allocated rows are destroyed and NULL is returned,
  so the caller sees all-or-nothing semantics.
*/
static PixelWand ***AcquirePixelsThreadSet(const size_t number_wands,
  const size_t number_threads)
{
  PixelWand
    ***pixel_wands;

  register ssize_t
    i;

  pixel_wands=(PixelWand ***) AcquireQuantumMemory(number_threads,
    sizeof(*pixel_wands));
  if (pixel_wands == (PixelWand ***) NULL)
    return((PixelWand ***) NULL);
  /* Zero the slots so DestroyPixelsThreadSet() can skip unpopulated rows. */
  (void) ResetMagickMemory(pixel_wands,0,number_threads*sizeof(*pixel_wands));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixel_wands[i]=NewPixelWands(number_wands);
    if (pixel_wands[i] == (PixelWand **) NULL)
      return(DestroyPixelsThreadSet(pixel_wands,number_wands,number_threads));
  }
  return(pixel_wands);
}

WandExport WandView *NewWandView(MagickWand *wand)
{
  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  /* wand must be set before the cache view is acquired from its images. */
  wand_view->wand=wand;
  wand_view->exception=AcquireExceptionInfo();
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,
    wand_view->exception);
  /* Default extent covers the whole first image in the wand. */
  wand_view->extent.width=wand->images->columns;
  wand_view->extent.height=wand->images->rows;
  wand_view->number_threads=GetOpenMPMaximumThreads();
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width,
    wand_view->number_threads);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=WandSignature;
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   N e w W a n d V i e w E x t e n t                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewWandViewExtent() returns a wand view required for all other methods
%  in the Wand View API.
%
%  The format of the NewWandViewExtent method is:
%
%      WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
%        const ssize_t y,const size_t width,const size_t height)
%
%  A description of each parameter follows:
%
%    o wand: the magick wand.
%
%    o x,y,columns,rows:  These values define the perimeter of a extent of
%      pixel_wands view.
%
*/
WandExport WandView *NewWandViewExtent(MagickWand *wand,const ssize_t x,
  const ssize_t y,const size_t width,const size_t height)
{
  WandView
    *wand_view;

  assert(wand != (MagickWand *) NULL);
  assert(wand->signature == WandSignature);
  wand_view=(WandView *) AcquireMagickMemory(sizeof(*wand_view));
  if (wand_view == (WandView *) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  (void) ResetMagickMemory(wand_view,0,sizeof(*wand_view));
  wand_view->id=AcquireWandId();
  (void) FormatLocaleString(wand_view->name,MaxTextExtent,"%s-%.20g",
    WandViewId,(double) wand_view->id);
  wand_view->description=ConstantString("WandView");
  wand_view->exception=AcquireExceptionInfo();
  /*
    Bug fix: assign the wand *before* acquiring the cache view.  The original
    code called AcquireVirtualCacheView(wand_view->wand->images,...) while
    wand_view->wand was still NULL (the structure was just zeroed), which
    dereferenced a NULL pointer.  This now matches NewWandView()'s ordering.
  */
  wand_view->wand=wand;
  wand_view->view=AcquireVirtualCacheView(wand_view->wand->images,
    wand_view->exception);
  wand_view->extent.width=width;
  wand_view->extent.height=height;
  wand_view->extent.x=x;
  wand_view->extent.y=y;
  wand_view->number_threads=GetOpenMPMaximumThreads();
  wand_view->pixel_wands=AcquirePixelsThreadSet(wand_view->extent.width,
    wand_view->number_threads);
  if (wand_view->pixel_wands == (PixelWand ***) NULL)
    ThrowWandFatalException(ResourceLimitFatalError,"MemoryAllocationFailed",
      GetExceptionMessage(errno));
  wand_view->debug=IsEventLogging();
  wand_view->signature=WandSignature;
  return(wand_view);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t W a n d V i e w D e s c r i p t i o n                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetWandViewDescription() associates a description with an image view.
%
%  The format of the SetWandViewDescription method is:
%
%      void SetWandViewDescription(WandView *image_view,const char *description)
%
%  A description of each parameter follows:
%
%    o wand_view: the wand view.
%
%    o description: the wand view description.
%
*/
MagickExport void SetWandViewDescription(WandView *wand_view,
  const char *description)
{
  assert(wand_view != (WandView *) NULL);
  assert(wand_view->signature == WandSignature);
  /*
    Bug fix: NewWandView() always installs a ConstantString() description, so
    overwriting it unconditionally leaked the previous allocation.  Free the
    old string before installing the new one.
  */
  if (wand_view->description != (char *) NULL)
    wand_view->description=DestroyString(wand_view->description);
  wand_view->description=ConstantString(description);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t W a n d V i e w I t e r a t o r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetWandViewIterator() iterates over the wand view in parallel and calls
%  your set method for each scanline of the view.  The pixel extent is
%  confined to the image canvas-- that is no negative offsets or widths or
%  heights that exceed the image dimension.  The pixels are initiallly
%  undefined and any settings you make in the callback method are automagically
%  synced back to your image.
%
%  The callback signature is:
%
%      MagickBooleanType SetImageViewMethod(ImageView *destination,
%        const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single threaded:
%
%    #pragma omp critical
%
%  to define a section of code in your callback set method that must be
%  executed by a single thread at a time.
%
%  The format of the SetWandViewIterator method is:
%
%      MagickBooleanType SetWandViewIterator(WandView *destination,
%        SetWandViewMethod set,void *context)
%
%  A description of each parameter follows:
%
%    o destination: the wand view.
%
%    o set: the set callback method.
%
%    o context: the user defined context.
% */ WandExport MagickBooleanType SetWandViewIterator(WandView *destination, SetWandViewMethod set,void *context) { ExceptionInfo *exception; Image *destination_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(destination != (WandView *) NULL); assert(destination->signature == WandSignature); if (set == (SetWandViewMethod) NULL) return(MagickFalse); destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=(size_t) (destination->extent.height-destination->extent.y); #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(destination_image,destination_image,height,1) #endif for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict pixels; if (status == MagickFalse) continue; pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x, y,destination->extent.width,1,exception); if (pixels == (PixelPacket *) NULL) { InheritException(destination->exception,GetCacheViewException( destination->view)); status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(destination->view); if (set(destination,y,id,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->extent.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x],pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->extent.width; x++) SetPixelBlack(indexes+x,PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { 
InheritException(destination->exception,GetCacheViewException( destination->view)); status=MagickFalse; } if (destination_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_SetWandViewIterator) #endif proceed=SetImageProgress(destination_image,destination->description, progress++,destination->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status == MagickFalse ? 0 : 1); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t W a n d V i e w T h r e a d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetWandViewThreads() sets the number of threads in a thread team. % % The format of the SetWandViewDescription method is: % % void SetWandViewThreads(WandView *image_view, % const size_t number_threads) % % A description of each parameter follows: % % o image_view: the image view. % % o number_threads: the number of threads in a thread team. % */ MagickExport void SetWandViewThreads(WandView *image_view, const size_t number_threads) { assert(image_view != (WandView *) NULL); assert(image_view->signature == MagickSignature); image_view->number_threads=number_threads; if (number_threads > (size_t) GetMagickResourceLimit(ThreadResource)) image_view->number_threads=GetOpenMPMaximumThreads(); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f e r W a n d V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransferWandViewIterator() iterates over two wand views in parallel and % calls your transfer method for each scanline of the view. The source pixel % extent is not confined to the image canvas-- that is you can include % negative offsets or widths or heights that exceed the image dimension. 
% However, the destination wand view is confined to the image canvas-- that % is no negative offsets or widths or heights that exceed the image dimension % are permitted. % % The callback signature is: % % MagickBooleanType TransferImageViewMethod(const WandView *source, % WandView *destination,const ssize_t y,const int thread_id, % void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback transfer method that must be % executed by a single thread at a time. % % The format of the TransferWandViewIterator method is: % % MagickBooleanType TransferWandViewIterator(WandView *source, % WandView *destination,TransferWandViewMethod transfer,void *context) % % A description of each parameter follows: % % o source: the source wand view. % % o destination: the destination wand view. % % o transfer: the transfer callback method. % % o context: the user defined context. % */ WandExport MagickBooleanType TransferWandViewIterator(WandView *source, WandView *destination,TransferWandViewMethod transfer,void *context) { ExceptionInfo *exception; Image *destination_image, *source_image; MagickBooleanType status; MagickOffsetType progress; #if defined(MAGICKCORE_OPENMP_SUPPORT) size_t height; #endif ssize_t y; assert(source != (WandView *) NULL); assert(source->signature == WandSignature); if (transfer == (TransferWandViewMethod) NULL) return(MagickFalse); source_image=source->wand->images; destination_image=destination->wand->images; if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; exception=destination->exception; #if defined(MAGICKCORE_OPENMP_SUPPORT) height=(size_t) (source->extent.height-source->extent.y); #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(source_image,destination_image,height,1) #endif for (y=source->extent.y; y < (ssize_t) source->extent.height; y++) { const int id = 
GetOpenMPThreadId(); MagickBooleanType sync; register const IndexPacket *restrict indexes; register const PixelPacket *restrict pixels; register IndexPacket *restrict destination_indexes; register ssize_t x; register PixelPacket *restrict destination_pixels; if (status == MagickFalse) continue; pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y, source->extent.width,1,source->exception); if (pixels == (const PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source->view); for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x); if (source_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetBlackQuantum(source->pixel_wands[id][x], GetPixelBlack(indexes+x)); if (source_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) source->extent.width; x++) PixelSetIndex(source->pixel_wands[id][x], GetPixelIndex(indexes+x)); destination_pixels=GetCacheViewAuthenticPixels(destination->view, destination->extent.x,y,destination->extent.width,1,exception); if (destination_pixels == (PixelPacket *) NULL) { status=MagickFalse; continue; } destination_indexes=GetCacheViewAuthenticIndexQueue(destination->view); for (x=0; x < (ssize_t) destination->extent.width; x++) PixelSetQuantumColor(destination->pixel_wands[id][x],pixels+x); if (destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->extent.width; x++) PixelSetBlackQuantum(destination->pixel_wands[id][x], GetPixelBlack(indexes+x)); if (destination_image->storage_class == PseudoClass) for (x=0; x < (ssize_t) destination->extent.width; x++) PixelSetIndex(destination->pixel_wands[id][x], GetPixelIndex(indexes+x)); if (transfer(source,destination,y,id,context) == MagickFalse) status=MagickFalse; for (x=0; x < (ssize_t) destination->extent.width; x++) PixelGetQuantumColor(destination->pixel_wands[id][x], destination_pixels+x); if 
(destination_image->colorspace == CMYKColorspace) for (x=0; x < (ssize_t) destination->extent.width; x++) SetPixelBlack(destination_indexes+x,PixelGetBlackQuantum( destination->pixel_wands[id][x])); sync=SyncCacheViewAuthenticPixels(destination->view,exception); if (sync == MagickFalse) { InheritException(destination->exception,GetCacheViewException( source->view)); status=MagickFalse; } if (source_image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickWand_TransferWandViewIterator) #endif proceed=SetImageProgress(source_image,source->description,progress++, source->extent.height); if (proceed == MagickFalse) status=MagickFalse; } } return(status == MagickFalse ? 0 : 1); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U p d a t e W a n d V i e w I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UpdateWandViewIterator() iterates over the wand view in parallel and calls % your update method for each scanline of the view. The pixel extent is % confined to the image canvas-- that is no negative offsets or widths or % heights that exceed the image dimension are permitted. Updates to pixels % in your callback are automagically synced back to the image. % % The callback signature is: % % MagickBooleanType UpdateImageViewMethod(WandView *source,const ssize_t y, % const int thread_id,void *context) % % Use this pragma if the view is not single threaded: % % #pragma omp critical % % to define a section of code in your callback update method that must be % executed by a single thread at a time. % % The format of the UpdateWandViewIterator method is: % % MagickBooleanType UpdateWandViewIterator(WandView *source, % UpdateWandViewMethod update,void *context) % % A description of each parameter follows: % % o source: the source wand view. 
%
%    o update: the update callback method.
%
%    o context: the user defined context.
%
*/
WandExport MagickBooleanType UpdateWandViewIterator(WandView *source,
  UpdateWandViewMethod update,void *context)
{
  ExceptionInfo
    *exception;

  Image
    *source_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  size_t
    height;
#endif

  ssize_t
    y;

  assert(source != (WandView *) NULL);
  assert(source->signature == WandSignature);
  if (update == (UpdateWandViewMethod) NULL)
    return(MagickFalse);
  source_image=source->wand->images;
  if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
    return(MagickFalse);
  status=MagickTrue;
  progress=0;
  exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  height=(size_t) (source->extent.height-source->extent.y);
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(source_image,source_image,height,1)
#endif
  for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register IndexPacket
      *restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *restrict pixels;

    /* A prior scanline failed; skip remaining work. */
    if (status == MagickFalse)
      continue;
    /* Read-write access: the scanline is synced back after the callback. */
    pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
      source->extent.width,1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        InheritException(source->exception,GetCacheViewException(
          source->view));
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(source->view);
    /* Populate this thread's pixel wands from the scanline. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelSetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        PixelSetBlackQuantum(source->pixel_wands[id][x],
          GetPixelBlack(indexes+x));
    if (update(source,y,id,context) == MagickFalse)
      status=MagickFalse;
    /* Copy the (possibly modified) wand values back into the cache. */
    for (x=0; x < (ssize_t) source->extent.width; x++)
      PixelGetQuantumColor(source->pixel_wands[id][x],pixels+x);
    if (source_image->colorspace == CMYKColorspace)
      for (x=0; x < (ssize_t) source->extent.width; x++)
        SetPixelBlack(indexes+x,PixelGetBlackQuantum(
          source->pixel_wands[id][x]));
    if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
      {
        InheritException(source->exception,GetCacheViewException(source->view));
        status=MagickFalse;
      }
    if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        /* Serialize the non-atomic progress counter update. */
        #pragma omp critical (MagickWand_UpdateWandViewIterator)
#endif
        proceed=SetImageProgress(source_image,source->description,progress++,
          source->extent.height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  return(status == MagickFalse ? 0 : 1);
}
/* ======================= ocp_nlp_common.c ======================= */
/*
 * Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
 * Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
 * Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
 * Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
 *
 * This file is part of acados.
 *
 * The 2-Clause BSD License
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.;
 */

#include "acados/ocp_nlp/ocp_nlp_common.h"

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

// blasfeo
#include "blasfeo/include/blasfeo_common.h"
#include "blasfeo/include/blasfeo_d_blas.h"
// hpipm
#include "hpipm/include/hpipm_d_ocp_qp_dim.h"
// acados
#include "acados/utils/mem.h"
// openmp
#if defined(ACADOS_WITH_OPENMP)
#include <omp.h>
#endif



/************************************************
 * config
 ************************************************/

// Number of bytes needed for an ocp_nlp_config with horizon length N:
// N dynamics stages and N+1 cost / constraint stages, plus the QP solver
// and regularization sub-configs.  Must stay in sync with
// ocp_nlp_config_assign below.
int ocp_nlp_config_calculate_size(int N)
{
    int ii;

    int size = 0;

    // self
    size += sizeof(ocp_nlp_config);

    // qp solver
    size += 1 * ocp_qp_xcond_solver_config_calculate_size();

    // regularization
    size += ocp_nlp_reg_config_calculate_size();

    // dynamics
    size += N * sizeof(ocp_nlp_dynamics_config *);
    for (ii = 0; ii < N; ii++) size += ocp_nlp_dynamics_config_calculate_size();

    // cost
    size += (N + 1) * sizeof(ocp_nlp_cost_config *);
    for (ii = 0; ii <= N; ii++) size += ocp_nlp_cost_config_calculate_size();

    // constraints
    size += (N + 1) * sizeof(ocp_nlp_constraints_config *);
    for (ii = 0; ii <= N; ii++) size += ocp_nlp_constraints_config_calculate_size();

    return size;
}

// Places an ocp_nlp_config and all sub-configs into caller-provided
// raw_memory (which must be at least ocp_nlp_config_calculate_size(N)
// bytes) and returns the typed pointer.  The pointer-bump order here
// mirrors the size accumulation above exactly.
ocp_nlp_config *ocp_nlp_config_assign(int N, void *raw_memory)
{
    int ii;

    char *c_ptr = (char *) raw_memory;

    ocp_nlp_config *config = (ocp_nlp_config *) c_ptr;
    c_ptr += sizeof(ocp_nlp_config);

    config->N = N;

    // qp solver
    config->qp_solver = ocp_qp_xcond_solver_config_assign(c_ptr);
    c_ptr += ocp_qp_xcond_solver_config_calculate_size();

    // regularization
    config->regularize = ocp_nlp_reg_config_assign(c_ptr);
    c_ptr += ocp_nlp_reg_config_calculate_size();

    // dynamics
    config->dynamics = (ocp_nlp_dynamics_config **) c_ptr;
    c_ptr += N * sizeof(ocp_nlp_dynamics_config *);
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii] = ocp_nlp_dynamics_config_assign(c_ptr);
        c_ptr += ocp_nlp_dynamics_config_calculate_size();
    }

    // cost
    config->cost = (ocp_nlp_cost_config **) c_ptr;
    c_ptr += (N + 1) * sizeof(ocp_nlp_cost_config *);
    for (ii = 0; ii <= N; ii++)
    {
        config->cost[ii] = ocp_nlp_cost_config_assign(c_ptr);
        c_ptr += ocp_nlp_cost_config_calculate_size();
    }

    // constraints
    config->constraints = (ocp_nlp_constraints_config **) c_ptr;
    c_ptr += (N + 1) * sizeof(ocp_nlp_constraints_config *);
    for (ii = 0; ii <= N; ii++)
    {
        config->constraints[ii] = ocp_nlp_constraints_config_assign(c_ptr);
        c_ptr += ocp_nlp_constraints_config_calculate_size();
    }

    return config;
}



/************************************************
 * dims
 ************************************************/

// Bytes for the ocp_nlp_dims struct itself (not the per-module dims):
// six int arrays of length N+1 plus pointer tables and the
// regularization dims.
static int ocp_nlp_dims_calculate_size_self(int N)
{
    int size = 0;

    size += sizeof(ocp_nlp_dims);

    // nlp sizes
    size += 6 * (N + 1) * sizeof(int);  // nv, nx, nu, ni, nz, ns

    // dynamics
    size += N * sizeof(void *);

    // cost
    size += (N + 1) * sizeof(void *);

    // constraints
    size += (N + 1) * sizeof(void *);

    // regularization
    size += ocp_nlp_reg_dims_calculate_size(N);
    size += sizeof(ocp_nlp_reg_dims);

    size += 8;  // initial align

    return size;
}

// Total bytes for ocp_nlp_dims including the dims of every submodule
// (dynamics, cost, constraints, QP solver).
int ocp_nlp_dims_calculate_size(void *config_)
{
    ocp_nlp_config *config = config_;

    int N = config->N;

    int ii;

    int size = 0;

    // self
    size += ocp_nlp_dims_calculate_size_self(N);

    // dynamics
    for (ii = 0; ii < N; ii++)
        size += config->dynamics[ii]->dims_calculate_size(config->dynamics[ii]);

    // cost
    for (ii = 0; ii <= N; ii++)
        size += config->cost[ii]->dims_calculate_size(config->cost[ii]);

    // constraints
    for (ii = 0; ii <= N; ii++)
        size +=
            config->constraints[ii]->dims_calculate_size(config->constraints[ii]);

    // qp solver
    size += config->qp_solver->dims_calculate_size(config->qp_solver, N);

    return size;
}

// Lays out the ocp_nlp_dims struct itself in raw_memory and zero-initializes
// all stage-wise dimensions; submodule dims pointers are filled later by
// ocp_nlp_dims_assign.
static ocp_nlp_dims *ocp_nlp_dims_assign_self(int N, void *raw_memory)
{
    char *c_ptr = (char *) raw_memory;

    int ii;

    // initial align
    align_char_to(8, &c_ptr);

    // struct
    ocp_nlp_dims *dims = (ocp_nlp_dims *) c_ptr;
    c_ptr += sizeof(ocp_nlp_dims);

    // nv
    assign_and_advance_int(N + 1, &dims->nv, &c_ptr);
    // nx
    assign_and_advance_int(N + 1, &dims->nx, &c_ptr);
    // nu
    assign_and_advance_int(N + 1, &dims->nu, &c_ptr);
    // ni
    assign_and_advance_int(N + 1, &dims->ni, &c_ptr);
    // nz
    assign_and_advance_int(N + 1, &dims->nz, &c_ptr);
    // ns
    assign_and_advance_int(N + 1, &dims->ns, &c_ptr);

    // dynamics
    dims->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);

    // cost
    dims->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    // constraints
    dims->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    // regularization
    dims->regularize = ocp_nlp_reg_dims_assign(N, c_ptr);
    c_ptr += ocp_nlp_reg_dims_calculate_size(N);

    /* initialize qp_solver dimensions */
    // dims->qp_solver->N = N;
    // for (ii = 0; ii <= N; ii++)
    // {
    // TODO(dimitris): values below are needed for reformulation of QP when soft constraints
    // are not supported. Make this a bit more transparent as it clushes with nbx/nbu above.
    // dims->qp_solver->nsbx[ii] = 0;
    // dims->qp_solver->nsbu[ii] = 0;
    // dims->qp_solver->nsg[ii] = 0;
    // }

    // N
    dims->N = N;

    // initialize dimensions to zero by default
    // nv
    for(ii=0; ii<=N; ii++)
        dims->nv[ii] = 0;
    // nx
    for(ii=0; ii<=N; ii++)
        dims->nx[ii] = 0;
    // nu
    for(ii=0; ii<=N; ii++)
        dims->nu[ii] = 0;
    // ni
    for(ii=0; ii<=N; ii++)
        dims->ni[ii] = 0;
    // nz
    for(ii=0; ii<=N; ii++)
        dims->nz[ii] = 0;
    // ns
    for(ii=0; ii<=N; ii++)
        dims->ns[ii] = 0;
    // TODO initialize dims to zero by default also in modules !!!!!!!

    // assert
    assert((char *) raw_memory + ocp_nlp_dims_calculate_size_self(N) >= c_ptr);

    return dims;
}

// Assigns ocp_nlp_dims plus the dims of every submodule (dynamics, cost,
// constraints, QP solver) into raw_memory; mirrors
// ocp_nlp_dims_calculate_size.
ocp_nlp_dims *ocp_nlp_dims_assign(void *config_, void *raw_memory)
{
    ocp_nlp_config *config = config_;

    int N = config->N;

    int ii;

    char *c_ptr = (char *) raw_memory;

    // self
    ocp_nlp_dims *dims = ocp_nlp_dims_assign_self(N, c_ptr);
    c_ptr += ocp_nlp_dims_calculate_size_self(N);

    // dynamics
    for (ii = 0; ii < N; ii++)
    {
        dims->dynamics[ii] = config->dynamics[ii]->dims_assign(config->dynamics[ii], c_ptr);
        c_ptr += config->dynamics[ii]->dims_calculate_size(config->dynamics[ii]);
    }

    // cost
    for (ii = 0; ii <= N; ii++)
    {
        dims->cost[ii] = config->cost[ii]->dims_assign(config->cost[ii], c_ptr);
        c_ptr += config->cost[ii]->dims_calculate_size(config->cost[ii]);
    }

    // constraints
    for (ii = 0; ii <= N; ii++)
    {
        dims->constraints[ii] =
            config->constraints[ii]->dims_assign(config->constraints[ii], c_ptr);
        c_ptr += config->constraints[ii]->dims_calculate_size(config->constraints[ii]);
    }

    // qp solver
    dims->qp_solver = config->qp_solver->dims_assign(config->qp_solver, N, c_ptr);
    c_ptr += config->qp_solver->dims_calculate_size(config->qp_solver, N);

    // assert
    assert((char *) raw_memory + ocp_nlp_dims_calculate_size(config_) >= c_ptr);

    return dims;
}

// Sets a stage-wise optimization-variable dimension ("nx", "nu", "nz" or
// "ns") for all stages at once (value_array has N+1 entries), propagates the
// value to every submodule that needs it, and keeps nv = nu + nx + 2*ns
// up to date.  Exits the process on an unknown field name.
void ocp_nlp_dims_set_opt_vars(void *config_, void *dims_, const char *field,
                               const void* value_array)
{
    // to set dimension nx, nu, nz, ns (number of slacks = number of soft constraints)
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    int ii;

    int N = config->N;

    int *int_array = (int *) value_array;

    /* set ocp_nlp dimension */

    if (!strcmp(field, "nx"))
    {
        // opt var
        for (ii = 0; ii <= N; ii++)
        {
            // set nx
            dims->nx[ii] = int_array[ii];
            // update nv
            dims->nv[ii] = dims->nu[ii] + dims->nx[ii] + 2 * dims->ns[ii];
        }
        // cost
        for (int i = 0; i <= N; i++)
        {
            config->cost[i]->dims_set(config->cost[i], dims->cost[i], "nx", &int_array[i]);
        }
        // dynamics: stage i needs both nx at i and nx1 at i+1 (next stage)
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i],
                                          "nx", &int_array[i]);
        }
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i],
                                          "nx1", &int_array[i+1]);
        }
        // constraints
        for (int i = 0; i <= N; i++)
        {
            config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
                                             "nx", &int_array[i]);
        }
        // qp solver
        for (int i = 0; i <= N; i++)
        {
            config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i,
                                        "nx", &int_array[i]);
        }
        // regularization
        for (ii = 0; ii <= N; ii++)
        {
            config->regularize->dims_set(config->regularize, dims->regularize, ii,
                                         "nx", &int_array[ii]);
        }
    }
    else if (!strcmp(field, "nu"))
    {
        // nlp opt var
        for (int ii = 0; ii <= N; ii++)
        {
            // set nu
            dims->nu[ii] = int_array[ii];
            // update nv
            dims->nv[ii] = dims->nu[ii] + dims->nx[ii] + 2 * dims->ns[ii];
        }
        // cost
        for (int i = 0; i <= N; i++)
        {
            config->cost[i]->dims_set(config->cost[i], dims->cost[i], "nu", &int_array[i]);
        }
        // dynamics
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i],
                                          "nu", &int_array[i]);
        }
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i],
                                          "nu1", &int_array[i+1]);
        }
        // constraints
        for (int i = 0; i <= N; i++)
        {
            config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
                                             "nu", &int_array[i]);
        }
        // qp solver
        for (int i = 0; i <= N; i++)
        {
            config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i,
                                        "nu", &int_array[i]);
        }
        // regularization
        for (ii = 0; ii <= N; ii++)
        {
            config->regularize->dims_set(config->regularize, dims->regularize, ii,
                                         "nu", &int_array[ii]);
        }
    }
    else if (!strcmp(field, "nz"))
    {
        // nlp opt var (nz does not enter nv)
        for (int ii = 0; ii <= N; ii++)
        {
            // set nz
            dims->nz[ii] = int_array[ii];
        }
        // cost
        for (int i = 0; i <= N; i++)
        {
            config->cost[i]->dims_set(config->cost[i], dims->cost[i], "nz", &int_array[i]);
        }
        // dynamics
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i],
                                          "nz", &int_array[i]);
        }
        // constraints
        for (int i = 0; i <= N; i++)
        {
            config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
                                             "nz", &int_array[i]);
        }
    }
    else if (!strcmp(field, "ns"))
    {
        // nlp opt var
        for (int ii = 0; ii <= N; ii++)
        {
            // set ns
            dims->ns[ii] = int_array[ii];
            // update nv
            dims->nv[ii] = dims->nu[ii] + dims->nx[ii] + 2 * dims->ns[ii];
        }
        // cost
        for (int i = 0; i <= N; i++)
        {
            config->cost[i]->dims_set(config->cost[i], dims->cost[i], "ns", &int_array[i]);
        }
        // qp solver
        for (int i = 0; i <= N; i++)
        {
            config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i,
                                        "ns", &int_array[i]);
        }
    }
    else
    {
        // NOTE(review): error message lacks a trailing '\n' — confirm intended
        printf("error: dims type not available in module ocp_nlp: %s", field);
        exit(1);
    }

#if 0
    /* set ocp_nlp submodule dimensions */
    if (strcmp(field, "ns")) // dynamics do not contain slack/soft constraints
    {
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i],
                                          field, &int_array[i]);
        }
    }
    if (!strcmp(field, "nu"))
    {
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i],
                                          "nu1", &int_array[i+1]);
        }
    }
    if (!strcmp(field, "nx"))
    {
        for (int i = 0; i < N; i++)
        {
            config->dynamics[i]->dims_set(config->dynamics[i], dims->dynamics[i],
                                          "nx1", &int_array[i+1]);
        }
    }
    for (int i = 0; i <= N; i++) // cost
    {
        config->cost[i]->dims_set(config->cost[i], dims->cost[i], field, &int_array[i]);
    }
    for (int i = 0; i <= N; i++) // constraints
    {
        config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
                                         field, &int_array[i]);
    }
    if (strcmp(field, "nz")) // qp_solver does not contain nz
    {
        for (int i = 0; i <= N; i++) // qp_solver
        {
            config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i,
                                        field, &int_array[i]);
        }
    }
#endif

    return;
}

// Sets a single-stage constraint dimension (nbx, nbu, ng, nh, nphi and their
// soft/equality variants), updates ni for the stage, and propagates derived
// sizes to the QP solver and regularization dims.
void ocp_nlp_dims_set_constraints(void *config_, void *dims_, int stage, const char *field,
                                  const void* value_)
{
    // to set dimension nbx, nbu, ng, nh, nq (quadratic over nonlinear)
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    int
        *int_value = (int *) value_;
    int i = stage;

    // set in constraint module
    config->constraints[i]->dims_set(config->constraints[i], dims->constraints[i],
                                     field, int_value);

    // update ni in ocp_nlp dimensions
    config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i],
                                     "ni", &dims->ni[i]);

    // update qp_solver dims
    if ( (!strcmp(field, "nbx")) || (!strcmp(field, "nbu")) )
    {
        // qp solver
        config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
        // regularization
        config->regularize->dims_set(config->regularize, dims->regularize, i,
                                     (char *) field, int_value);
    }
    else if ( (!strcmp(field, "nsbx")) || (!strcmp(field, "nsbu")) )
    {
        // qp solver
        config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
    }
    else if ( (!strcmp(field, "ng")) || (!strcmp(field, "nh")) || (!strcmp(field, "nphi")))
    {
        // general/nonlinear constraints all map onto "ng" rows in the QP
        // update ng_qp_solver in qp_solver
        int ng_qp_solver;
        config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i],
                                         "ng_qp_solver", &ng_qp_solver);
        // qp solver
        config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i,
                                    "ng", &ng_qp_solver);
        // regularization
        config->regularize->dims_set(config->regularize, dims->regularize, i,
                                     "ng", &ng_qp_solver);
    }
    else if ( (!strcmp(field, "nsg")) || (!strcmp(field, "nsh")) || (!strcmp(field, "nsphi")))
    {
        // update ng_qp_solver in qp_solver
        int nsg_qp_solver;
        config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i],
                                         "nsg_qp_solver", &nsg_qp_solver);
        // qp solver
        config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i,
                                    "nsg", &nsg_qp_solver);
    }
    else if ( (!strcmp(field, "nbxe")) || (!strcmp(field, "nbue")) )
    {
        // qp solver
        config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i, field, int_value);
    }
    else if ( (!strcmp(field, "nge")) || (!strcmp(field, "nhe")) || (!strcmp(field, "nphie")))
    {
        // update ng_qp_solver in qp_solver
        int ng_qp_solver;
        config->constraints[i]->dims_get(config->constraints[i], dims->constraints[i],
                                         "nge_qp_solver", &ng_qp_solver);
        // qp solver
        config->qp_solver->dims_set(config->qp_solver, dims->qp_solver, i,
                                    "nge", &ng_qp_solver);
    }

    return;
}

// Forwards a single-stage cost dimension (e.g. ny) to the cost module.
void ocp_nlp_dims_set_cost(void *config_, void *dims_, int stage, const char *field,
                           const void* value_)
{
    // to set dimension ny (output)
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    int *int_value = (int *) value_;

    config->cost[stage]->dims_set(config->cost[stage], dims->cost[stage], field, int_value);
}

// Forwards a single-stage dynamics dimension to the dynamics module.
void ocp_nlp_dims_set_dynamics(void *config_, void *dims_, int stage, const char *field,
                               const void* value)
{
    // mainly for gnsf dimensions
    ocp_nlp_config *config = config_;
    ocp_nlp_dims *dims = dims_;

    int *int_value = (int *) value;

    config->dynamics[stage]->dims_set(config->dynamics[stage], dims->dynamics[stage],
                                      field, int_value);
}



/************************************************
 * in
 ************************************************/

// Bytes for the ocp_nlp_in struct itself: sampling times Ts plus pointer
// tables for the per-stage dynamics/cost/constraints models.
int ocp_nlp_in_calculate_size_self(int N)
{
    int size = sizeof(ocp_nlp_in);

    size += N * sizeof(double);  // Ts

    size += N * sizeof(void *);  // dynamics

    size += (N + 1) * sizeof(void *);  // cost

    size += (N + 1) * sizeof(void *);  // constraints

    return size;
}

// Total bytes for ocp_nlp_in including all per-stage submodule models.
int ocp_nlp_in_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims)
{
    int ii;

    int N = dims->N;

    int size = ocp_nlp_in_calculate_size_self(N);

    // dynamics
    for (ii = 0; ii < N; ii++)
    {
        size += config->dynamics[ii]->model_calculate_size(config->dynamics[ii],
                                                           dims->dynamics[ii]);
    }

    // cost
    for (ii = 0; ii <= N; ii++)
    {
        size += config->cost[ii]->model_calculate_size(config->cost[ii], dims->cost[ii]);
    }

    // constraints
    for (ii = 0; ii <= N; ii++)
    {
        size += config->constraints[ii]->model_calculate_size(config->constraints[ii],
                                                              dims->constraints[ii]);
    }

    size += 8;  // initial align
    size += 8;  // final align

    // make_int_multiple_of(64, &size);

    return size;
}

// Lays out the ocp_nlp_in struct itself (no submodule models yet).
ocp_nlp_in *ocp_nlp_in_assign_self(int N, void *raw_memory)
{
    char *c_ptr = (char *) raw_memory;

    // initial align
    align_char_to(8, &c_ptr);

    // struct
    ocp_nlp_in *in = (ocp_nlp_in *) c_ptr;
    c_ptr += sizeof(ocp_nlp_in);

    // Ts
    assign_and_advance_double(N, &in->Ts, &c_ptr);

    // dynamics
    in->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);

    // cost
    in->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    // constraints
    in->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    align_char_to(8, &c_ptr);

    return in;
}

// Assigns ocp_nlp_in plus every per-stage submodule model into raw_memory;
// mirrors ocp_nlp_in_calculate_size.
ocp_nlp_in *ocp_nlp_in_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, void *raw_memory)
{
    int ii;

    int N = dims->N;

    char *c_ptr = (char *) raw_memory;

    // struct
    ocp_nlp_in *in = ocp_nlp_in_assign_self(N, c_ptr);
    c_ptr += ocp_nlp_in_calculate_size_self(N);

    // dynamics
    for (ii = 0; ii < N; ii++)
    {
        in->dynamics[ii] = config->dynamics[ii]->model_assign(config->dynamics[ii],
                                                              dims->dynamics[ii], c_ptr);
        c_ptr += config->dynamics[ii]->model_calculate_size(config->dynamics[ii],
                                                            dims->dynamics[ii]);
    }

    // cost
    for (ii = 0; ii <= N; ii++)
    {
        in->cost[ii] = config->cost[ii]->model_assign(config->cost[ii], dims->cost[ii], c_ptr);
        c_ptr += config->cost[ii]->model_calculate_size(config->cost[ii], dims->cost[ii]);
    }

    // constraints
    for (ii = 0; ii <= N; ii++)
    {
        in->constraints[ii] = config->constraints[ii]->model_assign(config->constraints[ii],
                                                                    dims->constraints[ii], c_ptr);
        c_ptr += config->constraints[ii]->model_calculate_size(config->constraints[ii],
                                                               dims->constraints[ii]);
    }

    assert((char *) raw_memory + ocp_nlp_in_calculate_size(config, dims) >= c_ptr);

    return in;
}



/************************************************
 * out
 ************************************************/

// Total bytes for the solver output: per-stage blasfeo vectors for the
// primal variables ux, algebraic variables z, multipliers pi/lam and
// slacks t.
int ocp_nlp_out_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims)
{
    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;
    int *nz = dims->nz;

    int size = sizeof(ocp_nlp_out);

    size += 4 * (N + 1) * sizeof(struct blasfeo_dvec);  // ux, lam, t, z
    size += 1 * N * sizeof(struct blasfeo_dvec);        // pi

    for (int ii = 0; ii < N; ii++)
    {
        size += 1 *
               blasfeo_memsize_dvec(nv[ii]);            // ux
        size += 1 * blasfeo_memsize_dvec(nz[ii]);       // z
        size += 2 * blasfeo_memsize_dvec(2 * ni[ii]);   // lam, t
        size += 1 * blasfeo_memsize_dvec(nx[ii + 1]);   // pi
    }
    // terminal stage N has no pi (no following dynamics)
    size += 1 * blasfeo_memsize_dvec(nv[N]);            // ux
    size += 1 * blasfeo_memsize_dvec(nz[N]);            // z
    size += 2 * blasfeo_memsize_dvec(2 * ni[N]);        // lam, t

    size += 8;   // initial align
    size += 8;   // blasfeo_struct align
    size += 64;  // blasfeo_mem align

    make_int_multiple_of(8, &size);

    return size;
}

// Assigns ocp_nlp_out into raw_memory and zero-initializes the full
// solution; mirrors ocp_nlp_out_calculate_size.
ocp_nlp_out *ocp_nlp_out_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, void *raw_memory)
{
    // loop index
    int ii;

    // extract sizes
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;
    int *nz = dims->nz;

    char *c_ptr = (char *) raw_memory;

    // initial align
    align_char_to(8, &c_ptr);

    ocp_nlp_out *out = (ocp_nlp_out *) c_ptr;
    c_ptr += sizeof(ocp_nlp_out);

    // blasfeo_struct align
    align_char_to(8, &c_ptr);

    // blasfeo_dvec_struct
    // ux
    assign_and_advance_blasfeo_dvec_structs(N + 1, &out->ux, &c_ptr);
    // z
    assign_and_advance_blasfeo_dvec_structs(N + 1, &out->z, &c_ptr);
    // pi
    assign_and_advance_blasfeo_dvec_structs(N, &out->pi, &c_ptr);
    // lam
    assign_and_advance_blasfeo_dvec_structs(N + 1, &out->lam, &c_ptr);
    // t
    assign_and_advance_blasfeo_dvec_structs(N + 1, &out->t, &c_ptr);

    // blasfeo_mem align
    align_char_to(64, &c_ptr);

    // blasfeo_dvec
    // ux
    for (int ii = 0; ii <= N; ++ii)
    {
        assign_and_advance_blasfeo_dvec_mem(nv[ii], out->ux + ii, &c_ptr);
    }
    // z
    for (int ii = 0; ii <= N; ++ii)
    {
        assign_and_advance_blasfeo_dvec_mem(nz[ii], out->z + ii, &c_ptr);
    }
    // pi
    for (int ii = 0; ii < N; ++ii)
    {
        assign_and_advance_blasfeo_dvec_mem(nx[ii + 1], out->pi + ii, &c_ptr);
    }
    // lam
    for (int ii = 0; ii <= N; ++ii)
    {
        assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], out->lam + ii, &c_ptr);
    }
    // t
    for (int ii = 0; ii <= N; ++ii)
    {
        assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], out->t + ii, &c_ptr);
    }

    // zero solution
    for(ii=0; ii<N; ii++)
    {
        blasfeo_dvecse(nv[ii], 0.0, out->ux+ii, 0);
        blasfeo_dvecse(nz[ii], 0.0, out->z+ii, 0);
        blasfeo_dvecse(nx[ii+1], 0.0, out->pi+ii, 0);
        blasfeo_dvecse(2*ni[ii], 0.0, out->lam+ii, 0);
        blasfeo_dvecse(2*ni[ii], 0.0, out->t+ii, 0);
    }
    // terminal stage (no pi)
    ii = N;
    blasfeo_dvecse(nv[ii], 0.0, out->ux+ii, 0);
    blasfeo_dvecse(nz[ii], 0.0, out->z+ii, 0);
    blasfeo_dvecse(2*ni[ii], 0.0, out->lam+ii, 0);
    blasfeo_dvecse(2*ni[ii], 0.0, out->t+ii, 0);

    assert((char *) raw_memory + ocp_nlp_out_calculate_size(config, dims) >= c_ptr);

    return out;
}



/************************************************
 * options
 ************************************************/

// Total bytes for ocp_nlp_opts including the opts of every submodule.
int ocp_nlp_opts_calculate_size(void *config_, void *dims_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int N = dims->N;

    int size = 0;

    size += sizeof(ocp_nlp_opts);

    size += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);

    size += config->regularize->opts_calculate_size();

    // dynamics
    size += N * sizeof(void *);
    for (int ii = 0; ii < N; ii++)
    {
        size += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
    }

    // cost
    size += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        size += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
    }

    // constraints
    size += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        size += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
    }

    size += 2*8;  // 2 aligns

    return size;
}

// Assigns ocp_nlp_opts plus submodule opts into raw_memory; mirrors
// ocp_nlp_opts_calculate_size.
void *ocp_nlp_opts_assign(void *config_, void *dims_, void *raw_memory)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int N = dims->N;

    char *c_ptr = (char *) raw_memory;

    align_char_to(8, &c_ptr);

    ocp_nlp_opts *opts = (ocp_nlp_opts *) c_ptr;
    c_ptr += sizeof(ocp_nlp_opts);

    /* pointers to substructures */
    opts->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);

    opts->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    opts->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);

    align_char_to(8, &c_ptr);

    /* substructures */
    opts->qp_solver_opts = qp_solver->opts_assign(qp_solver, dims->qp_solver, c_ptr);
    c_ptr += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);

    opts->regularize = config->regularize->opts_assign(c_ptr);
    c_ptr += config->regularize->opts_calculate_size();

    // dynamics
    for (int ii = 0; ii < N; ii++)
    {
        opts->dynamics[ii] = dynamics[ii]->opts_assign(dynamics[ii], dims->dynamics[ii], c_ptr);
        c_ptr += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
    }

    // cost
    for (int ii = 0; ii <= N; ii++)
    {
        opts->cost[ii] = cost[ii]->opts_assign(cost[ii], dims->cost[ii], c_ptr);
        c_ptr += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
    }

    // constraints
    for (int ii = 0; ii <= N; ii++)
    {
        opts->constraints[ii] = constraints[ii]->opts_assign(constraints[ii],
                                                             dims->constraints[ii], c_ptr);
        c_ptr += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
    }

    assert((char *) raw_memory + ocp_nlp_opts_calculate_size(config, dims) >= c_ptr);

    return opts;
}

// Fills ocp_nlp_opts (and all submodule opts) with default values:
// fixed-step globalization, full step length, no Levenberg-Marquardt term.
void ocp_nlp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_opts *opts = opts_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;
    ocp_nlp_reg_config *regularize = config->regularize;

    int ii;

    int N = dims->N;

    opts->reuse_workspace = 1;
#if defined(ACADOS_WITH_OPENMP)
    // thread count: compile-time macro wins over runtime OpenMP default
#if defined(ACADOS_NUM_THREADS)
    opts->num_threads =
                        ACADOS_NUM_THREADS;
    // printf("\nocp_nlp: openmp threads from macro = %d\n", opts->num_threads);
#else
    opts->num_threads = omp_get_max_threads();
    // printf("\nocp_nlp: omp_get_max_threads %d", omp_get_max_threads());
#endif
#endif
    // printf("\nocp_nlp: openmp threads = %d\n", opts->num_threads);

    opts->globalization = FIXED_STEP;
    opts->step_length = 1.0;
    opts->levenberg_marquardt = 0.0;

    /* submodules opts */
    // qp solver
    qp_solver->opts_initialize_default(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    // regularization
    regularize->opts_initialize_default(regularize, dims->regularize, opts->regularize);

    // dynamics
    for (ii = 0; ii < N; ii++)
    {
        dynamics[ii]->opts_initialize_default(dynamics[ii], dims->dynamics[ii],
                                              opts->dynamics[ii]);
    }

    // cost
    for (ii = 0; ii <= N; ii++)
    {
        cost[ii]->opts_initialize_default(cost[ii], dims->cost[ii], opts->cost[ii]);
    }

    // constraints
    for (ii = 0; ii <= N; ii++)
    {
        constraints[ii]->opts_initialize_default(constraints[ii], dims->constraints[ii],
                                                 opts->constraints[ii]);
    }

    // globalization
    opts->alpha_min = 0.05;
    opts->alpha_reduction = 0.7;

    return;
}

// Re-propagates (possibly changed) options to all submodules.
void ocp_nlp_opts_update(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_opts *opts = opts_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int ii;

    int N = dims->N;

    qp_solver->opts_update(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    // dynamics
    for (ii = 0; ii < N; ii++)
    {
        dynamics[ii]->opts_update(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
    }

    // cost
    for (ii = 0; ii <= N; ii++)
    {
        cost[ii]->opts_update(cost[ii], dims->cost[ii], opts->cost[ii]);
    }

    // constraints
    for (ii = 0; ii <= N; ii++)
    {
        constraints[ii]->opts_update(constraints[ii], dims->constraints[ii],
                                     opts->constraints[ii]);
    }

    return;
}

// Sets a single NLP-level option by name.  A field of the form
// "qp_<option>" is forwarded to the QP solver; "exact_hess*" fields toggle
// exact-Hessian contributions in the cost/dynamics/constraints modules.
// Exits the process on an unknown field name.
void ocp_nlp_opts_set(void *config_, void
) {
        config->constraints[stage]->opts_set( config->constraints[stage], opts->constraints[stage], (char *) field+module_length+1, value);
    }
    else
    {
        printf("\nerror: ocp_nlp_opts_set_at_stage: wrong field: %s\n", field);
        exit(1);
    }

    return;
}



/************************************************
 * memory
 ************************************************/

// Returns the number of bytes needed for an ocp_nlp_memory for the given
// config/dims/opts. NOTE: every term below must stay in sync with the
// corresponding pointer advance in ocp_nlp_memory_assign, otherwise the
// assign routine runs off the end of the allocated chunk.
int ocp_nlp_memory_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts)
{
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nz = dims->nz;
    int *nu = dims->nu;
    int *ni = dims->ni;

    int size = sizeof(ocp_nlp_memory);

    // qp in
    size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // qp out
    size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    // qp solver
    size += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    // regularization
    size += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize);

    // dynamics: one opaque per-stage memory pointer per shooting interval
    size += N * sizeof(void *);
    for (int ii = 0; ii < N; ii++)
    {
        size += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
    }

    // cost: stages 0..N inclusive
    size += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        size += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
    }

    // constraints: stages 0..N inclusive
    size += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        size += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
    }

    // nlp res
    size += ocp_nlp_res_calculate_size(dims);

    size += (N+1)*sizeof(bool);  // set_sim_guess

    size += (N+1)*sizeof(struct blasfeo_dmat);  // dzduxt
    size += 6*(N+1)*sizeof(struct blasfeo_dvec);  // cost_grad ineq_fun ineq_adj dyn_adj sim_guess z_alg
    size +=
1*N*sizeof(struct blasfeo_dvec);  // dyn_fun

    // blasfeo vector/matrix payloads, stages 0..N-1
    for (int ii = 0; ii < N; ii++)
    {
        size += 1*blasfeo_memsize_dmat(nu[ii]+nx[ii], nz[ii]);  // dzduxt
        size += 1*blasfeo_memsize_dvec(nz[ii]);  // z_alg
        size += 2*blasfeo_memsize_dvec(nv[ii]);  // cost_grad ineq_adj
        size += 1*blasfeo_memsize_dvec(nu[ii] + nx[ii]);  // dyn_adj
        size += 1*blasfeo_memsize_dvec(nx[ii + 1]);  // dyn_fun
        size += 1*blasfeo_memsize_dvec(2 * ni[ii]);  // ineq_fun
        size += 1*blasfeo_memsize_dvec(nx[ii] + nz[ii]);  // sim_guess
    }
    // terminal stage N (no dyn_fun: dynamics only exist on intervals)
    size += 1*blasfeo_memsize_dmat(nu[N]+nx[N], nz[N]);  // dzduxt
    size += 1*blasfeo_memsize_dvec(nz[N]);  // z_alg
    size += 2*blasfeo_memsize_dvec(nv[N]);  // cost_grad ineq_adj
    size += 1*blasfeo_memsize_dvec(nu[N] + nx[N]);  // dyn_adj
    size += 1*blasfeo_memsize_dvec(2 * ni[N]);  // ineq_fun
    size += 1*blasfeo_memsize_dvec(nx[N] + nz[N]);  // sim_guess

    // alignment slack, matching the align_char_to calls in the assign routine
    size += 8;   // initial align
    size += 8;   // middle align
    size += 8;   // blasfeo_struct align
    size += 64;  // blasfeo_mem align

    make_int_multiple_of(8, &size);

    return size;
}



// Carves an ocp_nlp_memory out of raw_memory (sized by
// ocp_nlp_memory_calculate_size) and initializes its sub-memories.
// The layout here must mirror the size computation above, term by term.
ocp_nlp_memory *ocp_nlp_memory_assign(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts,
                                      void *raw_memory)
{
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    // extract sizes
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nz = dims->nz;
    int *nu = dims->nu;
    int *ni = dims->ni;

    char *c_ptr = (char *) raw_memory;

    // initial align
    align_char_to(8, &c_ptr);

    // struct
    ocp_nlp_memory *mem = (ocp_nlp_memory *) c_ptr;
    c_ptr += sizeof(ocp_nlp_memory);

    /* pointers to substructures */
    // dynamics
    mem->dynamics = (void **) c_ptr;
    c_ptr += N*sizeof(void *);
    // cost
    mem->cost = (void **) c_ptr;
    c_ptr += (N+1)*sizeof(void *);
    // constraints
    mem->constraints = (void **) c_ptr;
    c_ptr += (N+1)*sizeof(void *);

    // middle align
    align_char_to(8, &c_ptr);

    /* substructures */
    // qp in
    mem->qp_in =
ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims);

    // qp out
    mem->qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr);
    c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims);

    // QP solver
    mem->qp_solver_mem = qp_solver->memory_assign(qp_solver, dims->qp_solver, opts->qp_solver_opts, c_ptr);
    c_ptr += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    // regularization
    mem->regularize_mem = config->regularize->memory_assign(config->regularize, dims->regularize, opts->regularize, c_ptr);
    c_ptr += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize);

    // dynamics: per-interval module memory
    for (int ii = 0; ii < N; ii++)
    {
        mem->dynamics[ii] = dynamics[ii]->memory_assign(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii], c_ptr);
        c_ptr += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
    }

    // cost: per-stage module memory
    for (int ii = 0; ii <= N; ii++)
    {
        mem->cost[ii] = cost[ii]->memory_assign(cost[ii], dims->cost[ii], opts->cost[ii], c_ptr);
        c_ptr += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
    }

    // constraints: per-stage module memory
    for (int ii = 0; ii <= N; ii++)
    {
        mem->constraints[ii] = constraints[ii]->memory_assign(constraints[ii], dims->constraints[ii], opts->constraints[ii], c_ptr);
        c_ptr += constraints[ii]->memory_calculate_size( constraints[ii], dims->constraints[ii], opts->constraints[ii]);
    }

    // nlp res
    mem->nlp_res = ocp_nlp_res_assign(dims, c_ptr);
    c_ptr += mem->nlp_res->memsize;

    // blasfeo_struct align
    align_char_to(8, &c_ptr);

    // blasfeo struct headers first, payload memory assigned further below
    // dzduxt
    assign_and_advance_blasfeo_dmat_structs(N + 1, &mem->dzduxt, &c_ptr);
    // z_alg
    assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->z_alg, &c_ptr);
    // cost_grad
    assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->cost_grad, &c_ptr);
    // ineq_fun
    assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->ineq_fun, &c_ptr);
    // ineq_adj
    assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->ineq_adj, &c_ptr);
    // dyn_fun
    assign_and_advance_blasfeo_dvec_structs(N, &mem->dyn_fun, &c_ptr);
    // dyn_adj
    assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->dyn_adj, &c_ptr);
    // sim_guess
    assign_and_advance_blasfeo_dvec_structs(N + 1, &mem->sim_guess, &c_ptr);

    // set_sim_guess: flags start out false (no user-provided guess yet)
    assign_and_advance_bool(N+1, &mem->set_sim_guess, &c_ptr);
    for (int ii = 0; ii <= N; ++ii)
    {
        mem->set_sim_guess[ii] = false;
    }

    // blasfeo_mem align
    align_char_to(64, &c_ptr);

    // dzduxt
    for (int ii=0; ii<=N; ii++)
    {
        assign_and_advance_blasfeo_dmat_mem(nu[ii]+nx[ii], nz[ii], mem->dzduxt+ii, &c_ptr);
    }
    // z_alg
    for (int ii=0; ii<=N; ii++)
    {
        blasfeo_create_dvec(nz[ii], mem->z_alg+ii, c_ptr);
        c_ptr += blasfeo_memsize_dvec(nz[ii]);
    }
    // cost_grad
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nv[ii], mem->cost_grad + ii, &c_ptr);
    }
    // ineq_fun
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], mem->ineq_fun + ii, &c_ptr);
    }
    // ineq_adj
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nv[ii], mem->ineq_adj + ii, &c_ptr);
    }
    // dyn_fun
    for (int ii = 0; ii < N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nx[ii + 1], mem->dyn_fun + ii, &c_ptr);
    }
    // dyn_adj
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nu[ii] + nx[ii], mem->dyn_adj + ii, &c_ptr);
    }
    // sim_guess
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nx[ii] + nz[ii], mem->sim_guess + ii, &c_ptr);
        // set to 0;
        blasfeo_dvecse(nx[ii] + nz[ii], 0.0, mem->sim_guess+ii, 0);
        // printf("sim_guess ii %d: %p\n", ii, mem->sim_guess+ii);
    }

    // printf("created memory %p\n", mem);

    return mem;
}



/************************************************
 * workspace
 ************************************************/

// Returns the number of bytes needed for an ocp_nlp_workspace; must stay in
// sync with ocp_nlp_workspace_assign below.
int ocp_nlp_workspace_calculate_size(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_opts *opts)
{
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int ii;

    int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    int size = 0;

    // nlp
    size += sizeof(ocp_nlp_workspace);

    // tmp_nlp_out
    size += ocp_nlp_out_calculate_size(config, dims);

    // weight_merit_fun
    size += ocp_nlp_out_calculate_size(config, dims);

    // array of pointers
    // cost
    size += (N+1)*sizeof(void *);
    // dynamics
    size += N*sizeof(void *);
    // constraints
    size += (N+1)*sizeof(void *);

    // module workspace
    if (opts->reuse_workspace)
    {
#if defined(ACADOS_WITH_OPENMP)
        // with OpenMP the modules may run concurrently, so each one needs its
        // own workspace even in reuse mode: sum all sizes
        // qp solver
        size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);

        // dynamics
        for (ii = 0; ii < N; ii++)
        {
            size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
        }

        // cost
        for (ii = 0; ii <= N; ii++)
        {
            size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }

        // constraints
        for (ii = 0; ii <= N; ii++)
        {
            size += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
        }
#else
        // serial execution: all modules can share one workspace, so only the
        // maximum of the individual sizes is needed
        int size_tmp = 0;
        int tmp;

        // qp solver
        tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
        size_tmp = tmp > size_tmp ? tmp : size_tmp;

        // dynamics
        for (ii = 0; ii < N; ii++)
        {
            tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }

        // cost
        for (ii = 0; ii <= N; ii++)
        {
            tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }

        // constraints
        for (ii = 0; ii <= N; ii++)
        {
            tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
            size_tmp = tmp > size_tmp ?
tmp : size_tmp;
        }

        size += size_tmp;
#endif
    }
    else
    {
        // no reuse: every module gets a dedicated workspace
        // qp solver
        size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);

        // dynamics
        for (ii = 0; ii < N; ii++)
        {
            size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
        }

        // cost
        for (ii = 0; ii <= N; ii++)
        {
            size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }

        // constraints
        for (ii = 0; ii <= N; ii++)
        {
            size += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
        }
    }

    size += 8;  // struct align

    return size;
}



// Carves an ocp_nlp_workspace out of raw_memory; mirrors
// ocp_nlp_workspace_calculate_size (including the reuse_workspace /
// ACADOS_WITH_OPENMP layout variants).
ocp_nlp_workspace *ocp_nlp_workspace_assign(ocp_nlp_config *config, ocp_nlp_dims *dims,
                                            ocp_nlp_opts *opts, ocp_nlp_memory *mem, void *raw_memory)
{
    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int N = dims->N;
    // int *nx = dims->nx;
    // int *nu = dims->nu;
    // int *nz = dims->nz;

    char *c_ptr = (char *) raw_memory;

    ocp_nlp_workspace *work = (ocp_nlp_workspace *) c_ptr;
    c_ptr += sizeof(ocp_nlp_workspace);

    /* pointers to substructures */
    //
    work->dynamics = (void **) c_ptr;
    c_ptr += N*sizeof(void *);
    //
    work->cost = (void **) c_ptr;
    c_ptr += (N+1)*sizeof(void *);
    //
    work->constraints = (void **) c_ptr;
    c_ptr += (N+1)*sizeof(void *);

    align_char_to(8, &c_ptr);

    /* substructures */
    // tmp_nlp_out
    work->tmp_nlp_out = ocp_nlp_out_assign(config, dims, c_ptr);
    c_ptr += ocp_nlp_out_calculate_size(config, dims);

    // weight_merit_fun
    work->weight_merit_fun = ocp_nlp_out_assign(config, dims, c_ptr);
    c_ptr += ocp_nlp_out_calculate_size(config, dims);

    if (opts->reuse_workspace)
    {
#if defined(ACADOS_WITH_OPENMP)
        // disjoint workspaces (modules may run concurrently)
        // qp solver
        work->qp_work = (void *) c_ptr;
        c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);

        // dynamics
        for (int ii = 0; ii < N;
ii++)
        {
            work->dynamics[ii] = c_ptr;
            c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
        }

        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }

        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
        }
#else
        // serial reuse: all module workspace pointers alias the same region;
        // only the maximum size is reserved (c_ptr is advanced once at the end)
        int size_tmp = 0;
        int tmp;

        // qp solver
        work->qp_work = (void *) c_ptr;
        tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);
        size_tmp = tmp > size_tmp ? tmp : size_tmp;

        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
            tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }

        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
            size_tmp = tmp > size_tmp ? tmp : size_tmp;
        }

        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
            size_tmp = tmp > size_tmp ?
tmp : size_tmp;
        }

        c_ptr += size_tmp;
#endif
    }
    else
    {
        // dedicated workspace per module
        // qp solver
        work->qp_work = (void *) c_ptr;
        c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts);

        // dynamics
        for (int ii = 0; ii < N; ii++)
        {
            work->dynamics[ii] = c_ptr;
            c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
        }

        // cost
        for (int ii = 0; ii <= N; ii++)
        {
            work->cost[ii] = c_ptr;
            c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]);
        }

        // constraints
        for (int ii = 0; ii <= N; ii++)
        {
            work->constraints[ii] = c_ptr;
            c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]);
        }
    }

    // sanity check: we must not have walked past the computed size
    assert((char *) work + ocp_nlp_workspace_calculate_size(config, dims, opts) >= c_ptr);

    return work;
}



/************************************************
 * functions
 ************************************************/

// Calls the per-stage initialize hook of the cost, dynamics and constraints
// modules for all stages (dynamics only on the N shooting intervals).
void ocp_nlp_initialize_qp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
                           ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int ii;

    int N = dims->N;

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        // cost
        config->cost[ii]->initialize(config->cost[ii], dims->cost[ii], in->cost[ii],
                                     opts->cost[ii], mem->cost[ii], work->cost[ii]);
        // dynamics
        if (ii < N)
            config->dynamics[ii]->initialize(config->dynamics[ii], dims->dynamics[ii], in->dynamics[ii],
                                             opts->dynamics[ii], mem->dynamics[ii], work->dynamics[ii]);
        // constraints
        config->constraints[ii]->initialize(config->constraints[ii], dims->constraints[ii], in->constraints[ii],
                                            opts->constraints[ii], mem->constraints[ii], work->constraints[ii]);
    }

    return;
}



// Initializes the inequality slack variables out->t from the current iterate
// by evaluating the stage constraints and setting t = -ineq_fun.
void ocp_nlp_initialize_t_slacks(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
                                 ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int ii;
    struct blasfeo_dvec *ineq_fun;

    int N = dims->N;
    int *ni = dims->ni;
    int *ns = dims->ns;
    int *nx = dims->nx;
    int *nu = dims->nu;

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        // copy out->ux to tmp_nlp_out->ux, since this is used in compute_fun
        blasfeo_dveccp(nx[ii]+nu[ii]+2*ns[ii], out->ux+ii, 0, work->tmp_nlp_out->ux+ii, 0);

        // evaluate inequalities
        config->constraints[ii]->compute_fun(config->constraints[ii], dims->constraints[ii],
                                             in->constraints[ii], opts->constraints[ii],
                                             mem->constraints[ii], work->constraints[ii]);

        ineq_fun = config->constraints[ii]->memory_get_fun_ptr(mem->constraints[ii]);
        // t = -ineq_fun
        blasfeo_dveccpsc(2 * ni[ii], -1.0, ineq_fun, 0, out->t + ii, 0);
    }

    return;
}



// Builds the QP approximation matrices: evaluates the stage-wise Lagrangian
// (Hessians and Jacobians via the module update_qp_matrices hooks, plus an
// optional Levenberg-Marquardt diagonal regularization) and then collects the
// per-stage gradients/function values into the NLP memory.
void ocp_nlp_approximate_qp_matrices(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
                                     ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int i;

    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *ni = dims->ni;

    /* stage-wise multiple shooting lagrangian evaluation */

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i = 0; i <= N; i++)
    {
        // init Hessian to 0
        blasfeo_dgese(nu[i] + nx[i], nu[i] + nx[i], 0.0, mem->qp_in->RSQrq+i, 0, 0);

        if (i < N)
        {
            // Levenberg Marquardt term: Ts[i] * levenberg_marquardt * eye()
            if (opts->levenberg_marquardt > 0.0)
                blasfeo_ddiare(nu[i] + nx[i], in->Ts[i] * opts->levenberg_marquardt,
                               mem->qp_in->RSQrq+i, 0, 0);

            // dynamics
            config->dynamics[i]->update_qp_matrices(config->dynamics[i], dims->dynamics[i], in->dynamics[i],
                                                    opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);
        }
        else
        {
            // Levenberg Marquardt term: 1.0 * levenberg_marquardt * eye()
            if (opts->levenberg_marquardt > 0.0)
                blasfeo_ddiare(nu[i] + nx[i], opts->levenberg_marquardt, mem->qp_in->RSQrq+i, 0, 0);
        }

        // cost
        config->cost[i]->update_qp_matrices(config->cost[i], dims->cost[i], in->cost[i], opts->cost[i],
                                            mem->cost[i], work->cost[i]);

        // constraints
        config->constraints[i]->update_qp_matrices(config->constraints[i], dims->constraints[i],
                                                   in->constraints[i], opts->constraints[i],
                                                   mem->constraints[i], work->constraints[i]);
    }

    /* collect stage-wise evaluations */

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i=0; i <= N; i++)
    {
        // nlp mem: cost_grad
        struct blasfeo_dvec *cost_grad = config->cost[i]->memory_get_grad_ptr(mem->cost[i]);
        blasfeo_dveccp(nv[i], cost_grad, 0, mem->cost_grad + i, 0);

        // nlp mem: dyn_fun
        if (i < N)
        {
            struct blasfeo_dvec *dyn_fun = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
            blasfeo_dveccp(nx[i + 1], dyn_fun, 0, mem->dyn_fun + i, 0);
        }

        // nlp mem: dyn_adj
        if (i < N)
        {
            struct blasfeo_dvec *dyn_adj = config->dynamics[i]->memory_get_adj_ptr(mem->dynamics[i]);
            blasfeo_dveccp(nu[i] + nx[i], dyn_adj, 0, mem->dyn_adj + i, 0);
        }
        else
        {
            blasfeo_dvecse(nu[N] + nx[N], 0.0, mem->dyn_adj + N, 0);
        }
        // add the contribution of the previous interval's adjoint to the
        // state part of dyn_adj at stage i
        if (i > 0)
        {
            struct blasfeo_dvec *dyn_adj = config->dynamics[i-1]->memory_get_adj_ptr(mem->dynamics[i-1]);
            blasfeo_daxpy(nx[i], 1.0, dyn_adj, nu[i-1]+nx[i-1], mem->dyn_adj+i, nu[i], mem->dyn_adj+i, nu[i]);
        }

        // nlp mem: ineq_fun
        struct blasfeo_dvec *ineq_fun = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
        blasfeo_dveccp(2 * ni[i], ineq_fun, 0, mem->ineq_fun + i, 0);

        // nlp mem: ineq_adj
        struct blasfeo_dvec *ineq_adj = config->constraints[i]->memory_get_adj_ptr(mem->constraints[i]);
        blasfeo_dveccp(nv[i], ineq_adj, 0, mem->ineq_adj + i, 0);
    }

    // NOTE(review): this loop body is entirely commented out below; it is a
    // placeholder for a pending refactor.
    for (i = 0; i <= N; i++)
    {
        // TODO(rien) where should the update happen??? move to qp update ???
        // TODO(all): fix and move where appropriate
        //            if (i<N)
        //            {
        //                ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
        //                sim_opts *opts = dynamics_opts->sim_solver;
        //                if (opts->scheme != NULL && opts->scheme->type != exact)
        //                {
        //                    for (int_t j = 0; j < nx; j++)
        //                        BLASFEO_DVECEL(nlp_mem->cost_grad+i, nu+j) += work->sim_out[i]->grad[j];
        //                    for (int_t j = 0; j < nu; j++)
        //                        BLASFEO_DVECEL(nlp_mem->cost_grad+i, j) += work->sim_out[i]->grad[nx+j];
        //                }
        //            }
    }

    return;
}



// update QP rhs for SQP (step prim var, abs dual var)
// TODO(all): move in dynamics, cost, constraints modules ???
// Copies the collected gradient (g), dynamics residual (b) and inequality
// residual (d) vectors from the NLP memory into the QP right-hand side.
void ocp_nlp_approximate_qp_vectors_sqp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
                                        ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int i;

    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i = 0; i <= N; i++)
    {
        // g
        blasfeo_dveccp(nv[i], mem->cost_grad + i, 0, mem->qp_in->rqz + i, 0);

        // b
        if (i < N)
            blasfeo_dveccp(nx[i + 1], mem->dyn_fun + i, 0, mem->qp_in->b + i, 0);

        // d
        blasfeo_dveccp(2 * ni[i], mem->ineq_fun + i, 0, mem->qp_in->d + i, 0);
    }

    return;
}



// Re-evaluates the stage-0 bounds (initial state embedding) and refreshes the
// corresponding entries of the QP constraint vector d.
void ocp_nlp_embed_initial_value(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
                                 ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int *ni = dims->ni;

    // constraints
    config->constraints[0]->bounds_update(config->constraints[0], dims->constraints[0], in->constraints[0],
                                          opts->constraints[0], mem->constraints[0], work->constraints[0]);

    // nlp mem: ineq_fun
    struct blasfeo_dvec *ineq_fun = config->constraints[0]->memory_get_fun_ptr(mem->constraints[0]);
    blasfeo_dveccp(2 * ni[0], ineq_fun, 0, mem->ineq_fun, 0);

    // d
    blasfeo_dveccp(2 * ni[0], mem->ineq_fun, 0, mem->qp_in->d, 0);

    return;
}



// Evaluates an exact-penalty (L1-type) merit function at the primal point
// currently stored in the modules' inputs: total cost plus multiplier-weighted
// dynamics and constraint violations (weights from work->weight_merit_fun).
double ocp_nlp_evaluate_merit_fun(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
                                  ocp_nlp_out *out, ocp_nlp_opts *opts,
                                  ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int i, j;

    int N = dims->N;
    int *nx = dims->nx;
    int *ni = dims->ni;

    double merit_fun = 0.0;

    // compute fun value
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i=0; i<=N; i++)
    {
        // cost
        config->cost[i]->compute_fun(config->cost[i], dims->cost[i], in->cost[i], opts->cost[i],
                                     mem->cost[i], work->cost[i]);
    }
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i=0; i<N; i++)
    {
        // dynamics
        config->dynamics[i]->compute_fun(config->dynamics[i], dims->dynamics[i], in->dynamics[i],
                                         opts->dynamics[i], mem->dynamics[i], work->dynamics[i]);
    }
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i=0; i<=N; i++)
    {
        // constr
        config->constraints[i]->compute_fun(config->constraints[i], dims->constraints[i], in->constraints[i],
                                            opts->constraints[i], mem->constraints[i], work->constraints[i]);
    }

    double *tmp_fun;
    double tmp;
    struct blasfeo_dvec *tmp_fun_vec;

    // sum of stage costs
    double cost_fun = 0.0;
    for(i=0; i<=N; i++)
    {
        tmp_fun = config->cost[i]->memory_get_fun_ptr(mem->cost[i]);
        cost_fun += *tmp_fun;
    }

    // weighted L1 norm of the dynamics residuals
    double dyn_fun = 0.0;
    for(i=0; i<N; i++)
    {
        tmp_fun_vec = config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]);
        // printf("\nMerit: dyn will multiply tmp_fun, weights\n");
        // blasfeo_print_exp_tran_dvec(nx[i+1], tmp_fun_vec, 0);
        // blasfeo_print_exp_tran_dvec(nx[i+1], work->weight_merit_fun->pi+i, 0);
        for(j=0; j<nx[i+1]; j++)
        {
            // printf("\n%e %e\n", fabs(BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j)), fabs(BLASFEO_DVECEL(tmp_fun_vec, j)));
            dyn_fun += fabs(BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j)) * fabs(BLASFEO_DVECEL(tmp_fun_vec, j));
        }
    }

    // weighted sum of inequality violations (only positive parts count)
    double constr_fun = 0.0;
    for(i=0; i<=N; i++)
    {
        // printf("\ni %d\n", i);
        tmp_fun_vec = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]);
        // blasfeo_print_exp_tran_dvec(2*ni[i], tmp_fun_vec, 0);
        // blasfeo_print_exp_tran_dvec(2*ni[i], work->weight_merit_fun->lam+i, 0);
        for(j=0; j<2*ni[i]; j++)
        {
            tmp = BLASFEO_DVECEL(tmp_fun_vec, j);
            tmp =
tmp>0.0 ? fabs(tmp) : 0.0;  // tmp = constraint violation
            // printf("IN merit fun: ineq i %d, j %d tmp_fun%e, multiplier %e\n", i, j, BLASFEO_DVECEL(tmp_fun_vec, j), BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j));
            constr_fun += fabs(BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j)) * tmp;
        }
    }

    merit_fun = cost_fun + dyn_fun + constr_fun;
    // printf("\nMerit fun: %e cost: %e dyn: %e constr: %e\n", merit_fun, cost_fun, dyn_fun, constr_fun);

    return merit_fun;
}



// Backtracking line search on the merit function (Leineweber 1999 style).
// Returns the accepted step length alpha; the trial points are written into
// work->tmp_nlp_out, the current iterate `out` itself is not modified here.
double ocp_nlp_line_search(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
                           ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    int i;

    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *ni = dims->ni;

    double alpha = opts->step_length;

    double tmp0, tmp1;
    int j;

#if 0
    // Line Search Gianluca version
    // NOTE(review): this whole variant is compiled out; it also contains a
    // bitwise `&` where `&&` was presumably intended ("j<10 & alpha>alpha_min").

    // current point
    for (i = 0; i <= N; i++)
        blasfeo_dveccp(nv[i], out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);

    for (i = 0; i < N; i++)
        blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->tmp_nlp_out->pi+i, 0);

    for (i = 0; i <= N; i++)
        blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->tmp_nlp_out->lam+i, 0);

    // linear update of algebraic variables using state and input sensitivity
    //    if (i < N)
    //    {
    //        blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0);
    //    }

    // initialize weights
    if(mem->sqp_iter[0]==0)
    {
        for (i = 0; i < N; i++)
            blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->weight_merit_fun->pi+i, 0);
        for (i = 0; i <= N; i++)
            blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->weight_merit_fun->lam+i, 0);
    }

    // update weigths
    for (i = 0; i < N; i++)
    {
        for(j=0; j<nx[i+1]; j++)
        {
            tmp0 = fabs(BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j));
            tmp1 = 0.5 * (tmp0 + fabs(BLASFEO_DVECEL(mem->qp_out->pi+i, j)));
            BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j) = tmp0>tmp1 ?
tmp0 : tmp1;
        }
    }
    for (i = 0; i <= N; i++)
    {
        for(j=0; j<2*ni[i]; j++)
        {
            tmp0 = fabs(BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j));
            tmp1 = 0.5 * (tmp0 + fabs(BLASFEO_DVECEL(mem->qp_out->lam+i, j)));
            BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j) = tmp0>tmp1 ? tmp0 : tmp1;
        }
    }

    printf("\n\nmerit fun value\n");
    double merit_fun0 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);

    double alpha_min = 0.1;
    for (j=0; j<10 & alpha>alpha_min; j++)
    {
        for (i = 0; i <= N; i++)
            blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux+i, 0, out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);

        printf("\n%d tmp merit fun value\n", j);
        double merit_fun1 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);
        if(merit_fun1 < merit_fun0)
        {
            break;
        }
        else
        {
            alpha *= 0.7;
        }
    }

    printf("\nalpha %f\n", alpha);
#endif

    if (opts->globalization == MERIT_BACKTRACKING)
    {
        // Line search version Jonathan
        // Following Leineweber1999

        // copy out (current iterate) to work->tmp_nlp_out
        for (i = 0; i <= N; i++)
            blasfeo_dveccp(nv[i], out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);

        for (i = 0; i < N; i++)
            blasfeo_dveccp(nx[i+1], out->pi+i, 0, work->tmp_nlp_out->pi+i, 0);

        for (i = 0; i <= N; i++)
            blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->tmp_nlp_out->lam+i, 0);

        // linear update of algebraic variables using state and input sensitivity
        //        if (i < N)
        //        {
        //            blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0);
        //        }

        /* initialize (Leineweber1999 M5.1) */
        if (mem->sqp_iter[0]==0)
        {
            // initialize weights
            // equality merit weights = abs( eq multipliers )
            for (i = 0; i < N; i++)
            {
                for (j=0; j<nx[i+1]; j++)
                {
                    tmp0 = fabs(BLASFEO_DVECEL(out->pi+i, j));
                    BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j) = tmp0;
                }
            }
            // printf("merit fun: initialize weights lam\n");
            for (i = 0; i <= N; i++)
            {
                blasfeo_dveccp(2*ni[i], out->lam+i, 0, work->weight_merit_fun->lam+i, 0);
                // blasfeo_print_dvec(nx[i+1], work->weight_merit_fun->lam+i, 0);
            }
        }
        else
        {
            // update weights
            // printf("merit fun: update weights, sqp_iter = %d\n", mem->sqp_iter[0]);
            for (i = 0; i < N; i++)
            {
                for(j=0; j<nx[i+1]; j++)
                {
                    // abs(lambda) (LW)
                    tmp0 = fabs(BLASFEO_DVECEL(out->pi+i, j));
                    // .5 * (abs(lambda) + sigma)
                    tmp1 = 0.5 * (tmp0 + BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j));
                    BLASFEO_DVECEL(work->weight_merit_fun->pi+i, j) = tmp0>tmp1 ? tmp0 : tmp1;
                }
            }
            for (i = 0; i <= N; i++)
            {
                for(j=0; j<2*ni[i]; j++)
                {
                    // mu (LW)
                    tmp0 = BLASFEO_DVECEL(out->lam+i, j);
                    // .5 * (mu + tau)
                    tmp1 = 0.5 * (tmp0 + BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j));
                    BLASFEO_DVECEL(work->weight_merit_fun->lam+i, j) = tmp0>tmp1 ? tmp0 : tmp1;
                }
            }
        }

        if (1) // (mem->sqp_iter[0]!=0) // TODO: why does Leineweber do full step in first SQP iter?
        {
            double merit_fun0 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);

            double alpha_min = opts->alpha_min;
            double reduction_factor = opts->alpha_reduction;

            /* actual Line Search*/
            alpha = 1.0;
            // TODO: check out more advanced step search Leineweber1995
            // shrink alpha by reduction_factor until the merit function
            // decreases or alpha would drop below alpha_min
            for (j=0; alpha*reduction_factor > alpha_min; j++)
            {
                // trial point: tmp_nlp_out->ux = out->ux + alpha * qp step
                for (i = 0; i <= N; i++)
                    blasfeo_daxpy(nv[i], alpha, mem->qp_out->ux+i, 0, out->ux+i, 0, work->tmp_nlp_out->ux+i, 0);

                // printf("\ntmp merit fun value step search iter: %d", j);
                double merit_fun1 = ocp_nlp_evaluate_merit_fun(config, dims, in, out, opts, mem, work);

                // TODO(oj): also check Armijo-type condition Leinweber1999 (2.35)
                if (merit_fun1 < merit_fun0)
                {
                    break;
                }
                else
                {
                    alpha *= reduction_factor;
                }
            }
        }
        printf("\nalpha %f\n", alpha);
    }

    return alpha;
}



// Applies the accepted SQP step: primal variables take a step of length alpha
// along the QP solution, dual variables and slacks are updated as the convex
// combination (1-alpha)*old + alpha*qp_out; algebraic variables z get a
// first-order update through the dz/d(u,x) sensitivities.
void ocp_nlp_update_variables_sqp(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
                                  ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem,
                                  ocp_nlp_workspace *work, double alpha)
{
    int i;

    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *ni = dims->ni;
    int *nz = dims->nz;

#if defined(ACADOS_WITH_OPENMP)
    #pragma omp parallel for
#endif
    for (i = 0; i <= N; i++)
    {
        // step in primal variables
        blasfeo_daxpy(nv[i],
alpha, mem->qp_out->ux + i, 0, out->ux + i, 0, out->ux + i, 0);

        // update dual variables
        if (i < N)
        {
            blasfeo_dvecsc(nx[i+1], 1.0-alpha, out->pi+i, 0);
            blasfeo_daxpy(nx[i+1], alpha, mem->qp_out->pi+i, 0, out->pi+i, 0, out->pi+i, 0);
        }
        blasfeo_dvecsc(2*ni[i], 1.0-alpha, out->lam+i, 0);
        blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->lam+i, 0, out->lam+i, 0, out->lam+i, 0);

        // update slack values
        blasfeo_dvecsc(2*ni[i], 1.0-alpha, out->t+i, 0);
        blasfeo_daxpy(2*ni[i], alpha, mem->qp_out->t+i, 0, out->t+i, 0, out->t+i, 0);

        // linear update of algebraic variables using state and input sensitivity
        if (i < N)
        {
            blasfeo_dgemv_t(nu[i]+nx[i], nz[i], alpha, mem->dzduxt+i, 0, 0, mem->qp_out->ux+i, 0, 1.0, mem->z_alg+i, 0, out->z+i, 0);
        }
    }

    return;
}



/************************************************
 * residuals
 ************************************************/

// Returns the number of bytes needed for an ocp_nlp_res; must stay in sync
// with ocp_nlp_res_assign below.
int ocp_nlp_res_calculate_size(ocp_nlp_dims *dims)
{
    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;

    int size = sizeof(ocp_nlp_res);

    size += 3 * (N + 1) * sizeof(struct blasfeo_dvec);  // res_stat res_ineq res_comp
    size += 1 * N * sizeof(struct blasfeo_dvec);        // res_eq

    for (int ii = 0; ii < N; ii++)
    {
        size += 1 * blasfeo_memsize_dvec(nv[ii]);      // res_stat
        size += 1 * blasfeo_memsize_dvec(nx[ii + 1]);  // res_eq
        size += 2 * blasfeo_memsize_dvec(2 * ni[ii]);  // res_ineq res_comp
    }
    // terminal stage: no res_eq entry
    size += 1 * blasfeo_memsize_dvec(nv[N]);      // res_stat
    size += 2 * blasfeo_memsize_dvec(2 * ni[N]);  // res_ineq res_comp

    size += 8;   // initial align
    size += 8;   // blasfeo_struct align
    size += 64;  // blasfeo_mem align

    make_int_multiple_of(8, &size);

    return size;
}



// Carves an ocp_nlp_res (KKT residual storage) out of raw_memory; layout
// mirrors ocp_nlp_res_calculate_size.
ocp_nlp_res *ocp_nlp_res_assign(ocp_nlp_dims *dims, void *raw_memory)
{
    char *c_ptr = (char *) raw_memory;

    // extract sizes
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    // int *nu = dims->nu;
    int *ni = dims->ni;

    // initial align
    align_char_to(8, &c_ptr);

    // struct
    ocp_nlp_res *res = (ocp_nlp_res *) c_ptr;
    c_ptr +=
sizeof(ocp_nlp_res);

    // blasfeo_struct align
    align_char_to(8, &c_ptr);

    // res_stat
    assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_stat, &c_ptr);
    // res_eq
    assign_and_advance_blasfeo_dvec_structs(N, &res->res_eq, &c_ptr);
    // res_ineq
    assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_ineq, &c_ptr);
    // res_comp
    assign_and_advance_blasfeo_dvec_structs(N + 1, &res->res_comp, &c_ptr);

    // blasfeo_mem align
    align_char_to(64, &c_ptr);

    // res_stat
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nv[ii], res->res_stat + ii, &c_ptr);
    }
    // res_eq
    for (int ii = 0; ii < N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(nx[ii + 1], res->res_eq + ii, &c_ptr);
    }
    // res_ineq
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], res->res_ineq + ii, &c_ptr);
    }
    // res_comp
    for (int ii = 0; ii <= N; ii++)
    {
        assign_and_advance_blasfeo_dvec_mem(2 * ni[ii], res->res_comp + ii, &c_ptr);
    }

    res->memsize = ocp_nlp_res_calculate_size(dims);

    return res;
}



// Computes the stage-wise KKT residuals (stationarity, equality, inequality,
// complementarity) from the quantities collected in the NLP memory, and their
// infinity norms over all stages.
void ocp_nlp_res_compute(ocp_nlp_dims *dims, ocp_nlp_in *in, ocp_nlp_out *out, ocp_nlp_res *res,
                         ocp_nlp_memory *mem)
{
    // extract dims
    int N = dims->N;
    int *nv = dims->nv;
    int *nx = dims->nx;
    int *nu = dims->nu;
    int *ni = dims->ni;

    double tmp_res;

    // res_stat: cost gradient minus inequality and dynamics adjoints
    res->inf_norm_res_stat = 0.0;
    for (int ii = 0; ii <= N; ii++)
    {
        blasfeo_daxpy(nv[ii], -1.0, mem->ineq_adj + ii, 0, mem->cost_grad + ii, 0, res->res_stat + ii, 0);
        blasfeo_daxpy(nu[ii] + nx[ii], -1.0, mem->dyn_adj + ii, 0, res->res_stat + ii, 0, res->res_stat + ii, 0);
        blasfeo_dvecnrm_inf(nv[ii], res->res_stat + ii, 0, &tmp_res);
        res->inf_norm_res_stat = tmp_res > res->inf_norm_res_stat ? tmp_res : res->inf_norm_res_stat;
    }

    // res_eq: dynamics (shooting gap) residuals
    res->inf_norm_res_eq = 0.0;
    for (int ii = 0; ii < N; ii++)
    {
        blasfeo_dveccp(nx[ii + 1], mem->dyn_fun + ii, 0, res->res_eq + ii, 0);
        blasfeo_dvecnrm_inf(nx[ii + 1], res->res_eq + ii, 0, &tmp_res);
        res->inf_norm_res_eq = tmp_res > res->inf_norm_res_eq ? tmp_res : res->inf_norm_res_eq;
    }

    // res_ineq: t + ineq_fun
    res->inf_norm_res_ineq = 0.0;
    for (int ii = 0; ii <= N; ii++)
    {
        blasfeo_daxpy(2 * ni[ii], 1.0, out->t + ii, 0, mem->ineq_fun + ii, 0, res->res_ineq + ii, 0);
        blasfeo_dvecnrm_inf(2 * ni[ii], res->res_ineq + ii, 0, &tmp_res);
        res->inf_norm_res_ineq = tmp_res > res->inf_norm_res_ineq ? tmp_res : res->inf_norm_res_ineq;
    }

    // res_comp: elementwise lam .* t
    res->inf_norm_res_comp = 0.0;
    for (int ii = 0; ii <= N; ii++)
    {
        blasfeo_dvecmul(2 * ni[ii], out->lam + ii, 0, out->t + ii, 0, res->res_comp + ii, 0);
        blasfeo_dvecnrm_inf(2 * ni[ii], res->res_comp + ii, 0, &tmp_res);
        res->inf_norm_res_comp = tmp_res > res->inf_norm_res_comp ? tmp_res : res->inf_norm_res_comp;
    }

    // printf("computed residuals g: %e, b: %e, d: %e, m: %e\n", res->inf_norm_res_stat, res->inf_norm_res_eq,
    //        res->inf_norm_res_ineq, res->inf_norm_res_comp);

    return;
}



// Evaluates the total NLP cost at the current iterate out->ux (sum of stage
// costs) and stores it in mem->cost_value.
void ocp_nlp_cost_compute(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_in *in,
                          ocp_nlp_out *out, ocp_nlp_opts *opts, ocp_nlp_memory *mem, ocp_nlp_workspace *work)
{
    // extract dims
    int N = dims->N;

    double* tmp_cost = NULL;
    double total_cost = 0.0;

    for (int ii = 0; ii <= N; ii++)
    {
        // set pointers
        // NOTE(oj): the cost compute function takes the tmp_ux_ptr as input,
        //   since it is also used for globalization,
        //   especially with primal variables that are NOT current SQP iterates.
        config->cost[ii]->memory_set_tmp_ux_ptr(out->ux+ii, mem->cost[ii]);
        config->cost[ii]->compute_fun(config->cost[ii], dims->cost[ii], in->cost[ii], opts->cost[ii],
                                      mem->cost[ii], work->cost[ii]);
        tmp_cost = config->cost[ii]->memory_get_fun_ptr(mem->cost[ii]);
        // printf("cost at stage %d = %e, total = %e\n", ii, *tmp_cost, total_cost);
        total_cost += *tmp_cost;
    }
    mem->cost_value = total_cost;
    // printf("\ncomputed total cost: %e\n", total_cost);

    return;
}
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 4; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution 
- Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=2*Nt-2;t1++) { lbp=ceild(t1+2,2); ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1+2,2),ceild(4*t2-Nz+9,4));t3<=min(min(floord(4*Nt+Ny-9,4),floord(2*t1+Ny-3,4)),floord(4*t2+Ny-9,4));t3++) { for (t4=max(max(ceild(t1-124,128),ceild(4*t2-Nz-243,256)),ceild(4*t3-Ny-243,256));t4<=min(min(min(floord(4*Nt+Nx-9,256),floord(2*t1+Nx-3,256)),floord(4*t2+Nx-9,256)),floord(4*t3+Nx-9,256));t4++) { for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(4*t3-Ny+5,4)),ceild(256*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) { for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=4*t3;t7<=min(4*t3+3,4*t5+Ny-5);t7++) { lbv=max(256*t4,4*t5+4); ubv=min(256*t4+255,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ 
(-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
forConstruct.c
/* Minimal OpenMP worksharing demo: every thread takes a private copy of the
 * shared value, overwrites it inside an omp-for loop, then once more after
 * the loop.  The program produces no output; it only exercises the scoping
 * of a thread-private variable around a worksharing construct. */
int main() {
  int x = 5;
  #pragma omp parallel
  {
    int threadLocal = x;  /* private copy of the shared value */
    int iter;
    #pragma omp for private(iter)
    for (iter = 0; iter < 10; iter++)
    {
      threadLocal = 10;   /* written by the iterations this thread owns */
    }
    threadLocal = 10;     /* final overwrite outside the worksharing loop */
  }
  return 0;
}
c55c7aec73df0f31d67fbe39510946453b899e1d.c
#define _POSIX_C_SOURCE 200809L #include "stdlib.h" #include "math.h" #include "sys/time.h" #include "omp.h" struct dataobj { void *restrict data; int * size; int * npsize; int * dsize; int * hsize; int * hofs; int * oofs; } ; struct profiler { double section0; double section1; double section2; } ; int Forward(struct dataobj *restrict damp_vec, const float dt, const float o_x, const float o_y, const float o_z, struct dataobj *restrict rec_vec, struct dataobj *restrict rec_coords_vec, struct dataobj *restrict src_vec, struct dataobj *restrict src_coords_vec, struct dataobj *restrict u_vec, struct dataobj *restrict vp_vec, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int p_rec_M, const int p_rec_m, const int p_src_M, const int p_src_m, const int time_M, const int time_m, struct profiler * timers) { float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data; float (*restrict rec)[rec_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_vec->size[1]]) rec_vec->data; float (*restrict rec_coords)[rec_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[rec_coords_vec->size[1]]) rec_coords_vec->data; float (*restrict src)[src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_vec->size[1]]) src_vec->data; float (*restrict src_coords)[src_coords_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[src_coords_vec->size[1]]) src_coords_vec->data; float (*restrict u)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[u_vec->size[1]][u_vec->size[2]][u_vec->size[3]]) u_vec->data; float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data; #pragma omp target enter data map(to: rec[0:rec_vec->size[0]][0:rec_vec->size[1]]) #pragma omp target enter data map(to: 
u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]]) #pragma omp target enter data map(to: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]]) #pragma omp target enter data map(to: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]]) #pragma omp target enter data map(to: src[0:src_vec->size[0]][0:src_vec->size[1]]) #pragma omp target enter data map(to: src_coords[0:src_coords_vec->size[0]][0:src_coords_vec->size[1]]) #pragma omp target enter data map(to: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]]) for (int time = time_m, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3); time <= time_M; time += 1, t0 = (time)%(3), t1 = (time + 1)%(3), t2 = (time + 2)%(3)) { struct timeval start_section0, end_section0; gettimeofday(&start_section0, NULL); /* Begin section0 */ #pragma omp target teams distribute parallel for collapse(3) for (int x = x_m; x <= x_M; x += 1) { for (int y = y_m; y <= y_M; y += 1) { for (int z = z_m; z <= z_M; z += 1) { float r0 = vp[x + 12][y + 12][z + 12]*vp[x + 12][y + 12][z + 12]; u[t1][x + 12][y + 12][z + 12] = 2.0F*(5.0e-1F*r0*(dt*dt)*(-1.50312647e-7F*(u[t0][x + 6][y + 12][z + 12] + u[t0][x + 12][y + 6][z + 12] + u[t0][x + 12][y + 12][z + 6] + u[t0][x + 12][y + 12][z + 18] + u[t0][x + 12][y + 18][z + 12] + u[t0][x + 18][y + 12][z + 12]) + 2.59740254e-6F*(u[t0][x + 7][y + 12][z + 12] + u[t0][x + 12][y + 7][z + 12] + u[t0][x + 12][y + 12][z + 7] + u[t0][x + 12][y + 12][z + 17] + u[t0][x + 12][y + 17][z + 12] + u[t0][x + 17][y + 12][z + 12]) - 2.23214281e-5F*(u[t0][x + 8][y + 12][z + 12] + u[t0][x + 12][y + 8][z + 12] + u[t0][x + 12][y + 12][z + 8] + u[t0][x + 12][y + 12][z + 16] + u[t0][x + 12][y + 16][z + 12] + u[t0][x + 16][y + 12][z + 12]) + 1.32275129e-4F*(u[t0][x + 9][y + 12][z + 12] + u[t0][x + 12][y + 9][z + 12] + u[t0][x + 12][y + 12][z + 9] + u[t0][x + 12][y + 12][z + 15] + u[t0][x + 12][y + 15][z + 12] + u[t0][x + 15][y + 12][z + 12]) - 6.69642842e-4F*(u[t0][x + 
10][y + 12][z + 12] + u[t0][x + 12][y + 10][z + 12] + u[t0][x + 12][y + 12][z + 10] + u[t0][x + 12][y + 12][z + 14] + u[t0][x + 12][y + 14][z + 12] + u[t0][x + 14][y + 12][z + 12]) + 4.28571419e-3F*(u[t0][x + 11][y + 12][z + 12] + u[t0][x + 12][y + 11][z + 12] + u[t0][x + 12][y + 12][z + 11] + u[t0][x + 12][y + 12][z + 13] + u[t0][x + 12][y + 13][z + 12] + u[t0][x + 13][y + 12][z + 12]) - 2.23708328e-2F*u[t0][x + 12][y + 12][z + 12]) + 5.0e-1F*(r0*dt*damp[x + 1][y + 1][z + 1]*u[t0][x + 12][y + 12][z + 12] - u[t2][x + 12][y + 12][z + 12]) + 1.0F*u[t0][x + 12][y + 12][z + 12])/(r0*dt*damp[x + 1][y + 1][z + 1] + 1); } } } /* End section0 */ gettimeofday(&end_section0, NULL); timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000; struct timeval start_section1, end_section1; gettimeofday(&start_section1, NULL); /* Begin section1 */ #pragma omp target teams distribute parallel for collapse(1) for (int p_src = p_src_m; p_src <= p_src_M; p_src += 1) { int ii_src_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0])); int ii_src_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1])); int ii_src_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2])); int ii_src_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*src_coords[p_src][2])) + 1; int ii_src_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*src_coords[p_src][1])) + 1; int ii_src_5 = (int)(floor(-5.0e-2*o_x + 5.0e-2*src_coords[p_src][0])) + 1; float px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*src_coords[p_src][0])) + src_coords[p_src][0]); float py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*src_coords[p_src][1])) + src_coords[p_src][1]); float pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*src_coords[p_src][2])) + src_coords[p_src][2]); if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1) { float r1 = 
(dt*dt)*(vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12]*vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1)*src[time][p_src]; #pragma omp atomic update u[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_2 + 12] += r1; } if (ii_src_0 >= x_m - 1 && ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_0 <= x_M + 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1) { float r2 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12]*vp[ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 2.5e-3F*py*pz + 5.0e-2F*pz)*src[time][p_src]; #pragma omp atomic update u[t1][ii_src_0 + 12][ii_src_1 + 12][ii_src_3 + 12] += r2; } if (ii_src_0 >= x_m - 1 && ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1) { float r3 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12]*vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py)*src[time][p_src]; #pragma omp atomic update u[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_2 + 12] += r3; } if (ii_src_0 >= x_m - 1 && ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_0 <= x_M + 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1) { float r4 = (dt*dt)*(vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12]*vp[ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*src[time][p_src]; #pragma omp atomic update u[t1][ii_src_0 + 12][ii_src_4 + 12][ii_src_3 + 12] += r4; } if (ii_src_1 >= y_m - 1 && ii_src_2 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_2 <= z_M + 1 && ii_src_5 <= x_M + 1) { float r5 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12]*vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12])*(1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px)*src[time][p_src]; #pragma omp atomic update u[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_2 + 12] += 
r5; } if (ii_src_1 >= y_m - 1 && ii_src_3 >= z_m - 1 && ii_src_5 >= x_m - 1 && ii_src_1 <= y_M + 1 && ii_src_3 <= z_M + 1 && ii_src_5 <= x_M + 1) { float r6 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12]*vp[ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*src[time][p_src]; #pragma omp atomic update u[t1][ii_src_5 + 12][ii_src_1 + 12][ii_src_3 + 12] += r6; } if (ii_src_2 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_2 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1) { float r7 = (dt*dt)*(vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12]*vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12])*(-1.25e-4F*px*py*pz + 2.5e-3F*px*py)*src[time][p_src]; #pragma omp atomic update u[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_2 + 12] += r7; } if (ii_src_3 >= z_m - 1 && ii_src_4 >= y_m - 1 && ii_src_5 >= x_m - 1 && ii_src_3 <= z_M + 1 && ii_src_4 <= y_M + 1 && ii_src_5 <= x_M + 1) { float r8 = 1.25e-4F*px*py*pz*(dt*dt)*(vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12]*vp[ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12])*src[time][p_src]; #pragma omp atomic update u[t1][ii_src_5 + 12][ii_src_4 + 12][ii_src_3 + 12] += r8; } } /* End section1 */ gettimeofday(&end_section1, NULL); timers->section1 += (double)(end_section1.tv_sec-start_section1.tv_sec)+(double)(end_section1.tv_usec-start_section1.tv_usec)/1000000; struct timeval start_section2, end_section2; gettimeofday(&start_section2, NULL); /* Begin section2 */ #pragma omp target teams distribute parallel for collapse(1) for (int p_rec = p_rec_m; p_rec <= p_rec_M; p_rec += 1) { int ii_rec_0 = (int)(floor(-5.0e-2*o_x + 5.0e-2*rec_coords[p_rec][0])); int ii_rec_1 = (int)(floor(-5.0e-2*o_y + 5.0e-2*rec_coords[p_rec][1])); int ii_rec_2 = (int)(floor(-5.0e-2*o_z + 5.0e-2*rec_coords[p_rec][2])); int ii_rec_3 = (int)(floor(-5.0e-2*o_z + 5.0e-2*rec_coords[p_rec][2])) + 1; int ii_rec_4 = (int)(floor(-5.0e-2*o_y + 5.0e-2*rec_coords[p_rec][1])) + 1; int ii_rec_5 = 
(int)(floor(-5.0e-2*o_x + 5.0e-2*rec_coords[p_rec][0])) + 1; float px = (float)(-o_x - 2.0e+1F*(int)(floor(-5.0e-2F*o_x + 5.0e-2F*rec_coords[p_rec][0])) + rec_coords[p_rec][0]); float py = (float)(-o_y - 2.0e+1F*(int)(floor(-5.0e-2F*o_y + 5.0e-2F*rec_coords[p_rec][1])) + rec_coords[p_rec][1]); float pz = (float)(-o_z - 2.0e+1F*(int)(floor(-5.0e-2F*o_z + 5.0e-2F*rec_coords[p_rec][2])) + rec_coords[p_rec][2]); float sum = 0.0F; if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1) { sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*py + 2.5e-3F*px*pz - 5.0e-2F*px + 2.5e-3F*py*pz - 5.0e-2F*py - 5.0e-2F*pz + 1)*u[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_2 + 12]; } if (ii_rec_0 >= x_m - 1 && ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1) { sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*pz - 2.5e-3F*py*pz + 5.0e-2F*pz)*u[t0][ii_rec_0 + 12][ii_rec_1 + 12][ii_rec_3 + 12]; } if (ii_rec_0 >= x_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1) { sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*py*pz + 5.0e-2F*py)*u[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_2 + 12]; } if (ii_rec_0 >= x_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_0 <= x_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1) { sum += (-1.25e-4F*px*py*pz + 2.5e-3F*py*pz)*u[t0][ii_rec_0 + 12][ii_rec_4 + 12][ii_rec_3 + 12]; } if (ii_rec_1 >= y_m - 1 && ii_rec_2 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_2 <= z_M + 1 && ii_rec_5 <= x_M + 1) { sum += (1.25e-4F*px*py*pz - 2.5e-3F*px*py - 2.5e-3F*px*pz + 5.0e-2F*px)*u[t0][ii_rec_5 + 12][ii_rec_1 + 12][ii_rec_2 + 12]; } if (ii_rec_1 >= y_m - 1 && ii_rec_3 >= z_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_1 <= y_M + 1 && ii_rec_3 <= z_M + 1 && ii_rec_5 <= x_M + 1) { sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*pz)*u[t0][ii_rec_5 + 
12][ii_rec_1 + 12][ii_rec_3 + 12]; } if (ii_rec_2 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_2 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1) { sum += (-1.25e-4F*px*py*pz + 2.5e-3F*px*py)*u[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_2 + 12]; } if (ii_rec_3 >= z_m - 1 && ii_rec_4 >= y_m - 1 && ii_rec_5 >= x_m - 1 && ii_rec_3 <= z_M + 1 && ii_rec_4 <= y_M + 1 && ii_rec_5 <= x_M + 1) { sum += 1.25e-4F*px*py*pz*u[t0][ii_rec_5 + 12][ii_rec_4 + 12][ii_rec_3 + 12]; } rec[time][p_rec] = sum; } /* End section2 */ gettimeofday(&end_section2, NULL); timers->section2 += (double)(end_section2.tv_sec-start_section2.tv_sec)+(double)(end_section2.tv_usec-start_section2.tv_usec)/1000000; } #pragma omp target update from(rec[0:rec_vec->size[0]][0:rec_vec->size[1]]) #pragma omp target exit data map(release: rec[0:rec_vec->size[0]][0:rec_vec->size[1]]) #pragma omp target update from(u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]]) #pragma omp target exit data map(release: u[0:u_vec->size[0]][0:u_vec->size[1]][0:u_vec->size[2]][0:u_vec->size[3]]) #pragma omp target exit data map(delete: damp[0:damp_vec->size[0]][0:damp_vec->size[1]][0:damp_vec->size[2]]) #pragma omp target exit data map(delete: rec_coords[0:rec_coords_vec->size[0]][0:rec_coords_vec->size[1]]) #pragma omp target exit data map(delete: src[0:src_vec->size[0]][0:src_vec->size[1]]) #pragma omp target exit data map(delete: src_coords[0:src_coords_vec->size[0]][0:src_coords_vec->size[1]]) #pragma omp target exit data map(delete: vp[0:vp_vec->size[0]][0:vp_vec->size[1]][0:vp_vec->size[2]]) return 0; } /* Backdoor edit at Wed Mar 4 19:35:59 2020*/ /* Backdoor edit at Wed Mar 4 19:37:03 2020*/ /* Backdoor edit at Wed Mar 4 19:41:50 2020*/ /* Backdoor edit at Wed Mar 4 19:44:17 2020*/
time.h
/*===---- time.h - OpenMP time header wrapper ------------------------ c ---===
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __CLANG_OPENMP_TIME_H__
#define __CLANG_OPENMP_TIME_H__

/* This wrapper only makes sense under an OpenMP compile; fail fast if it is
   included from a non-OpenMP translation unit. */
#ifndef _OPENMP
#error "This file is for OpenMP compilation only."
#endif

/* Device-side overrides are always_inline/nothrow; in C++ they can also be
   constexpr. */
#if defined(__cplusplus)
#define __DEVICE__ static constexpr __attribute__((always_inline, nothrow))
#else
#define __DEVICE__ static __attribute__((always_inline, nothrow))
#endif

/* Pull in the real system <time.h> first, then layer the device variant of
   clock() on top of it below. */
#include_next <time.h>

/* On NVPTX offload targets, clock() is redirected to a read of the PTX
   %clock special register (the device cycle counter). */
#pragma omp begin declare variant match(                                       \
    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})

__DEVICE__ clock_t clock() { return __nvvm_read_ptx_sreg_clock(); }

#pragma omp end declare variant

#endif
prepress.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR EEEEE PPPP RRRR EEEEE SSSSS SSSSS % % P P R R E P P R R E SS SS % % PPPP RRRR EEE PPPP RRRR EEE SSS SSS % % P R R E P R R E SS SS % % P R R EEEEE P R R EEEEE SSSSS SSSSS % % % % % % MagickCore Prepress Methods % % % % Software Design % % Cristy % % October 2001 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/cache-view.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/linked-list.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/prepress.h" #include "MagickCore/resource_.h" #include "MagickCore/registry.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e T o t a l I n k D e n s i t y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageTotalInkDensity() returns the total ink density for a CMYK image. % Total Ink Density (TID) is determined by adding the CMYK values in the % darkest shadow area in an image. % % The format of the GetImageTotalInkDensity method is: % % double GetImageTotalInkDensity(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */
MagickExport double GetImageTotalInkDensity(Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  double
    total_ink_density;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* Total ink density is only defined for color-separated (CMYK) images. */
  if (image->colorspace != CMYKColorspace)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ColorSeparatedImageRequired","`%s'",image->filename);
      return(0.0);
    }
  status=MagickTrue;
  total_ink_density=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double density;
    const Quantum *p;
    ssize_t x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        /* pixel fetch failed for this row: flag the failure, skip the row */
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* ink density of one pixel = C+M+Y+K channel sum */
      density=(double) GetPixelRed(image,p)+GetPixelGreen(image,p)+
        GetPixelBlue(image,p)+GetPixelBlack(image,p);
      /* Double-checked maximum: the first (unsynchronized) test is a
         fast-path filter so the critical section is only entered for
         candidate maxima; the test is repeated under the lock before the
         update.  NOTE(review): the outer read is an intentional benign race
         in ImageMagick's OpenMP style. */
      if (density > total_ink_density)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GetImageTotalInkDensity)
#endif
        {
          if (density > total_ink_density)
            total_ink_density=density;
        }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /* Any row failure invalidates the whole result. */
  if (status == MagickFalse)
    total_ink_density=0.0;
  return(total_ink_density);
}
par_vector.c
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /****************************************************************************** * * Member functions for hypre_Vector class. * *****************************************************************************/ #include "_hypre_parcsr_mv.h" #ifdef HYPRE_NO_GLOBAL_PARTITION HYPRE_Int hypre_FillResponseParToVectorAll(void*, HYPRE_Int, HYPRE_Int, void*, MPI_Comm, void**, HYPRE_Int*); #endif /*-------------------------------------------------------------------------- * hypre_ParVectorCreate *--------------------------------------------------------------------------*/ /* If create is called for HYPRE_NO_GLOBAL_PARTITION and partitioning is NOT null, then it is assumed that it is array of length 2 containing the start row of the calling processor followed by the start row of the next processor - AHB 6/05 */ hypre_ParVector * hypre_ParVectorCreate( MPI_Comm comm, HYPRE_BigInt global_size, HYPRE_BigInt *partitioning ) { hypre_ParVector *vector; HYPRE_Int num_procs, my_id; if (global_size < 0) { hypre_error_in_arg(2); return NULL; } vector = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST); hypre_MPI_Comm_rank(comm,&my_id); if (!partitioning) { hypre_MPI_Comm_size(comm,&num_procs); #ifdef HYPRE_NO_GLOBAL_PARTITION hypre_GenerateLocalPartitioning(global_size, num_procs, my_id, &partitioning); #else hypre_GeneratePartitioning(global_size, num_procs, &partitioning); #endif } hypre_ParVectorAssumedPartition(vector) = NULL; hypre_ParVectorComm(vector) = comm; hypre_ParVectorGlobalSize(vector) = global_size; #ifdef HYPRE_NO_GLOBAL_PARTITION hypre_ParVectorFirstIndex(vector) = partitioning[0]; hypre_ParVectorLastIndex(vector) = 
partitioning[1]-1; hypre_ParVectorPartitioning(vector) = partitioning; hypre_ParVectorLocalVector(vector) = hypre_SeqVectorCreate(partitioning[1] - partitioning[0]); #else hypre_ParVectorFirstIndex(vector) = partitioning[my_id]; hypre_ParVectorLastIndex(vector) = partitioning[my_id+1] - 1; hypre_ParVectorPartitioning(vector) = partitioning; hypre_ParVectorLocalVector(vector) = hypre_SeqVectorCreate(partitioning[my_id+1] - partitioning[my_id]); #endif /* set defaults */ hypre_ParVectorOwnsData(vector) = 1; hypre_ParVectorOwnsPartitioning(vector) = 1; hypre_ParVectorActualLocalSize(vector) = 0; return vector; } /*-------------------------------------------------------------------------- * hypre_ParMultiVectorCreate *--------------------------------------------------------------------------*/ hypre_ParVector * hypre_ParMultiVectorCreate( MPI_Comm comm, HYPRE_BigInt global_size, HYPRE_BigInt *partitioning, HYPRE_Int num_vectors ) { /* note that global_size is the global length of a single vector */ hypre_ParVector *vector = hypre_ParVectorCreate( comm, global_size, partitioning ); hypre_ParVectorNumVectors(vector) = num_vectors; return vector; } /*-------------------------------------------------------------------------- * hypre_ParVectorDestroy *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorDestroy( hypre_ParVector *vector ) { if (vector) { if ( hypre_ParVectorOwnsData(vector) ) { hypre_SeqVectorDestroy(hypre_ParVectorLocalVector(vector)); } if ( hypre_ParVectorOwnsPartitioning(vector) ) { hypre_TFree(hypre_ParVectorPartitioning(vector), HYPRE_MEMORY_HOST); } if (hypre_ParVectorAssumedPartition(vector)) { hypre_AssumedPartitionDestroy(hypre_ParVectorAssumedPartition(vector)); } hypre_TFree(vector, HYPRE_MEMORY_HOST); } return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorInitialize 
*--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorInitialize_v2( hypre_ParVector *vector, HYPRE_MemoryLocation memory_location ) { if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_SeqVectorInitialize_v2(hypre_ParVectorLocalVector(vector), memory_location); hypre_ParVectorActualLocalSize(vector) = hypre_VectorSize(hypre_ParVectorLocalVector(vector)); return hypre_error_flag; } HYPRE_Int hypre_ParVectorInitialize( hypre_ParVector *vector ) { return hypre_ParVectorInitialize_v2(vector, hypre_ParVectorMemoryLocation(vector)); } /*-------------------------------------------------------------------------- * hypre_ParVectorSetDataOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorSetDataOwner( hypre_ParVector *vector, HYPRE_Int owns_data ) { if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParVectorOwnsData(vector) = owns_data; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorSetPartitioningOwner *--------------------------------------------------------------------------*/ HYPRE_Int hypre_ParVectorSetPartitioningOwner( hypre_ParVector *vector, HYPRE_Int owns_partitioning ) { if (!vector) { hypre_error_in_arg(1); return hypre_error_flag; } hypre_ParVectorOwnsPartitioning(vector) = owns_partitioning; return hypre_error_flag; } /*-------------------------------------------------------------------------- * hypre_ParVectorSetNumVectors * call before calling hypre_ParVectorInitialize * probably this will do more harm than good, use hypre_ParMultiVectorCreate *--------------------------------------------------------------------------*/ #if 0 HYPRE_Int hypre_ParVectorSetNumVectors( hypre_ParVector *vector, HYPRE_Int num_vectors ) { HYPRE_Int ierr=0; hypre_Vector *local_vector = hypre_ParVectorLocalVector(v); hypre_SeqVectorSetNumVectors( local_vector, 
num_vectors ); return ierr; } #endif /*-------------------------------------------------------------------------- * hypre_ParVectorRead *--------------------------------------------------------------------------*/ hypre_ParVector *hypre_ParVectorRead( MPI_Comm comm, const char *file_name ) { char new_file_name[80]; hypre_ParVector *par_vector; HYPRE_Int my_id, num_procs; HYPRE_BigInt *partitioning; HYPRE_BigInt global_size; HYPRE_Int i; FILE *fp; hypre_MPI_Comm_rank(comm,&my_id); hypre_MPI_Comm_size(comm,&num_procs); partitioning = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST); hypre_sprintf(new_file_name,"%s.INFO.%d",file_name,my_id); fp = fopen(new_file_name, "r"); hypre_fscanf(fp, "%b\n", &global_size); #ifdef HYPRE_NO_GLOBAL_PARTITION for (i=0; i < 2; i++) hypre_fscanf(fp, "%b\n", &partitioning[i]); fclose (fp); #else for (i=0; i < num_procs; i++) hypre_fscanf(fp, "%b\n", &partitioning[i]); fclose (fp); partitioning[num_procs] = global_size; #endif par_vector = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST); hypre_ParVectorComm(par_vector) = comm; hypre_ParVectorGlobalSize(par_vector) = global_size; #ifdef HYPRE_NO_GLOBAL_PARTITION hypre_ParVectorFirstIndex(par_vector) = partitioning[0]; hypre_ParVectorLastIndex(par_vector) = partitioning[1]-1; #else hypre_ParVectorFirstIndex(par_vector) = partitioning[my_id]; hypre_ParVectorLastIndex(par_vector) = partitioning[my_id+1]-1; #endif hypre_ParVectorPartitioning(par_vector) = partitioning; hypre_ParVectorOwnsData(par_vector) = 1; hypre_ParVectorOwnsPartitioning(par_vector) = 1; hypre_sprintf(new_file_name,"%s.%d",file_name,my_id); hypre_ParVectorLocalVector(par_vector) = hypre_SeqVectorRead(new_file_name); /* multivector code not written yet */ hypre_assert( hypre_ParVectorNumVectors(par_vector) == 1 ); return par_vector; } /*-------------------------------------------------------------------------- * hypre_ParVectorPrint 
 *--------------------------------------------------------------------------*/

/* Write a ParVector to per-rank files: local data to "<file_name>.<rank>"
   and the global size + partitioning to "<file_name>.INFO.<rank>".
   Counterpart of hypre_ParVectorRead. */
HYPRE_Int
hypre_ParVectorPrint( hypre_ParVector  *vector,
                      const char       *file_name )
{
   char          new_file_name[80];
   hypre_Vector *local_vector;
   MPI_Comm      comm;
   HYPRE_Int     my_id, num_procs, i;
   HYPRE_BigInt *partitioning;
   HYPRE_BigInt  global_size;
   FILE         *fp;

   if (!vector)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }
   local_vector = hypre_ParVectorLocalVector(vector);
   comm = hypre_ParVectorComm(vector);
   partitioning = hypre_ParVectorPartitioning(vector);
   global_size = hypre_ParVectorGlobalSize(vector);

   hypre_MPI_Comm_rank(comm,&my_id);
   hypre_MPI_Comm_size(comm,&num_procs);

   hypre_sprintf(new_file_name,"%s.%d",file_name,my_id);
   hypre_SeqVectorPrint(local_vector,new_file_name);

   hypre_sprintf(new_file_name,"%s.INFO.%d",file_name,my_id);
   fp = fopen(new_file_name, "w");
   hypre_fprintf(fp, "%b\n", global_size);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* assumed partition: store only this rank's two partition entries */
   for (i=0; i < 2; i++)
      hypre_fprintf(fp, "%b\n", partitioning[i]);
#else
   /* global partition: store the first num_procs entries (the final one,
      the global size, is reconstructed by hypre_ParVectorRead) */
   for (i=0; i < num_procs; i++)
      hypre_fprintf(fp, "%b\n", partitioning[i]);
#endif

   fclose (fp);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorSetConstantValues
 *--------------------------------------------------------------------------*/

/* Set every local entry of v to the given value (no communication needed). */
HYPRE_Int
hypre_ParVectorSetConstantValues( hypre_ParVector *v,
                                  HYPRE_Complex    value )
{
   hypre_Vector *v_local = hypre_ParVectorLocalVector(v);

   return hypre_SeqVectorSetConstantValues(v_local,value);
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorSetRandomValues
 *--------------------------------------------------------------------------*/

/* Fill v with random values; the seed is perturbed by (rank+1) so each
   process generates a different local sequence. */
HYPRE_Int
hypre_ParVectorSetRandomValues( hypre_ParVector *v,
                                HYPRE_Int        seed )
{
   HYPRE_Int     my_id;
   hypre_Vector *v_local = hypre_ParVectorLocalVector(v);

   MPI_Comm     comm = hypre_ParVectorComm(v);
   hypre_MPI_Comm_rank(comm,&my_id);

   seed *= (my_id+1);

   return hypre_SeqVectorSetRandomValues(v_local, seed);
}
/*--------------------------------------------------------------------------
 * hypre_ParVectorCopy
 *--------------------------------------------------------------------------*/

/* Copy the local data of x into y (local sizes are assumed to match). */
HYPRE_Int
hypre_ParVectorCopy( hypre_ParVector *x,
                     hypre_ParVector *y )
{
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   return hypre_SeqVectorCopy(x_local, y_local);
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorCloneShallow
 * returns a complete copy of a hypre_ParVector x - a shallow copy, re-using
 * the partitioning and data arrays of x
 *--------------------------------------------------------------------------*/

hypre_ParVector *
hypre_ParVectorCloneShallow( hypre_ParVector *x )
{
   hypre_ParVector * y =
      hypre_ParVectorCreate(hypre_ParVectorComm(x), hypre_ParVectorGlobalSize(x),
                            hypre_ParVectorPartitioning(x));

   hypre_ParVectorOwnsData(y) = 1;
   /* ...This vector owns its local vector, although the local vector doesn't
    * own _its_ data */
   hypre_ParVectorOwnsPartitioning(y) = 0;
   /* replace the local vector created above with a shallow clone of x's */
   hypre_SeqVectorDestroy( hypre_ParVectorLocalVector(y) );
   hypre_ParVectorLocalVector(y) = hypre_SeqVectorCloneShallow(
      hypre_ParVectorLocalVector(x) );
   hypre_ParVectorFirstIndex(y) = hypre_ParVectorFirstIndex(x);

   return y;
}

/* Deep clone of x, with the new local data allocated in memory_location. */
hypre_ParVector *
hypre_ParVectorCloneDeep_v2( hypre_ParVector *x, HYPRE_MemoryLocation memory_location )
{
   hypre_ParVector *y =
      hypre_ParVectorCreate(hypre_ParVectorComm(x), hypre_ParVectorGlobalSize(x),
                            hypre_ParVectorPartitioning(x));

   hypre_ParVectorOwnsData(y) = 1;
   hypre_ParVectorOwnsPartitioning(y) = 0;
   /* replace the local vector created above with a deep clone of x's */
   hypre_SeqVectorDestroy( hypre_ParVectorLocalVector(y) );
   hypre_ParVectorLocalVector(y) =
      hypre_SeqVectorCloneDeep_v2( hypre_ParVectorLocalVector(x), memory_location );
   hypre_ParVectorFirstIndex(y) = hypre_ParVectorFirstIndex(x); //RL: WHY HERE?

   return y;
}

/* Move the local data of x to memory_location; a deep copy is made only when
   the actual (physical) memory spaces differ, otherwise just retag. */
HYPRE_Int
hypre_ParVectorMigrate(hypre_ParVector *x, HYPRE_MemoryLocation memory_location)
{
   if (!x) { return hypre_error_flag; }

   if ( hypre_GetActualMemLocation(memory_location) !=
        hypre_GetActualMemLocation(hypre_ParVectorMemoryLocation(x)) )
   {
      hypre_Vector *x_local = hypre_SeqVectorCloneDeep_v2(hypre_ParVectorLocalVector(x), memory_location);
      hypre_SeqVectorDestroy(hypre_ParVectorLocalVector(x));
      hypre_ParVectorLocalVector(x) = x_local;
   }
   else
   {
      /* same physical space: only the location tag needs to change */
      hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(x)) = memory_location;
   }

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorScale
 *--------------------------------------------------------------------------*/

/* y := alpha * y (purely local operation). */
HYPRE_Int
hypre_ParVectorScale( HYPRE_Complex    alpha,
                      hypre_ParVector *y )
{
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   return hypre_SeqVectorScale( alpha, y_local);
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorAxpy
 *--------------------------------------------------------------------------*/

/* y := alpha * x + y (purely local operation). */
HYPRE_Int
hypre_ParVectorAxpy( HYPRE_Complex    alpha,
                     hypre_ParVector *x,
                     hypre_ParVector *y )
{
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   return hypre_SeqVectorAxpy( alpha, x_local, y_local);
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorMassAxpy
 *--------------------------------------------------------------------------*/

/* y += sum_i alpha[i] * x[i] over k vectors in one pass (local operation). */
HYPRE_Int
hypre_ParVectorMassAxpy( HYPRE_Complex    *alpha,
                         hypre_ParVector **x,
                         hypre_ParVector  *y,
                         HYPRE_Int         k,
                         HYPRE_Int         unroll )
{
   HYPRE_Int i;
   hypre_Vector **x_local;
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   x_local = hypre_TAlloc(hypre_Vector *, k, HYPRE_MEMORY_HOST);

   for (i=0; i < k; i++)
   {
      x_local[i] = hypre_ParVectorLocalVector(x[i]);
   }

   hypre_SeqVectorMassAxpy( alpha, x_local, y_local, k, unroll);

   hypre_TFree(x_local, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorInnerProd
 *--------------------------------------------------------------------------*/

/* Global inner product <x,y>: local dot product followed by an all-reduce. */
HYPRE_Real
hypre_ParVectorInnerProd( hypre_ParVector *x,
                          hypre_ParVector *y )
{
   MPI_Comm      comm    = hypre_ParVectorComm(x);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);

   HYPRE_Real result = 0.0;
   HYPRE_Real local_result = hypre_SeqVectorInnerProd(x_local, y_local);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] -= hypre_MPI_Wtime();
#endif
   hypre_MPI_Allreduce(&local_result, &result, 1, HYPRE_MPI_REAL,
                       hypre_MPI_SUM, comm);
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] += hypre_MPI_Wtime();
#endif

   return result;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorMassInnerProd
 *--------------------------------------------------------------------------*/

/* result[i] = <x, y[i]> for k vectors, using a single k-wide all-reduce. */
HYPRE_Int
hypre_ParVectorMassInnerProd( hypre_ParVector  *x,
                              hypre_ParVector **y,
                              HYPRE_Int         k,
                              HYPRE_Int         unroll,
                              HYPRE_Real       *result )
{
   MPI_Comm      comm    = hypre_ParVectorComm(x);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   HYPRE_Real *local_result;
   HYPRE_Int i;
   hypre_Vector **y_local;
   y_local = hypre_TAlloc(hypre_Vector *, k, HYPRE_MEMORY_HOST);

   for (i=0; i < k; i++)
   {
      y_local[i] = (hypre_Vector *) hypre_ParVectorLocalVector(y[i]);
   }

   local_result = hypre_CTAlloc(HYPRE_Real, k, HYPRE_MEMORY_HOST);

   hypre_SeqVectorMassInnerProd(x_local, y_local, k, unroll, local_result);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] -= hypre_MPI_Wtime();
#endif
   hypre_MPI_Allreduce(local_result, result, k, HYPRE_MPI_REAL,
                       hypre_MPI_SUM, comm);
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] += hypre_MPI_Wtime();
#endif

   hypre_TFree(y_local, HYPRE_MEMORY_HOST);
   hypre_TFree(local_result, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorMassDotpTwo
 *--------------------------------------------------------------------------*/

/* result_x[i] = <x, z[i]> and result_y[i] = <y, z[i]> for k vectors,
   combined into one 2k-wide all-reduce. */
HYPRE_Int
hypre_ParVectorMassDotpTwo ( hypre_ParVector  *x,
                             hypre_ParVector  *y,
                             hypre_ParVector **z,
                             HYPRE_Int         k,
                             HYPRE_Int         unroll,
                             HYPRE_Real       *result_x,
                             HYPRE_Real       *result_y )
{
   MPI_Comm      comm    = hypre_ParVectorComm(x);
   hypre_Vector *x_local = hypre_ParVectorLocalVector(x);
   hypre_Vector *y_local = hypre_ParVectorLocalVector(y);
   HYPRE_Real *local_result, *result;
   HYPRE_Int i;
   hypre_Vector **z_local;
   z_local = hypre_TAlloc(hypre_Vector*, k, HYPRE_MEMORY_HOST);

   for (i=0; i < k; i++)
   {
      z_local[i] = (hypre_Vector *) hypre_ParVectorLocalVector(z[i]);
   }

   /* x-dots go to local_result[0..k-1], y-dots to local_result[k..2k-1] */
   local_result = hypre_CTAlloc(HYPRE_Real, 2*k, HYPRE_MEMORY_HOST);
   result = hypre_CTAlloc(HYPRE_Real, 2*k, HYPRE_MEMORY_HOST);

   hypre_SeqVectorMassDotpTwo(x_local, y_local, z_local, k, unroll,
                              &local_result[0], &local_result[k]);

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] -= hypre_MPI_Wtime();
#endif
   hypre_MPI_Allreduce(local_result, result, 2*k, HYPRE_MPI_REAL,
                       hypre_MPI_SUM, comm);
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_ALL_REDUCE] += hypre_MPI_Wtime();
#endif

   for (i=0; i < k; i++)
   {
      result_x[i] = result[i];
      result_y[i] = result[k+i];
   }

   hypre_TFree(z_local, HYPRE_MEMORY_HOST);
   hypre_TFree(local_result, HYPRE_MEMORY_HOST);
   hypre_TFree(result, HYPRE_MEMORY_HOST);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_VectorToParVector:
 * generates a ParVector from a Vector on proc 0 and distributes the pieces
 * to the other procs in comm
 *
 * the length of vec_starts depends on HYPRE_NO_GLOBAL_PARTITION
 *--------------------------------------------------------------------------*/

hypre_ParVector *
hypre_VectorToParVector ( MPI_Comm      comm,
                          hypre_Vector *v,
                          HYPRE_BigInt *vec_starts )
{
   HYPRE_BigInt        global_size;
   HYPRE_BigInt       *global_vec_starts = NULL;
   HYPRE_BigInt        first_index;
   HYPRE_BigInt        last_index;
   HYPRE_Int           local_size;
   HYPRE_Int           num_vectors;
   HYPRE_Int           num_procs, my_id;
   HYPRE_Int           global_vecstride, vecstride, idxstride;
   hypre_ParVector    *par_vector;
   hypre_Vector       *local_vector;
   HYPRE_Complex      *v_data;
   HYPRE_Complex      *local_data;
   hypre_MPI_Request  *requests;
   hypre_MPI_Status   *status, status0;
   HYPRE_Int           i, j, k, p;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

   /* only rank 0 holds the source vector; its metadata is broadcast below */
   if (my_id == 0)
   {
      global_size = (HYPRE_BigInt)hypre_VectorSize(v);
      v_data = hypre_VectorData(v);
      num_vectors = hypre_VectorNumVectors(v); /* for multivectors */
      global_vecstride = hypre_VectorVectorStride(v);
   }

   hypre_MPI_Bcast(&global_size,1,HYPRE_MPI_INT,0,comm);
   hypre_MPI_Bcast(&num_vectors,1,HYPRE_MPI_INT,0,comm);
   hypre_MPI_Bcast(&global_vecstride,1,HYPRE_MPI_INT,0,comm);

   if ( num_vectors == 1 )
      par_vector = hypre_ParVectorCreate(comm, global_size, vec_starts);
   else
      par_vector = hypre_ParMultiVectorCreate(comm, global_size, vec_starts, num_vectors);

   vec_starts  = hypre_ParVectorPartitioning(par_vector);
   first_index = hypre_ParVectorFirstIndex(par_vector);
   last_index  = hypre_ParVectorLastIndex(par_vector);
   local_size  = (HYPRE_Int)(last_index - first_index) + 1;

#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* assumed partition: rank 0 gathers all first indices so it can send
      each rank its slice */
   if (my_id == 0)
   {
      global_vec_starts = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);
   }
   hypre_MPI_Gather(&first_index, 1, HYPRE_MPI_BIG_INT, global_vec_starts,
                    1, HYPRE_MPI_BIG_INT, 0, comm);
   if (my_id == 0)
   {
      global_vec_starts[num_procs] = hypre_ParVectorGlobalSize(par_vector);
   }
#else
   global_vec_starts = vec_starts;
#endif

   hypre_ParVectorInitialize(par_vector);
   local_vector = hypre_ParVectorLocalVector(par_vector);
   local_data = hypre_VectorData(local_vector);
   vecstride = hypre_VectorVectorStride(local_vector);
   idxstride = hypre_VectorIndexStride(local_vector);
   /* so far the only implemented multivector StorageMethod is 0 */
   hypre_assert( idxstride==1 );

   if (my_id == 0)
   {
      /* rank 0 posts one Isend per (rank, vector) pair, copies its own
         slice directly, then waits for all sends to complete */
      requests = hypre_CTAlloc(hypre_MPI_Request, num_vectors*(num_procs-1), HYPRE_MEMORY_HOST);
      status = hypre_CTAlloc(hypre_MPI_Status, num_vectors*(num_procs-1), HYPRE_MEMORY_HOST);
      k = 0;
      for (p = 1; p<num_procs; p++)
         for (j = 0; j<num_vectors; ++j)
         {
            hypre_MPI_Isend( &v_data[(HYPRE_Int) global_vec_starts[p]] + j*global_vecstride,
                             (HYPRE_Int)(global_vec_starts[p+1] - global_vec_starts[p]),
                             HYPRE_MPI_COMPLEX, p, 0, comm, &requests[k++] );
         }
      if (num_vectors == 1)
      {
         for (i = 0; i < local_size; i++)
            local_data[i] = v_data[i];
      }
      else
      {
         for (j = 0; j<num_vectors; ++j)
         {
            for (i = 0; i < local_size; i++)
               local_data[i+j*vecstride] = v_data[i+j*global_vecstride];
         }
      }
      hypre_MPI_Waitall(num_procs-1,requests, status);
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
   }
   else
   {
      /* non-root ranks receive their slice of each vector from rank 0 */
      for ( j=0; j<num_vectors; ++j )
         hypre_MPI_Recv( local_data+j*vecstride, local_size, HYPRE_MPI_COMPLEX,
                         0, 0, comm,&status0 );
   }

#ifdef HYPRE_NO_GLOBAL_PARTITION
   if (global_vec_starts)
   {
      hypre_TFree(global_vec_starts, HYPRE_MEMORY_HOST);
   }
#endif

   return par_vector;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorToVectorAll:
 * generates a Vector on every proc which has a piece of the data
 * from a ParVector on several procs in comm,
 * vec_starts needs to contain the partitioning across all procs in comm
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_ParVectorToVectorAll( hypre_ParVector *par_v )
{
   MPI_Comm             comm = hypre_ParVectorComm(par_v);
   HYPRE_BigInt         global_size = hypre_ParVectorGlobalSize(par_v);
#ifndef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_BigInt        *vec_starts = hypre_ParVectorPartitioning(par_v);
#endif
   hypre_Vector        *local_vector = hypre_ParVectorLocalVector(par_v);
   HYPRE_Int            num_procs, my_id;
   HYPRE_Int            num_vectors = hypre_ParVectorNumVectors(par_v);
   hypre_Vector        *vector;
   HYPRE_Complex       *vector_data;
   HYPRE_Complex       *local_data;
   HYPRE_Int            local_size;
   hypre_MPI_Request   *requests;
   hypre_MPI_Status    *status;
   HYPRE_Int            i, j;
   HYPRE_Int           *used_procs;
   HYPRE_Int            num_types, num_requests;
   HYPRE_Int            vec_len, proc_id;

#ifdef HYPRE_NO_GLOBAL_PARTITION
   HYPRE_Int *new_vec_starts;

   HYPRE_Int num_contacts;
   HYPRE_Int contact_proc_list[1];
   HYPRE_Int contact_send_buf[1];
   HYPRE_Int contact_send_buf_starts[2];
   HYPRE_Int max_response_size;
   HYPRE_Int *response_recv_buf=NULL;
   HYPRE_Int *response_recv_buf_starts = NULL;
   hypre_DataExchangeResponse response_obj;
   hypre_ProcListElements send_proc_obj;

   HYPRE_Int *send_info = NULL;
   hypre_MPI_Status  status1;
   HYPRE_Int count, tag1 = 112, tag2 = 223;
   HYPRE_Int start;
#endif

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &my_id);

#ifdef HYPRE_NO_GLOBAL_PARTITION

   local_size = (HYPRE_Int)(hypre_ParVectorLastIndex(par_v) -
                            hypre_ParVectorFirstIndex(par_v) + 1);

   /* determine procs which hold data of par_v and store ids in used_procs */
   /* we need to do an exchange data for this.  If I own row then I will contact
      processor 0 with the endpoint of my local range */

   if (local_size > 0)
   {
      num_contacts = 1;
      contact_proc_list[0] = 0;
      contact_send_buf[0] =  hypre_ParVectorLastIndex(par_v);
      contact_send_buf_starts[0] = 0;
      contact_send_buf_starts[1] = 1;
   }
   else
   {
      num_contacts = 0;
      contact_send_buf_starts[0] = 0;
      contact_send_buf_starts[1] = 0;
   }

   /*build the response object*/
   /*send_proc_obj will be for saving info from contacts */
   send_proc_obj.length = 0;
   send_proc_obj.storage_length = 10;
   send_proc_obj.id = hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length, HYPRE_MEMORY_HOST);
   send_proc_obj.vec_starts =
      hypre_CTAlloc(HYPRE_Int, send_proc_obj.storage_length + 1, HYPRE_MEMORY_HOST);
   send_proc_obj.vec_starts[0] = 0;
   send_proc_obj.element_storage_length = 10;
   send_proc_obj.elements =
      hypre_CTAlloc(HYPRE_BigInt, send_proc_obj.element_storage_length, HYPRE_MEMORY_HOST);

   max_response_size = 0; /* each response is null */
   response_obj.fill_response = hypre_FillResponseParToVectorAll;
   response_obj.data1 = NULL;
   response_obj.data2 = &send_proc_obj; /*this is where we keep info from contacts*/

   hypre_DataExchangeList(num_contacts,
                          contact_proc_list, contact_send_buf,
                          contact_send_buf_starts, sizeof(HYPRE_Int),
                          //0, &response_obj,
                          sizeof(HYPRE_Int), &response_obj,
                          max_response_size, 1,
                          comm, (void**) &response_recv_buf,
                          &response_recv_buf_starts);

   /* now processor 0 should have a list of ranges for processors that have rows -
      these are in send_proc_obj - it needs to create the new list of processors
      and also an array of vec starts - and send to those who own row*/
   if (my_id)
   {
      if (local_size)
      {
         /* look for a message from processor 0 */
         hypre_MPI_Probe(0, tag1, comm, &status1);
         hypre_MPI_Get_count(&status1, HYPRE_MPI_INT, &count);

         send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
         hypre_MPI_Recv(send_info, count, HYPRE_MPI_INT, 0, tag1, comm, &status1);

         /* now unpack */
         num_types = send_info[0];
         used_procs =  hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
         new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST);

         for (i=1; i<= num_types; i++)
         {
            used_procs[i-1] = (HYPRE_Int)send_info[i];
         }
         for (i=num_types+1; i< count; i++)
         {
            new_vec_starts[i-num_types-1] = send_info[i] ;
         }
      }
      else /* clean up and exit */
      {
         hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
         hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
         hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST);
         if(response_recv_buf)        hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST);
         if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST);
         return NULL;
      }
   }
   else /* my_id ==0 */
   {
      num_types = send_proc_obj.length;
      used_procs =  hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
      new_vec_starts = hypre_CTAlloc(HYPRE_Int, num_types+1, HYPRE_MEMORY_HOST);

      new_vec_starts[0] = 0;
      for (i=0; i< num_types; i++)
      {
         used_procs[i] = send_proc_obj.id[i];
         new_vec_starts[i+1] = send_proc_obj.elements[i]+1;
      }
      hypre_qsort0(used_procs, 0, num_types-1);
      hypre_qsort0(new_vec_starts, 0, num_types);
      /*now we need to put into an array to send */
      count =  2*num_types+2;
      send_info = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
      send_info[0] = num_types;
      for (i=1; i<= num_types; i++)
      {
         send_info[i] = (HYPRE_Int)used_procs[i-1];
      }
      for (i=num_types+1; i< count; i++)
      {
         send_info[i] = new_vec_starts[i-num_types-1];
      }
      requests = hypre_CTAlloc(hypre_MPI_Request, num_types, HYPRE_MEMORY_HOST);
      status =  hypre_CTAlloc(hypre_MPI_Status, num_types, HYPRE_MEMORY_HOST);

      /* don't send to myself  - these are sorted so my id would be first*/
      start = 0;
      if (used_procs[0] == 0)
      {
         start = 1;
      }

      for (i=start; i < num_types; i++)
      {
         hypre_MPI_Isend(send_info, count, HYPRE_MPI_INT, used_procs[i], tag1,
                         comm, &requests[i-start]);
      }
      hypre_MPI_Waitall(num_types-start, requests, status);

      hypre_TFree(status, HYPRE_MEMORY_HOST);
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
   }

   /* clean up */
   hypre_TFree(send_proc_obj.vec_starts, HYPRE_MEMORY_HOST);
   hypre_TFree(send_proc_obj.id, HYPRE_MEMORY_HOST);
   hypre_TFree(send_proc_obj.elements, HYPRE_MEMORY_HOST);
   hypre_TFree(send_info, HYPRE_MEMORY_HOST);
   if(response_recv_buf)        hypre_TFree(response_recv_buf, HYPRE_MEMORY_HOST);
   if(response_recv_buf_starts) hypre_TFree(response_recv_buf_starts, HYPRE_MEMORY_HOST);

   /* now proc 0 can exit if it has no rows */
   if (!local_size)
   {
      hypre_TFree(used_procs, HYPRE_MEMORY_HOST);
      hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST);
      return NULL;
   }

   /* everyone left has rows and knows: new_vec_starts, num_types, and
      used_procs */

   /* this vector should be rather small */

   local_data = hypre_VectorData(local_vector);
   vector = hypre_SeqVectorCreate((HYPRE_Int)global_size);
   hypre_VectorNumVectors(vector) = num_vectors;
   hypre_SeqVectorInitialize(vector);
   vector_data = hypre_VectorData(vector);

   num_requests = 2*num_types;

   requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
   status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);

   /* initialize data exchange among used_procs and generate vector  - here we
      send to ourself also*/

   j = 0;
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      vec_len = (HYPRE_Int)(new_vec_starts[i+1] - new_vec_starts[i]);
      hypre_MPI_Irecv(&vector_data[(HYPRE_Int)new_vec_starts[i]], num_vectors*vec_len,
                      HYPRE_MPI_COMPLEX, proc_id, tag2, comm, &requests[j++]);
   }
   for (i = 0; i < num_types; i++)
   {
      hypre_MPI_Isend(local_data, num_vectors*local_size, HYPRE_MPI_COMPLEX,
                      used_procs[i], tag2, comm, &requests[j++]);
   }

   hypre_MPI_Waitall(num_requests, requests, status);

   if (num_requests)
   {
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
      hypre_TFree(used_procs, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(new_vec_starts, HYPRE_MEMORY_HOST);

#else

   local_size = (HYPRE_Int)(vec_starts[my_id+1] - vec_starts[my_id]);

   /* if my_id contains no data, return NULL */
   if (!local_size)
      return NULL;

   local_data = hypre_VectorData(local_vector);
   vector = hypre_SeqVectorCreate(global_size);
   hypre_VectorNumVectors(vector) = num_vectors;
   hypre_SeqVectorInitialize(vector);
   vector_data = hypre_VectorData(vector);

   /* determine procs which hold data of par_v and store ids in used_procs */
   num_types = -1;
   for (i=0; i < num_procs; i++)
      if (vec_starts[i+1]-vec_starts[i])
         num_types++;
   num_requests = 2*num_types;

   used_procs = hypre_CTAlloc(HYPRE_Int, num_types, HYPRE_MEMORY_HOST);
   j = 0;
   for (i=0; i < num_procs; i++)
      if (vec_starts[i+1]-vec_starts[i] && i-my_id)
         used_procs[j++] = i;

   requests = hypre_CTAlloc(hypre_MPI_Request, num_requests, HYPRE_MEMORY_HOST);
   status = hypre_CTAlloc(hypre_MPI_Status, num_requests, HYPRE_MEMORY_HOST);

   /* initialize data exchange among used_procs and generate vector */

   j = 0;
   for (i = 0; i < num_types; i++)
   {
      proc_id = used_procs[i];
      vec_len = (HYPRE_Int)(vec_starts[proc_id+1] - vec_starts[proc_id]);
      hypre_MPI_Irecv(&vector_data[vec_starts[proc_id]], num_vectors*vec_len,
                      HYPRE_MPI_COMPLEX, proc_id, 0, comm, &requests[j++]);
   }
   for (i = 0; i < num_types; i++)
   {
      hypre_MPI_Isend(local_data, num_vectors*local_size, HYPRE_MPI_COMPLEX,
                      used_procs[i], 0, comm, &requests[j++]);
   }

   /* copy this rank's own slice directly (no self message in this branch) */
   for (i=0; i < num_vectors*local_size; i++)
      vector_data[vec_starts[my_id]+i] = local_data[i];

   hypre_MPI_Waitall(num_requests, requests, status);

   if (num_requests)
   {
      hypre_TFree(used_procs, HYPRE_MEMORY_HOST);
      hypre_TFree(requests, HYPRE_MEMORY_HOST);
      hypre_TFree(status, HYPRE_MEMORY_HOST);
   }

#endif

   return vector;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorPrintIJ
 *--------------------------------------------------------------------------*/

/* Write a ParVector in IJ (index, value) text format, one file per rank
   ("<filename>.<rank>"), with indices shifted by base_j. */
HYPRE_Int
hypre_ParVectorPrintIJ( hypre_ParVector *vector,
                        HYPRE_Int        base_j,
                        const char      *filename )
{
   MPI_Comm          comm;
   HYPRE_BigInt      global_size, j;
   HYPRE_BigInt     *partitioning;
   HYPRE_Complex    *local_data;
   HYPRE_Int         myid, num_procs, i, part0;
   char              new_filename[255];
   FILE             *file;

   if (!vector)
   {
      hypre_error_in_arg(1);
      return hypre_error_flag;
   }

   comm         = hypre_ParVectorComm(vector);
   global_size  = hypre_ParVectorGlobalSize(vector);
   partitioning = hypre_ParVectorPartitioning(vector);

   /* multivector code not written yet */
   hypre_assert( hypre_ParVectorNumVectors(vector) == 1 );
   if ( hypre_ParVectorNumVectors(vector) != 1 ) hypre_error_in_arg(1);

   hypre_MPI_Comm_rank(comm, &myid);
   hypre_MPI_Comm_size(comm, &num_procs);

   hypre_sprintf(new_filename,"%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "w")) == NULL)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n");
      return hypre_error_flag;
   }

   local_data = hypre_VectorData(hypre_ParVectorLocalVector(vector));

   hypre_fprintf(file, "%b \n", global_size);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   for (i=0; i < 2; i++)
   {
      hypre_fprintf(file, "%b ", partitioning[i] + base_j);
   }
#else
   for (i=0; i <= num_procs; i++)
   {
      hypre_fprintf(file, "%b ", partitioning[i] + base_j);
   }
#endif
   hypre_fprintf(file, "\n");

#ifdef HYPRE_NO_GLOBAL_PARTITION
   part0 = partitioning[0];
   for (j = part0; j < partitioning[1]; j++)
   {
      hypre_fprintf(file, "%b %.14e\n", j + base_j, local_data[(HYPRE_Int)(j-part0)]);
   }
#else
   part0 = partitioning[myid];
   for (j = part0; j < partitioning[myid+1]; j++)
   {
      hypre_fprintf(file, "%b %.14e\n", j + base_j, local_data[(HYPRE_Int)(j-part0)]);
   }
#endif

   fclose(file);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_ParVectorReadIJ
 * Warning: wrong base for assumed partition if base > 0
 *--------------------------------------------------------------------------*/

/* Read a ParVector written by hypre_ParVectorPrintIJ ("<filename>.<rank>");
   returns the detected index base via base_j_ptr and the vector via
   vector_ptr. */
HYPRE_Int
hypre_ParVectorReadIJ( MPI_Comm          comm,
                       const char       *filename,
                       HYPRE_Int        *base_j_ptr,
                       hypre_ParVector **vector_ptr )
{
   HYPRE_BigInt      global_size, J;
   hypre_ParVector  *vector;
   hypre_Vector     *local_vector;
   HYPRE_Complex    *local_data;
   HYPRE_BigInt     *partitioning;
   HYPRE_Int         base_j;

   HYPRE_Int         myid, num_procs, i, j;
   char              new_filename[255];
   FILE             *file;

   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm, &myid);

   hypre_sprintf(new_filename,"%s.%05d", filename, myid);

   if ((file = fopen(new_filename, "r")) == NULL)
   {
      /* NOTE(review): message says "output file" although this opens the
         input file for reading */
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Error: can't open output file %s\n");
      return hypre_error_flag;
   }

   hypre_fscanf(file, "%b", &global_size);
#ifdef HYPRE_NO_GLOBAL_PARTITION
   /* this may need to be changed so that the base is available in the file! */
   partitioning = hypre_CTAlloc(HYPRE_BigInt, 2, HYPRE_MEMORY_HOST);

   hypre_fscanf(file, "%b", partitioning);
   for (i = 0; i < 2; i++)
   {
      hypre_fscanf(file, "%b", partitioning+i);
   }
   /* This is not yet implemented correctly! */
   base_j = 0;
#else
   partitioning = hypre_CTAlloc(HYPRE_BigInt, num_procs+1, HYPRE_MEMORY_HOST);

   hypre_fscanf(file, "%b", partitioning);
   for (i = 1; i <= num_procs; i++)
   {
      hypre_fscanf(file, "%b", partitioning+i);
      partitioning[i] -= partitioning[0];
   }
   base_j = (HYPRE_Int)partitioning[0];
   partitioning[0] = 0;
#endif
   vector = hypre_ParVectorCreate(comm, global_size, partitioning);

   hypre_ParVectorInitialize(vector);

   local_vector = hypre_ParVectorLocalVector(vector);
   local_data   = hypre_VectorData(local_vector);

#ifdef HYPRE_NO_GLOBAL_PARTITION
   for (j = 0; j < (HYPRE_Int)(partitioning[1] - partitioning[0]); j++)
   {
      hypre_fscanf(file, "%b %le", &J, local_data + j);
   }
#else
   for (j = 0; j < (HYPRE_Int)(partitioning[myid+1] - partitioning[myid]); j++)
   {
      hypre_fscanf(file, "%b %le", &J, local_data + j);
   }
#endif

   fclose(file);

   *base_j_ptr = base_j;
   *vector_ptr = vector;

   /* multivector code not written yet */
   hypre_assert( hypre_ParVectorNumVectors(vector) == 1 );
   if ( hypre_ParVectorNumVectors(vector) != 1 ) hypre_error(HYPRE_ERROR_GENERIC);

   return hypre_error_flag;
}

/*--------------------------------------------------------------------
 * hypre_FillResponseParToVectorAll
 * Fill response function for determining the send processors
 * data exchange
 *--------------------------------------------------------------------*/

HYPRE_Int
hypre_FillResponseParToVectorAll( void       *p_recv_contact_buf,
                                  HYPRE_Int   contact_size,
                                  HYPRE_Int   contact_proc,
                                  void       *ro,
                                  MPI_Comm    comm,
                                  void      **p_send_response_buf,
                                  HYPRE_Int  *response_message_size )
{
   HYPRE_Int     myid;
   HYPRE_Int     i, index, count, elength;

   HYPRE_BigInt *recv_contact_buf = (HYPRE_BigInt * ) p_recv_contact_buf;

   hypre_DataExchangeResponse  *response_obj = (hypre_DataExchangeResponse*)ro;

   hypre_ProcListElements      *send_proc_obj = (hypre_ProcListElements*)response_obj->data2;

   hypre_MPI_Comm_rank(comm, &myid );

   /*check to see if we need to allocate more space in send_proc_obj for ids*/
   if (send_proc_obj->length == send_proc_obj->storage_length)
   {
      send_proc_obj->storage_length +=10; /*add space for 10 more processors*/
      send_proc_obj->id = hypre_TReAlloc(send_proc_obj->id, HYPRE_Int,
                                         send_proc_obj->storage_length, HYPRE_MEMORY_HOST);
      send_proc_obj->vec_starts =
         hypre_TReAlloc(send_proc_obj->vec_starts, HYPRE_Int,
                        send_proc_obj->storage_length + 1, HYPRE_MEMORY_HOST);
   }

   /*initialize*/
   count = send_proc_obj->length;
   index = send_proc_obj->vec_starts[count]; /*this is the number of elements*/

   /*send proc*/
   send_proc_obj->id[count] = contact_proc;

   /*do we need more storage for the elements?*/
   if (send_proc_obj->element_storage_length < index + contact_size)
   {
      elength = hypre_max(contact_size, 10);
      elength += index;
      send_proc_obj->elements =
         hypre_TReAlloc(send_proc_obj->elements, HYPRE_BigInt, elength, HYPRE_MEMORY_HOST);
      send_proc_obj->element_storage_length = elength;
   }
   /*populate send_proc_obj*/
   for (i=0; i< contact_size; i++)
   {
      send_proc_obj->elements[index++] = recv_contact_buf[i];
   }
   send_proc_obj->vec_starts[count+1] = index;
   send_proc_obj->length++;

   /*output - no message to return (confirmation) */
   *response_message_size = 0;

   return hypre_error_flag;
}

/* -----------------------------------------------------------------------------
 * return the sum of all local elements of the vector
 * ----------------------------------------------------------------------------- */

HYPRE_Complex hypre_ParVectorLocalSumElts( hypre_ParVector * vector )
{
   return hypre_SeqVectorSumElts( hypre_ParVectorLocalVector(vector) );
}

/*
#ifdef HYPRE_USING_UNIFIED_MEMORY
hypre_int hypre_ParVectorIsManaged(hypre_ParVector *vector){
  if (vector==NULL) return 1;
  return hypre_SeqVectorIsManaged(hypre_ParVectorLocalVector(vector));
}
#endif
*/

/* Gather values of the local piece into 'values'. When 'indices' is given,
   each index must lie in this rank's [first_index, last_index] range; when
   it is NULL, the first num_values local entries are copied. */
HYPRE_Int
hypre_ParVectorGetValues(hypre_ParVector *vector,
                         HYPRE_Int        num_values,
                         HYPRE_BigInt    *indices,
                         HYPRE_Complex   *values)
{
   HYPRE_Int      i, j;
   HYPRE_BigInt   first_index, last_index, index;
   hypre_Vector  *local_vector;
   HYPRE_Complex *data;

   first_index  = hypre_ParVectorFirstIndex(vector);
   last_index   = hypre_ParVectorLastIndex(vector);
   local_vector = hypre_ParVectorLocalVector(vector);
   data         = hypre_VectorData(local_vector);

   if (hypre_VectorOwnsData(local_vector) == 0)
   {
      hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Vector does not own data! -- hypre_ParVectorGetValues.");
      return hypre_error_flag;
   }

   if (indices)
   {
      /* validate every index before copying anything */
      for (i=0; i < num_values; i++)
      {
         index = indices[i];
         if (index < first_index || index > last_index)
         {
            hypre_error_w_msg(HYPRE_ERROR_GENERIC,"Index out of range! -- hypre_ParVectorGetValues.");
            return hypre_error_flag;
         }
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_values; j++)
      {
         i = (HYPRE_Int)(indices[j] - first_index);
         values[j] = data[i];
      }
   }
   else
   {
      if (num_values > hypre_VectorSize(local_vector))
      {
         hypre_error_in_arg(2);
         return hypre_error_flag;
      }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < num_values; j++)
         values[j] = data[j];
   }

   return hypre_error_flag;
}
/* GB_unaryop__lnot_bool_int32.c */
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// (Review note: the comments below were added for readability only; they will
// be lost on the next regeneration.)

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_bool_int32
// op(A') function:  GB_tran__lnot_bool_int32

// C type:   bool
// A type:   int32_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = !aij

// type of the A array entries
#define GB_ATYPE \
    int32_t

// type of the C array entries
#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

// the p-th entry of the output array
#define GB_CX(p) Cx [p]

// unary operator (logical not)
#define GB_OP(z, x) \
    z = !x ;

// casting from the A type to the C type
#define GB_CASTING(z, aij) \
    bool z = (bool) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = ! ((bool) Ax [p]) to all anz entries, parallelized with a
// static OpenMP schedule over nthreads threads. Returns GrB_NO_VALUE when the
// operator is compiled out via GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB_unop__lnot_bool_int32
(
    bool *Cx,           // Cx and Ax may be aliased
    int32_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The body is generated by textually including GB_unaryop_transpose.c, which
// expands the operator/type macros defined above.
GrB_Info GB_tran__lnot_bool_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
OpenMPWrapper.h
#ifndef PICMDK_OPENMPWRAPPER_H
#define PICMDK_OPENMPWRAPPER_H

/* This file should be included everywhere instead of <omp.h>.
   If the code is built with OpenMP it just includes <omp.h>, otherwise it
   provides single-thread stub wrappers for the OpenMP functions used here.
   The code including this file can therefore use OpenMP functions and
   pragmas independently of whether it will be built with OpenMP or not. */

#ifdef _OPENMP

#include <omp.h>

namespace picmdk {
namespace utility {

// OpenMP is enabled in this build.
inline bool useOpenMP() { return true; }

// Returns the number of threads a parallel region would use.
// omp_get_num_threads() reports 1 when called outside a parallel region,
// so the value is captured from inside one; only the master thread writes
// it, so there is a single writer and no race.
inline int getNumThreads()
{
    int numThreads = 0;
    #pragma omp parallel
    {
        #pragma omp master
        numThreads = omp_get_num_threads();
    }
    return numThreads;
}

} // namespace picmdk::utility
} // namespace picmdk

#else

// Serial build: single-thread stand-ins for the OpenMP runtime API.
// Thread queries report exactly one thread and all lock operations are
// no-ops (no contention is possible with a single thread).
typedef void* omp_lock_t;

inline int omp_get_max_threads() { return 1; }
inline int omp_get_thread_num() { return 0; }
inline int omp_get_num_threads() { return 1; }
inline void omp_init_lock(omp_lock_t *) {}
inline void omp_set_lock(omp_lock_t *) {}
inline void omp_unset_lock(omp_lock_t *) {}
inline void omp_destroy_lock(omp_lock_t *) {}

// NOTE(review): the bare pragma below has no effect — compilers ignore
// unknown/empty pragmas — presumably a leftover; confirm intent.
#pragma omp

namespace picmdk {
namespace utility {

// OpenMP is disabled in this build.
inline bool useOpenMP() { return false; }
inline int getNumThreads() { return 1; }

} // namespace picmdk::utility
} // namespace picmdk

#endif

#ifdef _MSC_VER
// Presumably erases 'collapse(N)' clauses because MSVC only implements
// OpenMP 2.0, which does not support collapse. NOTE(review): this macro
// also rewrites any unrelated identifier named 'collapse' — confirm no
// such identifier exists in translation units including this header.
#define collapse(N)
#endif

#endif
GB_unaryop__identity_int8_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// (Review note: the comments below were added for readability only; they will
// be lost on the next regeneration.)

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_int8_uint32
// op(A') function:  GB_tran__identity_int8_uint32

// C type:   int8_t
// A type:   uint32_t
// cast:     int8_t cij = (int8_t) aij
// unaryop:  cij = aij

// type of the A array entries
#define GB_ATYPE \
    uint32_t

// type of the C array entries
#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

// the p-th entry of the output array
#define GB_CX(p) Cx [p]

// unary operator (identity)
#define GB_OP(z, x) \
    z = x ;

// casting from the A type to the C type
#define GB_CASTING(z, x) \
    int8_t z = (int8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies Cx [p] = (int8_t) Ax [p] to all anz entries, parallelized with a
// static OpenMP schedule over nthreads threads. Returns GrB_NO_VALUE when the
// operator is compiled out via GB_DISABLE, GrB_SUCCESS otherwise.
GrB_Info GB_unop__identity_int8_uint32
(
    int8_t *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The body is generated by textually including GB_unaryop_transpose.c, which
// expands the operator/type macros defined above.
GrB_Info GB_tran__identity_int8_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 16; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,2);t1++) { lbp=max(ceild(t1,2),ceild(4*t1-Nt+3,4)); ubp=min(floord(Nt+Nz-4,4),floord(2*t1+Nz-1,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-7,8)),ceild(4*t2-Nz-12,16));t3<=min(min(min(floord(4*t2+Ny,16),floord(Nt+Ny-4,16)),floord(2*t1+Ny+1,16)),floord(4*t1-4*t2+Nz+Ny-1,16));t3++) { for (t4=max(max(max(0,ceild(t1-63,64)),ceild(4*t2-Nz-124,128)),ceild(16*t3-Ny-124,128));t4<=min(min(min(min(floord(4*t2+Nx,128),floord(Nt+Nx-4,128)),floord(2*t1+Nx+1,128)),floord(16*t3+Nx+12,128)),floord(4*t1-4*t2+Nz+Nx-1,128));t4++) { for (t5=max(max(max(max(max(0,2*t1),4*t1-4*t2+1),4*t2-Nz+2),16*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,2*t1+3),4*t2+2),16*t3+14),128*t4+126),4*t1-4*t2+Nz+1);t5++) { for (t6=max(max(4*t2,t5+1),-4*t1+4*t2+2*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* End of 
CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
maiorelemento.c
// compilar: make
// executar: make run tam=100
//
// original por: Profº. Dr. Paulo Sérgio Lopes de Souza
//
// modificado por:
// Gustavo T. Mastrobuono NUSP 10734411, Henrique de S. Q. dos Santos NUSP 10819029,
// Jhordan P. V. Pesantes NUSP 11733353, Witor M. A. de Oliveira NUSP 10692190 e Yorvin A. R. Carrion NUSP 11733332

#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#define T 4 // number of threads used

/*
 * Parallel search for the largest element of a vector of size argv[1].
 * The vector is filled with 1s, except for position tam/2 which holds tam
 * (the known maximum). T threads each compute a local maximum over their
 * slice and merge it into the global maximum under a lock.
 *
 * BUG FIX: 'maior_local' was listed in private(...), which leaves it
 * UNINITIALIZED inside each parallel region (OpenMP private variables do
 * not inherit the outer value), so the first comparison read garbage.
 * firstprivate(...) copies the initial 0 into every thread's copy.
 * Also added: input/malloc validation and omp_destroy_lock().
 */
int main(int argc, char **argv)
{
    double wtime;                 // elapsed wall-clock time of the search
    int *vetor, i, maior_global = 0, maior_local = 0, tam;

    if (argc != 2)
    {
        printf("Wrong arguments. Please use binary <amount_of_elements>\n");
        exit(0);
    }

    tam = atoi(argv[1]);
    printf("Amount of vetor=%d\n", tam);
    fflush(0);

    // robustness: a non-positive size would make malloc(0) and the
    // vetor[tam/2] write below invalid
    if (tam < 1)
    {
        fprintf(stderr, "Invalid vector size: %d\n", tam);
        exit(1);
    }

    vetor = (int *)malloc(tam * sizeof(int)); // allocate the vector
    if (vetor == NULL)
    {
        fprintf(stderr, "Out of memory allocating %d elements\n", tam);
        exit(1);
    }

    int iteracoes = tam / T;      // per-thread slice length when tam % T == 0
    int resto = tam % T;          // remainder: selects which strategy is used

    omp_lock_t lock;              // protects the maior_global merge
    omp_init_lock(&lock);

    vetor[tam / 2] = tam;         // plant the known maximum in the middle

    if (resto == 0)
    {
        // Evenly divisible case: each thread owns a contiguous slice.
        wtime = omp_get_wtime();  // start timing

        #pragma omp parallel num_threads(T) private(i) firstprivate(maior_local) shared(maior_global)
        {
            const int tid = omp_get_thread_num();   // hoisted loop invariant
            const int ini = tid * iteracoes;        // slice start
            const int fim = ini + iteracoes;        // slice end (exclusive)

            // Fill phase: every slot except the planted maximum gets 1.
            for (i = ini; i < fim; i++)
            {
                if (i != (tam / 2))
                    vetor[i] = 1;
            }

            // All threads must finish filling before anyone searches.
            #pragma omp barrier

            // Search phase: local maximum over this thread's slice.
            for (i = ini; i < fim; i++)
            {
                if (vetor[i] > maior_local)
                    maior_local = vetor[i];
            }

            // Critical section: merge the local maximum into the global one.
            omp_set_lock(&lock);
            if (maior_local > maior_global)
                maior_global = maior_local;
            omp_unset_lock(&lock);
        }

        wtime = omp_get_wtime() - wtime; // stop timing
    }
    else
    {
        // Non-divisible case: thread 0 handles the lower half, the other
        // threads handle the upper half. NOTE: as in the original code,
        // all threads != 0 repeat the same upper-half work — redundant
        // but harmless for correctness.
        wtime = omp_get_wtime();

        #pragma omp parallel num_threads(T) private(i) firstprivate(maior_local) shared(maior_global)
        {
            const int tid = omp_get_thread_num();

            // Fill phase (position tam/2 keeps the planted maximum).
            if (tid == 0)
            {
                for (i = 0; i < (tam / 2); i++)
                    vetor[i] = 1;
            }
            else
            {
                for (i = (tam / 2) + 1; i < tam; i++)
                    vetor[i] = 1;
            }

            // Wait for the whole vector to be initialized.
            #pragma omp barrier

            // Search phase over the same halves.
            if (tid == 0)
            {
                for (i = 0; i < (tam / 2); i++)
                {
                    if (vetor[i] > maior_local)
                        maior_local = vetor[i];
                }
            }
            else
            {
                for (i = (tam / 2); i < tam; i++)
                {
                    if (vetor[i] > maior_local)
                        maior_local = vetor[i];
                }
            }

            // Critical section: merge into the global maximum.
            omp_set_lock(&lock);
            if (maior_local > maior_global)
                maior_global = maior_local;
            omp_unset_lock(&lock);
        }

        wtime = omp_get_wtime() - wtime;
    }

    printf("PAR REDUCTION: Tam=%d, maior=%d, Elapsed wall clock time = %f \n", tam, maior_global, wtime);

    omp_destroy_lock(&lock); // fix: the lock was never destroyed
    free(vetor);             // release the vector
    return 0;
}
reduction-clause-Modificado.c
/* $ gcc -fopenmp -O2 reduction-clause.c -o reduction-clause */

#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif

/*
 * Sums a[0..n-1] (where a[i] = i) into 'suma', which starts at 10, using an
 * OpenMP '+' reduction. n is read from argv[1] and clamped to the array
 * capacity of 20.
 *
 * Fixes: 'main' had an implicit-int return type (invalid since C99) and no
 * return statement; 'a[n]' was an accidental VLA whose size was frozen at
 * the initial n=20 anyway, so it is now a plain fixed-size array.
 */
int main(int argc, char **argv)
{
    int i, n = 20, suma = 10;
    int a[20]; /* fixed capacity; n is clamped to 20 below */

    if (argc < 2)
    {
        fprintf(stderr, "Falta iteraciones\n");
        exit(-1);
    }

    n = atoi(argv[1]);
    if (n > 20)
    {
        n = 20;
        printf("n=%d", n);
    }
    /* a negative n simply makes both loops no-ops, matching the original */

    for (i = 0; i < n; i++)
        a[i] = i;

    /* each thread accumulates into a private copy of 'suma' (initialized
       to the '+' identity, 0); the partial sums are then combined with the
       original value, which already holds 10 */
    #pragma omp parallel for reduction(+:suma)
    for (i = 0; i < n; i++)
        suma += a[i];

    printf("Tras 'parallel' suma=%d\n", suma);
    return 0;
}
sip_fmt_plug.c
/* SIP cracker patch for JtR. Hacked together during March of 2012 by * Dhiru Kholia <dhiru.kholia at gmail.com> . * * Copyright (C) 2007 Martin J. Muench <mjm@codito.de> * SIP digest authentication password (hash) cracker * See doc/SIPcrack-LICENSE */ #if FMT_EXTERNS_H extern struct fmt_main fmt_sip; #elif FMT_REGISTERS_H john_register_one(&fmt_sip); #else #include "md5.h" #include <string.h> #include <stdlib.h> #include <stdio.h> #include <assert.h> #include <errno.h> #include "arch.h" #include "crc32.h" #include "misc.h" #include "common.h" #include "formats.h" #include "params.h" #include "options.h" #include "sip_fmt_plug.h" #ifdef _OPENMP static int omp_t = 1; #include <omp.h> // Tuned on core i7 quad HT // 1 4963K // 16 8486K // 32 8730K ** this was chosen. // 64 8791k // 128 8908k #ifndef OMP_SCALE #define OMP_SCALE 32 #endif #endif #include "memdbg.h" typedef struct sip_salt_t { int static_hash_data_len; MD5_CTX ctx_dyna_data; char static_hash_data[STATIC_HASH_SIZE+1]; } sip_salt; static sip_salt *pSalt; #define FORMAT_LABEL "SIP" #define FORMAT_NAME "" #define FORMAT_TAG "$sip$*" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH 0 #define PLAINTEXT_LENGTH 32 #define BINARY_SIZE 16 #define SALT_SIZE sizeof(sip_salt) #define BINARY_ALIGN 4 #define SALT_ALIGN sizeof(int) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 64 static struct fmt_tests sip_tests[] = { {"$sip$*192.168.1.111*192.168.1.104*200*asterisk*REGISTER*sip*192.168.1.104**46cce857****MD5*4dfc7515936a667565228dbaa0293dfc", "123456"}, {"$sip$*10.0.1.20*10.0.1.10*1001*asterisk*REGISTER*sips*10.0.1.20*5061*0ef95b07****MD5*576e39e9de6a9ed053eb218f65fe470e", "q1XCLF0KaBObo797"}, // generated with pass_gen.pl {"$sip$*192.168.163.238*192.168.163.239*50894*asterisk*REGISTER*sip*192.168.163.239**303535c9****MD5*e32c95d6ad0fecbc3967b7534d7b5b3b", "123456"}, 
{"$sip$*192.168.196.105*192.168.196.192*81670*asterisk*REGISTER*sip*192.168.196.192**747f072a****MD5*d15c84b1bdc2155db12b721d7fb9445b", "password"}, {"$sip$*192.168.119.6*192.168.119.154*65790*asterisk*REGISTER*sip*192.168.119.154**8d4e1a4b****MD5*dcc0d8a4c105dbf3ecf5b281f4c57356", "happy123"}, {"$sip$*192.168.113.63*192.168.113.78*59810*asterisk*REGISTER*sip*192.168.113.78**b778256e****MD5*cb13933a5986df471265231d08206509", "aobdataeteag"}, {NULL} }; static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static uint32_t (*crypt_key)[BINARY_SIZE/sizeof(uint32_t)]; static char bin2hex_table[256][2]; /* table for bin<->hex mapping */ static void init(struct fmt_main *self) { #ifdef _OPENMP omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt *= omp_t; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt *= omp_t; #endif /* Init bin 2 hex table for faster conversions later */ init_bin2hex(bin2hex_table); saved_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*saved_key)); crypt_key = mem_calloc(self->params.max_keys_per_crypt, sizeof(*crypt_key)); } static void done(void) { MEM_FREE(crypt_key); MEM_FREE(saved_key); } static int valid(char *ciphertext, struct fmt_main *self) { char *p = ciphertext, *q; int i,res = 0; if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)) return 0; if (strlen(ciphertext) > 2048) // sizeof(saltBuf) in get_salt return 0; for (i = 0; i < strlen(ciphertext); i++) if (ciphertext[i] == '*') res++; if (res != 14) goto err; res = 0; p += FORMAT_TAG_LEN; if ((q = strchr(p, '*')) == NULL) goto err; if ((q - p) > HOST_MAXLEN) /* host */ goto err; p = q + 1; if ((q = strchr(p, '*')) == NULL) goto err; if ((q - p) > HOST_MAXLEN) /* host */ goto err; p = q + 1; if ((q = strchr(p, '*')) == NULL) goto err; if ((q - p) > USER_MAXLEN) /* user */ goto err; p = q + 1; if ((q = strchr(p, '*')) == NULL) goto err; if ((q - p) > HOST_MAXLEN) /* realm */ goto err; p = q + 1; if ((q = strchr(p, '*')) == NULL) goto err; if ((q - p) > METHOD_MAXLEN) /* 
method */ goto err; p = q + 1; /* uri stuff */ if ((q = strchr(p, '*')) == NULL) goto err; res += q - p; p = q + 1; if ((q = strchr(p, '*')) == NULL) goto err; res += q - p; p = q + 1; if ((q = strchr(p, '*')) == NULL) goto err; res += q - p; if (res > URI_MAXLEN) /* uri */ goto err; p = q + 1; if ((q = strchr(p, '*')) == NULL) goto err; if ((q - p) > NONCE_MAXLEN) /* nonce */ goto err; p = q + 1; if ((q = strchr(p, '*')) == NULL) goto err; if ((q - p) > NONCE_MAXLEN) /* cnonce */ goto err; p = q + 1; if ((q = strchr(p, '*')) == NULL) goto err; if ((q - p) > CNONCE_MAXLEN) /* nonce_count */ goto err; p = q + 1; if ((q = strchr(p, '*')) == NULL) goto err; if ((q - p) > QOP_MAXLEN) /* qop */ goto err; if ((q = strchr(p, '*')) == NULL) goto err; if ((q - p) > ALG_MAXLEN) /* algorithm */ goto err; p = q + 1; if ((q = strchr(p, '*')) == NULL) goto err; if (strncmp("MD5*", p, 4)) goto err; p = q + 1; if (strspn(p, HEXCHARS_lc) != MD5_LEN_HEX) /* hash */ goto err; return 1; err: return 0; } static void *get_salt(char *ciphertext) { static sip_salt salt; char saltBuf[2048]; char *lines[16]; login_t login; int num_lines; MD5_CTX md5_ctx; unsigned char md5_bin_hash[MD5_LEN]; char static_hash[MD5_LEN_HEX+1]; char *saltcopy = saltBuf; memset(&salt, 0, sizeof(salt)); strcpy(saltBuf, ciphertext); saltcopy += FORMAT_TAG_LEN; /* skip over "$sip$*" */ memset(&login, 0, sizeof(login_t)); num_lines = stringtoarray(lines, saltcopy, '*'); assert(num_lines == 14); strncpy(login.server, lines[0], sizeof(login.server) - 1 ); strncpy(login.client, lines[1], sizeof(login.client) - 1 ); strncpy(login.user, lines[2], sizeof(login.user) - 1 ); strncpy(login.realm, lines[3], sizeof(login.realm) - 1 ); strncpy(login.method, lines[4], sizeof(login.method) - 1 ); /* special handling for uri */ if (!strcmp(lines[7], "")) sprintf(login.uri, "%s:%s", lines[5], lines[6]); else sprintf(login.uri, "%s:%s:%s", lines[5], lines[6], lines[7]); strncpy(login.nonce, lines[8], sizeof(login.nonce) - 1 ); 
strncpy(login.cnonce, lines[9], sizeof(login.cnonce) - 1 ); strncpy(login.nonce_count, lines[10], sizeof(login.nonce_count) - 1 ); strncpy(login.qop, lines[11], sizeof(login.qop) - 1 ); strncpy(login.algorithm, lines[12], sizeof(login.algorithm) - 1 ); strncpy(login.hash, lines[13], sizeof(login.hash) - 1 ); if (strncmp(login.algorithm, "MD5", strlen(login.algorithm))) { printf("\n* Cannot crack '%s' hash, only MD5 supported so far...\n", login.algorithm); error(); } /* Generating MD5 static hash: 'METHOD:URI' */ MD5_Init(&md5_ctx); MD5_Update(&md5_ctx, (unsigned char*)login.method, strlen( login.method )); MD5_Update(&md5_ctx, (unsigned char*)":", 1); MD5_Update(&md5_ctx, (unsigned char*)login.uri, strlen( login.uri )); MD5_Final(md5_bin_hash, &md5_ctx); bin_to_hex(bin2hex_table, md5_bin_hash, MD5_LEN, static_hash, MD5_LEN_HEX); /* Constructing first part of dynamic hash: 'USER:REALM:' */ MD5_Init(&md5_ctx); MD5_Update(&md5_ctx, login.user, strlen(login.user)); MD5_Update(&md5_ctx, ":", 1); MD5_Update(&md5_ctx, login.realm, strlen(login.realm)); MD5_Update(&md5_ctx, ":", 1); memcpy(&(salt.ctx_dyna_data), &md5_ctx, sizeof(md5_ctx)); // we now construct the MD5_CTX with this data loaded. Thus we no longer store this buffer. 
//snprintf(salt.dynamic_hash_data, DYNAMIC_HASH_SIZE, "%s:%s:", login.user, login.realm); //salt.dynamic_hash_data_len = strlen(salt.dynamic_hash_data); /* Construct last part of final hash data: ':NONCE(:CNONCE:NONCE_COUNT:QOP):<static_hash>' */ /* no qop */ if (!strlen(login.qop)) snprintf(salt.static_hash_data, STATIC_HASH_SIZE, ":%s:%s", login.nonce, static_hash); /* qop/conce/cnonce_count */ else snprintf(salt.static_hash_data, STATIC_HASH_SIZE, ":%s:%s:%s:%s:%s", login.nonce, login.nonce_count, login.cnonce, login.qop, static_hash); /* Get lens of static buffers */ salt.static_hash_data_len = strlen(salt.static_hash_data); /* Begin brute force attack */ #ifdef SIP_DEBUG printf("Starting bruteforce against user '%s' (%s: '%s')\n", login.user, login.algorithm, login.hash); #endif return &salt; } static void set_salt(void *salt) { pSalt = (sip_salt*)salt; } static void * get_binary(char *ciphertext) { static char *bin_val; char *p; int i; if (!bin_val) bin_val = (char*)mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD); p = strrchr(ciphertext, '*') + 1; for (i = 0; i < BINARY_SIZE; ++i) { bin_val[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return (void *)bin_val; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { /* password */ MD5_CTX md5_ctx; unsigned char md5_bin_hash[MD5_LEN]; char dynamic_hash[MD5_LEN_HEX+1]; /* Generate dynamic hash including pw (see above) */ //MD5_Init(&md5_ctx); //MD5_Update(&md5_ctx, (unsigned char*)pSalt->dynamic_hash_data, pSalt->dynamic_hash_data_len); // salt.ctx_dyna_data contains the ctx already loaded. 
memcpy(&md5_ctx, &(pSalt->ctx_dyna_data), sizeof(md5_ctx)); MD5_Update(&md5_ctx, (unsigned char*)saved_key[index], strlen(saved_key[index])); MD5_Final(md5_bin_hash, &md5_ctx); bin_to_hex(bin2hex_table, md5_bin_hash, MD5_LEN, dynamic_hash, MD5_LEN_HEX); /* Generate digest response hash */ MD5_Init(&md5_ctx); MD5_Update(&md5_ctx, (unsigned char*)dynamic_hash, MD5_LEN_HEX); MD5_Update(&md5_ctx, (unsigned char*)pSalt->static_hash_data, pSalt->static_hash_data_len); MD5_Final((unsigned char*)crypt_key[index], &md5_ctx); } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) if ( ((uint32_t*)binary)[0] == ((uint32_t*)&(crypt_key[index][0]))[0] ) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_key[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } static void sip_set_key(char *key, int index) { strnzcpy(saved_key[index], key, sizeof(*saved_key)); } static char *get_key(int index) { return saved_key[index]; } struct fmt_main fmt_sip = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT, { NULL }, { FORMAT_TAG }, sip_tests }, { init, done, fmt_default_reset, fmt_default_prepare, valid, fmt_default_split, get_binary, get_salt, { NULL }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, sip_set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
floyd_warshall.c
// Implementation based on
// http://www.cs.virginia.edu/~pact2006/program/pact2006/pact139_han4.pdf
// mixed with BLIS macro kernel.
// New features:
// - Only lower triangle of blocks is considered.
// - Next diagonal block is scheduled as soon as possible, since it is the slowest (real F-W algorithm)
// - Last diagonal block is generally smaller to handle arbitrary size matrices.
#include <omp.h>
#include <stdbool.h>
#include <stdio.h>

// tiles
#define NL1 128
// subtiles
//#define NL2 32

// BLIS macro kernel uses large tiles
#define MC NL1
#define NC NL1
#define KC NL1

// generalized inner product
// new reduction operation
#define min(x,y) (((x)<(y)) ? (x) : (y))
#define asmmin minpd
// zero element of the semiring
// e.g. infinity for min and plus/times
// 0 for plus and times
#define MAX 1000
// unit element for reduction
#define UNIT 1.0
// new pointwise operation
// NOTE(review): as configured, add() is multiplication, i.e. the (min, *)
// semiring rather than the classic (min, +) shortest-path semiring — confirm
// this is intentional for the intended application.
#define add(x,y) ((x)*(y))
#define asmadd mulpd
// redefine these to get other generalized inner products
// for assembly, provide SSE instructions
// avx versions (prefixed with v) are automatically created in AVX kernel

// right now only AVX kernel
#include "kernel_BLIS_avx.c"

// Packed scratch tiles (NL1 x NL1, row-major), 32-byte aligned for AVX loads.
// diagonal block (shared)
static double _diag[NL1*NL1] __attribute__ ((aligned (32)));
// next diagonal block
static double _diag2[NL1*NL1] __attribute__ ((aligned (32)));
// horizontal blocks, also used by macro kernel
static double _A[NL1*NL1] __attribute__ ((aligned (32)));
// vertical blocks, also used by macro kernel
static double _B[NL1*NL1] __attribute__ ((aligned (32)));
// temporary matrix for macro kernel
static double _C[MR*NR] __attribute__ ((aligned (32)));
// Packing buffers are per-thread; _diag/_diag2 remain shared by all tasks.
#pragma omp threadprivate(_A, _B, _C)

// BLIS macro kernel
#include "macro.c"

/* debugging helper: print a rows x cols block with leading dimension n
static void show(const double *M, int rows, int cols, int n) {
  int i, j;
  for (i=0; i<rows;i++) {
    for (j=0; j<cols;j++)
      printf("%.2f ", M[i*n+j]);
    printf("\n");
  }
  printf("\n");
}
*/

//
// versions of Floyd-Warshall on up to three matrices
//

// In-place generalized Floyd-Warshall on one packed n x n diagonal block:
// diag[i][j] = min(diag[i][j], add(diag[i][k], diag[k][j])) over all k.
// The k-loop is outermost, so updates within the block are fully sequential
// in k, as the F-W recurrence requires.
static void diagonal(double *diag, int n) {
  // Update a diagonal block
  // diag += diag*diag
  // FIXME update lower triangle, then copy results
  int i, j, k;
  for (k=0;k<n;k++)
    for (i=0;i<n;i++) {
      double d = diag[i*n+k];
      for (j=0;j<n;j++) {
        double s = add(d, diag[k*n+j]);
        diag[i*n+j] = min(diag[i*n+j], s);
      }
    }
}

/* sketch of a register-blocked in-place kernel (unused)
static void fwCc(double *C, int n) {
  // n = 32 ??
  // 8 registers for C[k,:] - read only
  // vbroadcastsd C[i*n+k] - 9th
  // two at a time:
  // - load C[i, j] (13, 14)
  // - add (11, 12)
  // - min (13, 14)
  // - store
  int i, j, k;
  for (k=0;k<n;k++)
    for (i=0;i<n;i++) {
      double c = C[i*n+k];
      for (j=0;j<n;j++)
        C[i*n+j] = min(C[i*n+j], add(c, C[k*n+j]));
    }
}
*/

// Update a block lying in the same block-row as the current diagonal block:
// horz = min(horz, diag (*) horz).  diag is packed with leading dimension
// `rows` (it may be the small trailing block); horz has leading dimension n.
static void horizontal(const double *diag, double *horz, int rows, int n) {
  // Update a block from the same row as the current diagonal block
  // horz += diag*horz
  // The horizontal block may have small number of rows, so the diag is also smaller
  int i, j, k;
  for (k=0;k<rows;k++)
    for (i=0;i<rows;i++) {
      double d = diag[i*rows+k];
      for (j=0;j<n;j++) {
        double s = add(d, horz[k*n+j]);
        horz[i*n+j] = min(horz[i*n+j], s);
      }
    }
}

// Update a block lying in the same block-column as the current diagonal block:
// vert = min(vert, vert (*) diag).  vert may have fewer than NL1 rows; diag is
// a full n x n packed tile here (leading dimension n).
static void vertical(const double *diag, double *vert, int rows, int n) {
  // Update a block from the same column as the current diagonal block
  // vert += vert*diag
  // The vertical block may have small number of rows
  int i, j, k;
  for (k=0;k<n;k++)
    for (i=0;i<rows;i++) {
      double v = vert[i*n+k];
      for (j=0;j<n;j++) {
        double s = add(v, diag[k*n+j]);
        vert[i*n+j] = min(vert[i*n+j], s);
      }
    }
}

/*
// C <- min(C, A+B) - tropical matrix multiplication
// B packed by columns
void fwABC(const double *A, const double *B, double *C, int n, int full_n) {
  for (int i=0;i<n;i++)
    for (int j=0;j<n;j++) {
      double c = C[i*full_n+j];
      for (int k=0;k<n;k++)
        if (c > A[i*n+k]+B[j*n+k])
          c = A[i*n+k]+B[j*n+k];
      C[i*full_n+j] = c;
    }
}
*/

// Copy a rows x cols block out of the big matrix (leading dimension n)
// into a contiguous buffer _M (leading dimension cols).
// move a block into contiguous memory
static void pack(double* _M, const double* M, int rows, int cols, int n) {
  int i, j;
  for (i=0;i<rows;i++)
    for (j=0;j<cols;j++)
      _M[i*cols+j] = M[i*n+j];
}

/*
// move a block into contiguous memory column first
void pack_col(double* _M,const double* M, int n) {
  for (int j=0;j<NL1;j++)
    for (int i=0;i<NL1;i++)
      _M[j*NL1+i] = M[i*n+j];
}
*/

// Inverse of pack(): copy a contiguous rows x cols buffer back into the big
// matrix at the same layout.
// move a block back to its place
static void unpack(const double* _M, double* M, int rows, int cols, int n) {
  int i, j;
  for (i=0;i<rows;i++)
    for (j=0;j<cols;j++)
      M[i*n+j] = _M[i*cols+j];
}

// Blocked, task-parallel generalized Floyd-Warshall on the n x n row-major
// matrix d, in place.
// Assumes d is symmetric: only lower-triangle blocks are computed and the
// result is mirrored into the upper triangle at the end.
// Tiling: m = ceil(n/NL1) blocks per dimension; the last block may have
// `small` (= n % NL1) rows/columns.  Each k-iteration updates the block
// row/column of pivot block k, and the (k+1,k+1) diagonal is computed eagerly
// (it is on the critical path).  Task ordering is expressed via
// depend() clauses on the address of each block's first element, which acts
// as a per-block sentinel.
void fw(double *d, const int n) {
  const int m = (n+NL1-1)/NL1; // number of blocks
  // The last diagonal block might be smaller
  // It has this many rows:
  const int small = n % NL1;
  // If small > 0 we may be dealing with the last, smaller block
#ifdef NUMCORE
  int numcore = NUMCORE;
#else
  int numcore = omp_get_max_threads();
#endif
  // first diagonal block
  int cols, rows, reduction;
  // It might be small if the whole matrix is small (n < NL1 => small == n)
  rows = n < NL1 ? small : NL1;
  pack(_diag, d, rows, rows, n);
  diagonal(_diag, rows);
  unpack(_diag, d, rows, rows, n);
  if (n < NL1) return;
  int i, j, k;
  for (k=0;k<m;k++) {
    // diagonal block already in _diag
#pragma omp parallel default(none) shared(_diag, _diag2, k, d) private(i, j, rows, cols, reduction) num_threads(numcore)
#pragma omp single
    {
      // (k, k+1), (k+1, k) and (k+1, k+1) - new diagonal as soon as possible
      if (k+1<m) {
        // FIXME
        // k+1 -> always vertical
        // diagonal based on vertical and transpose of vertical
        // these two could be run by one thread ensuring diagonal is done quickly
        //
        // We might be working on the last, small row
        rows = ((k+1 < m-1) || (small == 0)) ? NL1 : small;
#pragma omp task depend(inout:d[((k+1)*n+k)*NL1])
        {
          // vertical in _A, diagonal already in _diag
          pack(_A, &d[((k+1)*n+k)*NL1], rows, NL1, n);
          vertical(_diag, _A, rows, NL1);
          unpack(_A, &d[((k+1)*n+k)*NL1], rows, NL1, n);
        }
        // now update (k+1, k+1) and run the next diagonal element
#pragma omp task depend(in:d[((k+1)*n+k)*NL1])
        {
          pack_A(rows, NL1, &d[((k+1)*n+k)*NL1], n, 1, _A);
          pack_B(NL1, rows, &d[((k+1)*n+k)*NL1], 1, n, _B); // transposed A
          dgemm_macro_kernel(rows, rows, NL1, &d[((k+1)*n+k+1)*NL1], 1, n);
          // We can run the next diagonal element
          // Can't be placed in _diag yet (other tasks still read _diag)
          pack(_diag2, &d[((k+1)*n+k+1)*NL1], rows, rows, n);
          diagonal(_diag2, rows);
          unpack(_diag2, &d[((k+1)*n+k+1)*NL1], rows, rows, n);
        }
      }
      // j < k -> horizontal
      // If k==m-1, we might be updating the last smaller row
      rows = ((k < m-1) || (small == 0)) ? NL1 : small;
      for (j=0;j<k;j++) {
#pragma omp task depend(inout:d[(k*n+j)*NL1])
        {
          // horizontal in _B, diagonal already in _diag
          pack(_B, &d[(k*n+j)*NL1], rows, NL1, n);
          horizontal(_diag, _B, rows, NL1);
          unpack(_B, &d[(k*n+j)*NL1], rows, NL1, n);
        }
      }
      // j > k -> vertical (k+1 already done)
      for (j=k+2;j<m;j++) {
        // The last vertical might be small.
        rows = ((j < m-1) || (small == 0)) ? NL1 : small;
#pragma omp task depend(inout:d[(j*n+k)*NL1])
        {
          // vertical in _A, diagonal already in _diag
          pack(_A, &d[(j*n+k)*NL1], rows, NL1, n);
          vertical(_diag, _A, rows, NL1);
          unpack(_A, &d[(j*n+k)*NL1], rows, NL1, n);
        }
      }
      // Remaining blocks of the lower triangle: C(i,j) updated from the
      // pivot row/column via the BLIS macro kernel.  Symmetry lets blocks
      // above the diagonal be read from their lower-triangle mirror.
      for (i=0;i<m;i++) {
        if (i==k) continue;
        // only lower triangle with diagonal (j<=i)
        for (j=0;j<=i;j++) {
          if (j==k) continue;
          if ((j==k+1) && (i==k+1)) continue;
          int indexA = j<k ? (k*n+j)*NL1 : (j*n+k)*NL1;
          int indexB = i<k ? (k*n+i)*NL1 : (i*n+k)*NL1;
          // The last row might be small
          rows = ((i < m-1) || (small == 0)) ? NL1 : small;
          // The last column might be small
          cols = ((j < m-1) || (small == 0)) ? NL1 : small;
          // Large tile can be updated from the last small row
          reduction = ((k<m-1) || (small == 0)) ? NL1 : small;
#pragma omp task depend(in:d[indexA], d[indexB])
          {
            // other blocks
            // multiplication of transposed matrices to get column order
            // AVX kernel is slightly faster this way
            // also make sure to use only lower triangle (by matrix symmetry)
            // 1, n -> n, 1 in pack_* switches from column to row storage (transposes)
            // transposed horizontal block
            if (j<k)
              pack_A(NL1, reduction, &d[indexA], 1, n, _A);
            else
              pack_A(cols, reduction, &d[indexA], n, 1, _A);
            // transposed vertical block
            if (i<k)
              pack_B(reduction, NL1, &d[indexB], n, 1, _B);
            else
              pack_B(reduction, rows, &d[indexB], 1, n, _B);
            dgemm_macro_kernel(cols, rows, reduction, &d[(i*n+j)*NL1], 1, n);
          }
        }
      }
    } // single
    // Promote the eagerly computed (k+1,k+1) diagonal for the next round.
    // (Runs after the implicit barrier of the parallel region; on the last
    // iteration it copies stale data but _diag is no longer used.)
    for (i=0;i<NL1*NL1;i++)
      _diag[i] = _diag2[i];
  }
  // copy lower triangle to upper triangle
  for (i=0;i<n;i++)
    for (j=0;j<i;j++)
      d[j*n+i] = d[i*n+j];
}
ParSHUM_SBBD_util.c
#include <math.h> #include <string.h> #include <limits.h> #include "ParSHUM_enum.h" #include "ParSHUM_auxiliary.h" #include "ParSHUM_SBBD_util.h" /* TODO: instead od using proc 0 as master, define a master and use it in the rest of the code. This makes it possible to call ParSHUM SBBD in a subset of procs in a simulation for exmaple. This should be done by adding a root node in the MPI_info struct. */ void ParSHUM_get_col_blocks(ParSHUM_schur_matrix A, col_block col_blocks, row_block row_blocks) { int n = A->n, nb_blocks = row_blocks->nb_blocks, block, i, j, k; int *tmp, *BB_index; col_blocks->nb_blocks = nb_blocks; col_blocks->nb_BB_cols = n; col_blocks->n = n; col_blocks->perms = malloc((size_t) n * sizeof(*col_blocks->perms)); col_blocks->invr_perms = malloc((size_t) n * sizeof(*col_blocks->invr_perms)); col_blocks->sizes = calloc((size_t) (nb_blocks + 2 ), sizeof(*col_blocks->sizes)); col_blocks->nnz = calloc((size_t) nb_blocks, sizeof(*col_blocks->nnz)); col_blocks->BB_index = calloc((size_t) nb_blocks, sizeof(*col_blocks->BB_index)); col_blocks->BB_size = calloc((size_t) nb_blocks, sizeof(*col_blocks->BB_size)); col_blocks->local_schur_m = calloc((size_t) nb_blocks + 1, sizeof(*col_blocks->local_schur_m)); tmp = malloc((size_t) col_blocks->n * sizeof(*tmp)); BB_index = malloc((size_t) col_blocks->n * sizeof(*BB_index)); int_array_memset(col_blocks->invr_perms, ParSHUM_UNUSED_PIVOT, n); int_array_memset(tmp, ParSHUM_UNUSED_PIVOT, n); for( block = 0; block < nb_blocks; block++) { int col_block_size = col_blocks->sizes[block]; int start_block = row_blocks->sizes[block]; int end_block = row_blocks->sizes[block+1]; for( i = start_block; i < end_block; i++) { int row = row_blocks->perms[i]; CSR_struct *CSR = &A->CSR[row]; int *cols = CSR->col; int row_nb_elem = CSR->nb_elem; col_blocks->nnz[block] += row_nb_elem; for ( j = 0; j < row_nb_elem; j++) { int col = cols[j]; if (col_blocks->invr_perms[col] != ParSHUM_UNUSED_PIVOT) { if (col_blocks->invr_perms[col] >= 
col_blocks->nb_BB_cols) { if (tmp[col] > block || tmp[col] < 0) ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__,"something is wrong"); if ( tmp[col] < block) { BB_index[col_blocks->BB_size[block]++] = col; tmp[col] = block; } } continue; } CSC_struct *CSC = &A->CSC[col]; int *rows = CSC->row; int col_nb_elem = CSC->nb_elem; int BB_col = 0; for ( k = 0; k < col_nb_elem; k++) if (row_blocks->invr_perms[rows[k]] < start_block || row_blocks->invr_perms[rows[k]] >= end_block) { BB_col = 1; break; } if (BB_col) { col_blocks->perms[--col_blocks->nb_BB_cols] = col; col_blocks->invr_perms[col] = col_blocks->nb_BB_cols; BB_index[col_blocks->BB_size[block]++] = col; tmp[col] = block; } else { col_blocks->perms[col_block_size] = col; col_blocks->invr_perms[col] = col_block_size++; } } } col_blocks->sizes[block+1] = col_block_size; col_blocks->BB_index[block] = malloc((size_t) col_blocks->BB_size[block] * sizeof(BB_index)); memcpy((void *) col_blocks->BB_index[block], BB_index, (size_t) col_blocks->BB_size[block] * sizeof(BB_index)); col_blocks->local_schur_m[block+1] = col_blocks->local_schur_m[block] + (row_blocks->sizes[block+1] - row_blocks->sizes[block]) - (col_blocks->sizes[block+1] - col_blocks->sizes[block]); } col_blocks->nb_BB_cols = n - col_blocks->nb_BB_cols; col_blocks->sizes[nb_blocks+1] = n; /* make BB_index local to BB */ for(block = 0; block < nb_blocks; block++) { int base = n - col_blocks->nb_BB_cols; int *_BB_index = col_blocks->BB_index[block]; for (i = 0; i < col_blocks->BB_size[block]; i++) _BB_index[i] = col_blocks->invr_perms[_BB_index[i]] - base; } for(block = 0; block < nb_blocks; block++) { int *_BB_index = col_blocks->BB_index[block]; for (i = 0; i < col_blocks->BB_size[block]; i++) { int col = _BB_index[i]; if (col >= col_blocks->nb_BB_cols || col < 0) ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__,"BB_index is not correct"); } } if ( col_blocks->local_schur_m[nb_blocks] != col_blocks->nb_BB_cols) ParSHUM_fatal_error(__FUNCTION__, 
__FILE__, __LINE__,"local_schur_m is not correct"); if (col_blocks->sizes[nb_blocks] + col_blocks->nb_BB_cols != n) ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__,"col_block sizes are not correct"); for (i = 0; i < n - col_blocks->nb_BB_cols; i++) if(tmp[col_blocks->perms[i]] != ParSHUM_UNUSED_PIVOT) ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__,"tmp is wrong before"); for (i = n - col_blocks->nb_BB_cols; i < n; i++) if(tmp[col_blocks->perms[i]] == ParSHUM_UNUSED_PIVOT || tmp[col_blocks->perms[i]] >= nb_blocks) ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__,"tmp is wrong before"); free(tmp); free(BB_index); } ParSHUM_matrix ParSHUM_get_block(ParSHUM_schur_matrix matrix, row_block row_blocks, col_block col_blocks, int block) { int i, j, k, l, start, end, n; int nnz = col_blocks->nnz[block], *rows; int local_BB = col_blocks->BB_size[block]; int *col_perms = col_blocks->perms; int *invr_row_perms = row_blocks->invr_perms; int *BB_indices = col_blocks->BB_index[block]; int start_block = row_blocks->sizes[block]; int end_block = row_blocks->sizes[block+1]; double *vals; long *col_ptr; ParSHUM_matrix self = ParSHUM_matrix_create(); start = col_blocks->sizes[block]; end = col_blocks->sizes[block+1]; n = end - start; self->n = n + local_BB; self->m = row_blocks->sizes[block+1] - row_blocks->sizes[block]; self->nnz = nnz; ParSHUM_matrix_allocate(self, self->n, self->m, self->nnz, 1.0, ParSHUM_CSC_matrix); rows = self->row; vals = self->val; col_ptr = self->col_ptr; *col_ptr = 0; for( i = start, l = 0; i < end; i++) { CSC_struct *CSC = &matrix->CSC[col_perms[i]]; int *CSC_rows = CSC->row; double *CSC_vals = CSC->val; int nb_elem = CSC->nb_elem; int start_ = (int) col_ptr[l], end_ = (int) col_ptr[l] + nb_elem; for( j = start_, k = 0; j < end_; j++, k++) { int local_row = invr_row_perms[CSC_rows[k]]; if (local_row < start_block || local_row >= end_block) ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__,"independent column has row entry outside the 
block"); rows[j] = local_row - start_block; } memcpy((void *) &vals[start_], (void *) CSC_vals, (size_t) nb_elem*sizeof(*CSC_vals)); col_ptr[++l] = (long) end_; } for(j = 0; j < local_BB; j++){ CSC_struct *CSC = &matrix->CSC[col_perms[BB_indices[j] + col_blocks->sizes[col_blocks->nb_blocks]]]; int *CSC_rows = CSC->row; double *CSC_vals = CSC->val; int nb_elem = CSC->nb_elem; long col_index = col_ptr[l]; for(k = 0 ; k < nb_elem; k++) { int local_row = invr_row_perms[CSC_rows[k]]; if (local_row >= start_block && local_row < end_block) { if (col_index + 1 > (long) nnz) ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__,"block has more entries then expected"); rows[col_index ] = local_row - start_block; vals[col_index++] = CSC_vals[k]; } } col_ptr[++l] = col_index; } /* for( j = start; j < end; j++) { */ /* CSC_struct *CSC = &matrix->CSC[col_perms[j]]; */ /* int *CSC_rows = CSC->row; */ /* double *CSC_vals = CSC->val; */ /* int nb_elem = CSC->nb_elem; */ /* int BB_col = 0; */ /* for( k = 0; k < nb_elem; k++) { */ /* int local_row = invr_row_perms[CSC_rows[k]]; */ /* if (local_row >= start_block && local_row < end_block) { */ /* BB_col = 1; */ /* break; */ /* } */ /* } */ /* if (BB_col) { */ /* long col_index = col_ptr[l]; */ /* for( ; k < nb_elem; k++) { */ /* int local_row = invr_row_perms[CSC_rows[k]]; */ /* if (local_row >= start_block && local_row < end_block) { */ /* if (col_index + 1 > (long) nnz) */ /* ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__,"block has more entries then expected"); */ /* rows[col_index ] = local_row - start_block; */ /* vals[col_index++] = CSC_vals[k]; */ /* } */ /* } */ /* if (++l > local_BB + n) */ /* ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__,"block has more columns then expected"); */ /* col_ptr[l] = col_index; */ /* } */ /* } */ if (l != n + local_BB) ParSHUM_fatal_error(__FUNCTION__, __FILE__, __LINE__,"did not found all the columns in the block matrix"); if(nnz != col_ptr[l]) ParSHUM_fatal_error(__FUNCTION__, 
__FILE__, __LINE__,"did not found all the enteries in the block matrix"); return self; } void ParSHUM_print_blocks(row_block row_blocks, col_block col_blocks) { printf("row_block:\nn = %d \nnb_block %d \n", row_blocks->n, row_blocks->nb_blocks); print_int_array(row_blocks->perms, row_blocks->n, "row permutation"); print_int_array(row_blocks->invr_perms, row_blocks->n, "row inverse permutation"); print_int_array(row_blocks->sizes, row_blocks->nb_blocks + 1, "row block sizes"); printf("\n\ncol_block:\nn= %d \nnb_blocks= %d\nnb_BB_cols = %d\n", col_blocks->n, col_blocks->nb_blocks, col_blocks->nb_BB_cols); print_int_array(col_blocks->perms, col_blocks->n, "col permutation"); print_int_array(col_blocks->invr_perms, col_blocks->n, "col inverse permutation"); print_int_array(col_blocks->nnz, col_blocks->nb_blocks, "col block non-zero elements"); print_int_array(col_blocks->sizes, col_blocks->nb_blocks+2, "col block sizes"); print_int_array(col_blocks->BB_size, col_blocks->nb_blocks, "col local border-block sizes"); } void ParSHUM_blocks_print_stats(ParSHUM_schur_matrix A, row_block row_blocks, col_block col_blocks) { int i, j, m = A->m, n = A->n ; int nb_blocks = col_blocks->nb_blocks; int min_n = INT_MAX, max_n = 0, min_nnz = INT_MAX, max_nnz = 0; int min_m = INT_MAX, max_m = 0; int BB_n, BB_nnz = 0; double avg_n, std_n = 0, avg_nnz = 0, std_nnz = 0, total_nnz = 0; double avg_m, std_m = 0; double *block_nnz; block_nnz = malloc(n*sizeof(*block_nnz)); avg_m = m / nb_blocks; for( i = 0; i < nb_blocks; i++) { int block_size = row_blocks->sizes[i+1] - row_blocks->sizes[i]; std_m += (avg_m - block_size) * (avg_m - block_size); max_m = (block_size > max_m) ? block_size : max_m; min_m = (block_size < min_m) ? 
block_size : min_m; } std_m /= nb_blocks; std_m = sqrt(std_m); avg_n = col_blocks->sizes[nb_blocks] / nb_blocks; for( i = 0; i < nb_blocks; i++) { int block_n = col_blocks->sizes[i+1] - col_blocks->sizes[i]; block_nnz[i] = 0; for( j = col_blocks->sizes[i]; j < col_blocks->sizes[i+1]; j++) block_nnz[i] += A->CSC[col_blocks->perms[j]].nb_elem; std_n += (avg_n - block_n) * (avg_n - block_n); max_n = (block_n > max_n) ? block_n : max_n; min_n = (block_n < min_n) ? block_n : min_n; } for( i = 0; i < nb_blocks; i++) { total_nnz += block_nnz[i]; max_nnz = (block_nnz[i] > max_nnz) ? block_nnz[i] : max_nnz; min_nnz = (block_nnz[i] < min_nnz) ? block_nnz[i] : min_nnz; } avg_nnz = total_nnz / nb_blocks; for( i = 0; i < nb_blocks; i++) std_nnz += (avg_nnz - block_nnz[i]) * (avg_nnz - block_nnz[i]); BB_n = col_blocks->nb_BB_cols; for( i = col_blocks->sizes[nb_blocks]; i < col_blocks->sizes[nb_blocks+1]; i++) BB_nnz += A->CSC[col_blocks->perms[i]].nb_elem; std_nnz /= nb_blocks; std_nnz = sqrt(std_nnz); std_n /= nb_blocks; std_n = sqrt(std_n); printf("#blocks\tavg_m\t\tstd_m\t\tmax_m\tmin_m\tavg_n\t\tstd_n\t\tmax_n\tmin_n\tavg_nnz\t\tstd_nnz\t\tmax_nnz\tmin_nnz\tBB_n\tBB_nnz\n"); printf("%d\t%e\t%e\t%d\t%d\t%e\t%e\t%d\t%d\t%e\t%e\t%d\t%d\t%d\t%d\n", nb_blocks, avg_m, std_m, max_m, min_m, avg_n, std_n, max_n, min_n, avg_nnz, std_nnz, max_nnz,min_nnz, BB_n, BB_nnz); } void ParSHUM_check_blocks(ParSHUM_schur_matrix A, row_block row_blocks, col_block col_blocks) { int nb_blocks = row_blocks->nb_blocks, n = col_blocks->n; int block, i, j; char mess[2048]; /* check_vlaid_perms(col_blocks->perms, col_blocks->invr_perms, n, n, "col"); */ /* check_vlaid_perms(row_blocks->perms, row_blocks->invr_perms, m, m, "row"); */ if (*row_blocks->sizes) ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, "the first row_block size is not zero"); for( block = 1; block <= nb_blocks; block++) if (row_blocks->sizes[block] < row_blocks->sizes[block-1]) { snprintf(mess, 2048, "row_block_sizes[%d] = %d is 
larger then row_block_sizes[%d] = %d", block, row_blocks->sizes[block], block - 1, row_blocks->sizes[block - 1]); ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess); } if (row_blocks->sizes[nb_blocks] != n) ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, "the last row_block size is not same as the matrix size"); if (*col_blocks->sizes) ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, "the first col_block size is not zero"); for( block = 1; block <= nb_blocks; block++) if (col_blocks->sizes[block] < col_blocks->sizes[block-1]) { snprintf(mess, 2048, "col_block_sizes[%d] = %d is larger then row_col_sizes[%d] = %d", block, col_blocks->sizes[block], block - 1, col_blocks->sizes[block - 1]); ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess); } if (col_blocks->sizes[nb_blocks+1] != n) ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, "the last col_block size is not same as the matrix size"); if ((col_blocks->sizes[nb_blocks + 1] - col_blocks->sizes[nb_blocks]) != col_blocks->nb_BB_cols) ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, "the difference the last two col_block sizes are not same as the nb_BB_cols"); for (block = 0; block < nb_blocks; block++) { int start_block = row_blocks->sizes[block]; int end_block = row_blocks->sizes[block+1]; for (i = col_blocks->sizes[block]; i < col_blocks->sizes[block+1]; i++) { int col = col_blocks->perms[i]; CSC_struct *CSC = &A->CSC[col]; int *rows = CSC->row; int col_nb_elem = CSC->nb_elem; for ( j = 0; j < col_nb_elem; j++) if (row_blocks->invr_perms[rows[j]] < start_block || row_blocks->invr_perms[rows[j]] >= end_block) { snprintf(mess, 2048, "col %d belongs to col_block %d, but row %d is not in the same block ", col, block, rows[j]); ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess); } } } /* Checking the BB cols */ for (i = col_blocks->sizes[nb_blocks]; i < col_blocks->sizes[nb_blocks + 1]; i++) { int col = col_blocks->perms[i]; CSC_struct *CSC = &A->CSC[col]; int *rows = CSC->row; int col_nb_elem = 
CSC->nb_elem; int first_block = -1; int first_invr_row = row_blocks->invr_perms[*rows]; int is_BB_col = 0, start_block, end_block; for ( block = 1; block <= nb_blocks; block++) if ( first_invr_row < row_blocks->sizes[block]) { first_block = block-1; start_block = row_blocks->sizes[first_block]; end_block = row_blocks->sizes[block]; break; } for ( j = 1; j < col_nb_elem; j++) if (row_blocks->invr_perms[rows[j]] < start_block || row_blocks->invr_perms[rows[j]] >= end_block) { is_BB_col = 1; break; } if (!is_BB_col) { snprintf(mess, 2048, "col %d is in the BB block, but all of its entries belongs to %d", col, first_block); ParSHUM_warning(__FUNCTION__, __FILE__, __LINE__, mess); } } } void ParSHUM_collect_BB_block(double *local_schur, double *global_schur, double **buffers, col_block col_blocks, row_block row_blocks,int m, int n, int BB_cols, int nb_threads, ParSHUM_MPI_info MPI_info) { int rank = MPI_info->rank; if (rank) { int i; MPI_Comm comm = MPI_info->world; int schur_m = m - n + BB_cols; MPI_Request request; double *buff = malloc((size_t) BB_cols * schur_m * sizeof(*buff)); for( i = 0; i < BB_cols; i++) memcpy((void *) &buff[i*schur_m], (void *) &local_schur[i*m + m - schur_m], (size_t) schur_m * sizeof(*buff)); MPI_Isend(buff, BB_cols*schur_m, MPI_DOUBLE, 0, 0, comm, &request); } else { /* #pragma omp parallel default(shared) */ /* { */ /* #pragma omp single */ /* { */ MPI_Comm comm = MPI_info->world; int BB_global = col_blocks->nb_BB_cols; int nb_blocks = col_blocks->nb_blocks; int *BB_indices = *col_blocks->BB_index; int *local_schur_m = col_blocks->local_schur_m; int local_n, local_m, local_nnz; int block; MPI_Status status; MPI_Request requests[nb_blocks]; for (block = 1; block < nb_blocks; block++) { local_n = col_blocks->BB_size[block]; local_m = local_schur_m[block+1] - local_schur_m[block]; local_nnz = local_n * local_m; MPI_Irecv(buffers[block], local_nnz, MPI_DOUBLE, block, 0, comm, &requests[block]); } local_m = m - n + BB_cols; /* #pragma omp task 
shared(global_schur, BB_indices, BB_global, local_schur, m, BB_cols) firstprivate(local_m) */ /* { */ for ( int i = 0; i < BB_cols; i++) memcpy((void *) &global_schur[BB_indices[i]*BB_global], (void *) &local_schur[i*m + m - local_m], (size_t) local_m * sizeof(*global_schur)); /* } */ for (block = 1; block < nb_blocks; block++) { int indx; MPI_Waitany(nb_blocks-1, &requests[1], &indx, &status); indx++; local_n = col_blocks->BB_size[indx]; local_m = local_schur_m[indx+1] - local_schur_m[indx]; BB_indices = col_blocks->BB_index[indx]; double *rr = buffers[indx]; /* #pragma omp task shared(global_schur, BB_indices, BB_global, local_schur, local_schur_m) firstprivate(local_m, indx, rr) */ /* { */ for ( int i = 0; i < local_n; i++) memcpy((void *) &global_schur[BB_indices[i]*BB_global + local_schur_m[indx]], (void *) &rr[i*local_m], (size_t) local_m * sizeof(*global_schur)); /* } */ /* } */ /* } */ /* } */ } } }
limits_threads.c
/* 1. how to include header 2. parallel region 3. runtime routine 4. undeterminated execution order 5. control number of threads By C. Liao */ #include <stdio.h> #ifdef _OPENMP #include <omp.h> #endif int main(void) { int i=0; #pragma omp parallel { #ifdef _OPENMP i=omp_get_thread_num(); #endif printf("Hello,world! I am thread %d\n",i); } return 0; }
test.c
#include <stdio.h> #include "../utilities/check.h" #define N 100 int test_aligned(){ int a[N], aa[N]; int i, error = 0; // initialize for(i=0; i<N; i++) aa[i] = a[i] = -1; int *b = a; // offload #pragma omp target map(tofrom: b[0:100]) { #pragma omp teams distribute simd aligned(b: 8*sizeof(int)) for(int k=0; k<N; k++) b[k] = k; } // host for(i=0; i<N; i++) aa[i] = i; // check for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error); if (error > 10) { printf("abort\n"); return error; } } return error; } int test_collapsed(){ int a[N], aa[N]; int i, error = 0; // initialize for(i=0; i<N; i++) aa[i] = a[i] = -1; // offload #pragma omp target map(tofrom: a[0:100]) { #pragma omp teams distribute simd collapse(2) for(int k=0; k<N/4; k++) for(int l=0; l<4; l++) a[k*4+l] = k*4+l; } // host for(i=0; i<N; i++) aa[i] = i; // check for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error); if (error > 10) { printf("abort\n"); return error; } } return error; } #if 0 int test_lastprivate(){ int a[N], aa[N]; int i, error = 0; // initialize for(i=0; i<N; i++) aa[i] = a[i] = -1; int n; // offload #pragma omp target map(tofrom: a[0:100]) { #pragma omp teams distribute simd lastprivate(n) for(int k=0; k<N; k++) { a[k] = k; n = k; } } a[0] = n; // host for(i=0; i<N; i++) aa[i] = i; aa[0] = N-1; // check for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error); if (error > 10) { printf("abort\n"); return error; } } return error; } #endif int test_linear(){ int a[N], aa[N]; int i, error = 0; // initialize for(i=0; i<N; i++) aa[i] = a[i] = -1; int l = 0; // offload #pragma omp target map(tofrom: a[0:100]) { #pragma omp teams distribute simd num_teams(1) for(int k=0; k<N; k++) { l = 2*k; a[k] = l; } } // host for(i=0; i<N; i++) aa[i] = 2*i; // check for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error); 
if (error > 10) { printf("abort\n"); return error;; } } return error; } #if 0 int test_private(){ int a[N], aa[N]; int i, error = 0; // initialize for(i=0; i<N; i++) aa[i] = a[i] = -1; int n; // offload #pragma omp target map(tofrom: a[0:100]) { #pragma omp teams distribute simd private(n) for(int k=0; k<N; k++) { n = k; a[k] = n; } } // host for(i=0; i<N; i++) aa[i] = i; // check for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error); if (error > 10) { printf("abort\n"); return error; } } return error; } #endif int test_safelen(){ int a[N], aa[N]; int i, error = 0; // initialize for(i=0; i<N; i++) aa[i] = a[i] = -1; // offload #pragma omp target map(tofrom: a[0:100]) { #pragma omp teams distribute simd num_teams(1) safelen(2) for(int k=0; k<100; k++) { if (k > 1){ a[k] = a[k-2] + 2; } else{ a[k] = k; } } } // host for(i=0; i<N; i++) aa[i] = i; // check for(i=0; i<N; i++) { if (a[i] != aa[i]) printf("%d: a %d != %d (error %d)\n", i, a[i], aa[i], ++error); if (error > 10) { printf("abort\n"); return error; } } return error; } int main() { int error = 0; check_offloading(); // Clauses error += test_aligned(); error += test_collapsed(); // error += test_lastprivate(); error += test_linear(); // error += test_private(); error += test_safelen(); // report printf("done with %d errors\n", error); return error; }
trsm_x_coo_u_lo_col.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_COO *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy) { ALPHA_INT m = A->rows; int num_thread = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_thread) #endif for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++) { for (ALPHA_INT r = 0; r < m; r++) { ALPHA_Number temp; alpha_setzero(temp); for (ALPHA_INT cr = 0; cr < A->nnz; cr++) { int row = A->row_indx[cr]; int col = A->col_indx[cr]; if(row == r && col < r) alpha_madde(temp, A->values[cr], y[out_y_col * ldy + col]); } ALPHA_Number t; alpha_mul(t, alpha, x[out_y_col * ldx + r]); alpha_sub(y[out_y_col * ldy + r], t, temp); } } return ALPHA_SPARSE_STATUS_SUCCESS; }
bfs.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <omp.h> #include "graphio.h" #include "graph.h" #include <time.h> char gfile[2048]; void usage(){ printf("./bfs <filename> <sourceIndex>\n"); exit(0); } /* You can ignore the ewgths and vwghts. They are there as the read function expects those values row_ptr and col_ind are the CRS entities. nov is the Number of Vertices */ /* =============================================== CS 406 - Parallel Computation ********HOMEWORK 1*********** The implementation uses the ideas of Top-Down and Bottom-Up BFS approach introduced by Scott Beamer et. al. The algorithm is essentially is as follows: 1 - Keep an additional array of vertices for the current frontier, initialize it with source vertex 2 - Keep the number of edges to check from frontier, the number of vertices in the frontier and the number of edges to check from the unvisited vertices 3 - By computing a threshold with the variables initialized in step 2, decide if it is better to perform bottom up BFS or top down BFS 4 - Before performing the preferred strategy for BFS, reset the values for nf and mf to 0 because they will be updated as new items will be selected for the next frontier 5 - If bottom up BFS is preferred, linear search for a vertex that is NOT visited, try visiting each edge of this vertex [do this in parallel] to see if it has been visited earlier, if so we have found a parent for the vertex. If there is no such vertex proceed with the next unvisited vertex. Update values of variables initialized in step 2 6 - If top down BFS is preferred, create tasks to visit neighbors of each vertex in the frontier. Add them to the frontier after all such tasks have been finished and remove previous frontier vertices(not earlier!). Update values of variables initialized in step 2 7 - If there are 0 edges to check from unvisited vertices, terminate! 
Otherwise go to step 3 ================================================*/ typedef enum { false, true } bool; // Step 1 vtype * frontier; int frontier_tail; int frontier_head; int new_frontier_tail; int mu; int mf; int nf; bool visited_somewhere; int * distances; #define UNKNOWN -1 bool isInFrontier(const vtype v) { for (int i = frontier_head; i < frontier_tail; i++) { if (frontier[i] == v) { return true; } } return false; } void BottomUp(etype * row_ptr, vtype * col_ind, vtype nov) { // 1 - Search for an unvisited vertex in parallel // 2 - For each unvisited vertex, create tasks to reach neighbors and mark them // #pragma omp parallel for for (etype vertex = 0; vertex < nov; vertex++) { if (distances[vertex] == UNKNOWN) { // Unvisited vertex found #pragma omp task { for (vtype neighbor_ind = row_ptr[vertex]; neighbor_ind < row_ptr[vertex + 1]; neighbor_ind++) { const vtype neighbor = col_ind[neighbor_ind]; if (isInFrontier(neighbor)) { // A parent for vertex i is found distances[vertex] = distances[neighbor] + 1; // 1 - visited vertex is added to the list of items that will be // the frontiers of the next iteration // 2 - Update values of mf, nf and mu #pragma omp critical { frontier[new_frontier_tail++] = vertex; mf += row_ptr[vertex + 1] - row_ptr[vertex]; nf += 1; mu -= row_ptr[vertex + 1] - row_ptr[vertex]; } visited_somewhere = true; } } } } } } bool useBottomUp() { double tuning_param = 14.0; if (mf > mu/tuning_param) { return true; } return false; } bool useTopDown(int nov) { double tuning_param = 24.0; if (nf < nov/tuning_param) { return true; } return false; } void TopDown(etype * row_ptr, vtype * col_ind, vtype nov) { // For each vertex in the frontier, create a task that does the following: // Visit each edge, mark any unvisited neighbors //#pragma omp parallel for for (int frontier_ind = frontier_head; frontier_ind < frontier_tail; frontier_ind++) { //#pragma omp task { const vtype frontier_vertex = frontier[frontier_ind]; #pragma omp task { for (int 
neighbor_ind = row_ptr[frontier_vertex]; neighbor_ind < row_ptr[frontier_vertex + 1]; neighbor_ind++) { const vtype neighbor = col_ind[neighbor_ind]; if (distances[neighbor] == UNKNOWN) { distances[neighbor] = distances[frontier_vertex] + 1; // A vertex has been visited, now the following will happen: // 1 - visited vertex is added to the list of items that will be // the frontiers of the next iteration // 2 - Update the values of mf, nf and mu #pragma omp critical { frontier[new_frontier_tail++] = neighbor; mf += row_ptr[neighbor + 1] - row_ptr[neighbor]; nf += 1; mu -= row_ptr[neighbor + 1] - row_ptr[neighbor]; } visited_somewhere = true; } } } } } } void BFS(etype * row_ptr, vtype * col_ind, vtype nov, const vtype source) { printf("BFS() invoked\n"); etype row_1 = row_ptr[1]; // Step 2 mf = row_ptr[1] - row_ptr[0]; // number of edges to check from frontier nf = 1; // number of vertices in the frontier mu = row_ptr[nov] - mf - 1; // number of edges to check from unexplored vertices // distances are initialized to 0, this is the true value for the source vertex, // but for convenience let us mark it visited by setting its distance to 1 so that // we can use the value 0 to determine unvisited vertices distances[source] = 0; frontier[0] = source; frontier_head = 0; frontier_tail = 1; new_frontier_tail = 1; visited_somewhere = true; frontier[frontier_tail++] = source; bool topDownActive = true; while (visited_somewhere) { // while there exists vertices to visit visited_somewhere = false; if (topDownActive) { TopDown(row_ptr, col_ind, nov); } else { BottomUp(row_ptr, col_ind, nov); } #pragma omp taskwait if (topDownActive) { topDownActive = !useBottomUp(); } else { topDownActive = useTopDown(nov); } frontier_head = frontier_tail; frontier_tail = new_frontier_tail; mf = 0; nf = 0; } } int main(int argc, char *argv[]) { omp_set_num_threads(16); etype *row_ptr; vtype *col_ind; ewtype *ewghts; vwtype *vwghts; vtype nov, source; if(argc != 3) usage(); const char* fname = 
argv[1]; strcpy(gfile, fname); source = atoi(argv[2]); if(read_graph(gfile, &row_ptr, &col_ind, &ewghts, &vwghts, &nov, 0) == -1) { printf("error in graph read\n"); exit(1); } /****** YOUR CODE GOES HERE *******/ distances = (int*)malloc(nov*sizeof(int)); frontier = (vtype *)malloc(nov*sizeof(vtype)); for (int i = 0; i < nov; i++) { distances[i] = -1; } if (distances == NULL || frontier == NULL) { printf("Cannot allocate memory"); exit(1); } printf("Initiating Breadth First Search\n"); double start_time = omp_get_wtime(); BFS(row_ptr, col_ind, nov, source); double time = omp_get_wtime() - start_time; printf("Breadth First Search completed in [%g s]\nWriting results\n", time); FILE *fp; fp = fopen("./results.txt", "w+"); for (int i = 0; i < nov; i++) { fprintf(fp, "%i ", distances[i]); } fclose(fp); printf("results written to results.txt\n"); free(row_ptr); free(col_ind); return 1; }
ast-dump-openmp-teams-distribute-parallel-for-simd.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s void test_one(int x) { #pragma omp target #pragma omp teams distribute parallel for simd for (int i = 0; i < x; i++) ; } void test_two(int x, int y) { #pragma omp target #pragma omp teams distribute parallel for simd for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_three(int x, int y) { #pragma omp target #pragma omp teams distribute parallel for simd collapse(1) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_four(int x, int y) { #pragma omp target #pragma omp teams distribute parallel for simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) ; } void test_five(int x, int y, int z) { #pragma omp target #pragma omp teams distribute parallel for simd collapse(2) for (int i = 0; i < x; i++) for (int i = 0; i < y; i++) for (int i = 0; i < z; i++) ; } // CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc> // CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:3:1, line:8:1> line:3:6 test_one 'void (int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:8:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:4:9, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:6:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:5:9, col:47> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:9, col:47> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:9, col:47> openmp_structured_block // CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5> 
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:7:5> openmp_structured_block // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:5:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:9) *const restrict' // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:5:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:9) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 
'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:7:5> openmp_structured_block // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:5:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:9) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:4:9) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:5:9> col:9 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | | | | |-DeclStmt 
{{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:7:5> openmp_structured_block // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:5:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:9) *const restrict' // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:5:9> col:9 implicit .global_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:9) *const restrict' // CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:7:5> openmp_structured_block // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:5:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:9) *const restrict' // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:4:9) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:5:9, col:47> openmp_structured_block // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | 
`-NullStmt {{.*}} <line:7:5> openmp_structured_block // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:5:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:9) *const restrict' // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:5:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:9) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:7:5> openmp_structured_block // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:5:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:9) *const restrict' // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:4:9) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:5:9> col:9 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:6:3, line:7:5> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} 
<col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:7:5> openmp_structured_block // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:5:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:9) *const restrict' // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:5:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:9) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:6:23> col:23 implicit 'int &' // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <col:3, line:7:5> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:6:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:7:5> openmp_structured_block // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:5:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:5:9) *const restrict' // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:6:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <col:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:10:1, line:16:1> line:10:6 test_two 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:16:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:11:9, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:12:9, col:47> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:9, col:47> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:9, col:47> openmp_structured_block // CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' 
cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> openmp_structured_block // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:12:9> col:9 implicit 
.global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:9) *const restrict' // CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:12:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:9) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> openmp_structured_block // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // 
CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:12:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:9) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:11:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:11:9) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:12:9> col:9 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' 
<LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> openmp_structured_block // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:12:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:9) *const restrict' // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:12:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:9) *const restrict' // CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> openmp_structured_block // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} 
<col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:12:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:9) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:9> col:9 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:11:9) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:12:9, col:47> openmp_structured_block // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator 
{{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> openmp_structured_block // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:12:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:9) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:12:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:9) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr 
{{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:12:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:9) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:11:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:11:9) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:12:9> col:9 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue 
Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> openmp_structured_block // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:12:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:9) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:13:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:12:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:9) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:13:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:14:25> col:25 implicit 'int &' // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:13:3, line:15:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:13:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // 
CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:14:5, line:15:7> openmp_structured_block // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:14:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:15:7> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:12:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:12:9) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:13:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:14:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:13:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:14:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:18:1, line:24:1> line:18:6 test_three 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:24:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:19:9, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:20:9, col:59> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:9, col:59> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:9, col:59> openmp_structured_block // CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:48, col:58> // CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:57> 'int' // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 1 // CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | | 
|-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> openmp_structured_block // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator 
{{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:20:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:9) *const restrict' // CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:20:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:9) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> openmp_structured_block // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // 
CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:20:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:9) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:19:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:19:9) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:20:9> col:9 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' 
<LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> openmp_structured_block // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:20:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:9) *const restrict' // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:20:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:9) *const restrict' // CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> openmp_structured_block // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} 
<col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:20:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:9) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:9> col:9 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 
'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:19:9) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:20:9, col:59> openmp_structured_block // CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:48, col:58> // CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:57> 'int' // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 1 // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | 
`-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> openmp_structured_block // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:20:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:9) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:20:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:9) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr 
{{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:20:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:9) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:19:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:19:9) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:20:9> col:9 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue 
Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> openmp_structured_block // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:20:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:9) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:21:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:20:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:9) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:21:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:22:25> col:25 implicit 'int &' // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:21:3, line:23:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:21:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // 
CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:22:5, line:23:7> openmp_structured_block // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:22:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:23:7> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:20:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:20:9) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:21:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:22:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:21:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <col:3, <invalid sloc>> col:3 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:22:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: |-FunctionDecl {{.*}} <line:26:1, line:32:1> line:26:6 test_four 'void (int, int)' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:32:1> // CHECK-NEXT: | `-OMPTargetDirective {{.*}} <line:27:9, col:19> // CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-CapturedStmt {{.*}} <line:28:9, col:59> // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:9, col:59> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:9, col:59> openmp_structured_block // CHECK-NEXT: | | | | | |-OMPCollapseClause {{.*}} <col:48, col:58> // CHECK-NEXT: | | | | | | `-ConstantExpr {{.*}} <col:57> 'int' // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 2 // CHECK-NEXT: | | | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | | 
|-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' 
postfix '++' // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-NullStmt {{.*}} <line:31:7> openmp_structured_block // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <line:28:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:9) *const restrict' // CHECK-NEXT: | | | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:28:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:9) *const restrict' // CHECK-NEXT: | | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | 
|-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:31:7> openmp_structured_block // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:28:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:9) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:27:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:27:9) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <line:28:9> col:9 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' 
<LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:31:7> openmp_structured_block // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:28:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:9) *const restrict' // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:28:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:9) *const restrict' // CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' 
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:31:7> openmp_structured_block // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:28:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:9) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 
'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:25, col:28> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:25, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:25, col:18> 'int' '-' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:9> col:9 implicit .global_tid. 'const int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 
'void *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:27:9) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int' // CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:28:9, col:59> openmp_structured_block // CHECK-NEXT: | | | |-OMPCollapseClause {{.*}} <col:48, col:58> // CHECK-NEXT: | | | | `-ConstantExpr {{.*}} <col:57> 'int' // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 2 // CHECK-NEXT: | | | `-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // 
CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:31:7> openmp_structured_block // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:28:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:9) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:28:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:9) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr 
{{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:31:7> openmp_structured_block // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:28:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:9) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:27:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:27:9) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <line:28:9> col:9 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue 
Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:31:7> openmp_structured_block // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:28:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:9) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:28:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:9) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:29:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:30:25> col:25 implicit 'int &' // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:29:3, line:31:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:29:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // 
CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:30:5, line:31:7> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:30:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:31:7> openmp_structured_block // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:28:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:28:9) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:29:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:30:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:29:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-OMPCapturedExprDecl {{.*}} <line:30:25> col:25 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-OMPCapturedExprDecl {{.*}} <line:29:3, <invalid sloc>> col:3 implicit used .capture_expr. 'long' // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:3, line:30:28> 'long' '*' // CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <line:29:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <line:30:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:25, col:28> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:25, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:25, col:18> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:29:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:30:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-FunctionDecl {{.*}} <line:34:1, line:41:1> line:34:6 test_five 'void (int, int, int)' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int' // CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int' // CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:41:1> // CHECK-NEXT: `-OMPTargetDirective {{.*}} <line:35:9, col:19> // CHECK-NEXT: 
|-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit> // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: `-CapturedStmt {{.*}} <line:36:9, col:59> // CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-CapturedStmt {{.*}} <col:9, col:59> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <col:9, col:59> openmp_structured_block // CHECK-NEXT: | | | | |-OMPCollapseClause {{.*}} <col:48, col:58> // CHECK-NEXT: | | | | | `-ConstantExpr {{.*}} <col:57> 'int' // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:57> 'int' 2 // CHECK-NEXT: | | | | `-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' 
lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> openmp_structured_block // CHECK-NEXT: | | | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | 
`-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <line:36:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:9) *const restrict' // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:36:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:9) *const restrict' // CHECK-NEXT: | | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | 
|-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> openmp_structured_block // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:36:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:9) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:35:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:35:9) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <line:36:9> col:9 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid 
sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: 
| | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> openmp_structured_block // CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:36:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:9) *const restrict' // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:36:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:9) *const restrict' // CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator 
{{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:36:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:9) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 
'long' // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | | | `-BinaryOperator {{.*}} <col:25, col:28> 'int' '+' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:25, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:25, col:18> 'int' '-' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:9> col:9 implicit .global_tid. 'const int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 
'void *const' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:35:9) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int' // CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int' // CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9 // CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | |-OMPTeamsDistributeParallelForSimdDirective {{.*}} <line:36:9, col:59> openmp_structured_block // CHECK-NEXT: | | |-OMPCollapseClause {{.*}} <col:48, col:58> // CHECK-NEXT: | | | `-ConstantExpr {{.*}} <col:57> 'int' // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:57> 'int' 2 // CHECK-NEXT: | | `-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | 
`-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> openmp_structured_block // CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' 
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:36:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:9) *const restrict' // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:36:9> col:9 implicit .global_tid. 
'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:9) *const restrict' // CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | 
| | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> openmp_structured_block // CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:36:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 
'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 'const unsigned long' // CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:9) *const restrict' // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:35:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:35:9) *const restrict' // CHECK-NEXT: | |-RecordDecl {{.*}} <line:36:9> col:9 implicit struct definition // CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} 
<col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:39:7, line:40:9> openmp_structured_block // CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // 
CHECK-NEXT: | | | | | |-<<<NULL>>> // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:36:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:9) *const restrict' // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:36:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 
'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:9) *const restrict' // CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition // CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:37:23> col:23 implicit 'int &' // CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:38:25> col:25 implicit 'int &' // CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:39:27> col:27 implicit 'int &' // CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow // CHECK-NEXT: | | |-ForStmt {{.*}} <line:37:3, line:40:9> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:37:8, col:17> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:38:5, line:40:9> // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:38:10, col:19> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' 
<LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-ForStmt {{.*}} <line:39:7, line:40:9> openmp_structured_block // CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:39:12, col:21> // CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | | | |-<<<NULL>>> // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<' // CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue> // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int' // CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++' // CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int' // CHECK-NEXT: | | | `-NullStmt {{.*}} <line:40:9> // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:36:9> col:9 implicit .global_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.lb. 'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit used .previous.ub. 
'const unsigned long' // CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-teams-distribute-parallel-for-simd.c:36:9) *const restrict' // CHECK-NEXT: | | |-VarDecl {{.*}} <line:37:8, col:16> col:12 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | |-VarDecl {{.*}} <line:38:10, col:18> col:14 used i 'int' cinit // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | `-VarDecl {{.*}} <line:39:12, col:20> col:16 used i 'int' cinit // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:20> 'int' 0 // CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:37:23> col:23 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: | |-OMPCapturedExprDecl {{.*}} <line:38:25> col:25 implicit used .capture_expr. 'int' // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: | `-OMPCapturedExprDecl {{.*}} <line:37:3, <invalid sloc>> col:3 implicit used .capture_expr. 
'long' // CHECK-NEXT: | `-BinaryOperator {{.*}} <col:3, <invalid sloc>> 'long' '-' // CHECK-NEXT: | |-BinaryOperator {{.*}} <col:3, line:38:28> 'long' '*' // CHECK-NEXT: | | |-ImplicitCastExpr {{.*}} <line:37:3, col:26> 'long' <IntegralCast> // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:3, col:26> 'int' '/' // CHECK-NEXT: | | | |-ParenExpr {{.*}} <col:3> 'int' // CHECK-NEXT: | | | | `-BinaryOperator {{.*}} <col:23, col:26> 'int' '+' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:23, col:16> 'int' '-' // CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 'int' // CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0 // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:26> 'int' 1 // CHECK-NEXT: | | `-ImplicitCastExpr {{.*}} <line:38:5, col:28> 'long' <IntegralCast> // CHECK-NEXT: | | `-BinaryOperator {{.*}} <col:5, col:28> 'int' '/' // CHECK-NEXT: | | |-ParenExpr {{.*}} <col:5> 'int' // CHECK-NEXT: | | | `-BinaryOperator {{.*}} <col:25, col:28> 'int' '+' // CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:25, <invalid sloc>> 'int' '-' // CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:25, col:18> 'int' '-' // CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue> // CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue OMPCapturedExpr {{.*}} '.capture_expr.' 
'int' // CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0 // CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:28> 'int' 1 // CHECK-NEXT: | `-ImplicitCastExpr {{.*}} <<invalid sloc>> 'long' <IntegralCast> // CHECK-NEXT: | `-IntegerLiteral {{.*}} <<invalid sloc>> 'int' 1 // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:37:23> 'int' lvalue ParmVar {{.*}} 'x' 'int' // CHECK-NEXT: |-DeclRefExpr {{.*}} <line:38:25> 'int' lvalue ParmVar {{.*}} 'y' 'int' // CHECK-NEXT: `-DeclRefExpr {{.*}} <line:39:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
BKTree.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #ifndef _SPTAG_COMMON_BKTREE_H_ #define _SPTAG_COMMON_BKTREE_H_ #include <iostream> #include <stack> #include <string> #include <vector> #include "../VectorIndex.h" #include "CommonUtils.h" #include "QueryResultSet.h" #include "WorkSpace.h" #pragma warning(disable:4996) // 'fopen': This function or variable may be unsafe. Consider using fopen_s instead. To disable deprecation, use _CRT_SECURE_NO_WARNINGS. See online help for details. namespace SPTAG { namespace COMMON { // node type for storing BKT struct BKTNode { SizeType centerid; SizeType childStart; SizeType childEnd; BKTNode(SizeType cid = -1) : centerid(cid), childStart(-1), childEnd(-1) {} }; template <typename T> struct KmeansArgs { int _K; DimensionType _D; int _T; T* centers; SizeType* counts; float* newCenters; SizeType* newCounts; int* label; SizeType* clusterIdx; float* clusterDist; T* newTCenters; KmeansArgs(int k, DimensionType dim, SizeType datasize, int threadnum) : _K(k), _D(dim), _T(threadnum) { centers = new T[k * dim]; counts = new SizeType[k]; newCenters = new float[threadnum * k * dim]; newCounts = new SizeType[threadnum * k]; label = new int[datasize]; clusterIdx = new SizeType[threadnum * k]; clusterDist = new float[threadnum * k]; newTCenters = new T[k * dim]; } ~KmeansArgs() { delete[] centers; delete[] counts; delete[] newCenters; delete[] newCounts; delete[] label; delete[] clusterIdx; delete[] clusterDist; delete[] newTCenters; } inline void ClearCounts() { memset(newCounts, 0, sizeof(SizeType) * _T * _K); } inline void ClearCenters() { memset(newCenters, 0, sizeof(float) * _T * _K * _D); } inline void ClearDists(float dist) { for (int i = 0; i < _T * _K; i++) { clusterIdx[i] = -1; clusterDist[i] = dist; } } void Shuffle(std::vector<SizeType>& indices, SizeType first, SizeType last) { SizeType* pos = new SizeType[_K]; pos[0] = first; for (int k = 1; k < _K; k++) pos[k] = pos[k - 1] + 
newCounts[k - 1]; for (int k = 0; k < _K; k++) { if (newCounts[k] == 0) continue; SizeType i = pos[k]; while (newCounts[k] > 0) { SizeType swapid = pos[label[i]] + newCounts[label[i]] - 1; newCounts[label[i]]--; std::swap(indices[i], indices[swapid]); std::swap(label[i], label[swapid]); } while (indices[i] != clusterIdx[k]) i++; std::swap(indices[i], indices[pos[k] + counts[k] - 1]); } delete[] pos; } }; class BKTree { public: BKTree(): m_iTreeNumber(1), m_iBKTKmeansK(32), m_iBKTLeafSize(8), m_iSamples(1000) {} BKTree(BKTree& other): m_iTreeNumber(other.m_iTreeNumber), m_iBKTKmeansK(other.m_iBKTKmeansK), m_iBKTLeafSize(other.m_iBKTLeafSize), m_iSamples(other.m_iSamples) {} ~BKTree() {} inline const BKTNode& operator[](SizeType index) const { return m_pTreeRoots[index]; } inline BKTNode& operator[](SizeType index) { return m_pTreeRoots[index]; } inline SizeType size() const { return (SizeType)m_pTreeRoots.size(); } inline const std::unordered_map<SizeType, SizeType>& GetSampleMap() const { return m_pSampleCenterMap; } template <typename T> void BuildTrees(VectorIndex* index, std::vector<SizeType>* indices = nullptr) { struct BKTStackItem { SizeType index, first, last; BKTStackItem(SizeType index_, SizeType first_, SizeType last_) : index(index_), first(first_), last(last_) {} }; std::stack<BKTStackItem> ss; std::vector<SizeType> localindices; if (indices == nullptr) { localindices.resize(index->GetNumSamples()); for (SizeType i = 0; i < index->GetNumSamples(); i++) localindices[i] = i; } else { localindices.assign(indices->begin(), indices->end()); } KmeansArgs<T> args(m_iBKTKmeansK, index->GetFeatureDim(), (SizeType)localindices.size(), omp_get_num_threads()); m_pSampleCenterMap.clear(); for (char i = 0; i < m_iTreeNumber; i++) { std::random_shuffle(localindices.begin(), localindices.end()); m_pTreeStart.push_back((SizeType)m_pTreeRoots.size()); m_pTreeRoots.push_back(BKTNode((SizeType)localindices.size())); std::cout << "Start to build BKTree " << i + 1 << 
std::endl;
                    ss.push(BKTStackItem(m_pTreeStart[i], 0, (SizeType)localindices.size()));
                    // Iterative (stack-based) top-down build of tree i.
                    while (!ss.empty()) {
                        BKTStackItem item = ss.top(); ss.pop();
                        SizeType newBKTid = (SizeType)m_pTreeRoots.size();
                        m_pTreeRoots[item.index].childStart = newBKTid;
                        if (item.last - item.first <= m_iBKTLeafSize) {
                            // Range is small enough: emit every sample as a leaf child.
                            for (SizeType j = item.first; j < item.last; j++) {
                                m_pTreeRoots.push_back(BKTNode(localindices[j]));
                            }
                        }
                        else { // clustering the data into BKTKmeansK clusters
                            int numClusters = KmeansClustering(index, localindices, item.first, item.last, args);
                            if (numClusters <= 1) {
                                // Degenerate clustering: collapse the range into this
                                // node; the negated childStart marks the degenerate case.
                                SizeType end = min(item.last + 1, (SizeType)localindices.size());
                                std::sort(localindices.begin() + item.first, localindices.begin() + end);
                                m_pTreeRoots[item.index].centerid = localindices[item.first];
                                m_pTreeRoots[item.index].childStart = -m_pTreeRoots[item.index].childStart;
                                for (SizeType j = item.first + 1; j < end; j++) {
                                    m_pTreeRoots.push_back(BKTNode(localindices[j]));
                                    m_pSampleCenterMap[localindices[j]] = m_pTreeRoots[item.index].centerid;
                                }
                                m_pSampleCenterMap[-1 - m_pTreeRoots[item.index].centerid] = item.index;
                            }
                            else {
                                // One child per non-empty cluster; clusters with more
                                // than one sample are pushed for further splitting.
                                for (int k = 0; k < m_iBKTKmeansK; k++) {
                                    if (args.counts[k] == 0) continue;
                                    m_pTreeRoots.push_back(BKTNode(localindices[item.first + args.counts[k] - 1]));
                                    if (args.counts[k] > 1) ss.push(BKTStackItem(newBKTid++, item.first, item.first + args.counts[k] - 1));
                                    item.first += args.counts[k];
                                }
                            }
                        }
                        m_pTreeRoots[item.index].childEnd = (SizeType)m_pTreeRoots.size();
                    }
                    std::cout << i + 1 << " BKTree built, " << m_pTreeRoots.size() - m_pTreeStart[i] << " " << localindices.size() << std::endl;
                }
            }

            // Number of bytes SaveTrees(std::ostream&) writes.
            inline std::uint64_t BufferSize() const
            {
                return sizeof(int) + sizeof(SizeType) * m_iTreeNumber + sizeof(SizeType) + sizeof(BKTNode) * m_pTreeRoots.size();
            }

            // Serializes tree count, per-tree root offsets, node count and the
            // raw node array to the stream.  Always returns true.
            bool SaveTrees(std::ostream& p_outstream) const
            {
                p_outstream.write((char*)&m_iTreeNumber, sizeof(int));
                p_outstream.write((char*)m_pTreeStart.data(), sizeof(SizeType) * m_iTreeNumber);
                SizeType treeNodeSize = (SizeType)m_pTreeRoots.size();
                p_outstream.write((char*)&treeNodeSize, sizeof(SizeType));
                p_outstream.write((char*)m_pTreeRoots.data(), sizeof(BKTNode) * treeNodeSize);
                std::cout << "Save BKT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" << std::endl;
                return true;
            }

            // Opens the file and delegates to SaveTrees(std::ostream&).
            // Returns false only if the file could not be opened.
            bool SaveTrees(std::string sTreeFileName) const
            {
                std::cout << "Save BKT to " << sTreeFileName << std::endl;
                std::ofstream output(sTreeFileName, std::ios::binary);
                if (!output.is_open()) return false;
                SaveTrees(output);
                output.close();
                return true;
            }

            // Deserializes trees from an in-memory buffer laid out exactly as
            // produced by SaveTrees.  NOTE(review): no bounds checking — the
            // caller must guarantee the buffer is large and well formed.
            bool LoadTrees(char* pBKTMemFile)
            {
                m_iTreeNumber = *((int*)pBKTMemFile);
                pBKTMemFile += sizeof(int);
                m_pTreeStart.resize(m_iTreeNumber);
                memcpy(m_pTreeStart.data(), pBKTMemFile, sizeof(SizeType) * m_iTreeNumber);
                pBKTMemFile += sizeof(SizeType)*m_iTreeNumber;

                SizeType treeNodeSize = *((SizeType*)pBKTMemFile);
                pBKTMemFile += sizeof(SizeType);
                m_pTreeRoots.resize(treeNodeSize);
                memcpy(m_pTreeRoots.data(), pBKTMemFile, sizeof(BKTNode) * treeNodeSize);
                std::cout << "Load BKT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" << std::endl;
                return true;
            }

            // File-based counterpart of LoadTrees(char*); returns false only
            // if the file could not be opened (read errors are not detected).
            bool LoadTrees(std::string sTreeFileName)
            {
                std::cout << "Load BKT From " << sTreeFileName << std::endl;
                std::ifstream input(sTreeFileName, std::ios::binary);
                if (!input.is_open()) return false;

                input.read((char*)&m_iTreeNumber, sizeof(int));
                m_pTreeStart.resize(m_iTreeNumber);
                input.read((char*)m_pTreeStart.data(), sizeof(SizeType) * m_iTreeNumber);
                SizeType treeNodeSize;
                input.read((char*)&treeNodeSize, sizeof(SizeType));
                m_pTreeRoots.resize(treeNodeSize);
                input.read((char*)m_pTreeRoots.data(), sizeof(BKTNode) * treeNodeSize);
                input.close();
                std::cout << "Load BKT (" << m_iTreeNumber << "," << treeNodeSize << ") Finish!" << std::endl;
                return true;
            }

            // Seeds the search priority queue (m_SPTQueue) with each tree's
            // root (or, for a degenerate root with negative childStart, with
            // its children), keyed by query-to-center distance.
            template <typename T>
            void InitSearchTrees(const VectorIndex* p_index, const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space) const
            {
                for (char i = 0; i < m_iTreeNumber; i++) {
                    const BKTNode& node = m_pTreeRoots[m_pTreeStart[i]];
                    if (node.childStart < 0) {
                        p_space.m_SPTQueue.insert(COMMON::HeapCell(m_pTreeStart[i], p_index->ComputeDistance((const void*)p_query.GetTarget(), p_index->GetSample(node.centerid))));
                    } else {
                        for (SizeType begin = node.childStart; begin < node.childEnd; begin++) {
                            SizeType index = m_pTreeRoots[begin].centerid;
                            p_space.m_SPTQueue.insert(COMMON::HeapCell(begin, p_index->ComputeDistance((const void*)p_query.GetTarget(), p_index->GetSample(index))));
                        }
                    }
                }
            }

            // Best-first descent of the trees: pops the closest pending node,
            // forwards unvisited leaf centers to the neighbor queue
            // (m_NGQueue) and expands internal nodes, stopping once p_limits
            // leaves have been checked or the tree queue is exhausted.
            template <typename T>
            void SearchTrees(const VectorIndex* p_index, const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space, const int p_limits) const
            {
                do {
                    COMMON::HeapCell bcell = p_space.m_SPTQueue.pop();
                    const BKTNode& tnode = m_pTreeRoots[bcell.node];
                    if (tnode.childStart < 0) {
                        if (!p_space.CheckAndSet(tnode.centerid)) {
                            p_space.m_iNumberOfCheckedLeaves++;
                            p_space.m_NGQueue.insert(COMMON::HeapCell(tnode.centerid, bcell.distance));
                        }
                        if (p_space.m_iNumberOfCheckedLeaves >= p_limits) break;
                    } else {
                        if (!p_space.CheckAndSet(tnode.centerid)) {
                            p_space.m_NGQueue.insert(COMMON::HeapCell(tnode.centerid, bcell.distance));
                        }
                        for (SizeType begin = tnode.childStart; begin < tnode.childEnd; begin++) {
                            SizeType index = m_pTreeRoots[begin].centerid;
                            p_space.m_SPTQueue.insert(COMMON::HeapCell(begin, p_index->ComputeDistance((const void*)p_query.GetTarget(), p_index->GetSample(index))));
                        }
                    }
                } while (!p_space.m_SPTQueue.empty());
            }

        private:
            // One k-means assignment pass over indices[first, last): labels
            // each sample with its nearest center (distance biased by
            // lambda * cluster count to balance cluster sizes) and fills the
            // per-thread accumulators in args.  Returns the summed assignment
            // distance.
            template <typename T>
            float KmeansAssign(VectorIndex* p_index, std::vector<SizeType>& indices, const SizeType first, const SizeType last, KmeansArgs<T>& args, const bool updateCenters) const
            {
                float currDist = 0;
                int threads = omp_get_num_threads();
                float lambda = (updateCenters) ? COMMON::Utils::GetBase<T>() * COMMON::Utils::GetBase<T>() / (100.0f * (last - first)) : 0.0f;
                SizeType subsize = (last - first - 1) / threads + 1;

#pragma omp parallel for
                for (int tid = 0; tid < threads; tid++)
                {
                    // Each thread handles the contiguous slice [istart, iend)
                    // and writes only to its own accumulator stripes.
                    SizeType istart = first + tid * subsize;
                    SizeType iend = min(first + (tid + 1) * subsize, last);
                    SizeType *inewCounts = args.newCounts + tid * m_iBKTKmeansK;
                    float *inewCenters = args.newCenters + tid * m_iBKTKmeansK * p_index->GetFeatureDim();
                    SizeType * iclusterIdx = args.clusterIdx + tid * m_iBKTKmeansK;
                    float * iclusterDist = args.clusterDist + tid * m_iBKTKmeansK;
                    float idist = 0;
                    for (SizeType i = istart; i < iend; i++) {
                        int clusterid = 0;
                        float smallestDist = MaxDist;
                        for (int k = 0; k < m_iBKTKmeansK; k++) {
                            float dist = p_index->ComputeDistance(p_index->GetSample(indices[i]), (const void*)(args.centers + k*p_index->GetFeatureDim())) + lambda*args.counts[k];
                            if (dist > -MaxDist && dist < smallestDist) {
                                clusterid = k; smallestDist = dist;
                            }
                        }
                        args.label[i] = clusterid;
                        inewCounts[clusterid]++;
                        idist += smallestDist;
                        if (updateCenters) {
                            // Accumulate the sample into its cluster's center sum and
                            // track the farthest member as the representative.
                            const T* v = (const T*)p_index->GetSample(indices[i]);
                            float* center = inewCenters + clusterid*p_index->GetFeatureDim();
                            for (DimensionType j = 0; j < p_index->GetFeatureDim(); j++) center[j] += v[j];
                            if (smallestDist > iclusterDist[clusterid]) {
                                iclusterDist[clusterid] = smallestDist;
                                iclusterIdx[clusterid] = indices[i];
                            }
                        }
                        else {
                            // Track the closest member as the representative.
                            if (smallestDist <= iclusterDist[clusterid]) {
                                iclusterDist[clusterid] = smallestDist;
                                iclusterIdx[clusterid] = indices[i];
                            }
                        }
                    }
                    COMMON::Utils::atomic_float_add(&currDist, idist);
                }

                // Reduce the per-thread accumulators into thread 0's stripe.
                for (int i = 1; i < threads; i++) {
                    for (int k = 0; k < m_iBKTKmeansK; k++) args.newCounts[k] += args.newCounts[i*m_iBKTKmeansK + k];
                }
                if (updateCenters) {
                    for (int i = 1; i < threads; i++) {
                        float* currCenter = args.newCenters + i*m_iBKTKmeansK*p_index->GetFeatureDim();
                        for (size_t j = 0; j < ((size_t)m_iBKTKmeansK) * p_index->GetFeatureDim(); j++) args.newCenters[j] += currCenter[j];
                        for (int k = 0; k <
m_iBKTKmeansK; k++) { if (args.clusterIdx[i*m_iBKTKmeansK + k] != -1 && args.clusterDist[i*m_iBKTKmeansK + k] > args.clusterDist[k]) { args.clusterDist[k] = args.clusterDist[i*m_iBKTKmeansK + k]; args.clusterIdx[k] = args.clusterIdx[i*m_iBKTKmeansK + k]; } } } int maxcluster = -1; SizeType maxCount = 0; for (int k = 0; k < m_iBKTKmeansK; k++) { if (args.newCounts[k] > maxCount && DistanceUtils::ComputeL2Distance((T*)p_index->GetSample(args.clusterIdx[k]), args.centers + k * p_index->GetFeatureDim(), p_index->GetFeatureDim()) > 1e-6) { maxcluster = k; maxCount = args.newCounts[k]; } } if (maxcluster != -1 && (args.clusterIdx[maxcluster] < 0 || args.clusterIdx[maxcluster] >= p_index->GetNumSamples())) std::cout << "first:" << first << " last:" << last << " maxcluster:" << maxcluster << "(" << args.newCounts[maxcluster] << ") Error dist:" << args.clusterDist[maxcluster] << std::endl; for (int k = 0; k < m_iBKTKmeansK; k++) { T* TCenter = args.newTCenters + k * p_index->GetFeatureDim(); if (args.newCounts[k] == 0) { if (maxcluster != -1) { //int nextid = Utils::rand_int(last, first); //while (args.label[nextid] != maxcluster) nextid = Utils::rand_int(last, first); SizeType nextid = args.clusterIdx[maxcluster]; std::memcpy(TCenter, p_index->GetSample(nextid), sizeof(T)*p_index->GetFeatureDim()); } else { std::memcpy(TCenter, args.centers + k * p_index->GetFeatureDim(), sizeof(T)*p_index->GetFeatureDim()); } } else { float* currCenters = args.newCenters + k * p_index->GetFeatureDim(); for (DimensionType j = 0; j < p_index->GetFeatureDim(); j++) currCenters[j] /= args.newCounts[k]; if (p_index->GetDistCalcMethod() == DistCalcMethod::Cosine) { COMMON::Utils::Normalize(currCenters, p_index->GetFeatureDim(), COMMON::Utils::GetBase<T>()); } for (DimensionType j = 0; j < p_index->GetFeatureDim(); j++) TCenter[j] = (T)(currCenters[j]); } } } else { for (int i = 1; i < threads; i++) { for (int k = 0; k < m_iBKTKmeansK; k++) { if (args.clusterIdx[i*m_iBKTKmeansK + k] != -1 && 
args.clusterDist[i*m_iBKTKmeansK + k] <= args.clusterDist[k]) { args.clusterDist[k] = args.clusterDist[i*m_iBKTKmeansK + k]; args.clusterIdx[k] = args.clusterIdx[i*m_iBKTKmeansK + k]; } } } } return currDist; } template <typename T> int KmeansClustering(VectorIndex* p_index, std::vector<SizeType>& indices, const SizeType first, const SizeType last, KmeansArgs<T>& args) const { int iterLimit = 100; SizeType batchEnd = min(first + m_iSamples, last); float currDiff, currDist, minClusterDist = MaxDist; for (int numKmeans = 0; numKmeans < 3; numKmeans++) { for (int k = 0; k < m_iBKTKmeansK; k++) { SizeType randid = COMMON::Utils::rand(last, first); std::memcpy(args.centers + k*p_index->GetFeatureDim(), p_index->GetSample(indices[randid]), sizeof(T)*p_index->GetFeatureDim()); } args.ClearCounts(); currDist = KmeansAssign(p_index, indices, first, batchEnd, args, false); if (currDist < minClusterDist) { minClusterDist = currDist; memcpy(args.newTCenters, args.centers, sizeof(T)*m_iBKTKmeansK*p_index->GetFeatureDim()); memcpy(args.counts, args.newCounts, sizeof(SizeType) * m_iBKTKmeansK); } } minClusterDist = MaxDist; int noImprovement = 0; for (int iter = 0; iter < iterLimit; iter++) { std::memcpy(args.centers, args.newTCenters, sizeof(T)*m_iBKTKmeansK*p_index->GetFeatureDim()); std::random_shuffle(indices.begin() + first, indices.begin() + last); args.ClearCenters(); args.ClearCounts(); args.ClearDists(-MaxDist); currDist = KmeansAssign(p_index, indices, first, batchEnd, args, true); memcpy(args.counts, args.newCounts, sizeof(SizeType) * m_iBKTKmeansK); currDiff = 0; for (int k = 0; k < m_iBKTKmeansK; k++) { currDiff += p_index->ComputeDistance((const void*)(args.centers + k*p_index->GetFeatureDim()), (const void*)(args.newTCenters + k*p_index->GetFeatureDim())); } if (currDist < minClusterDist) { noImprovement = 0; minClusterDist = currDist; } else { noImprovement++; } if (currDiff < 1e-3 || noImprovement >= 5) break; } args.ClearCounts(); args.ClearDists(MaxDist); 
currDist = KmeansAssign(p_index, indices, first, last, args, false); memcpy(args.counts, args.newCounts, sizeof(SizeType) * m_iBKTKmeansK); int numClusters = 0; for (int i = 0; i < m_iBKTKmeansK; i++) if (args.counts[i] > 0) numClusters++; if (numClusters <= 1) { //if (last - first > 1) std::cout << "large cluster:" << last - first << " dist:" << currDist << std::endl; return numClusters; } args.Shuffle(indices, first, last); return numClusters; } private: std::vector<SizeType> m_pTreeStart; std::vector<BKTNode> m_pTreeRoots; std::unordered_map<SizeType, SizeType> m_pSampleCenterMap; public: int m_iTreeNumber, m_iBKTKmeansK, m_iBKTLeafSize, m_iSamples; }; } } #endif
backprop.c
/* ****************************************************************** * HISTORY * 15-Oct-94 Jeff Shufelt (js), Carnegie Mellon University * Prepared for 15-681, Fall 1994. * Modified by Shuai Che ****************************************************************** */ #include <omp.h> #include <stdio.h> #include <stdlib.h> #include "backprop.h" #include <math.h> #include <string.h> //#define OPEN #define ABS(x) (((x) > 0.0) ? (x) : (-(x))) #define fastcopy(to,from,len)\ {\ register char *_to,*_from;\ register long _i,_l;\ _to = (char *)(to);\ _from = (char *)(from);\ _l = (len);\ for (_i = 0; _i < _l; _i++) *_to++ = *_from++;\ } /*** Return random number between 0.0 and 1.0 ***/ float drnd() { return ((float) rand() / (float) BIGRND); } /*** Return random number between -1.0 and 1.0 ***/ float dpn1() { return ((drnd() * 2.0) - 1.0); } /*** The squashing function. Currently, it's a sigmoid. ***/ float squash(float x) { float m; //x = -x; //m = 1 + x + x*x/2 + x*x*x/6 + x*x*x*x/24 + x*x*x*x*x/120; //return(1.0 / (1.0 + m)); return (1.0 / (1.0 + exp(-x))); } /*** Allocate 1d array of floats ***/ float *alloc_1d_dbl(long n) { float *new; new = (float *) malloc ((n * sizeof (float))); if (new == NULL) { printf("ALLOC_1D_DBL: Couldn't allocate array of floats\n"); return (NULL); } return (new); } /*** Allocate 2d array of floats ***/ float *alloc_2d_dbl(long m, long n) { long i; float *new; new = (float *) malloc ((m * n * sizeof(float))); if (new == NULL) { printf("ALLOC_2D_DBL: Couldn't allocate array of dbl ptrs\n"); return (NULL); } return (new); } void bpnn_randomize_weights(float *w, long m, long n) { long i, j; for (i = 0; i <= m; i++) { for (j = 0; j <= n; j++) { w[i * (n + 1) + j] = (float) rand()/RAND_MAX; // w[i][j] = dpn1(); } } } void bpnn_randomize_row(float *w, long m) { long i; for (i = 0; i <= m; i++) { //w[i] = (float) rand()/RAND_MAX; w[i] = 0.1; } } void bpnn_zero_weights(float *w, long m, long n) { memset(w, 0, sizeof(float) * (m + 1) * (n + 1)); } void 
bpnn_initialize(long seed) { printf("Random number generator seed: %d\n", seed); srand(seed); } BPNN *bpnn_internal_create(long n_in, long n_hidden, long n_out) { BPNN *newnet; newnet = (BPNN *)malloc(sizeof(BPNN)); if (newnet == NULL) { printf("BPNN_CREATE: Couldn't allocate neural network\n"); return (NULL); } newnet->input_n = n_in; newnet->hidden_n = n_hidden; newnet->output_n = n_out; newnet->input_units = alloc_1d_dbl(n_in + 1); newnet->hidden_units = alloc_1d_dbl(n_hidden + 1); newnet->output_units = alloc_1d_dbl(n_out + 1); newnet->hidden_delta = alloc_1d_dbl(n_hidden + 1); newnet->output_delta = alloc_1d_dbl(n_out + 1); newnet->target = alloc_1d_dbl(n_out + 1); newnet->input_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); newnet->input_prev_weights = alloc_2d_dbl(n_in + 1, n_hidden + 1); newnet->hidden_prev_weights = alloc_2d_dbl(n_hidden + 1, n_out + 1); return (newnet); } void bpnn_free(BPNN *net) { long n1, n2, i; n1 = net->input_n; n2 = net->hidden_n; free((char *) net->input_units); free((char *) net->hidden_units); free((char *) net->output_units); free((char *) net->hidden_delta); free((char *) net->output_delta); free((char *) net->target); free((char *) net->input_weights); free((char *) net->input_prev_weights); free((char *) net->hidden_weights); free((char *) net->hidden_prev_weights); free((char *) net); } /*** Creates a new fully-connected network from scratch, with the given numbers of input, hidden, and output units. Threshold units are automatically included. All weights are randomly initialized. Space is also allocated for temporary storage (momentum weights, error computations, etc). 
***/ BPNN *bpnn_create(long n_in, long n_hidden, long n_out) { BPNN *newnet; newnet = bpnn_internal_create(n_in, n_hidden, n_out); bpnn_randomize_weights(newnet->input_weights, n_in, n_hidden); bpnn_randomize_weights(newnet->hidden_weights, n_hidden, n_out); bpnn_zero_weights(newnet->input_prev_weights, n_in, n_hidden); bpnn_zero_weights(newnet->hidden_prev_weights, n_hidden, n_out); bpnn_randomize_row(newnet->target, n_out); return (newnet); } void bpnn_layerforward(float *l1, float *l2, float *conn, long n1, long n2) { float sum; long j, k; /*** Set up thresholding unit ***/ l1[0] = 1.0; #ifdef OPEN omp_set_num_threads(NUM_THREAD); #pragma omp parallel for shared(conn, n1, n2, l1) private(k, j) reduction(+: sum) schedule(static) #endif /*** For each unit in second layer ***/ for (j = 1; j <= n2; j++) { /*** Compute weighted sum of its inputs ***/ sum = 0.0; for (k = 0; k <= n1; k++) { sum += conn[k * (n2 + 1) + j] * l1[k]; } l2[j] = squash(sum); } } //extern "C" void bpnn_output_error(float *delta, float *target, float *output, long nj, float *err) { long j; float o, t, errsum; errsum = 0.0; for (j = 1; j <= nj; j++) { o = output[j]; t = target[j]; delta[j] = o * (1.0 - o) * (t - o); errsum += ABS(delta[j]); } *err = errsum; } void bpnn_hidden_error(float *delta_h, long nh, float *delta_o, long no, float *who, float *hidden, float *err) { long j, k; float h, sum, errsum; errsum = 0.0; for (j = 1; j <= nh; j++) { h = hidden[j]; sum = 0.0; for (k = 1; k <= no; k++) { sum += delta_o[k] * who[j * (no + 1) + k]; } delta_h[j] = h * (1.0 - h) * sum; errsum += ABS(delta_h[j]); } *err = errsum; } void bpnn_adjust_weights(float *delta, long ndelta, float *ly, long nly, float *w, float *oldw) { float new_dw; long k, j; ly[0] = 1.0; //eta = 0.3; //momentum = 0.3; #ifdef OPEN omp_set_num_threads(NUM_THREAD); #pragma omp parallel for \ shared(oldw, w, delta) \ private(j, k, new_dw) \ firstprivate(ndelta, nly, momentum) #endif for (j = 1; j <= ndelta; j++) { for (k = 0; k <= 
nly; k++) { new_dw = ((ETA * delta[j] * ly[k]) + (MOMENTUM * oldw[k * (ndelta + 1) + j])); w[k * (ndelta + 1) + j] += new_dw; oldw[k * (ndelta + 1) + j] = new_dw; } } } void bpnn_feedforward(BPNN *net) { long in, hid, out; in = net->input_n; hid = net->hidden_n; out = net->output_n; /*** Feed forward input activations. ***/ bpnn_layerforward(net->input_units, net->hidden_units, net->input_weights, in, hid); bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out); } void bpnn_train(BPNN *net, float *eo, float *eh) { long in, hid, out; float out_err, hid_err; in = net->input_n; hid = net->hidden_n; out = net->output_n; /*** Feed forward input activations. ***/ bpnn_layerforward(net->input_units, net->hidden_units, net->input_weights, in, hid); bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out); /*** Compute error on output and hidden units. ***/ bpnn_output_error(net->output_delta, net->target, net->output_units, out, &out_err); bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out, net->hidden_weights, net->hidden_units, &hid_err); *eo = out_err; *eh = hid_err; /*** Adjust input and hidden weights. ***/ bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights); bpnn_adjust_weights(net->hidden_delta, hid, net->input_units, in, net->input_weights, net->input_prev_weights); }
Par-07-ParallelOmpForNestedOmpParallelFor.c
/*
 * Nested-OpenMP example: an `omp for` worksharing loop inside a parallel
 * region, with a second (nested) `parallel for` inside every outer
 * iteration.
 *
 * NOTE(review): `b[j] += a[i]` is a data race.  The outer `omp for`
 * distributes the iterations of i across the team, so several threads can
 * be inside their inner loops at once, each performing an unsynchronized
 * read-modify-write of the same b[j].  (Whether the inner region actually
 * gains extra threads depends on nested parallelism being enabled; even
 * when it is serialized, the outer threads still race on b.)  The filename
 * suggests this race is the point of the example, so it is documented here
 * rather than "fixed".
 */
int main(int argc, char **argv)
{
  int a[4] = {1,2,3,4};
  int b[4] = {0, 0, 0, 0};

  #pragma omp parallel
  {
    /* iterations of i are divided among the enclosing team's threads */
    #pragma omp for
    for (int i = 0; i < 4; ++i) {
      /* safe: each a[i] is written only by the thread that owns iteration i,
       * and read only by that thread's inner loop below */
      a[i] = 3*a[i];

      /* nested parallel region: each outer thread updates ALL of b[] */
      #pragma omp parallel for
      for (int j = 0; j < 4; ++j) {
        b[j] += a[i];   /* racy across concurrent outer iterations */
      }
    }
  }
  return 0;
}
GB_binop__isle_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__isle_fp32 // A.*B function (eWiseMult): GB_AemultB__isle_fp32 // A*D function (colscale): GB_AxD__isle_fp32 // D*A function (rowscale): GB_DxB__isle_fp32 // C+=B function (dense accum): GB_Cdense_accumB__isle_fp32 // C+=b function (dense accum): GB_Cdense_accumb__isle_fp32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__isle_fp32 // C=scalar+B GB_bind1st__isle_fp32 // C=scalar+B' GB_bind1st_tran__isle_fp32 // C=A+scalar GB_bind2nd__isle_fp32 // C=A'+scalar GB_bind2nd_tran__isle_fp32 // C type: float // A type: float // B,b type: float // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax 
[pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x <= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_FP32 || GxB_NO_ISLE_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__isle_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__isle_fp32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__isle_fp32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__isle_fp32 ( GrB_Matrix C, const GrB_Matrix A, bool 
A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__isle_fp32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__isle_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__isle_fp32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int 
ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__isle_fp32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float bij = Bx [p] ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__isle_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info 
GB_bind1st_tran__isle_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB_bind2nd_tran__isle_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
pubkeylp.h
/** * @file pubkeylp.h -- Public key type for lattice crypto operations. * @author TPOC: contact@palisade-crypto.org * * @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT) * All rights reserved. * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #ifndef LBCRYPTO_CRYPTO_PUBKEYLP_H #define LBCRYPTO_CRYPTO_PUBKEYLP_H //Includes Section #include <vector> #include <iomanip> #include "lattice/elemparams.h" #include "lattice/ilparams.h" #include "lattice/ildcrtparams.h" #include "lattice/ilelement.h" #include "utils/inttypes.h" #include "utils/hashutil.h" #include "math/distrgen.h" #include "encoding/encodingparams.h" /** * @namespace lbcrypto * The namespace of lbcrypto */ namespace lbcrypto { //forward declarations, used to resolve circular header dependencies template<typename Element> class CiphertextImpl; template<typename Element> class RationalCiphertext; template<typename Element> class LPCryptoParameters; template<typename Element> class LPCryptoParametersBGV; template<typename Element> class LPCryptoParametersBFV; template<typename Element> class LPCryptoParametersStehleSteinfeld; template<typename Element> class CryptoObject; struct EncryptResult { explicit EncryptResult() : isValid(false), numBytesEncrypted(0) {} explicit EncryptResult(size_t len) : isValid(true), numBytesEncrypted(len) {} bool isValid; /**< whether the encryption was successful */ usint numBytesEncrypted; /**< count of the number of plaintext bytes that were encrypted */ }; /** * @brief Decryption result. This represents whether the decryption of a cipheretext was performed correctly. * * This is intended to eventually incorporate information about the amount of padding in a decoded ciphertext, * to ensure that the correct amount of padding is stripped away. * It is intended to provided a very simple kind of checksum eventually. * This notion of a decoding output is inherited from the crypto++ library. * It is also intended to be used in a recover and restart robust functionality if not all ciphertext is recieved over a lossy channel, so that if all information is eventually recieved, decoding/decryption can be performed eventually. * This is intended to be returned with the output of a decryption operation. 
*/ struct DecryptResult { /** * Constructor that initializes all message lengths to 0. */ explicit DecryptResult() : isValid(false), messageLength(0) {} /** * Constructor that initializes all message lengths. * @param len the new length. */ explicit DecryptResult(size_t len) : isValid(true), messageLength(len) {} bool isValid; /**< whether the decryption was successful */ usint messageLength; /**< the length of the decrypted plaintext message */ }; /** * @brief Abstract interface class for LP Keys * * @tparam Element a ring element. */ template <class Element> class LPKey : public CryptoObject<Element>, public Serializable { public: LPKey(CryptoContext<Element> cc, const string& id = "") : CryptoObject<Element>(cc, id) {} LPKey(shared_ptr<CryptoObject<Element>> co) : CryptoObject<Element>(co) {} virtual ~LPKey() {} template <class Archive> void save( Archive & ar, std::uint32_t const version ) const { ar( ::cereal::base_class<CryptoObject<Element>>( this ) ); } template <class Archive> void load( Archive & ar, std::uint32_t const version ) { ar( ::cereal::base_class<CryptoObject<Element>>( this ) ); } }; template<typename Element> class LPPublicKeyImpl; template<typename Element> using LPPublicKey = shared_ptr<LPPublicKeyImpl<Element>>; /** * @brief Class for LP public keys * @tparam Element a ring element. 
*/ template <typename Element> class LPPublicKeyImpl : public LPKey<Element> { public: /** * Basic constructor * * @param cc - CryptoContext * @param id - key identifier */ LPPublicKeyImpl(CryptoContext<Element> cc = 0, const string& id = "") : LPKey<Element>(cc, id) {} /** * Copy constructor * *@param &rhs LPPublicKeyImpl to copy from */ explicit LPPublicKeyImpl(const LPPublicKeyImpl<Element> &rhs) : LPKey<Element>(rhs.GetCryptoContext(), rhs.GetKeyTag()) { m_h = rhs.m_h; } /** * Move constructor * *@param &rhs LPPublicKeyImpl to move from */ explicit LPPublicKeyImpl(LPPublicKeyImpl<Element> &&rhs) : LPKey<Element>(rhs.GetCryptoContext(), rhs.GetKeyTag()) { m_h = std::move(rhs.m_h); } operator bool() const { return bool(this->context) && m_h.size() != 0; } /** * Assignment Operator. * * @param &rhs LPPublicKeyImpl to copy from */ const LPPublicKeyImpl<Element>& operator=(const LPPublicKeyImpl<Element> &rhs) { CryptoObject<Element>::operator=(rhs); this->m_h = rhs.m_h; return *this; } /** * Move Assignment Operator. * * @param &rhs LPPublicKeyImpl to copy from */ const LPPublicKeyImpl<Element>& operator=(LPPublicKeyImpl<Element> &&rhs) { CryptoObject<Element>::operator=(rhs); m_h = std::move(rhs.m_h); return *this; } //@Get Properties /** * Gets the computed public key * @return the public key element. */ const std::vector<Element> &GetPublicElements() const { return this->m_h; } //@Set Properties /** * Sets the public key vector of Element. * @param &element is the public key Element vector to be copied. */ void SetPublicElements(const std::vector<Element> &element) { m_h = element; } /** * Sets the public key vector of Element. * @param &&element is the public key Element vector to be moved. */ void SetPublicElements(std::vector<Element> &&element) { m_h = std::move(element); } /** * Sets the public key Element at index idx. * @param &element is the public key Element to be copied. 
*/ void SetPublicElementAtIndex(usint idx, const Element &element) { m_h.insert(m_h.begin() + idx, element); } /** * Sets the public key Element at index idx. * @param &&element is the public key Element to be moved. */ void SetPublicElementAtIndex(usint idx, Element &&element) { m_h.insert(m_h.begin() + idx, std::move(element)); } bool operator==(const LPPublicKeyImpl& other) const { if( !CryptoObject<Element>::operator ==(other) ) { return false; } if( m_h.size() != other.m_h.size() ) { return false; } for( size_t i = 0; i < m_h.size(); i++ ) { if( m_h[i] != other.m_h[i] ) { return false; } } return true; } bool operator!=(const LPPublicKeyImpl& other) const { return ! (*this == other); } template <class Archive> void save( Archive & ar, std::uint32_t const version ) const { ar( ::cereal::base_class<LPKey<Element>>( this ) ); ar( ::cereal::make_nvp("h",m_h) ); } template <class Archive> void load( Archive & ar, std::uint32_t const version ) { if( version > SerializedVersion() ) { PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library"); } ar( ::cereal::base_class<LPKey<Element>>( this ) ); ar( ::cereal::make_nvp("h",m_h) ); } std::string SerializedObjectName() const { return "PublicKey"; } static uint32_t SerializedVersion() { return 1; } private: std::vector<Element> m_h; }; template<typename Element> class LPEvalKeyImpl; template<typename Element> using LPEvalKey = shared_ptr<LPEvalKeyImpl<Element>>; /** * @brief Abstract interface for LP evaluation/proxy keys * @tparam Element a ring element. */ template <class Element> class LPEvalKeyImpl : public LPKey<Element> { public: /** * Basic constructor for setting crypto params * * @param &cryptoParams is the reference to cryptoParams */ LPEvalKeyImpl(CryptoContext<Element> cc = 0) : LPKey<Element>(cc) {} virtual ~LPEvalKeyImpl() {} /** * Setter function to store Relinearization Element Vector A. 
	 * Throws exception, to be overridden by derived class.
	 *
	 * @param &a is the Element vector to be copied.
	 */
	virtual void SetAVector(const std::vector<Element> &a) {
		throw std::runtime_error("SetAVector copy operation not supported");
	}

	// The base class provides throwing stubs for every representation-specific
	// accessor; each concrete key class overrides only the accessors that its
	// own representation supports.

	/**
	 * Setter function to store Relinearization Element Vector A.
	 * Throws exception, to be overridden by derived class.
	 *
	 * @param &&a is the Element vector to be moved.
	 */
	virtual void SetAVector(std::vector<Element> &&a) {
		throw std::runtime_error("SetAVector move operation not supported");
	}

	/**
	 * Getter function to access Relinearization Element Vector A.
	 * Throws exception, to be overridden by derived class.
	 *
	 * @return Element vector A.
	 */
	virtual const std::vector<Element> &GetAVector() const {
		throw std::runtime_error("GetAVector operation not supported");
	}

	/**
	 * Setter function to store Relinearization Element Vector B.
	 * Throws exception, to be overridden by derived class.
	 *
	 * @param &b is the Element vector to be copied.
	 */
	virtual void SetBVector(const std::vector<Element> &b) {
		throw std::runtime_error("SetBVector copy operation not supported");
	}

	/**
	 * Setter function to store Relinearization Element Vector B.
	 * Throws exception, to be overridden by derived class.
	 *
	 * @param &&b is the Element vector to be moved.
	 */
	virtual void SetBVector(std::vector<Element> &&b) {
		throw std::runtime_error("SetBVector move operation not supported");
	}

	/**
	 * Getter function to access Relinearization Element Vector B.
	 * Throws exception, to be overridden by derived class.
	 *
	 * @return Element vector B.
	 */
	virtual const std::vector<Element> &GetBVector() const {
		throw std::runtime_error("GetBVector operation not supported");
	}

	/**
	 * Setter function to store key switch Element (copy).
	 * Throws exception, to be overridden by derived class.
	 *
	 * @param &a is the Element to be copied.
	 */
	virtual void SetA(const Element &a) {
		throw std::runtime_error("SetA copy operation not supported");
	}

	/**
	 * Setter function to store key switch Element.
* Throws exception, to be overridden by derived class. * * @param &&a is the Element to be moved. */ virtual void SetA(Element &&a) { throw std::runtime_error("SetA move operation not supported"); } /** * Getter function to access key switch Element. * Throws exception, to be overridden by derived class. * * @return Element. */ virtual const Element &GetA() const { throw std::runtime_error("GetA operation not supported"); } friend bool operator==(const LPEvalKeyImpl& a, const LPEvalKeyImpl& b) { return a.key_compare(b); } friend bool operator!=(const LPEvalKeyImpl& a, LPEvalKeyImpl& b) { return ! (a == b); } virtual bool key_compare(const LPEvalKeyImpl& other) const { return false; } template <class Archive> void save( Archive & ar, std::uint32_t const version ) const { ar( ::cereal::base_class<LPKey<Element>>( this ) ); } template <class Archive> void load( Archive & ar, std::uint32_t const version ) { ar( ::cereal::base_class<LPKey<Element>>( this ) ); } std::string SerializedObjectName() const { return "EvalKey"; } }; template<typename Element> class LPEvalKeyRelinImpl; template<typename Element> using LPEvalKeyRelin = shared_ptr<LPEvalKeyRelinImpl<Element>>; /** * @brief Concrete class for Relinearization keys of RLWE scheme * @tparam Element a ring element. 
*/ template <class Element> class LPEvalKeyRelinImpl : public LPEvalKeyImpl<Element> { public: /** * Basic constructor for setting crypto params * * @param &cryptoParams is the reference to cryptoParams */ LPEvalKeyRelinImpl(CryptoContext<Element> cc = 0) : LPEvalKeyImpl<Element>(cc) {} virtual ~LPEvalKeyRelinImpl() {} /** * Copy constructor * *@param &rhs key to copy from */ explicit LPEvalKeyRelinImpl(const LPEvalKeyRelinImpl<Element> &rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) { m_rKey = rhs.m_rKey; } /** * Move constructor * *@param &rhs key to move from */ explicit LPEvalKeyRelinImpl(LPEvalKeyRelinImpl<Element> &&rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) { m_rKey = std::move(rhs.m_rKey); } operator bool() const { return bool(this->context) && m_rKey.size() != 0; } /** * Assignment Operator. * * @param &rhs key to copy from */ const LPEvalKeyRelinImpl<Element>& operator=(const LPEvalKeyRelinImpl<Element> &rhs) { this->context = rhs.context; this->m_rKey = rhs.m_rKey; return *this; } /** * Move Assignment Operator. * * @param &rhs key to move from */ const LPEvalKeyRelinImpl<Element>& operator=(LPEvalKeyRelinImpl<Element> &&rhs) { this->context = rhs.context; rhs.context = 0; m_rKey = std::move(rhs.m_rKey); return *this; } /** * Setter function to store Relinearization Element Vector A. * Overrides base class implementation. * * @param &a is the Element vector to be copied. */ virtual void SetAVector(const std::vector<Element> &a) { m_rKey.insert(m_rKey.begin() + 0, a); } /** * Setter function to store Relinearization Element Vector A. * Overrides base class implementation. * * @param &&a is the Element vector to be moved. */ virtual void SetAVector(std::vector<Element> &&a) { m_rKey.insert(m_rKey.begin() + 0, std::move(a)); } /** * Getter function to access Relinearization Element Vector A. * Overrides base class implementation. * * @return Element vector A. 
*/ virtual const std::vector<Element> &GetAVector() const { return m_rKey.at(0); } /** * Setter function to store Relinearization Element Vector B. * Overrides base class implementation. * * @param &b is the Element vector to be copied. */ virtual void SetBVector(const std::vector<Element> &b) { m_rKey.insert(m_rKey.begin() + 1, b); } /** * Setter function to store Relinearization Element Vector B. * Overrides base class implementation. * * @param &&b is the Element vector to be moved. */ virtual void SetBVector(std::vector<Element> &&b) { m_rKey.insert(m_rKey.begin() + 1, std::move(b)); } /** * Getter function to access Relinearization Element Vector B. * Overrides base class implementation. * * @return Element vector B. */ virtual const std::vector<Element> &GetBVector() const { return m_rKey.at(1); } bool key_compare(const LPEvalKeyImpl<Element>& other) const { const LPEvalKeyRelinImpl<Element> &oth = dynamic_cast<const LPEvalKeyRelinImpl<Element> &>(other); if( !CryptoObject<Element>::operator==(other) ) return false; if( this->m_rKey.size() != oth.m_rKey.size() ) return false; for( size_t i=0; i<this->m_rKey.size(); i++ ) { if( this->m_rKey[i].size() != oth.m_rKey[i].size() ) return false; for( size_t j=0; j<this->m_rKey[i].size(); j++ ) { if( this->m_rKey[i][j] != oth.m_rKey[i][j] ) return false; } } return true; } template <class Archive> void save( Archive & ar, std::uint32_t const version ) const { ar( ::cereal::base_class<LPEvalKeyImpl<Element>>( this ) ); ar( ::cereal::make_nvp("k", m_rKey) ); } template <class Archive> void load( Archive & ar, std::uint32_t const version ) { if( version > SerializedVersion() ) { PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library"); } ar( ::cereal::base_class<LPEvalKeyImpl<Element>>( this ) ); ar( ::cereal::make_nvp("k", m_rKey) ); } std::string SerializedObjectName() const { return "EvalKeyRelin"; } static uint32_t SerializedVersion() { 
return 1; } private: //private member to store vector of vector of Element. std::vector< std::vector<Element> > m_rKey; }; template<typename Element> class LPEvalKeyNTRURelinImpl; template<typename Element> using LPEvalKeyNTRURelin = shared_ptr<LPEvalKeyNTRURelinImpl<Element>>; /** * @brief Evaluation Relinearization keys for NTRU scheme. * @tparam Element a ring element. */ template <class Element> class LPEvalKeyNTRURelinImpl : public LPEvalKeyImpl<Element> { public: /** * Basic constructor for setting crypto params * * @param &cryptoParams is the reference to cryptoParams */ LPEvalKeyNTRURelinImpl(CryptoContext<Element> cc = 0) : LPEvalKeyImpl<Element>(cc) {} virtual ~LPEvalKeyNTRURelinImpl() {} /** * Copy constructor * *@param &rhs key to copy from */ explicit LPEvalKeyNTRURelinImpl(const LPEvalKeyNTRURelinImpl<Element> &rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) { m_rKey = rhs.m_rKey; } /** * Move constructor * *@param &rhs key to move from */ explicit LPEvalKeyNTRURelinImpl(LPEvalKeyNTRURelinImpl<Element> &&rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) { m_rKey = std::move(rhs.m_rKey); } /** * Assignment Operator. * * @param &rhs key to copy from */ const LPEvalKeyNTRURelinImpl<Element>& operator=(const LPEvalKeyNTRURelinImpl<Element> &rhs) { this->context = rhs.context; this->m_rKey = rhs.m_rKey; return *this; } /** * Move Assignment Operator. * * @param &rhs key to move from */ const LPEvalKeyNTRURelinImpl<Element>& operator=(LPEvalKeyNTRURelinImpl<Element> &&rhs) { this->context = rhs.context; rhs.context = 0; m_rKey = std::move(rhs.m_rKey); return *this; } /** * Setter function to store Relinearization Element Vector A. * Overrides base class implementation. * * @param &a is the Element vector to be copied. */ virtual void SetAVector(const std::vector<Element> &a) { for (usint i = 0; i < a.size(); i++) { m_rKey.insert(m_rKey.begin() + i, a.at(i)); } } /** * Setter function to store Relinearization Element Vector A. 
* Overrides base class implementation. * * @param &&a is the Element vector to be moved. */ virtual void SetAVector(std::vector<Element> &&a) { m_rKey = std::move(a); } /** * Getter function to access Relinearization Element Vector A. * Overrides base class implementation. * * @return Element vector A. */ virtual const std::vector<Element> &GetAVector() const { return m_rKey; } bool key_compare(const LPEvalKeyImpl<Element>& other) const { const LPEvalKeyNTRURelinImpl<Element> &oth = dynamic_cast<const LPEvalKeyNTRURelinImpl<Element> &>(other); if( !CryptoObject<Element>::operator ==(other) ) return false; if( this->m_rKey.size() != oth.m_rKey.size() ) return false; for( size_t i=0; i<this->m_rKey.size(); i++ ) { if( this->m_rKey[i] != oth.m_rKey[i] ) return false; } return true; } template <class Archive> void save( Archive & ar, std::uint32_t const version ) const { ar( ::cereal::base_class<LPEvalKeyImpl<Element>>( this ) ); ar( ::cereal::make_nvp("k", m_rKey) ); } template <class Archive> void load( Archive & ar, std::uint32_t const version ) { if( version > SerializedVersion() ) { PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library"); } ar( ::cereal::base_class<LPEvalKeyImpl<Element>>( this ) ); ar( ::cereal::make_nvp("k", m_rKey) ); } std::string SerializedObjectName() const { return "EvalKeyNTRURelin"; } static uint32_t SerializedVersion() { return 1; } private: //private member to store vector of Element. std::vector<Element> m_rKey; }; template<typename Element> class LPEvalKeyNTRUImpl; template<typename Element> using LPEvalKeyNTRU = shared_ptr<LPEvalKeyNTRUImpl<Element>>; /** * @brief Concrete class for facilitating NTRU key switch. * @tparam Element a ring element. 
*/ template <class Element> class LPEvalKeyNTRUImpl : public LPEvalKeyImpl<Element> { public: /** * Basic constructor for setting crypto params * * @param &cryptoParams is the reference to cryptoParams */ LPEvalKeyNTRUImpl(CryptoContext<Element> cc = 0) : LPEvalKeyImpl<Element>(cc) {} virtual ~LPEvalKeyNTRUImpl() {} /** * Copy constructor * *@param &rhs key to copy from */ explicit LPEvalKeyNTRUImpl(const LPEvalKeyNTRUImpl<Element> &rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) { m_Key = rhs.m_Key; } /** * Move constructor * *@param &rhs key to move from */ explicit LPEvalKeyNTRUImpl(LPEvalKeyNTRUImpl<Element> &&rhs) : LPEvalKeyImpl<Element>(rhs.GetCryptoContext()) { m_Key = std::move(rhs.m_Key); } /** * Assignment Operator. * * @param &rhs key to copy from */ const LPEvalKeyNTRUImpl<Element>& operator=(const LPEvalKeyNTRUImpl<Element> &rhs) { this->context = rhs.context; this->m_Key = rhs.m_Key; return *this; } /** * Move Assignment Operator. * * @param &rhs key to move from */ const LPEvalKeyNTRUImpl<Element>& operator=(LPEvalKeyNTRUImpl<Element> &&rhs) { this->context = rhs.context; rhs.context = 0; m_Key = std::move(rhs.m_Key); return *this; } /** * Setter function to store NTRU key switch element. * Function copies the key. * Overrides the virtual function from base class LPEvalKeyImpl. * * @param &a is the key switch element to be copied. */ virtual void SetA(const Element &a) { m_Key = a; } /** * Setter function to store NTRU key switch Element. * Function moves the key. * Overrides the virtual function from base class LPEvalKeyImpl. * * @param &&a is the key switch Element to be moved. */ virtual void SetA(Element &&a) { m_Key = std::move(a); } /** * Getter function to access NTRU key switch Element. * Overrides the virtual function from base class LPEvalKeyImpl. * * @return NTRU key switch Element. 
*/ virtual const Element& GetA() const { return m_Key; } bool key_compare(const LPEvalKeyImpl<Element>& other) const { const LPEvalKeyNTRUImpl<Element> &oth = dynamic_cast<const LPEvalKeyNTRUImpl<Element> &>(other); if( !CryptoObject<Element>::operator ==(other) ) return false; if( this->m_Key != oth.m_Key ) return false; return true; } template <class Archive> void save( Archive & ar, std::uint32_t const version ) const { ar( ::cereal::base_class<LPEvalKeyImpl<Element>>( this ) ); ar( ::cereal::make_nvp("k", m_Key) ); } template <class Archive> void load( Archive & ar, std::uint32_t const version ) { if( version > SerializedVersion() ) { PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library"); } ar( ::cereal::base_class<LPEvalKeyImpl<Element>>( this ) ); ar( ::cereal::make_nvp("k", m_Key) ); } std::string SerializedObjectName() const { return "EvalKeyNTRU"; } static uint32_t SerializedVersion() { return 1; } private: /** * private member Element to store key. */ Element m_Key; }; template<typename Element> class LPPrivateKeyImpl; template<typename Element> using LPPrivateKey = shared_ptr<LPPrivateKeyImpl<Element>>; /** * @brief Class fpr LP Private keys * @tparam Element a ring element. 
*/ template <class Element> class LPPrivateKeyImpl : public LPKey<Element> { public: /** * Construct in context */ LPPrivateKeyImpl(CryptoContext<Element> cc = 0) : LPKey<Element>(cc, GenerateUniqueKeyID()) {} /** * Copy constructor *@param &rhs the LPPrivateKeyImpl to copy from */ explicit LPPrivateKeyImpl(const LPPrivateKeyImpl<Element> &rhs) : LPKey<Element>(rhs.GetCryptoContext(), rhs.GetKeyTag()) { this->m_sk = rhs.m_sk; } /** * Move constructor *@param &rhs the LPPrivateKeyImpl to move from */ explicit LPPrivateKeyImpl(LPPrivateKeyImpl<Element> &&rhs) : LPKey<Element>(rhs.GetCryptoContext(), rhs.GetKeyTag()) { this->m_sk = std::move(rhs.m_sk); } operator bool() const { return bool(this->context); } /** * Assignment Operator. * * @param &rhs LPPrivateKeyto assign from. * @return the resulting LPPrivateKeyImpl */ const LPPrivateKeyImpl<Element>& operator=(const LPPrivateKeyImpl<Element> &rhs) { CryptoObject<Element>::operator=(rhs); this->m_sk = rhs.m_sk; return *this; } /** * Move Assignment Operator. * * @param &rhs LPPrivateKeyImpl to assign from. * @return the resulting LPPrivateKeyImpl */ const LPPrivateKeyImpl<Element>& operator=(LPPrivateKeyImpl<Element> &&rhs) { CryptoObject<Element>::operator=(rhs); this->m_sk = std::move(rhs.m_sk); return *this; } /** * Implementation of the Get accessor for private element. * @return the private element. */ const Element & GetPrivateElement() const { return m_sk; } /** * Set accessor for private element. * @private &x private element to set to. */ void SetPrivateElement(const Element &x) { m_sk = x; } /** * Set accessor for private element. * @private &x private element to set to. */ void SetPrivateElement(Element &&x) { m_sk = std::move(x); } bool operator==(const LPPrivateKeyImpl& other) const { return CryptoObject<Element>::operator ==(other) && m_sk == other.m_sk; } bool operator!=(const LPPrivateKeyImpl& other) const { return ! 
(*this == other); } template <class Archive> void save( Archive & ar, std::uint32_t const version ) const { ar( ::cereal::base_class<LPKey<Element>>( this ) ); ar( ::cereal::make_nvp("s",m_sk) ); } template <class Archive> void load( Archive & ar, std::uint32_t const version ) { if( version > SerializedVersion() ) { PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library"); } ar( ::cereal::base_class<LPKey<Element>>( this ) ); ar( ::cereal::make_nvp("s",m_sk) ); } std::string SerializedObjectName() const { return "PrivateKey"; } static uint32_t SerializedVersion() { return 1; } private: static const size_t intsInID = 128 / (sizeof(uint32_t) * 8); static string GenerateUniqueKeyID() { std::uniform_int_distribution<uint32_t> distribution(0, std::numeric_limits<uint32_t>::max()); std::stringstream s; s.fill('0'); s << std::hex; for( size_t i = 0; i < intsInID; i++ ) s << std::setw(8) << distribution(PseudoRandomNumberGenerator::GetPRNG()); return s.str(); } Element m_sk; }; template <class Element> class LPKeyPair { public: LPPublicKey<Element> publicKey; LPPrivateKey<Element> secretKey; LPKeyPair(LPPublicKeyImpl<Element>* a=0, LPPrivateKeyImpl<Element>* b=0): publicKey(a), secretKey(b) {} bool good() { return publicKey && secretKey; } }; /** * @brief Abstract interface for parameter generation algorithm * @tparam Element a ring element. */ template <class Element> class LPParameterGenerationAlgorithm { public: virtual ~LPParameterGenerationAlgorithm() {} /** * Method for computing all derived parameters based on chosen primitive parameters * * @param *cryptoParams the crypto parameters object to be populated with parameters. * @param evalAddCount number of EvalAdds assuming no EvalMult and KeySwitch operations are performed. * @param evalMultCount number of EvalMults assuming no EvalAdd and KeySwitch operations are performed. 
* @param keySwitchCount number of KeySwitch operations assuming no EvalAdd and EvalMult operations are performed. * @param dcrtBits number of bits in each CRT modulus* */ virtual bool ParamsGen(shared_ptr<LPCryptoParameters<Element>> cryptoParams, int32_t evalAddCount = 0, int32_t evalMultCount = 0, int32_t keySwitchCount = 0, size_t dcrtBits = 0) const = 0; }; /** * @brief Abstract interface for encryption algorithm * @tparam Element a ring element. */ template <class Element> class LPEncryptionAlgorithm { public: virtual ~LPEncryptionAlgorithm() {} /** * Method for encrypting plaintext using LBC * * @param&publicKey public key used for encryption. * @param plaintext copy of the plaintext element. NOTE a copy is passed! That is NOT an error! * @param doEncryption encrypts if true, embeds (encodes) the plaintext into cryptocontext if false * @param *ciphertext ciphertext which results from encryption. */ virtual Ciphertext<Element> Encrypt(const LPPublicKey<Element> publicKey, Element plaintext) const = 0; /** * Method for encrypting plaintex using LBC * * @param privateKey private key used for encryption. * @param plaintext copy of the plaintext input. NOTE a copy is passed! That is NOT an error! * @param doEncryption encrypts if true, embeds (encodes) the plaintext into cryptocontext if false * @param *ciphertext ciphertext which results from encryption. */ virtual Ciphertext<Element> Encrypt(const LPPrivateKey<Element> privateKey, Element plaintext) const = 0; /** * Method for decrypting plaintext using LBC * * @param &privateKey private key used for decryption. * @param &ciphertext ciphertext id decrypted. * @param *plaintext the plaintext output. * @return the decoding result. */ virtual DecryptResult Decrypt(const LPPrivateKey<Element> privateKey, ConstCiphertext<Element> ciphertext, NativePoly *plaintext) const = 0; /** * Function to generate public and private keys * * @param &publicKey private key used for decryption. 
* @param &privateKey private key used for decryption. * @return function ran correctly. */ virtual LPKeyPair<Element> KeyGen(CryptoContext<Element> cc, bool makeSparse=false) = 0; }; /** * @brief Abstract interface for Leveled SHE operations * @tparam Element a ring element. */ template <class Element> class LPLeveledSHEAlgorithm { public: virtual ~LPLeveledSHEAlgorithm() {} /** * Method for Modulus Reduction. * * @param &cipherText Ciphertext to perform mod reduce on. */ virtual Ciphertext<Element> ModReduce(ConstCiphertext<Element> cipherText) const = 0; /** * Method for Ring Reduction. * * @param &cipherText Ciphertext to perform ring reduce on. * @param &privateKey Private key used to encrypt the first argument. */ virtual Ciphertext<Element> RingReduce(ConstCiphertext<Element> cipherText, const LPEvalKey<Element> keySwitchHint) const = 0; /** * Method for Composed EvalMult * * @param &cipherText1 ciphertext1, first input ciphertext to perform multiplication on. * @param &cipherText2 cipherText2, second input ciphertext to perform multiplication on. * @param &quadKeySwitchHint is for resultant quadratic secret key after multiplication to the secret key of the particular level. * @param &cipherTextResult is the resulting ciphertext that can be decrypted with the secret key of the particular level. */ virtual Ciphertext<Element> ComposedEvalMult( ConstCiphertext<Element> cipherText1, ConstCiphertext<Element> cipherText2, const LPEvalKey<Element> quadKeySwitchHint) const = 0; /** * Method for Level Reduction from sk -> sk1. This method peforms a keyswitch on the ciphertext and then performs a modulus reduction. * * @param &cipherText1 is the original ciphertext to be key switched and mod reduced. * @param &linearKeySwitchHint is the linear key switch hint to perform the key switch operation. * @param &cipherTextResult is the resulting ciphertext. 
*/ virtual Ciphertext<Element> LevelReduce(ConstCiphertext<Element> cipherText1, const LPEvalKey<Element> linearKeySwitchHint) const = 0; /** * Function that determines if security requirements are met if ring dimension is reduced by half. * * @param ringDimension is the original ringDimension * @param &moduli is the vector of moduli that is used * @param rootHermiteFactor is the security threshold */ virtual bool CanRingReduce(usint ringDimension, const std::vector<BigInteger> &moduli, const double rootHermiteFactor) const = 0; }; /** * @brief Abstract interface class for LBC PRE algorithms * @tparam Element a ring element. */ template <class Element> class LPPREAlgorithm { public: virtual ~LPPREAlgorithm() {} /** * Virtual function to generate 1..log(q) encryptions for each bit of the original private key * Variant that uses the public key for the new secret key. * * @param &newKey public key for the new secret key. * @param &origPrivateKey original private key used for decryption. * @param *evalKey the evaluation key. * @return the re-encryption key. */ virtual LPEvalKey<Element> ReKeyGen(const LPPublicKey<Element> newKey, const LPPrivateKey<Element> origPrivateKey) const = 0; /** * Virtual function to define the interface for re-encypting ciphertext using the array generated by ProxyGen * * @param &evalKey proxy re-encryption key. * @param &ciphertext the input ciphertext. * @param publicKey the public key of the recipient of the re-encrypted ciphertext. * @param *newCiphertext the new ciphertext. */ virtual Ciphertext<Element> ReEncrypt(const LPEvalKey<Element> evalKey, ConstCiphertext<Element> ciphertext, const LPPublicKey<Element> publicKey = nullptr) const = 0; }; /** * @brief Abstract interface class for LBC Multiparty algorithms. A version of this multiparty scheme built on the BGV scheme is seen here: * - Asharov G., Jain A., López-Alt A., Tromer E., Vaikuntanathan V., Wichs D. 
(2012) Multiparty Computation with Low Communication, Computation and Interaction via Threshold FHE. In: Pointcheval D., Johansson T. (eds) Advances in Cryptology – EUROCRYPT 2012. EUROCRYPT 2012. Lecture Notes in Computer Science, vol 7237. Springer, Berlin, Heidelberg * * During offline key generation, this multiparty scheme relies on the clients coordinating their public key generation. To do this, a single client generates a public-secret key pair. * This public key is shared with other keys which use an element in the public key to generate their own public keys. * The clients generate a shared key pair using a scheme-specific approach, then generate re-encryption keys. Re-encryption keys are uploaded to the server. * Clients encrypt data with their public keys and send the encrypted data server. * The data is re-encrypted. Computations are then run on the data. * The result is sent to each of the clients. * One client runs a "Leader" multiparty decryption operation with its own secret key. All other clients run a regular "Main" multiparty decryption with their own secret key. * The resulting partially decrypted ciphertext are then fully decrypted with the decryption fusion algorithms. * * @tparam Element a ring element. */ template <class Element> class LPMultipartyAlgorithm { public: virtual ~LPMultipartyAlgorithm() {} /** * Function to generate public and private keys for multiparty homomrophic encryption in coordination with a leading client that generated a first public key. * * @param cc cryptocontext for the keys to be generated. * @param pk1 private key used for decryption to be fused. * @param makeSparse set to true if ring reduce by a factor of 2 is to be used. 
* @param pre set to true if proxy re-encryption is used in multi-party protocol * @return key pair including the private and public key */ virtual LPKeyPair<Element> MultipartyKeyGen(CryptoContext<Element> cc, const LPPublicKey<Element> pk1, bool makeSparse=false, bool pre=false) = 0; /** * Function to generate public and private keys for multiparty homomrophic encryption server key pair in coordination with secret keys of clients. * * @param cc cryptocontext for the keys to be generated. * @param secretkeys private keys used for decryption to be fused. * @param makeSparse set to true if ring reduce by a factor of 2 is to be used. * @return key pair including the private and public key */ virtual LPKeyPair<Element> MultipartyKeyGen(CryptoContext<Element> cc, const vector<LPPrivateKey<Element>>& secretKeys, bool makeSparse=false) = 0; /** * Method for main decryption operation run by most decryption clients for multiparty homomorphic encryption * * @param privateKey private key used for decryption. * @param ciphertext ciphertext id decrypted. */ virtual Ciphertext<Element> MultipartyDecryptMain(const LPPrivateKey<Element> privateKey, ConstCiphertext<Element> ciphertext) const = 0; /** * Method for decryption operation run by the lead decryption client for multiparty homomorphic encryption * * @param privateKey private key used for decryption. * @param ciphertext ciphertext id decrypted. */ virtual Ciphertext<Element> MultipartyDecryptLead(const LPPrivateKey<Element> privateKey, ConstCiphertext<Element> ciphertext) const = 0; /** * Method for fusing the partially decrypted ciphertext. * * @param &ciphertextVec ciphertext id decrypted. * @param *plaintext the plaintext output. * @return the decoding result. */ virtual DecryptResult MultipartyDecryptFusion(const vector<Ciphertext<Element>>& ciphertextVec, NativePoly *plaintext) const = 0; }; /** * @brief Abstract interface class for LBC SHE algorithms * @tparam Element a ring element. 
*/ template <class Element> class LPSHEAlgorithm { public: virtual ~LPSHEAlgorithm() {} /** * Virtual function to define the interface for homomorphic addition of ciphertexts. * * @param ciphertext1 the input ciphertext. * @param ciphertext2 the input ciphertext. * @return the new ciphertext. */ virtual Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ciphertext1, ConstCiphertext<Element> ciphertext2) const = 0; /** * Virtual function to define the interface for homomorphic addition of ciphertexts. * * @param ciphertext the input ciphertext. * @param plaintext the input plaintext. * @return the new ciphertext. */ virtual Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ciphertext, ConstPlaintext plaintext) const = 0; /** * Virtual function to define the interface for homomorphic subtraction of ciphertexts. * * @param ciphertext1 the input ciphertext. * @param ciphertext2 the input ciphertext. * @return the new ciphertext. */ virtual Ciphertext<Element> EvalSub(ConstCiphertext<Element> ciphertext1, ConstCiphertext<Element> ciphertext2) const = 0; /** * Virtual function to define the interface for homomorphic subtraction of ciphertexts. * * @param ciphertext the input ciphertext. * @param plaintext the input plaintext. * @return the new ciphertext. */ virtual Ciphertext<Element> EvalSub(ConstCiphertext<Element> ciphertext, ConstPlaintext plaintext) const = 0; /** * Virtual function to define the interface for multiplicative homomorphic evaluation of ciphertext. * * @param ciphertext1 the input ciphertext. * @param ciphertext2 the input ciphertext. * @return the new ciphertext. */ virtual Ciphertext<Element> EvalMult(ConstCiphertext<Element> ciphertext1, ConstCiphertext<Element> ciphertext2) const = 0; /** * Virtual function to define the interface for multiplication of ciphertext by plaintext. * * @param ciphertext the input ciphertext. * @param plaintext the input plaintext. * @return the new ciphertext. 
*/ virtual Ciphertext<Element> EvalMult(ConstCiphertext<Element> ciphertext, ConstPlaintext plaintext) const = 0; /** * Virtual function to define the interface for multiplicative homomorphic evaluation of ciphertext using the evaluation key. * * @param &ciphertext1 first input ciphertext. * @param &ciphertext2 second input ciphertext. * @param &ek is the evaluation key to make the newCiphertext decryptable by the same secret key as that of ciphertext1 and ciphertext2. * @return the new ciphertext. */ virtual Ciphertext<Element> EvalMult(ConstCiphertext<Element> ciphertext1, ConstCiphertext<Element> ciphertext2, const LPEvalKey<Element> ek) const = 0; /** * Virtual function for evaluating multiplication of a ciphertext list which each multiplication is followed by relinearization operation. * * @param cipherTextList is the ciphertext list. * @param evalKeys is the evaluation key to make the newCiphertext * decryptable by the same secret key as that of ciphertext list. * @param *newCiphertext the new resulting ciphertext. */ virtual Ciphertext<Element> EvalMultMany(const vector<Ciphertext<Element>>& cipherTextList, const vector<LPEvalKey<Element>> &evalKeys) const { // default implementation if you don't have one in your scheme const size_t inSize = cipherTextList.size(); const size_t lim = inSize * 2 - 2; vector<Ciphertext<Element>> cipherTextResults; cipherTextResults.resize(inSize - 1); size_t ctrIndex = 0; for(size_t i=0; i < lim; i = i + 2) { cipherTextResults[ctrIndex++] = this->EvalMult( i < inSize ? cipherTextList[i] : cipherTextResults[i - inSize], i+1 < inSize ? cipherTextList[i+1] : cipherTextResults[i + 1 - inSize]); } return cipherTextResults.back(); } /** * Virtual function to define the interface for multiplicative homomorphic evaluation of ciphertext using the evaluation key. * * @param ct1 first input ciphertext. * @param ct2 second input ciphertext. 
	 * @param ek is the evaluation key to make the newCiphertext
	 * decryptable by the same secret key as that of ciphertext1 and ciphertext2.
	 * @param *newCiphertext the new resulting ciphertext.
	 */
	virtual Ciphertext<Element> EvalMultAndRelinearize(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2, const vector<LPEvalKey<Element>> &ek) const = 0;

	/**
	 * EvalLinRegression - Computes the parameter vector for linear regression using the least squares method
	 * @param x - matrix of regressors
	 * @param y - vector of dependent variables
	 * @return the parameter vector using (x^T x)^{-1} x^T y (using least squares method)
	 */
	shared_ptr<Matrix<RationalCiphertext<Element>>>
		EvalLinRegression(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
			const shared_ptr<Matrix<RationalCiphertext<Element>>> y) const
	{
		// multiplication is done in reverse order to minimize the number of inner products
		Matrix<RationalCiphertext<Element>> xTransposed = x->Transpose();
		shared_ptr<Matrix<RationalCiphertext<Element>>> result(new Matrix<RationalCiphertext<Element>>(xTransposed * (*y)));

		Matrix<RationalCiphertext<Element>> xCovariance = xTransposed * (*x);
		Matrix<RationalCiphertext<Element>> cofactorMatrix = xCovariance.CofactorMatrix();
		Matrix<RationalCiphertext<Element>> adjugateMatrix = cofactorMatrix.Transpose();
		// result = adj(x^T x) * (x^T y); the division by det(x^T x) is deferred
		// to the denominator of each RationalCiphertext entry below.
		*result = adjugateMatrix * (*result);

		RationalCiphertext<Element> determinant;
		xCovariance.Determinant(&determinant);

		for (size_t row = 0; row < result->GetRows(); row++)
			for (size_t col = 0; col < result->GetCols(); col++)
				(*result)(row, col).SetDenominator(determinant.GetNumerator());

		return result;
	}

	/**
	 * Virtual function to define the interface for homomorphic negation of ciphertext.
	 *
	 * @param &ciphertext the input ciphertext.
	 * @param *newCiphertext the new ciphertext.
	 */
	virtual Ciphertext<Element> EvalNegate(ConstCiphertext<Element> ciphertext) const = 0;

	/**
	 * Function to add random noise to all plaintext slots except for the first one; used in EvalInnerProduct
	 *
	 * @param &ciphertext the input ciphertext.
	 * @return modified ciphertext
	 */
	Ciphertext<Element> AddRandomNoise(ConstCiphertext<Element> ciphertext) const {

		string kID = ciphertext->GetKeyTag();
		const auto cryptoParams = ciphertext->GetCryptoParameters();
		const auto encodingParams = cryptoParams->GetEncodingParams();
		const auto elementParams = cryptoParams->GetElementParams();

		usint n = elementParams->GetRingDimension();

		auto cc = ciphertext->GetCryptoContext();

		// noise is drawn uniformly modulo the plaintext modulus
		DiscreteUniformGenerator dug;
		dug.SetModulus(encodingParams->GetPlaintextModulus());
		BigVector randomVector = dug.GenerateVector(n - 1);

		std::vector<int64_t> randomIntVector(n);
		//first plaintext slot does not need to change
		randomIntVector[0] = 0;

		for (usint i = 0; i < n - 1; i++)
		{
			randomIntVector[i + 1] = randomVector[i].ConvertToInt();
		}

		Plaintext plaintext = cc->MakePackedPlaintext(randomIntVector);
		plaintext->Encode();
		plaintext->GetElement<Element>().SetFormat(EVALUATION);
		auto ans = EvalAdd(ciphertext, plaintext);

		return ans;
	};

	/**
	 * Method for KeySwitchGen
	 *
	 * @param &originalPrivateKey Original private key used for encryption.
	 * @param &newPrivateKey New private key to generate the keyswitch hint.
	 * @param *KeySwitchHint is where the resulting keySwitchHint will be placed.
	 */
	virtual LPEvalKey<Element> KeySwitchGen(
		const LPPrivateKey<Element> originalPrivateKey,
		const LPPrivateKey<Element> newPrivateKey) const = 0;

	/**
	 * Method for KeySwitch
	 *
	 * @param &keySwitchHint Hint required to perform the ciphertext switching.
	 * @param &cipherText Original ciphertext to perform switching on.
	 */
	virtual Ciphertext<Element> KeySwitch(
		const LPEvalKey<Element> keySwitchHint,
		ConstCiphertext<Element> cipherText) const = 0;

	/**
	 * Method for KeySwitching based on RLWE relinearization (used only for the StSt scheme).
	 * Function to generate 1..log(q) encryptions for each bit of the original private key
	 *
	 * @param &newPublicKey encryption key for the new ciphertext.
	 * @param origPrivateKey original private key used for decryption.
	 */
	virtual LPEvalKey<Element> KeySwitchRelinGen(const LPPublicKey<Element> newPublicKey,
		const LPPrivateKey<Element> origPrivateKey) const = 0;

	/**
	 * Method for KeySwitching based on RLWE relinearization (used only for the StSt scheme).
	 *
	 * @param evalKey the evaluation key.
	 * @param ciphertext the input ciphertext.
	 * @return the resulting Ciphertext
	 */
	virtual Ciphertext<Element> KeySwitchRelin(const LPEvalKey<Element> evalKey,
		ConstCiphertext<Element> ciphertext) const = 0;

	/**
	 * Virtual function to define the interface for generating a evaluation key which is used after each multiplication.
	 *
	 * @param &ciphertext1 first input ciphertext.
	 * @param &ciphertext2 second input ciphertext.
	 * @param &ek is the evaluation key to make the newCiphertext decryptable by the same secret key as that of ciphertext1 and ciphertext2.
	 * @param *newCiphertext the new resulting ciphertext.
	 */
	virtual LPEvalKey<Element> EvalMultKeyGen(
		const LPPrivateKey<Element> originalPrivateKey) const = 0;

	/**
	 * Virtual function to define the interface for generating a evaluation key which is used after each multiplication for depth more than 2.
	 *
	 * @param &originalPrivateKey Original private key used for encryption.
	 * @param *evalMultKeys the resulting evalution key vector list.
	 */
	virtual vector<LPEvalKey<Element>> EvalMultKeysGen(
		const LPPrivateKey<Element> originalPrivateKey) const = 0;

	/**
	 * Virtual function to generate all isomorphism keys for a given private key
	 *
	 * @param publicKey encryption key for the new ciphertext.
	 * @param origPrivateKey original private key used for decryption.
	 * @param indexList list of automorphism indices to be computed
	 * @return returns the evaluation keys
	 */
	virtual shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPublicKey<Element> publicKey,
		const LPPrivateKey<Element> origPrivateKey, const std::vector<usint> &indexList) const = 0;

	/**
	 * Generates evaluation keys for a list of indices
	 * Currently works only for power-of-two and cyclic-group cyclotomics
	 *
	 * @param publicKey encryption key for the new ciphertext.
	 * @param origPrivateKey original private key used for decryption.
	 * @param indexList list of indices to be computed
	 * @return returns the evaluation keys
	 */
	shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAtIndexKeyGen(const LPPublicKey<Element> publicKey,
		const LPPrivateKey<Element> origPrivateKey, const std::vector<int32_t> &indexList) const {

		const auto cryptoParams = origPrivateKey->GetCryptoParameters();
		const auto encodingParams = cryptoParams->GetEncodingParams();
		const auto elementParams = cryptoParams->GetElementParams();

		uint32_t m = elementParams->GetCyclotomicOrder();

		// translate each rotation index into the matching automorphism index
		std::vector<uint32_t> autoIndices(indexList.size());

		if (!(m & (m-1))) { // power-of-two cyclotomics
			for (size_t i=0; i < indexList.size(); i++)
				autoIndices[i] = FindAutomorphismIndex2n(indexList[i],m);
		}
		else // cyclic groups
		{
			for (size_t i=0; i < indexList.size(); i++)
				autoIndices[i] = FindAutomorphismIndexCyclic(indexList[i],m,encodingParams->GetPlaintextGenerator());
		}

		if (publicKey)
			// NTRU-based scheme
			return EvalAutomorphismKeyGen(publicKey,origPrivateKey,autoIndices);
		else
			// RLWE-based scheme
			return EvalAutomorphismKeyGen(origPrivateKey,autoIndices);

	}

	/**
	 * Virtual function for evaluating automorphism of ciphertext at index i
	 *
	 * @param ciphertext the input ciphertext.
	 * @param i automorphism index
	 * @param &evalKeys - reference to the vector of evaluation keys generated by EvalAutomorphismKeyGen.
	 * @return resulting ciphertext
	 */
	virtual Ciphertext<Element> EvalAutomorphism(ConstCiphertext<Element> ciphertext, usint i,
		const std::map<usint, LPEvalKey<Element>> &evalKeys) const = 0;

	/**
	 * Moves i-th slot to slot 0
	 *
	 * @param ciphertext.
	 * @param i the index.
	 * @param &evalAtIndexKeys - reference to the map of evaluation keys generated by EvalAtIndexKeyGen.
	 * @return resulting ciphertext
	 */
	Ciphertext<Element> EvalAtIndex(ConstCiphertext<Element> ciphertext, int32_t index,
		const std::map<usint, LPEvalKey<Element>> &evalAtIndexKeys) const {

		const auto cryptoParams = ciphertext->GetCryptoParameters();
		const auto encodingParams = cryptoParams->GetEncodingParams();
		const auto elementParams = cryptoParams->GetElementParams();

		uint32_t m = elementParams->GetCyclotomicOrder();

		uint32_t autoIndex;

		// map the rotation index to the corresponding automorphism index
		if (!(m & (m-1))) // power-of-two cyclotomics
			autoIndex = FindAutomorphismIndex2n(index,m);
		else // cyclyc-group cyclotomics
			autoIndex = FindAutomorphismIndexCyclic(index,m,encodingParams->GetPlaintextGenerator());

		return EvalAutomorphism(ciphertext,autoIndex,evalAtIndexKeys);

	}

	/**
	 * Virtual function to generate automophism keys for a given private key; Uses the private key for encryption
	 *
	 * @param privateKey private key.
	 * @param indexList list of automorphism indices to be computed
	 * @return returns the evaluation keys
	 */
	virtual shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPrivateKey<Element> privateKey,
		const std::vector<usint> &indexList) const = 0;

	/**
	 * Virtual function to generate the automorphism keys for EvalSum; works only for packed encoding
	 *
	 * @param privateKey private key.
	 * @return returns the evaluation keys
	 */
	shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumKeyGen(const LPPrivateKey<Element> privateKey,
		const LPPublicKey<Element> publicKey) const {

		const auto cryptoParams = privateKey->GetCryptoParameters();
		const auto encodingParams = cryptoParams->GetEncodingParams();
		const auto elementParams = cryptoParams->GetElementParams();

		usint batchSize = encodingParams->GetBatchSize();
		usint m = elementParams->GetCyclotomicOrder();

		// stores automorphism indices needed for EvalSum
		std::vector<usint> indices;

		if (!(m & (m-1))){ // Check if m is a power of 2
			indices = GenerateIndices_2n(batchSize, m);
		}
		else { // Arbitray cyclotomics
			// successive squarings of the plaintext generator cover the
			// log2(batchSize) rotations used by EvalSum
			usint g = encodingParams->GetPlaintextGenerator();
			for (int i = 0; i < floor(log2(batchSize)); i++)
			{
				indices.push_back(g);
				g = (g * g) % m;
			}
		}

		if (publicKey)
			// NTRU-based scheme
			return EvalAutomorphismKeyGen(publicKey, privateKey, indices);
		else
			// Regular RLWE scheme
			return EvalAutomorphismKeyGen(privateKey, indices);

	}

	/**
	 * Sums all elements in log (batch size) time - works only with packed encoding
	 *
	 * @param ciphertext the input ciphertext.
	 * @param batchSize size of the batch to be summed up
	 * @param &evalKeys - reference to the map of evaluation keys generated by EvalAutomorphismKeyGen.
	 * @return resulting ciphertext
	 */
	Ciphertext<Element> EvalSum(ConstCiphertext<Element> ciphertext, usint batchSize,
		const std::map<usint, LPEvalKey<Element>> &evalKeys) const {

		const shared_ptr<LPCryptoParameters<Element>> cryptoParams = ciphertext->GetCryptoParameters();
		Ciphertext<Element> newCiphertext(new CiphertextImpl<Element>(*ciphertext));

		const auto encodingParams = cryptoParams->GetEncodingParams();
		const auto elementParams = cryptoParams->GetElementParams();

		usint m = elementParams->GetCyclotomicOrder();

		if ((encodingParams->GetBatchSize() == 0))
			throw std::runtime_error("EvalSum: Packed encoding parameters 'batch size' is not set; Please check the EncodingParams passed to the crypto context.");
		else
		{
			if (!(m & (m-1))){ // Check if m is a power of 2
				newCiphertext = EvalSum_2n(batchSize, m, evalKeys,newCiphertext);
			}
			else { // Arbitray cyclotomics

				if (encodingParams->GetPlaintextGenerator() == 0)
					throw std::runtime_error("EvalSum: Packed encoding parameters 'plaintext generator' is not set; Please check the EncodingParams passed to the crypto context.");
				else
				{
					// rotate-and-add doubling: after k iterations every slot
					// holds the sum of 2^k original slots
					usint g = encodingParams->GetPlaintextGenerator();
					for (int i = 0; i < floor(log2(batchSize)); i++)
					{
						auto ea = EvalAutomorphism(newCiphertext, g, evalKeys);
						newCiphertext = EvalAdd(newCiphertext, ea);
						g = (g * g) % m;
					}
				}
			}
		}

		return newCiphertext;

	}

	/**
	 * Evaluates inner product in batched encoding
	 *
	 * @param ciphertext1 first vector.
	 * @param ciphertext2 second vector.
	 * @param batchSize size of the batch to be summed up
	 * @param &evalSumKeys - reference to the map of evaluation keys generated by EvalAutomorphismKeyGen.
	 * @param &evalMultKey - reference to the evaluation key generated by EvalMultKeyGen.
	 * @return resulting ciphertext
	 */
	Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1, ConstCiphertext<Element> ciphertext2, usint batchSize,
		const std::map<usint, LPEvalKey<Element>> &evalSumKeys,
		const LPEvalKey<Element> evalMultKey) const {

		Ciphertext<Element> result = EvalMult(ciphertext1, ciphertext2, evalMultKey);

		result = EvalSum(result, batchSize, evalSumKeys);

		// add a random number to all slots except for the first one so that no information is leaked
		result = AddRandomNoise(result);

		return result;

	}

	/**
	 * Evaluates inner product in batched encoding
	 *
	 * @param ciphertext1 first vector.
	 * @param ciphertext2 plaintext.
	 * @param batchSize size of the batch to be summed up
	 * @param &evalSumKeys - reference to the map of evaluation keys generated by EvalAutomorphismKeyGen.
	 * @param &evalMultKey - reference to the evaluation key generated by EvalMultKeyGen.
	 * @return resulting ciphertext
	 */
	Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1, ConstPlaintext ciphertext2, usint batchSize,
		const std::map<usint, LPEvalKey<Element>> &evalSumKeys) const {

		Ciphertext<Element> result = EvalMult(ciphertext1, ciphertext2);

		result = EvalSum(result, batchSize, evalSumKeys);

		// add a random number to all slots except for the first one so that no information is leaked
		return AddRandomNoise(result);

	}

	/**
	 * Merges multiple ciphertexts with encrypted results in slot 0 into a single ciphertext
	 * The slot assignment is done based on the order of ciphertexts in the vector
	 *
	 * @param ciphertextVector vector of ciphertexts to be merged.
	 * @param &evalKeys - reference to the map of evaluation keys generated by EvalAutomorphismKeyGen.
	 * @return resulting ciphertext
	 */
	Ciphertext<Element> EvalMerge(const vector<Ciphertext<Element>> &ciphertextVector,
		const std::map<usint, LPEvalKey<Element>> &evalKeys) const {

		if (ciphertextVector.size() == 0)
			throw std::runtime_error("EvalMerge: the vector of ciphertexts to be merged cannot be empty");

		const shared_ptr<LPCryptoParameters<Element>> cryptoParams = ciphertextVector[0]->GetCryptoParameters();
		Ciphertext<Element> newCiphertext(new CiphertextImpl<Element>(*(ciphertextVector[0])));

		auto cc = ciphertextVector[0]->GetCryptoContext();

		// packed mask {1,0,...} keeps only slot 0 of each ciphertext
		std::vector<int64_t> plaintextVector = {1,0};

		Plaintext plaintext = cc->MakePackedPlaintext(plaintextVector);

		newCiphertext = EvalMult(newCiphertext,plaintext);

		// mask each remaining ciphertext and rotate its slot 0 into slot i
		for (size_t i = 1; i < ciphertextVector.size(); i++) {
			newCiphertext = EvalAdd(newCiphertext,EvalAtIndex(EvalMult(ciphertextVector[i],plaintext),-(int32_t)i,evalKeys));
		}

		return newCiphertext;

	}

	/**
	 * EvalLinRegressBatched - Computes the parameter vector for linear regression using the least squares method
	 * Currently supports only two regressors
	 * @param x - matrix of regressors
	 * @param y - vector of dependent variables
	 * @return the parameter vector using (x^T x)^{-1} x^T y (using least squares method)
	 */
	shared_ptr<Matrix<RationalCiphertext<Element>>>
		EvalLinRegressBatched(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
			const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize,
			const std::map<usint, LPEvalKey<Element>> &evalSumKeys,
			const LPEvalKey<Element> evalMultKey) const
	{
		Matrix<RationalCiphertext<Element>> covarianceMatrix(x->GetAllocator(), 2, 2);

		Ciphertext<Element> x0 = (*x)(0, 0).GetNumerator();
		Ciphertext<Element> x1 = (*x)(0, 1).GetNumerator();
		Ciphertext<Element> y0 = (*y)(0, 0).GetNumerator();

		//Compute the covariance matrix for X
		covarianceMatrix(0, 0).SetNumerator(EvalInnerProduct(x0, x0, batchSize, evalSumKeys,
			evalMultKey));
		covarianceMatrix(0, 1).SetNumerator(EvalInnerProduct(x0, x1, batchSize, evalSumKeys,
			evalMultKey));
		// the covariance matrix is symmetric, so (1,0) reuses (0,1)
		covarianceMatrix(1, 0) = covarianceMatrix(0, 1);
		covarianceMatrix(1, 1).SetNumerator(EvalInnerProduct(x1, x1, batchSize, evalSumKeys,
			evalMultKey));

		Matrix<RationalCiphertext<Element>> cofactorMatrix = covarianceMatrix.CofactorMatrix();

		Matrix<RationalCiphertext<Element>> adjugateMatrix = cofactorMatrix.Transpose();

		shared_ptr<Matrix<RationalCiphertext<Element>>> result(new Matrix<RationalCiphertext<Element>>(x->GetAllocator(), 2, 1));

		(*result)(0, 0).SetNumerator(EvalInnerProduct(x0, y0, batchSize, evalSumKeys, evalMultKey));
		(*result)(1, 0).SetNumerator(EvalInnerProduct(x1, y0, batchSize, evalSumKeys, evalMultKey));

		// result = adj(x^T x) * (x^T y); division by the determinant is
		// deferred to the denominator of each entry below
		*result = adjugateMatrix * (*result);

		RationalCiphertext<Element> determinant;
		covarianceMatrix.Determinant(&determinant);

		for (size_t row = 0; row < result->GetRows(); row++)
			for (size_t col = 0; col < result->GetCols(); col++)
				(*result)(row, col).SetDenominator(determinant.GetNumerator());

		return result;

	}

	/**
	 * EvalCrossCorrelation - Computes the sliding sum of inner products (known as
	 * as cross-correlation, sliding inner product, or sliding dot product in
	 * image processing
	 * @param x - first vector of row vectors
	 * @param y - second vector of row vectors
	 * @param batchSize - batch size for packed encoding
	 * @param indexStart - starting index in the vectors of row vectors
	 * @param length - length of the slice in the vectors of row vectors
	 * @param evalSumKeys - evaluation keys used for the automorphism operation
	 * @param evalMultKey - the evaluation key used for multiplication
	 * @return sum(x_i*y_i), i.e., a sum of inner products
	 */
	Ciphertext<Element>
		EvalCrossCorrelation(const shared_ptr<Matrix<RationalCiphertext<Element>>> x,
			const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize,
			usint indexStart, usint length,
			const std::map<usint, LPEvalKey<Element>> &evalSumKeys,
			const LPEvalKey<Element> evalMultKey) const
	{
		// length == 0 means "use all rows"
		if (length == 0)
			length = x->GetRows();

		if (length - indexStart > x->GetRows())
			throw std::runtime_error("The number of rows exceeds the dimension of the vector");

		//additional error checking can be added here

		Ciphertext<Element> result;

		Ciphertext<Element> x0 = (*x)(indexStart, 0).GetNumerator();
		Ciphertext<Element> y0 = (*y)(indexStart, 0).GetNumerator();

		result = EvalInnerProduct(x0, y0, batchSize, evalSumKeys, evalMultKey);

		// inner products run in parallel; the "ordered" block serializes the
		// accumulation so the result is deterministic
#pragma omp parallel for ordered schedule(dynamic)
		for (usint i = indexStart + 1; i < indexStart + length; i++) {
			Ciphertext<Element> xi = (*x)(i, 0).GetNumerator();
			Ciphertext<Element> yi = (*y)(i, 0).GetNumerator();

			auto product = EvalInnerProduct(xi, yi, batchSize, evalSumKeys, evalMultKey);
#pragma omp ordered
			{
				result = EvalAdd(result,product);
			}
		}

		return result;

	}

private:

	// Builds the list of automorphism indices EvalSum_2n needs for
	// power-of-two cyclotomic order m and the given batch size.
	std::vector<usint> GenerateIndices_2n(usint batchSize, usint m) const {

		// stores automorphism indices needed for EvalSum
		std::vector<usint> indices;

		usint g = 5;
		for (int i = 0; i < floor(log2(batchSize)) - 1; i++)
		{
			indices.push_back(g);
			g = (g * g) % m;
		}
		if (2*batchSize<m)
			indices.push_back(g);
		indices.push_back(m-1);

		return indices;
	}

	// Power-of-two-cyclotomics implementation of EvalSum: rotate-and-add
	// doubling using the indices produced by GenerateIndices_2n.
	Ciphertext<Element> EvalSum_2n(usint batchSize, usint m,
		const std::map<usint, LPEvalKey<Element>> &evalKeys,
		ConstCiphertext<Element> ciphertext) const{

		Ciphertext<Element> newCiphertext(new CiphertextImpl<Element>(*ciphertext));

		usint g = 5;
		for (int i = 0; i < floor(log2(batchSize)) - 1; i++)
		{
			newCiphertext = EvalAdd(newCiphertext, EvalAutomorphism(newCiphertext, g, evalKeys));
			g = (g * g) % m;
		}
		if (2*batchSize<m)
			newCiphertext = EvalAdd(newCiphertext, EvalAutomorphism(newCiphertext, g, evalKeys));
		newCiphertext = EvalAdd(newCiphertext, EvalAutomorphism(newCiphertext, m-1, evalKeys));
		return newCiphertext;
	}

};

/**
 * @brief Abstract interface class for LBC SHE algorithms
 * @tparam Element a ring element.
*/ template <class Element> class LPFHEAlgorithm { public: virtual ~LPFHEAlgorithm() {} /** * Virtual function to define the interface for bootstrapping evaluation of ciphertext * * @param &ciphertext the input ciphertext. * @param *newCiphertext the new ciphertext. */ virtual void Bootstrap(ConstCiphertext<Element> &ciphertext, Ciphertext<Element> *newCiphertext) const = 0; }; /** * @brief main implementation class to capture essential cryptoparameters of any LBC system * @tparam Element a ring element. */ template <typename Element> class LPCryptoParameters : public Serializable { public: LPCryptoParameters() {} virtual ~LPCryptoParameters() {} /** * Returns the value of plaintext modulus p * * @return the plaintext modulus. */ const PlaintextModulus &GetPlaintextModulus() const { return m_encodingParams->GetPlaintextModulus(); } /** * Returns the reference to IL params * * @return the ring element parameters. */ const shared_ptr<typename Element::Params> GetElementParams() const { return m_params; } /** * Returns the reference to encoding params * * @return the encoding parameters. 
*/ const EncodingParams GetEncodingParams() const { return m_encodingParams; } /** * Sets the value of plaintext modulus p */ void SetPlaintextModulus(const PlaintextModulus &plaintextModulus) { m_encodingParams->SetPlaintextModulus(plaintextModulus); } virtual bool operator==(const LPCryptoParameters<Element>& cmp) const = 0; bool operator!=(const LPCryptoParameters<Element>& cmp) const { return !(*this == cmp); } /** * Overload to allow printing of parameters to an iostream * NOTE that the implementation relies on calling the virtual PrintParameters method * @param out - the stream to print to * @param item - reference to the item to print * @return the stream */ friend std::ostream& operator<<(std::ostream& out, const LPCryptoParameters& item) { item.PrintParameters(out); return out; } virtual usint GetRelinWindow() const { return 0; } virtual int GetDepth() const { return 0; } virtual size_t GetMaxDepth() const { return 0; } virtual const typename Element::DggType &GetDiscreteGaussianGenerator() const { throw std::logic_error("No DGG Available for this parameter set"); } /** * Sets the reference to element params */ void SetElementParams(shared_ptr<typename Element::Params> params) { m_params = params; } /** * Sets the reference to encoding params */ void SetEncodingParams(EncodingParams encodingParams) { m_encodingParams = encodingParams; } template <class Archive> void save( Archive & ar, std::uint32_t const version ) const { ar( ::cereal::make_nvp("elp", m_params) ); ar( ::cereal::make_nvp("enp", m_encodingParams) ); } template <class Archive> void load( Archive & ar, std::uint32_t const version ) { if( version > SerializedVersion() ) { PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library"); } ar( ::cereal::make_nvp("elp", m_params) ); ar( ::cereal::make_nvp("enp", m_encodingParams) ); } std::string SerializedObjectName() const { return "CryptoParameters"; } static uint32_t 
SerializedVersion() { return 1; } protected: LPCryptoParameters(const PlaintextModulus &plaintextModulus) { m_encodingParams.reset( new EncodingParamsImpl(plaintextModulus) ); } LPCryptoParameters(shared_ptr<typename Element::Params> params, const PlaintextModulus &plaintextModulus) { m_params = params; m_encodingParams.reset( new EncodingParamsImpl(plaintextModulus) ); } LPCryptoParameters(shared_ptr<typename Element::Params> params, EncodingParams encodingParams) { m_params = params; m_encodingParams = encodingParams; } LPCryptoParameters(LPCryptoParameters<Element> *from, shared_ptr<typename Element::Params> newElemParms) { *this = *from; m_params = newElemParms; } virtual void PrintParameters(std::ostream& out) const { out << "Element Parameters: " << *m_params << std::endl; out << "Encoding Parameters: " << *m_encodingParams << std::endl; } private: //element-specific parameters shared_ptr<typename Element::Params> m_params; //encoding-specific parameters EncodingParams m_encodingParams; }; // forward decl so SchemeIdentifier works template<typename Element> class LPPublicKeyEncryptionScheme; template<typename Element> class PalisadeSchemeIdentifier { string schemeName; LPPublicKeyEncryptionScheme<Element> *(*schemeMaker)(); public: PalisadeSchemeIdentifier(string n, LPPublicKeyEncryptionScheme<Element> (*f)()) : schemeName(n), schemeMaker(f) {} const string& GetName() const { return schemeName; } LPPublicKeyEncryptionScheme<Element> *GetScheme() const { return (*schemeMaker)(); } }; /** * @brief Abstract interface for public key encryption schemes * @tparam Element a ring element. 
*/
template<typename Element>
class LPPublicKeyEncryptionScheme {
protected:
	//PalisadeSchemeIdentifier<Element> *SchemeId;

public:
	LPPublicKeyEncryptionScheme() {}

	virtual ~LPPublicKeyEncryptionScheme() {}

	virtual bool operator==(const LPPublicKeyEncryptionScheme& sch) const = 0;

	bool operator!=(const LPPublicKeyEncryptionScheme& sch) const { return !(*this == sch); }

	/**
	 * Enable features with a bit mast of PKESchemeFeature codes
	 * @param mask
	 */
	void Enable(usint mask) {
		if (mask&ENCRYPTION) Enable(ENCRYPTION);
		if (mask&PRE) Enable(PRE);
		if (mask&SHE) Enable(SHE);
		if (mask&LEVELEDSHE) Enable(LEVELEDSHE);
		if (mask&MULTIPARTY) Enable(MULTIPARTY);
		if (mask&FHE) Enable(FHE);
	}

	// Returns the bit mask of features whose algorithm object is populated.
	usint GetEnabled() const {
		usint flag = 0;

		if (m_algorithmEncryption != NULL)	flag |= ENCRYPTION;
		if (m_algorithmPRE != NULL)		flag |= PRE;
		if (m_algorithmSHE != NULL)		flag |= SHE;
		if (m_algorithmFHE != NULL)		flag |= FHE;
		if (m_algorithmLeveledSHE != NULL)	flag |= LEVELEDSHE;
		if (m_algorithmMultiparty != NULL)	flag |= MULTIPARTY;

		return flag;
	}

	//instantiated in the scheme implementation class
	virtual void Enable(PKESchemeFeature feature) = 0;

	/////////////////////////////////////////
	// wrapper for LPParameterSelectionAlgorithm
	//

	bool ParamsGen(shared_ptr<LPCryptoParameters<Element>> cryptoParams, int32_t evalAddCount = 0,
		int32_t evalMultCount = 0, int32_t keySwitchCount = 0, size_t dcrtBits = 0) const {
		if (this->m_algorithmParamsGen) {
			return this->m_algorithmParamsGen->ParamsGen(cryptoParams, evalAddCount, evalMultCount, keySwitchCount, dcrtBits);
		}
		else {
			throw std::logic_error("Parameter generation operation has not been implemented");
		}
	}

	/////////////////////////////////////////
	// the three functions below are wrappers for things in LPEncryptionAlgorithm (ENCRYPT)
	//

	Ciphertext<Element> Encrypt(const LPPublicKey<Element> publicKey, const Element &plaintext) const {
		if(this->m_algorithmEncryption) {
			return this->m_algorithmEncryption->Encrypt(publicKey,plaintext);
		}
		else {
			throw std::logic_error("Encrypt operation has not been enabled");
		}
	}

	Ciphertext<Element> Encrypt(const LPPrivateKey<Element> privateKey, const Element &plaintext) const {
		if(this->m_algorithmEncryption) {
			return this->m_algorithmEncryption->Encrypt(privateKey,plaintext);
		}
		else {
			throw std::logic_error("Encrypt operation has not been enabled");
		}
	}

	DecryptResult Decrypt(const LPPrivateKey<Element> privateKey, ConstCiphertext<Element> ciphertext,
		NativePoly *plaintext) const {
		if(this->m_algorithmEncryption)
			return this->m_algorithmEncryption->Decrypt(privateKey,ciphertext,plaintext);
		else {
			throw std::logic_error("Decrypt operation has not been enabled");
		}
	}

	LPKeyPair<Element> KeyGen(CryptoContext<Element> cc, bool makeSparse) {
		if(this->m_algorithmEncryption) {
			auto kp = this->m_algorithmEncryption->KeyGen(cc, makeSparse);
			// propagate the secret key's tag to the public key so the pair is linked
			kp.publicKey->SetKeyTag( kp.secretKey->GetKeyTag() );
			return kp;
		}
		else {
			throw std::logic_error("KeyGen operation has not been enabled");
		}
	}

	/////////////////////////////////////////
	// the three functions below are wrappers for things in LPPREAlgorithm (PRE)
	//

	LPEvalKey<Element> ReKeyGen(const LPPublicKey<Element> newKey, const LPPrivateKey<Element> origPrivateKey) const {
		if(this->m_algorithmPRE) {
			auto rk = this->m_algorithmPRE->ReKeyGen(newKey,origPrivateKey);
			rk->SetKeyTag( newKey->GetKeyTag() );
			return rk;
		}
		else {
			throw std::logic_error("ReKeyGen operation has not been enabled");
		}
	}

	Ciphertext<Element> ReEncrypt(const LPEvalKey<Element> evalKey,
			ConstCiphertext<Element> ciphertext,
			const LPPublicKey<Element> publicKey) const {
		if(this->m_algorithmPRE) {
			auto ct = this->m_algorithmPRE->ReEncrypt(evalKey, ciphertext, publicKey);
			ct->SetKeyTag( evalKey->GetKeyTag() );
			return ct;
		}
		else {
			throw std::logic_error("ReEncrypt operation has not been enabled");
		}
	}

	/////////////////////////////////////////
	// the three functions below are wrappers for things in LPMultipartyAlgorithm (Multiparty)
	//

	// Wrapper for Multiparty Key Gen
	// FIXME check key ID for multiparty
	LPKeyPair<Element> MultipartyKeyGen(CryptoContext<Element> cc, const LPPublicKey<Element> pk1,
		bool makeSparse, bool PRE) {
		if(this->m_algorithmMultiparty) {
			auto k = this->m_algorithmMultiparty->MultipartyKeyGen(cc, pk1, makeSparse, PRE);
			k.publicKey->SetKeyTag( k.secretKey->GetKeyTag() );
			return k;
		}
		else {
			throw std::logic_error("MultipartyKeyGen operation has not been enabled");
		}
	}

	// Wrapper for Multiparty Key Gen
	// FIXME key IDs for multiparty
	LPKeyPair<Element> MultipartyKeyGen(CryptoContext<Element> cc,
		const vector<LPPrivateKey<Element>>& secretKeys,
		bool makeSparse) {
		if(this->m_algorithmMultiparty) {
			auto k = this->m_algorithmMultiparty->MultipartyKeyGen(cc, secretKeys, makeSparse);
			k.publicKey->SetKeyTag( k.secretKey->GetKeyTag() );
			return k;
		}
		else {
			throw std::logic_error("MultipartyKeyGen operation has not been enabled");
		}
	}

	// FIXME key IDs for multiparty
	Ciphertext<Element> MultipartyDecryptMain(const LPPrivateKey<Element> privateKey,
		ConstCiphertext<Element> ciphertext) const {
		if(this->m_algorithmMultiparty) {
			auto ct = this->m_algorithmMultiparty->MultipartyDecryptMain(privateKey,ciphertext);
			ct->SetKeyTag( privateKey->GetKeyTag() );
			return ct;
		}
		else {
			throw std::logic_error("MultipartyDecryptMain operation has not been enabled");
		}
	}

	// FIXME key IDs for multiparty
	Ciphertext<Element> MultipartyDecryptLead(const LPPrivateKey<Element> privateKey,
		ConstCiphertext<Element> ciphertext) const {
		if(this->m_algorithmMultiparty) {
			auto ct = this->m_algorithmMultiparty->MultipartyDecryptLead(privateKey,ciphertext);
			ct->SetKeyTag( privateKey->GetKeyTag() );
			return ct;
		}
		else {
			throw std::logic_error("MultipartyDecryptLead operation has not been enabled");
		}
	}

	DecryptResult MultipartyDecryptFusion(const vector<Ciphertext<Element>>& ciphertextVec,
		NativePoly *plaintext) const {
		if(this->m_algorithmMultiparty) {
			return this->m_algorithmMultiparty->MultipartyDecryptFusion(ciphertextVec,plaintext);
		}
		else {
			throw std::logic_error("MultipartyDecrypt operation has not been enabled");
		}
	}

	/////////////////////////////////////////
	// the three functions below are wrappers for things in LPSHEAlgorithm (SHE)
	//

	Ciphertext<Element> AddRandomNoise(ConstCiphertext<Element> ciphertext) const {
		if (this->m_algorithmSHE)
			return this->m_algorithmSHE->AddRandomNoise(ciphertext);
		else {
			throw std::logic_error("AddRandomNoise operation has not been enabled");
		}
	}

	Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ciphertext1,
		ConstCiphertext<Element> ciphertext2) const {
		if (this->m_algorithmSHE) {
			auto ct = this->m_algorithmSHE->EvalAdd(ciphertext1, ciphertext2);
			return ct;
		}
		else {
			throw std::logic_error("EvalAdd operation has not been enabled");
		}
	}

	Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ciphertext1,
		ConstPlaintext plaintext) const {
		if (this->m_algorithmSHE) {
			auto ct = this->m_algorithmSHE->EvalAdd(ciphertext1, plaintext);
			return ct;
		}
		else {
			throw std::logic_error("EvalAdd operation has not been enabled");
		}
	}

	Ciphertext<Element> EvalSub(ConstCiphertext<Element> ciphertext1,
		ConstCiphertext<Element> ciphertext2) const {
		if (this->m_algorithmSHE) {
			auto ct = this->m_algorithmSHE->EvalSub(ciphertext1, ciphertext2);
			return ct;
		}
		else {
			throw std::logic_error("EvalSub operation has not been enabled");
		}
	}

	Ciphertext<Element> EvalSub(ConstCiphertext<Element> ciphertext1,
		ConstPlaintext plaintext) const {
		if (this->m_algorithmSHE) {
			auto ct = this->m_algorithmSHE->EvalSub(ciphertext1, plaintext);
			return ct;
		}
		else {
			throw std::logic_error("EvalSub operation has not been enabled");
		}
	}

	Ciphertext<Element> EvalMult(ConstCiphertext<Element> ciphertext1,
		ConstCiphertext<Element> ciphertext2) const {
		if (this->m_algorithmSHE) {
			auto ct = this->m_algorithmSHE->EvalMult(ciphertext1, ciphertext2);
			return ct;
		}
		else {
			throw std::logic_error("EvalMult operation has not been enabled");
		}
	}

	Ciphertext<Element> EvalMult(ConstCiphertext<Element> ciphertext,
		ConstPlaintext plaintext) const {
		if (this->m_algorithmSHE)
			return this->m_algorithmSHE->EvalMult(ciphertext, plaintext);
		else {
			throw std::logic_error("EvalMult operation has not been enabled");
		}
	}

	Ciphertext<Element> EvalMult(ConstCiphertext<Element> ciphertext1,
		ConstCiphertext<Element> ciphertext2, const LPEvalKey<Element> evalKey) const {
		if (this->m_algorithmSHE) {
			auto ct = this->m_algorithmSHE->EvalMult(ciphertext1, ciphertext2, evalKey);
			return ct;
		}
		else {
			throw std::logic_error("EvalMult operation has not been enabled");
		}
	}

	Ciphertext<Element> EvalMultMany(const vector<Ciphertext<Element>>& ciphertext,
		const vector<LPEvalKey<Element>> &evalKeys) const {
		if (this->m_algorithmSHE){
			return this->m_algorithmSHE->EvalMultMany(ciphertext, evalKeys);
		}
		else {
			throw std::logic_error("EvalMultMany operation has not been enabled");
		}
	}

	Ciphertext<Element> EvalNegate(ConstCiphertext<Element> ciphertext) const {
		if (this->m_algorithmSHE) {
			auto ct = this->m_algorithmSHE->EvalNegate(ciphertext);
			return ct;
		}
		else {
			throw std::logic_error("EvalNegate operation has not been enabled");
		}
	}

	shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPublicKey<Element> publicKey,
		const LPPrivateKey<Element> origPrivateKey,
		const std::vector<usint> &indexList) const {
		if (this->m_algorithmSHE) {
			auto km = this->m_algorithmSHE->EvalAutomorphismKeyGen(publicKey,origPrivateKey,indexList);
			// tag every generated key with the originating secret key's tag
			for( auto& k : *km )
				k.second->SetKeyTag( origPrivateKey->GetKeyTag() );
			return km;
		}
		else
			throw std::logic_error("EvalAutomorphismKeyGen operation has not been enabled");
	}

	shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAtIndexKeyGen(const LPPublicKey<Element> publicKey,
		const LPPrivateKey<Element> origPrivateKey,
		const std::vector<int32_t> &indexList) const {
		if (this->m_algorithmSHE) {
			auto km = this->m_algorithmSHE->EvalAtIndexKeyGen(publicKey,origPrivateKey,indexList);
			for( auto& k : *km )
				k.second->SetKeyTag( origPrivateKey->GetKeyTag() );
			return km;
		}
		else
			throw std::logic_error("EvalAtIndexKeyGen operation has not been enabled");
	}

	Ciphertext<Element> EvalAutomorphism(ConstCiphertext<Element> ciphertext, usint i,
		const std::map<usint, LPEvalKey<Element>> &evalKeys) const {
		if (this->m_algorithmSHE) {
			auto ct = this->m_algorithmSHE->EvalAutomorphism(ciphertext, i, evalKeys);
			return ct;
		}
		else
			throw std::logic_error("EvalAutomorphism operation has not been enabled");
	}

	Ciphertext<Element> EvalAtIndex(ConstCiphertext<Element> ciphertext, usint i,
		const std::map<usint, LPEvalKey<Element>> &evalKeys) const {
		if (this->m_algorithmSHE) {
			auto ct = this->m_algorithmSHE->EvalAtIndex(ciphertext, i, evalKeys);
			return ct;
		}
		else
			throw std::logic_error("EvalAtIndex operation has not been enabled");
	}

	shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalAutomorphismKeyGen(const LPPrivateKey<Element> privateKey,
		const std::vector<usint> &indexList) const {
		if (this->m_algorithmSHE) {
			auto km = this->m_algorithmSHE->EvalAutomorphismKeyGen(privateKey, indexList);
			for( auto& k : *km )
				k.second->SetKeyTag( privateKey->GetKeyTag() );
			return km;
		}
		else
			throw std::logic_error("EvalAutomorphismKeyGen operation has not been enabled");
	}

	shared_ptr<std::map<usint, LPEvalKey<Element>>> EvalSumKeyGen(
		const LPPrivateKey<Element> privateKey,
		const LPPublicKey<Element> publicKey) const {
		if (this->m_algorithmSHE) {
			auto km = this->m_algorithmSHE->EvalSumKeyGen(privateKey,publicKey);
			for( auto& k : *km ) {
				k.second->SetKeyTag( privateKey->GetKeyTag() );
			}
			return km;
		}
		else
			throw std::logic_error("EvalSumKeyGen operation has not been enabled");
	}

	Ciphertext<Element> EvalSum(ConstCiphertext<Element> ciphertext, usint batchSize,
		const std::map<usint, LPEvalKey<Element>> &evalKeys) const {
		if (this->m_algorithmSHE) {
			auto ct = this->m_algorithmSHE->EvalSum(ciphertext, batchSize, evalKeys);
			return ct;
		}
		else
			throw std::logic_error("EvalSum operation has not been enabled");
	}

	Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1,
ConstCiphertext<Element> ciphertext2, usint batchSize, const std::map<usint, LPEvalKey<Element>> &evalSumKeys, const LPEvalKey<Element> evalMultKey) const { if (this->m_algorithmSHE) { auto ct = this->m_algorithmSHE->EvalInnerProduct(ciphertext1, ciphertext2, batchSize, evalSumKeys, evalMultKey); ct->SetKeyTag( evalSumKeys.begin()->second->GetKeyTag() ); return ct; } else throw std::logic_error("EvalInnerProduct operation has not been enabled"); } Ciphertext<Element> EvalMerge(const vector<Ciphertext<Element>> &ciphertextVector, const std::map<usint, LPEvalKey<Element>> &evalKeys) const { if (this->m_algorithmSHE) { auto ct = this->m_algorithmSHE->EvalMerge(ciphertextVector,evalKeys); return ct; } else throw std::logic_error("EvalMerge operation has not been enabled"); } Ciphertext<Element> EvalInnerProduct(ConstCiphertext<Element> ciphertext1, ConstPlaintext ciphertext2, usint batchSize, const std::map<usint, LPEvalKey<Element>> &evalSumKeys) const { if (this->m_algorithmSHE) return this->m_algorithmSHE->EvalInnerProduct(ciphertext1, ciphertext2, batchSize, evalSumKeys); else throw std::logic_error("EvalInnerProduct operation has not been enabled"); } shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegressBatched(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize, const std::map<usint, LPEvalKey<Element>> &evalSumKeys, const LPEvalKey<Element> evalMultKey) const { if (this->m_algorithmSHE) { string kID = evalMultKey->GetKeyTag(); auto ctm = this->m_algorithmSHE->EvalLinRegressBatched(x, y, batchSize, evalSumKeys, evalMultKey); for( size_t r = 0; r < ctm->GetRows(); r++ ) for( size_t c = 0; c < ctm->GetCols(); c++ ) (*ctm)(r,c).SetKeyTag(kID); return ctm; } else throw std::logic_error("EvalLinRegressionBatched operation has not been enabled"); } Ciphertext<Element> EvalCrossCorrelation(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const 
shared_ptr<Matrix<RationalCiphertext<Element>>> y, usint batchSize, usint indexStart, usint length, const std::map<usint, LPEvalKey<Element>> &evalSumKeys, const LPEvalKey<Element> evalMultKey) const { if (this->m_algorithmSHE) { auto ct = this->m_algorithmSHE->EvalCrossCorrelation(x, y, batchSize, indexStart, length, evalSumKeys, evalMultKey); // FIXME: mark with which key? return ct; } else throw std::logic_error("EvalCrossCorrelation operation has not been enabled"); } /** * EvalLinRegression - Computes the parameter vector for linear regression using the least squares method * @param x - matrix of regressors * @param y - vector of dependent variables * @return the parameter vector using (x^T x)^{-1} x^T y (using least squares method) */ shared_ptr<Matrix<RationalCiphertext<Element>>> EvalLinRegression(const shared_ptr<Matrix<RationalCiphertext<Element>>> x, const shared_ptr<Matrix<RationalCiphertext<Element>>> y) const { if (this->m_algorithmSHE) { auto ctm = this->m_algorithmSHE->EvalLinRegression(x, y); // FIXME mark with which key?? 
return ctm; } else { throw std::logic_error("EvalLinRegression operation has not been enabled"); } } LPEvalKey<Element> KeySwitchGen( const LPPrivateKey<Element> originalPrivateKey, const LPPrivateKey<Element> newPrivateKey) const { if (this->m_algorithmSHE) { auto kp = this->m_algorithmSHE->KeySwitchGen(originalPrivateKey, newPrivateKey); kp->SetKeyTag( newPrivateKey->GetKeyTag() ); return kp; } else { throw std::logic_error("KeySwitchGen operation has not been enabled"); } } Ciphertext<Element> KeySwitch( const LPEvalKey<Element> keySwitchHint, ConstCiphertext<Element> cipherText) const { if (this->m_algorithmSHE) { auto ct = this->m_algorithmSHE->KeySwitch(keySwitchHint, cipherText); return ct; } else { throw std::logic_error("KeySwitch operation has not been enabled"); } } LPEvalKey<Element> KeySwitchRelinGen(const LPPublicKey<Element> newKey, const LPPrivateKey<Element> origPrivateKey) const { if (this->m_algorithmSHE) { auto kp = this->m_algorithmSHE->KeySwitchRelinGen(newKey, origPrivateKey); kp->SetKeyTag( newKey->GetKeyTag() ); return kp; } else { throw std::logic_error("KeySwitchRelinGen operation has not been enabled"); } } Ciphertext<Element> KeySwitchRelin(const LPEvalKey<Element> evalKey, ConstCiphertext<Element> ciphertext) const { if (this->m_algorithmSHE) { auto ct = this->m_algorithmSHE->KeySwitchRelin(evalKey, ciphertext); ct->SetKeyTag( evalKey->GetKeyTag() ); return ct; } else { throw std::logic_error("KeySwitchRelin operation has not been enabled"); } } LPEvalKey<Element> EvalMultKeyGen(const LPPrivateKey<Element> originalPrivateKey) const { if(this->m_algorithmSHE) { auto ek = this->m_algorithmSHE->EvalMultKeyGen(originalPrivateKey); ek->SetKeyTag( originalPrivateKey->GetKeyTag() ); return ek; } else { throw std::logic_error("EvalMultKeyGen operation has not been enabled"); } } vector<LPEvalKey<Element>> EvalMultKeysGen(const LPPrivateKey<Element> originalPrivateKey) const { if(this->m_algorithmSHE){ auto ek = 
this->m_algorithmSHE->EvalMultKeysGen(originalPrivateKey); for(size_t i=0; i<ek.size(); i++) ek[i]->SetKeyTag( originalPrivateKey->GetKeyTag() ); return ek; } else { throw std::logic_error("EvalMultKeysGen operation has not been enabled"); } } Ciphertext<Element> EvalMultAndRelinearize(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2, const vector<LPEvalKey<Element>> &ek) const { if(this->m_algorithmSHE) return this->m_algorithmSHE->EvalMultAndRelinearize(ct1, ct2, ek); else { throw std::logic_error("EvalMultAndRelinearize operation has not been enabled"); } } ///////////////////////////////////////// // the functions below are wrappers for things in LPFHEAlgorithm (FHE) // // TODO: Add Bootstrap and any other FHE methods ///////////////////////////////////////// // the functions below are wrappers for things in LPSHEAlgorithm (SHE) // Ciphertext<Element> ModReduce(ConstCiphertext<Element> cipherText) const { if(this->m_algorithmLeveledSHE) { auto ct = this->m_algorithmLeveledSHE->ModReduce(cipherText); ct->SetKeyTag( cipherText->GetKeyTag() ); return ct; } else{ throw std::logic_error("ModReduce operation has not been enabled"); } } Ciphertext<Element> RingReduce(ConstCiphertext<Element> cipherText, const LPEvalKey<Element> keySwitchHint) const { if(this->m_algorithmLeveledSHE){ auto ct = this->m_algorithmLeveledSHE->RingReduce(cipherText,keySwitchHint); ct->SetKeyTag( keySwitchHint->GetKeyTag() ); return ct; } else{ throw std::logic_error("RingReduce operation has not been enabled"); } } bool CanRingReduce(usint ringDimension, const std::vector<BigInteger> &moduli, const double rootHermiteFactor) const { if (this->m_algorithmLeveledSHE) { return this->m_algorithmLeveledSHE->CanRingReduce(ringDimension, moduli, rootHermiteFactor); } else { throw std::logic_error("CanRingReduce operation has not been enabled"); } } Ciphertext<Element> ComposedEvalMult( ConstCiphertext<Element> cipherText1, ConstCiphertext<Element> cipherText2, const LPEvalKey<Element> 
quadKeySwitchHint) const { if(this->m_algorithmLeveledSHE){ auto ct = this->m_algorithmLeveledSHE->ComposedEvalMult(cipherText1,cipherText2,quadKeySwitchHint); ct->SetKeyTag( quadKeySwitchHint->GetKeyTag() ); return ct; } else{ throw std::logic_error("ComposedEvalMult operation has not been enabled"); } } Ciphertext<Element> LevelReduce(ConstCiphertext<Element> cipherText1, const LPEvalKeyNTRU<Element> linearKeySwitchHint) const { if(this->m_algorithmLeveledSHE){ auto ct = this->m_algorithmLeveledSHE->LevelReduce(cipherText1,linearKeySwitchHint); ct->SetKeyTag( linearKeySwitchHint->GetKeyTag() ); return ct; } else{ throw std::logic_error("LevelReduce operation has not been enabled"); } } const std::shared_ptr<LPEncryptionAlgorithm<Element>> getAlgorithm() const { return m_algorithmEncryption; } template <class Archive> void save( Archive & ar, std::uint32_t const version ) const { ar( ::cereal::make_nvp("enabled",GetEnabled()) ); } template <class Archive> void load( Archive & ar, std::uint32_t const version ) { if( version > SerializedVersion() ) { PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library"); } usint enabled; ar( ::cereal::make_nvp("enabled",enabled) ); this->Enable(enabled); } virtual std::string SerializedObjectName() const { return "Scheme"; } static uint32_t SerializedVersion() { return 1; } friend std::ostream& operator<<(std::ostream& out, const LPPublicKeyEncryptionScheme<Element>& s) { out << typeid(s).name() << ":" ; out << " ParameterGeneration " << (s.m_algorithmParamsGen == 0 ? "none" : typeid(*s.m_algorithmParamsGen).name()); out << ", Encryption " << (s.m_algorithmEncryption == 0 ? "none" : typeid(*s.m_algorithmEncryption).name()); out << ", PRE " << (s.m_algorithmPRE == 0 ? "none" : typeid(*s.m_algorithmPRE).name()); out << ", Multiparty " << (s.m_algorithmMultiparty == 0 ? 
"none" : typeid(*s.m_algorithmMultiparty).name()); out << ", SHE " << (s.m_algorithmSHE == 0 ? "none" : typeid(*s.m_algorithmSHE).name()); out << ", FHE " << (s.m_algorithmFHE == 0 ? "none" : typeid(*s.m_algorithmFHE).name()); out << ", LeveledSHE " << (s.m_algorithmLeveledSHE == 0 ? "none" : typeid(*s.m_algorithmLeveledSHE).name()); return out; } protected: std::shared_ptr<LPParameterGenerationAlgorithm<Element>> m_algorithmParamsGen; std::shared_ptr<LPEncryptionAlgorithm<Element>> m_algorithmEncryption; std::shared_ptr<LPPREAlgorithm<Element>> m_algorithmPRE; std::shared_ptr<LPMultipartyAlgorithm<Element>> m_algorithmMultiparty; std::shared_ptr<LPSHEAlgorithm<Element>> m_algorithmSHE; std::shared_ptr<LPFHEAlgorithm<Element>> m_algorithmFHE; std::shared_ptr<LPLeveledSHEAlgorithm<Element>> m_algorithmLeveledSHE; }; } // namespace lbcrypto ends #endif
GB_unop__abs_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__abs_fp32_fp32)
// op(A') function:  GB (_unop_tran__abs_fp32_fp32)

// C type:   float
// A type:   float
// cast:     float cij = aij
// unaryop:  cij = fabsf (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = fabsf (x) ;

// casting (here a no-op: A and C are both float)
#define GB_CAST(z, aij) \
    float z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    float aij = Ax [pA] ;           \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = aij ;                 \
    Cx [pC] = fabsf (z) ;           \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__abs_fp32_fp32)
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // dense/full case: apply the op to every entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = fabsf (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not present in A
            float aij = Ax [p] ;
            float z = aij ;
            Cx [p] = fabsf (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__abs_fp32_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel is shared; it uses the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
sequencial.c
// Autor Roland Teodorowitsch // Data: ago. 2018 // // Adaptado Cesar De Rose - set. 2018 #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <omp.h> #define SIZE 2500 void InicializaMatriz(void); void VerificaMatriz(void); // ESTRUTURA DE DADOS COMPARTILHADA int m1[SIZE][SIZE], m2[SIZE][SIZE], mres[SIZE][SIZE]; int l1, c1, l2, c2; int main() { int i, j, k, lres, cres; double starttime, stoptime; // INICIALIZA OS ARRAYS A SEREM MULTIPLICADOS l1 = c1 = SIZE; l2 = c2 = SIZE; lres = l1; cres = c2; InicializaMatriz(); omp_set_num_threads(8); printf("\n Multiplicando matrizes de tamanho %d com %d threads.\n", SIZE, omp_get_max_threads() ); starttime = omp_get_wtime(); // REALIZA A MULTIPLICACAO #pragma omp parallel for private(j, k) for (i = 0; i < lres; i++) { for (j = 0; j < cres; j++) { mres[i][j] = 0; for (k = 0; k < c1; k++) { mres[i][j] += m1[i][k] * m2[k][j]; } } } stoptime = omp_get_wtime(); // VERIFICA SE O RESULTADO DA MULTIPLICACAO ESTA CORRETO VerificaMatriz(); printf(" Tempo de execucao: %3.2f segundos\n\n", stoptime - starttime); return 0; } void InicializaMatriz(void) { int i, j, k; k=1; for (i = 0; i < SIZE; i++) { for (j=0; j < SIZE; j++) { if (k%2 == 0) m1[i][j] = -k; else m1[i][j] = k; } k++; } k=1; for (j = 0; j < SIZE; j++) { for (i = 0 ; i < SIZE; i++) { if (k%2 == 0) m2[i][j] = -k; else m2[i][j] = k; } k++; } } void VerificaMatriz(void) { int i, j, k; for (i = 0; i < SIZE; i++) { k = SIZE*(i+1); for (j = 0; j < SIZE; j++) { int k_col = k*(j+1); if (i % 2 == 0) { if (j % 2 == 0) { if (mres[i][j]!=k_col) printf("Verificacao Falhou!\n"); } else { if (mres[i][j]!=-k_col) printf("Verificacao Falhou!\n"); } } else { if (j % 2 == 0) { if (mres[i][j]!=-k_col) printf("Verificacao Falhou!\n"); } else { if (mres[i][j]!=k_col) printf("Verificacao Falhou!\n"); } } } } }
Square.c
#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/Square.c"
#else

// Forward pass of the elementwise Square module: output[i] = input[i]^2.
// The contiguous case uses a raw parallel loop; otherwise the generic
// (strided) TH_TENSOR_APPLY2 macro iterates elementwise.
void THNN_(Square_updateOutput)(
          THNNState *state,
          THTensor *input,
          THTensor *output)
{
  THTensor_(resizeAs)(output, input);

  if (THTensor_nDimensionLegacyAll(input) == 1 ||
      !THTensor_(isContiguous)(input) ||
      !THTensor_(isContiguous)(output))
  {
    // slow path: 1-D or non-contiguous tensors go through the generic macro
    TH_TENSOR_APPLY2(real, output, real, input,
      *output_data = (*input_data) * (*input_data);
    );
  }
  else
  {
    // fast path: both tensors contiguous, iterate flat storage in parallel
    // NOTE(review): THTensor_(nElement)(input) is re-evaluated in the loop
    // condition each iteration — presumably cheap, but confirm.
    real *output_data = THTensor_(data)(output);
    real *input_data = THTensor_(data)(input);
    int64_t i;
    #pragma omp parallel for private(i)
    for (i = 0; i < THTensor_(nElement)(input); i++)
      output_data[i] = input_data[i]*input_data[i];
  }
}

// Backward pass: d/dx (x^2) = 2x, so gradInput[i] = 2 * gradOutput[i] * input[i].
void THNN_(Square_updateGradInput)(
          THNNState *state,
          THTensor *input,
          THTensor *gradOutput,
          THTensor *gradInput)
{
  THNN_CHECK_SHAPE(input, gradOutput);
  THTensor_(resizeAs)(gradInput, input);

  if (THTensor_nDimensionLegacyAll(input) == 1 ||
      !THTensor_(isContiguous)(input) ||
      !THTensor_(isContiguous)(gradOutput) ||
      !THTensor_(isContiguous)(gradInput))
  {
    // slow path: generic strided traversal
    TH_TENSOR_APPLY3(real, gradInput, real, gradOutput, real, input,
      *gradInput_data  = 2.0 * (*gradOutput_data) * (*input_data);
    );
  }
  else
  {
    // fast path: contiguous storage, parallel flat loop
    real *gradOutput_data = THTensor_(data)(gradOutput);
    real *gradInput_data  = THTensor_(data)(gradInput);
    real *input_data  = THTensor_(data)(input);
    int64_t i;
    #pragma omp parallel for private(i)
    for (i = 0; i < THTensor_(nElement)(gradInput); i++)
      gradInput_data[i] = 2.0 * gradOutput_data[i] * input_data[i];
  }
}

#endif
GB_unop__identity_uint64_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint64_fp32)
// op(A') function:  GB (_unop_tran__identity_uint64_fp32)

// C type:   uint64_t
// A type:   float
// cast:     uint64_t cij = GB_cast_to_uint64_t ((double) (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    float

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: the work is all in the cast below)
#define GB_OP(z, x) \
    z = x ;

// casting: saturating float -> uint64_t conversion via double
#define GB_CAST(z, aij) \
    uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)                                   \
{                                                           \
    /* aij = Ax [pA] */                                     \
    float aij = Ax [pA] ;                                   \
    /* Cx [pC] = op (cast (aij)) */                         \
    uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;     \
    Cx [pC] = z ;                                           \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_uint64_fp32)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/full case: cast every entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;     // skip entries not present in A
            float aij = Ax [p] ;
            uint64_t z = GB_cast_to_uint64_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint64_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel is shared; it uses the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
roi_align.c
#include <TH/TH.h>
#include <math.h>
#include <omp.h>

// CPU RoIAlign (bilinear ROI pooling) forward/backward kernels plus the
// TH tensor wrappers that expose them.

void ROIAlignForwardCpu(const float* bottom_data, const float spatial_scale, const int num_rois,
                        const int height, const int width, const int channels,
                        const int aligned_height, const int aligned_width,
                        const float * bottom_rois, float* top_data);

void ROIAlignBackwardCpu(const float* top_diff, const float spatial_scale, const int num_rois,
                         const int height, const int width, const int channels,
                         const int aligned_height, const int aligned_width,
                         const float * bottom_rois, float* bottom_diff);

// Forward wrapper: unpacks TH tensors and dispatches to the CPU kernel.
// rois must be (num_rois, 5) rows of [batch_idx, x1, y1, x2, y2];
// returns 0 on malformed rois, 1 on success.
int roi_align_forward(int aligned_height, int aligned_width, float spatial_scale,
                      THFloatTensor * features, THFloatTensor * rois, THFloatTensor * output)
{
    // Grab the input tensors' flat storage
    float * data_flat = THFloatTensor_data(features);
    float * rois_flat = THFloatTensor_data(rois);
    float * output_flat = THFloatTensor_data(output);

    // Number of ROIs
    int num_rois = THFloatTensor_size(rois, 0);
    int size_rois = THFloatTensor_size(rois, 1);
    if (size_rois != 5)
    {
        return 0;
    }

    // feature map geometry (N, C, H, W layout)
    int data_height = THFloatTensor_size(features, 2);
    int data_width = THFloatTensor_size(features, 3);
    int num_channels = THFloatTensor_size(features, 1);

    ROIAlignForwardCpu(data_flat, spatial_scale, num_rois, data_height, data_width, num_channels,
                       aligned_height, aligned_width, rois_flat, output_flat);

    return 1;
}

// Backward wrapper: scatters top_grad into bottom_grad through the same
// bilinear weights used by the forward pass.  Returns 0 on malformed rois.
int roi_align_backward(int aligned_height, int aligned_width, float spatial_scale,
                       THFloatTensor * top_grad, THFloatTensor * rois, THFloatTensor * bottom_grad)
{
    float * top_grad_flat = THFloatTensor_data(top_grad);
    float * rois_flat = THFloatTensor_data(rois);
    float * bottom_grad_flat = THFloatTensor_data(bottom_grad);

    int num_rois = THFloatTensor_size(rois, 0);
    int size_rois = THFloatTensor_size(rois, 1);
    if (size_rois != 5)
    {
        return 0;
    }

    // bottom_grad is (N, C, H, W); the batch dimension is addressed through
    // each ROI's batch index, so only C/H/W are needed here.
    int data_height = THFloatTensor_size(bottom_grad, 2);
    int data_width = THFloatTensor_size(bottom_grad, 3);
    int num_channels = THFloatTensor_size(bottom_grad, 1);

    ROIAlignBackwardCpu(top_grad_flat, spatial_scale, num_rois, data_height, data_width, num_channels,
                        aligned_height, aligned_width, rois_flat, bottom_grad_flat);

    return 1;
}

// Forward kernel: for each output cell (n, c, ph, pw) sample the feature
// map at one bilinearly-interpolated point inside ROI n; out-of-bounds
// samples produce 0.  Each idx writes its own output cell, so the
// parallel loop is race-free.
// NOTE(review): bin_size_* divides by (aligned_{height,width} - 1), so
// aligned sizes of 1 produce inf — presumably callers always use >= 2.
void ROIAlignForwardCpu(const float* bottom_data, const float spatial_scale, const int num_rois,
                        const int height, const int width, const int channels,
                        const int aligned_height, const int aligned_width,
                        const float * bottom_rois, float* top_data)
{
    const int output_size = num_rois * aligned_height * aligned_width * channels;

    #pragma omp parallel for
    for (int idx = 0; idx < output_size; ++idx)
    {
        // (n, c, ph, pw) is an element in the aligned output
        int pw = idx % aligned_width;
        int ph = (idx / aligned_width) % aligned_height;
        int c  = (idx / aligned_width / aligned_height) % channels;
        int n  = idx / aligned_width / aligned_height / channels;

        float roi_batch_ind = bottom_rois[n * 5 + 0];
        float roi_start_w = bottom_rois[n * 5 + 1] * spatial_scale;
        float roi_start_h = bottom_rois[n * 5 + 2] * spatial_scale;
        float roi_end_w = bottom_rois[n * 5 + 3] * spatial_scale;
        float roi_end_h = bottom_rois[n * 5 + 4] * spatial_scale;

        // Force malformed ROI to be 1x1
        float roi_width = fmaxf(roi_end_w - roi_start_w + 1., 0.);
        float roi_height = fmaxf(roi_end_h - roi_start_h + 1., 0.);
        float bin_size_h = roi_height / (aligned_height - 1.);
        float bin_size_w = roi_width / (aligned_width - 1.);

        float h = (float)(ph) * bin_size_h + roi_start_h;
        float w = (float)(pw) * bin_size_w + roi_start_w;

        int hstart = fminf(floor(h), height - 2);
        int wstart = fminf(floor(w), width - 2);

        int img_start = roi_batch_ind * channels * height * width;

        // bilinear interpolation over the 2x2 neighborhood of (h, w)
        if (h < 0 || h >= height || w < 0 || w >= width)
        {
            top_data[idx] = 0.;
        }
        else
        {
            float h_ratio = h - (float)(hstart);
            float w_ratio = w - (float)(wstart);
            int upleft = img_start + (c * height + hstart) * width + wstart;
            int upright = upleft + 1;
            int downleft = upleft + width;
            int downright = downleft + 1;

            top_data[idx] = bottom_data[upleft] * (1. - h_ratio) * (1. - w_ratio)
                          + bottom_data[upright] * (1. - h_ratio) * w_ratio
                          + bottom_data[downleft] * h_ratio * (1. - w_ratio)
                          + bottom_data[downright] * h_ratio * w_ratio;
        }
    }
}

// Backward kernel: distributes top_diff[idx] over the four sampled pixels
// with the forward pass's bilinear weights.
//
// FIXES vs. the original:
//  * The bounds check was inverted — gradients were accumulated only when
//    the sample fell OUTSIDE the feature map (mirroring the forward
//    branch that outputs 0), which both dropped all valid gradients and
//    wrote through out-of-range indices.  In-bounds samples now
//    accumulate; out-of-bounds samples are skipped.
//  * bottom_diff[...] += ... is a scatter shared across loop iterations,
//    so the accumulations are made atomic to avoid a data race under
//    "#pragma omp parallel for".
void ROIAlignBackwardCpu(const float* top_diff, const float spatial_scale, const int num_rois,
                         const int height, const int width, const int channels,
                         const int aligned_height, const int aligned_width,
                         const float * bottom_rois, float* bottom_diff)
{
    const int output_size = num_rois * aligned_height * aligned_width * channels;

    #pragma omp parallel for
    for (int idx = 0; idx < output_size; ++idx)
    {
        // (n, c, ph, pw) is an element in the aligned output
        int pw = idx % aligned_width;
        int ph = (idx / aligned_width) % aligned_height;
        int c  = (idx / aligned_width / aligned_height) % channels;
        int n  = idx / aligned_width / aligned_height / channels;

        float roi_batch_ind = bottom_rois[n * 5 + 0];
        float roi_start_w = bottom_rois[n * 5 + 1] * spatial_scale;
        float roi_start_h = bottom_rois[n * 5 + 2] * spatial_scale;
        float roi_end_w = bottom_rois[n * 5 + 3] * spatial_scale;
        float roi_end_h = bottom_rois[n * 5 + 4] * spatial_scale;

        // Force malformed ROI to be 1x1
        float roi_width = fmaxf(roi_end_w - roi_start_w + 1., 0.);
        float roi_height = fmaxf(roi_end_h - roi_start_h + 1., 0.);
        float bin_size_h = roi_height / (aligned_height - 1.);
        float bin_size_w = roi_width / (aligned_width - 1.);

        float h = (float)(ph) * bin_size_h + roi_start_h;
        float w = (float)(pw) * bin_size_w + roi_start_w;

        int hstart = fminf(floor(h), height - 2);
        int wstart = fminf(floor(w), width - 2);

        int img_start = roi_batch_ind * channels * height * width;

        // skip samples that fall outside the feature map; the forward pass
        // emitted 0 for them, so there is no gradient to propagate
        if (h < 0 || h >= height || w < 0 || w >= width)
        {
            continue;
        }

        float h_ratio = h - (float)(hstart);
        float w_ratio = w - (float)(wstart);
        int upleft = img_start + (c * height + hstart) * width + wstart;
        int upright = upleft + 1;
        int downleft = upleft + width;
        int downright = downleft + 1;

        // atomic: neighboring output cells scatter into the same pixels
        #pragma omp atomic
        bottom_diff[upleft] += top_diff[idx] * (1. - h_ratio) * (1. - w_ratio);
        #pragma omp atomic
        bottom_diff[upright] += top_diff[idx] * (1. - h_ratio) * w_ratio;
        #pragma omp atomic
        bottom_diff[downleft] += top_diff[idx] * h_ratio * (1. - w_ratio);
        #pragma omp atomic
        bottom_diff[downright] += top_diff[idx] * h_ratio * w_ratio;
    }
}
activations.c
#include "activations.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <float.h>

// Maps an ACTIVATION enum value to its config-file name.
// NOTE(review): SWISH/MISH/NORM_CHAN* have no case here and fall through
// to the "relu" default — confirm that is intended.
char *get_activation_string(ACTIVATION a)
{
    switch(a){
        case LOGISTIC: return "logistic";
        case LOGGY: return "loggy";
        case RELU: return "relu";
        case ELU: return "elu";
        case SELU: return "selu";
        case RELIE: return "relie";
        case RAMP: return "ramp";
        case LINEAR: return "linear";
        case TANH: return "tanh";
        case PLSE: return "plse";
        case LEAKY: return "leaky";
        case STAIR: return "stair";
        case HARDTAN: return "hardtan";
        case LHTAN: return "lhtan";
        default: break;
    }
    return "relu";
}

// Parses an activation name from a config file; unknown names warn on
// stderr and fall back to RELU.
ACTIVATION get_activation(char *s)
{
    if (strcmp(s, "logistic")==0) return LOGISTIC;
    if (strcmp(s, "swish") == 0) return SWISH;
    if (strcmp(s, "mish") == 0) return MISH;
    if (strcmp(s, "normalize_channels") == 0) return NORM_CHAN;
    if (strcmp(s, "normalize_channels_softmax") == 0) return NORM_CHAN_SOFTMAX;
    if (strcmp(s, "normalize_channels_softmax_maxval") == 0) return NORM_CHAN_SOFTMAX_MAXVAL;
    if (strcmp(s, "loggy")==0) return LOGGY;
    if (strcmp(s, "relu")==0) return RELU;
    if (strcmp(s, "elu")==0) return ELU;
    if (strcmp(s, "selu") == 0) return SELU;
    if (strcmp(s, "relie")==0) return RELIE;
    if (strcmp(s, "plse")==0) return PLSE;
    if (strcmp(s, "hardtan")==0) return HARDTAN;
    if (strcmp(s, "lhtan")==0) return LHTAN;
    if (strcmp(s, "linear")==0) return LINEAR;
    if (strcmp(s, "ramp")==0) return RAMP;
    if (strcmp(s, "leaky")==0) return LEAKY;
    if (strcmp(s, "tanh")==0) return TANH;
    if (strcmp(s, "stair")==0) return STAIR;
    fprintf(stderr, "Couldn't find activation function %s, going with ReLU\n", s);
    return RELU;
}

// Applies a scalar activation to one value.
// NOTE(review): SWISH, MISH, and the NORM_CHAN* values have no case in
// this switch, so calling activate() with them returns 0; those are
// handled only by their dedicated activate_array_* functions below.
float activate(float x, ACTIVATION a)
{
    switch(a){
        case LINEAR:
            return linear_activate(x);
        case LOGISTIC:
            return logistic_activate(x);
        case LOGGY:
            return loggy_activate(x);
        case RELU:
            return relu_activate(x);
        case ELU:
            return elu_activate(x);
        case SELU:
            return selu_activate(x);
        case RELIE:
            return relie_activate(x);
        case RAMP:
            return ramp_activate(x);
        case LEAKY:
            return leaky_activate(x);
        case TANH:
            return tanh_activate(x);
        case PLSE:
            return plse_activate(x);
        case STAIR:
            return stair_activate(x);
        case HARDTAN:
            return hardtan_activate(x);
        case LHTAN:
            return lhtan_activate(x);
    }
    return 0;
}

// Applies an activation to n values in place.  LINEAR is a no-op; the
// two most common activations (LEAKY, LOGISTIC) get dedicated OpenMP
// loops, everything else goes through the (serial) generic path.
void activate_array(float *x, const int n, const ACTIVATION a)
{
    int i;
    if (a == LINEAR) {}
    else if (a == LEAKY) {
        #pragma omp parallel for
        for (i = 0; i < n; ++i) {
            x[i] = leaky_activate(x[i]);
        }
    }
    else if (a == LOGISTIC) {
        #pragma omp parallel for
        for (i = 0; i < n; ++i) {
            x[i] = logistic_activate(x[i]);
        }
    }
    else {
        for (i = 0; i < n; ++i) {
            x[i] = activate(x[i], a);
        }
    }
}

// Swish: output = x * sigmoid(x); the sigmoid is also stored separately
// for reuse by the backward pass.
void activate_array_swish(float *x, const int n, float * output_sigmoid, float * output)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; ++i) {
        float x_val = x[i];
        float sigmoid = logistic_activate(x_val);
        output_sigmoid[i] = sigmoid;
        output[i] = x_val * sigmoid;
    }
}

// https://github.com/digantamisra98/Mish
// Mish: output = x * tanh(softplus(x)); the pre-activation input is kept
// for the backward pass.
void activate_array_mish(float *x, const int n, float * activation_input, float * output)
{
    const float MISH_THRESHOLD = 20;
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; ++i) {
        float x_val = x[i];
        activation_input[i] = x_val; // store value before activation
        output[i] = x_val * tanh_activate( softplus_activate(x_val, MISH_THRESHOLD) );
    }
}

// Per-pixel channel normalization: each positive channel value is divided
// by the sum of the positive values across channels (negatives become 0).
// Data layout is (batch, channels, wh_step) flattened.
void activate_array_normalize_channels(float *x, const int n, int batch, int channels, int wh_step, float *output)
{
    int size = n / channels;

    int i;
    #pragma omp parallel for
    for (i = 0; i < size; ++i) {
        int wh_i = i % wh_step;   // spatial position
        int b = i / wh_step;      // batch index

        const float eps = 0.0001; // avoids division by zero when all values <= 0
        if (i < size) {
            float sum = eps;
            int k;
            for (k = 0; k < channels; ++k) {
                float val = x[wh_i + k * wh_step + b*wh_step*channels];
                if (val > 0) sum += val;
            }
            for (k = 0; k < channels; ++k) {
                float val = x[wh_i + k * wh_step + b*wh_step*channels];
                if (val > 0) val = val / sum;
                else val = 0;
                output[wh_i + k * wh_step + b*wh_step*channels] = val;
            }
        }
    }
}

// Per-pixel softmax across channels; when use_max_val is set, the running
// max is subtracted before exponentiation for numerical stability.
void activate_array_normalize_channels_softmax(float *x, const int n, int batch, int channels, int wh_step, float *output, int use_max_val)
{
    int size = n / channels;

    int i;
    #pragma omp parallel for
    for (i = 0; i < size; ++i) {
        int wh_i = i % wh_step;   // spatial position
        int b = i / wh_step;      // batch index

        const float eps = 0.0001;
        if (i < size) {
            float sum = eps;
            float max_val = -FLT_MAX;
            int k;
            if (use_max_val) {
                for (k = 0; k < channels; ++k) {
                    float val = x[wh_i + k * wh_step + b*wh_step*channels];
                    if (val > max_val || k == 0) max_val = val;
                }
            }
            else
                max_val = 0;

            for (k = 0; k < channels; ++k) {
                float val = x[wh_i + k * wh_step + b*wh_step*channels];
                sum += expf(val - max_val);
            }
            for (k = 0; k < channels; ++k) {
                float val = x[wh_i + k * wh_step + b*wh_step*channels];
                val = expf(val - max_val) / sum;
                output[wh_i + k * wh_step + b*wh_step*channels] = val;
            }
        }
    }
}

// Backward for the channel softmax above: scales each channel's delta by
// the per-pixel sum of (output * delta) across channels.
void gradient_array_normalize_channels_softmax(float *x, const int n, int batch, int channels, int wh_step, float *delta)
{
    int size = n / channels;

    int i;
    #pragma omp parallel for
    for (i = 0; i < size; ++i) {
        int wh_i = i % wh_step;
        int b = i / wh_step;

        if (i < size) {
            float grad = 0;
            int k;
            for (k = 0; k < channels; ++k) {
                const int index = wh_i + k * wh_step + b*wh_step*channels;
                float out = x[index];
                float d = delta[index];
                grad += out*d;
            }
            for (k = 0; k < channels; ++k) {
                const int index = wh_i + k * wh_step + b*wh_step*channels;
                float d = delta[index];
                d = d * grad;
                delta[index] = d;
            }
        }
    }
}

// Backward for activate_array_normalize_channels: same accumulated grad,
// but applied only where the (pre-normalization) input was positive.
void gradient_array_normalize_channels(float *x, const int n, int batch, int channels, int wh_step, float *delta)
{
    int size = n / channels;

    int i;
    #pragma omp parallel for
    for (i = 0; i < size; ++i) {
        int wh_i = i % wh_step;
        int b = i / wh_step;

        if (i < size) {
            float grad = 0;
            int k;
            for (k = 0; k < channels; ++k) {
                const int index = wh_i + k * wh_step + b*wh_step*channels;
                float out = x[index];
                float d = delta[index];
                grad += out*d;
            }
            for (k = 0; k < channels; ++k) {
                const int index = wh_i + k * wh_step + b*wh_step*channels;
                if (x[index] > 0) {
                    float d = delta[index];
                    d = d * grad;
                    delta[index] = d;
                }
            }
        }
    }
}

// Scalar gradient of an activation (definition continues past this chunk).
float gradient(float x, ACTIVATION a)
{
switch(a){ case LINEAR: return linear_gradient(x); case LOGISTIC: return logistic_gradient(x); case LOGGY: return loggy_gradient(x); case RELU: return relu_gradient(x); case NORM_CHAN: //return relu_gradient(x); case NORM_CHAN_SOFTMAX_MAXVAL: //... case NORM_CHAN_SOFTMAX: printf(" Error: should be used custom NORM_CHAN or NORM_CHAN_SOFTMAX-function for gradient \n"); exit(0); return 0; case ELU: return elu_gradient(x); case SELU: return selu_gradient(x); case RELIE: return relie_gradient(x); case RAMP: return ramp_gradient(x); case LEAKY: return leaky_gradient(x); case TANH: return tanh_gradient(x); case PLSE: return plse_gradient(x); case STAIR: return stair_gradient(x); case HARDTAN: return hardtan_gradient(x); case LHTAN: return lhtan_gradient(x); } return 0; } void gradient_array(const float *x, const int n, const ACTIVATION a, float *delta) { int i; #pragma omp parallel for for(i = 0; i < n; ++i){ delta[i] *= gradient(x[i], a); } } // https://github.com/BVLC/caffe/blob/04ab089db018a292ae48d51732dd6c66766b36b6/src/caffe/layers/swish_layer.cpp#L54-L56 void gradient_array_swish(const float *x, const int n, const float * sigmoid, float * delta) { int i; #pragma omp parallel for for (i = 0; i < n; ++i) { float swish = x[i]; delta[i] *= swish + sigmoid[i]*(1 - swish); } } // https://github.com/digantamisra98/Mish void gradient_array_mish(const int n, const float * activation_input, float * delta) { int i; #pragma omp parallel for for (i = 0; i < n; ++i) { const float MISH_THRESHOLD = 20.0f; // implementation from TensorFlow: https://github.com/tensorflow/addons/commit/093cdfa85d334cbe19a37624c33198f3140109ed // implementation from Pytorch: https://github.com/thomasbrandon/mish-cuda/blob/master/csrc/mish.h#L26-L31 float inp = activation_input[i]; const float sp = softplus_activate(inp, MISH_THRESHOLD); const float grad_sp = 1 - exp(-sp); const float tsp = tanh(sp); const float grad_tsp = (1 - tsp*tsp) * grad_sp; const float grad = inp * grad_tsp + tsp; delta[i] *= 
grad; //float x = activation_input[i]; //float d = 2 * expf(x) + expf(2 * x) + 2; //float w = 4 * (x + 1) + 4 * expf(2 * x) + expf(3 * x) + expf(x)*(4 * x + 6); //float derivative = expf(x) * w / (d * d); //delta[i] *= derivative; } }
pmv-OpenMP-b.c
// Compile with -O2 and -fopenmp
#include <stdlib.h>
#include <stdio.h>
#include <time.h>   // BUGFIX: srand(time(NULL)) previously called time() without a prototype
#include <omp.h>

/* Matrix-vector product benchmark: v2 = M * v1 for an N x N random
 * matrix, timing only the product.  N is taken from argv[1]. */
int main(int argc, char** argv){
    int i, f, c;
    double t1, t2, total;

    srand(time(NULL));

    // Read input argument (number of components of the vector)
    if (argc < 2){
        printf("Falta tamaño de matriz y vector\n");
        exit(-1);
    }

    unsigned int N = atoi(argv[1]); // Max N = 2^32-1 = 4294967295 (sizeof(unsigned int) = 4 B)

    double *v1, *v2, **M;
    v1 = (double*) malloc(N*sizeof(double));  // malloc takes the size in bytes
    v2 = (double*) malloc(N*sizeof(double));  // malloc returns NULL when out of memory
    M  = (double**) malloc(N*sizeof(double *));
    if ((v1 == NULL) || (v2 == NULL) || (M == NULL)){
        printf("Error en la reserva de espacio para los vectores\n");
        exit(-2);
    }
    for (i = 0; i < N; i++){
        M[i] = (double*) malloc(N*sizeof(double));
        if (M[i] == NULL){
            printf("Error en la reserva de espacio para los vectores\n");
            exit(-2);
        }
    }
    // From here on, matrix entries can be accessed as M[i][j]

    // Initialize matrix and vectors with values in 1..10.
    // BUGFIX: the original used rand()%(1-10+1)+1, i.e. rand()%-8, which is
    // not the intended 1..10 range.  The initialization loops are kept
    // serial: rand() shares hidden state and is not thread-safe, so the
    // previous `parallel for` (with the inner index `c` shared, a data
    // race) was undefined behavior; initialization is not timed anyway.
    for (i = 0; i < N; i++)
        v1[i] = rand()%10 + 1;

    for (f = 0; f < N; f++)
        for (c = 0; c < N; c++)
            M[f][c] = rand()%10 + 1;

    // Time measurement
    t1 = omp_get_wtime();

    // Compute the matrix-vector product v2 = M * v1.
    // BUGFIX: the original accumulated with `v2[f] += suma` into
    // uninitialized malloc'd memory.  A per-row OpenMP reduction both
    // fixes that and replaces the manual firstprivate/critical pattern.
    for (f = 0; f < N; f++){
        double suma = 0.0;
        #pragma omp parallel for reduction(+:suma)
        for (c = 0; c < N; c++)
            suma += M[f][c] * v1[c];
        v2[f] = suma;
    }

    // Time measurement
    t2 = omp_get_wtime();
    total = t2 - t1;

    // Print the result and the execution time.
    // BUGFIX: N-1 is unsigned, so %u (not %d) is the correct conversion.
    printf("Tiempo(seg.):%11.9f\t / Tamaño:%u\t/ V2[0]=%8.6f V2[%u]=%8.6f\n",
           total, N, v2[0], N-1, v2[N-1]);

    if (N < 15) {
        printf("\nv2=[");
        for (i = 0; i < N; i++)
            printf("%.0lf ", v2[i]);
        printf("]\n");
    }

    free(v1);  // release the space reserved for v1
    free(v2);  // release the space reserved for v2
    for (i = 0; i < N; i++)
        free(M[i]);
    free(M);

    return 0;
}
bls.c
/* Licensed under a 3-clause BSD style license - see LICENSE.rst */ #include <math.h> #include <float.h> #include <stdlib.h> #if defined(_OPENMP) #include <omp.h> #endif #ifndef INFINITY #define INFINITY (1.0 / 0.0) #endif void compute_objective( double y_in, double y_out, double ivar_in, double ivar_out, int obj_flag, double* objective, double* log_likelihood, double* depth, double* depth_err, double* depth_snr ) { if (obj_flag) { double arg = y_out - y_in; *log_likelihood = 0.5*ivar_in*arg*arg; *objective = *log_likelihood; } else { *depth = y_out - y_in; *depth_err = sqrt(1.0 / ivar_in + 1.0 / ivar_out); *depth_snr = *depth / *depth_err; *objective = *depth_snr; } } static inline double wrap_into (double x, double period) { return x - period * floor(x / period); } int run_bls ( // Inputs int N, // Length of the time array double* t, // The list of timestamps double* y, // The y measured at ``t`` double* ivar, // The inverse variance of the y array int n_periods, double* periods, // The period to test in units of ``t`` int n_durations, // Length of the durations array double* durations, // The durations to test in units of ``bin_duration`` int oversample, // The number of ``bin_duration`` bins in the maximum duration int obj_flag, // A flag indicating the periodogram type // 0 - depth signal-to-noise // 1 - log likelihood // Outputs double* best_objective, // The value of the periodogram at maximum double* best_depth, // The estimated depth at maximum double* best_depth_err, // The uncertainty on ``best_depth`` double* best_duration, // The best fitting duration in units of ``t`` double* best_phase, // The phase of the mid-transit time in units of // ``t`` double* best_depth_snr, // The signal-to-noise ratio of the depth estimate double* best_log_like // The log likelihood at maximum ) { // Start by finding the period and duration ranges double max_period = periods[0], min_period = periods[0]; int k; for (k = 1; k < n_periods; ++k) { if (periods[k] < min_period) 
min_period = periods[k]; if (periods[k] > max_period) max_period = periods[k]; } if (min_period < DBL_EPSILON) return 1; double min_duration = durations[0], max_duration = durations[0]; for (k = 1; k < n_durations; ++k) { if (durations[k] < min_duration) min_duration = durations[k]; if (durations[k] > max_duration) max_duration = durations[k]; } if ((max_duration > min_period) || (min_duration < DBL_EPSILON)) return 2; // Compute the durations in terms of bin_duration double bin_duration = min_duration / ((double)oversample); int max_n_bins = (int)(ceil(max_period / bin_duration)) + oversample; int nthreads, blocksize = max_n_bins+1; #pragma omp parallel { #if defined(_OPENMP) nthreads = omp_get_num_threads(); #else nthreads = 1; #endif } // Allocate the work arrays double* mean_y_0 = (double*)malloc(nthreads*blocksize*sizeof(double)); if (mean_y_0 == NULL) { return -2; } double* mean_ivar_0 = (double*)malloc(nthreads*blocksize*sizeof(double)); if (mean_ivar_0 == NULL) { free(mean_y_0); return -3; } // Pre-accumulate some factors. double min_t = INFINITY; double sum_y = 0.0, sum_ivar = 0.0; int i; #pragma omp parallel for reduction(+:sum_y), reduction(+:sum_ivar) for (i = 0; i < N; ++i) { min_t = fmin(min_t, t[i]); sum_y += y[i] * ivar[i]; sum_ivar += ivar[i]; } // Loop over periods and do the search int p; #pragma omp parallel for for (p = 0; p < n_periods; ++p) { #if defined(_OPENMP) int ithread = omp_get_thread_num(); #else int ithread = 0; #endif int block = blocksize * ithread; double period = periods[p]; int n_bins = (int)(ceil(period / bin_duration)) + oversample; double* mean_y = mean_y_0 + block; double* mean_ivar = mean_ivar_0 + block; // This first pass bins the data into a fine-grain grid in phase from zero // to period and computes the weighted sum and inverse variance for each // bin. 
int n, ind; for (n = 0; n < n_bins+1; ++n) { mean_y[n] = 0.0; mean_ivar[n] = 0.0; } for (n = 0; n < N; ++n) { int ind = (int)(wrap_into(t[n] - min_t, period) / bin_duration) + 1; mean_y[ind] += y[n] * ivar[n]; mean_ivar[ind] += ivar[n]; } // To simplify calculations below, we wrap the binned values around and pad // the end of the array with the first ``oversample`` samples. for (n = 1, ind = n_bins - oversample; n <= oversample; ++n, ++ind) { mean_y[ind] = mean_y[n]; mean_ivar[ind] = mean_ivar[n]; } // To compute the estimates of the in-transit flux, we need the sum of // mean_y and mean_ivar over a given set of transit points. To get this // fast, we can compute the cumulative sum and then use differences between // points separated by ``duration`` bins. Here we convert the mean arrays // to cumulative sums. for (n = 1; n <= n_bins; ++n) { mean_y[n] += mean_y[n-1]; mean_ivar[n] += mean_ivar[n-1]; } // Then we loop over phases (in steps of n_bin) and durations and find the // best fit value. By looping over durations here, we get to reuse a lot of // the computations that we did above. 
double objective, log_like, depth, depth_err, depth_snr; best_objective[p] = -INFINITY; int k; for (k = 0; k < n_durations; ++k) { int dur = (int)(round(durations[k] / bin_duration)); int n_max = n_bins-dur; for (n = 0; n <= n_max; ++n) { // Estimate the in-transit and out-of-transit flux double y_in = mean_y[n+dur] - mean_y[n]; double ivar_in = mean_ivar[n+dur] - mean_ivar[n]; double y_out = sum_y - y_in; double ivar_out = sum_ivar - ivar_in; // Skip this model if there are no points in transit if ((ivar_in < DBL_EPSILON) || (ivar_out < DBL_EPSILON)) { continue; } // Normalize to compute the actual value of the flux y_in /= ivar_in; y_out /= ivar_out; // Either compute the log likelihood or the signal-to-noise // ratio compute_objective(y_in, y_out, ivar_in, ivar_out, obj_flag, &objective, &log_like, &depth, &depth_err, &depth_snr); // If this is the best result seen so far, keep it if (y_out >= y_in && objective > best_objective[p]) { best_objective[p] = objective; // Compute the other parameters compute_objective(y_in, y_out, ivar_in, ivar_out, (obj_flag == 0), &objective, &log_like, &depth, &depth_err, &depth_snr); best_depth[p] = depth; best_depth_err[p] = depth_err; best_depth_snr[p] = depth_snr; best_log_like[p] = log_like; best_duration[p] = dur * bin_duration; best_phase[p] = fmod(n*bin_duration + 0.5*best_duration[p] + min_t, period); } } } } // Clean up free(mean_y_0); free(mean_ivar_0); return 0; }
sol1.c
/**
 * \file
 * \brief [Problem 14](https://projecteuler.net/problem=14) solution
 * \author [Krishna Vedala](https://github.com/kvedala)
 *
 * Since the computational values for each iteration step are independent,
 * we can compute them in parallel. However, the maximum values must be
 * updated in synchrony so that we do not get into a "race condition".
 *
 * To compile with supporting gcc or clang, the flag "-fopenmp" should be
 * passed, while with the Microsoft C compiler the flag "/fopenmp" should
 * be used. If you are using the provided CMAKE compilation, it will
 * automatically detect OPENMP and compile with it for you.
 *
 * Automatically detects OPENMP using the _OPENMP macro.
 */

#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/**
 * Computes the length of the Collatz sequence for a given starting number.
 * \param start_num first term of the sequence (expected >= 1)
 * \return number of terms until the sequence reaches 1, inclusive of both ends
 */
long long collatz(long long start_num)
{
    long long length = 1;
    while (start_num != 1) /* loop till we reach 1 */
    {
        if (start_num & 0x01) /* check for odd */
            start_num = 3 * start_num + 1;
        else
            start_num >>= 1; /* simpler divide by 2 */
        length++;
    }

    return length;
}

/** Main function */
int main(int argc, char **argv)
{
    long long max_len = 0, max_len_num = 0;
    long long MAX_NUM = 1000000;

    if (argc == 2) /* set commandline argument as the maximum iteration number */
    {
        MAX_NUM = atoll(argv[1]);
        printf("Maximum number: %lld\n", MAX_NUM);
    }

    long long i;
#ifdef _OPENMP
#pragma omp parallel for shared(max_len, max_len_num) schedule(guided)
#endif
    for (i = 1; i < MAX_NUM; i++)
    {
        long long L = collatz(i);

        /* BUGFIX: the shared maxima were previously updated without any
         * synchronization -- exactly the race condition the file comment
         * warns about.  Cheap unsynchronized pre-check first, then
         * re-check inside the critical section before updating. */
        if (L > max_len)
        {
#ifdef _OPENMP
#pragma omp critical
#endif
            {
                if (L > max_len)
                {
                    max_len = L;     /* length of sequence */
                    max_len_num = i; /* starting number */
                }
            }
        }
#if defined(_OPENMP) && defined(DEBUG)
        printf("Thread: %2d\t %3lld: \t%5lld\n", omp_get_thread_num(), i, L);
#elif defined(DEBUG)
        printf("%3lld: \t%5lld\n", i, L);
#endif
    }

    printf("Start: %3lld: \tLength: %5lld\n", max_len_num, max_len);

    return 0;
}
spmm_blocking_libxsmm.h
/*! * Copyright (c) 2021 Intel Corporation * \file array/cpu/spmm.h * \brief SPMM CPU kernel function header. * \author Sanchit Misra <sanchit.misra@intel.com>, * Ramanarayan Mohanty <ramanarayan.mohanty@intel.com>, * Vasimuddin Md <vasimuddin.md@intel.com>, * Sasikanth Avancha <sasikanth.avancha@intel.com> */ #ifndef DGL_ARRAY_CPU_SPMM_BLOCKING_LIBXSMM_H_ #define DGL_ARRAY_CPU_SPMM_BLOCKING_LIBXSMM_H_ #include <dgl/array.h> #include <dgl/bcast.h> #include <dmlc/logging.h> #include <algorithm> #if !defined(_WIN32) #ifdef USE_AVX #ifdef USE_LIBXSMM #include <unistd.h> #include <libxsmm.h> #ifdef DEBUG #include <x86intrin.h> #endif // DEBUG #include <dmlc/omp.h> #define NUM_BLOCKS_PER_THREAD 20 #define BLOCKING_HEURISTIC_PARAM 500 namespace dgl { namespace aten { namespace cpu { template <typename IdType, typename DType> struct CSRMatrixInternal { IdType num_rows; IdType num_cols; IdType *indptr; IdType *indices; DType *data; }; int32_t GetLLCSize() { int32_t cache_size = sysconf(_SC_LEVEL3_CACHE_SIZE); if (cache_size < 0) cache_size = DGL_CPU_LLC_SIZE; return cache_size; } /*! * \brief Tile the CSR matrix to roughly make sure that the column tiles and * corresponding neighbor features fit into LLC and the row tiles * are assigned to OMP threads. * \param csr The Csr matrix. * \param block_csr_array The array containing csr matrices of all blocks. * \param num_M_blocks Number of blocks to create along the rows of adjacency matrix. * \param num_K_blocks Number of blocks to create along the columns of adjacency matrix. * \param M_block_size block size along the rows of adjacency matrix. * \param K_block_size block size along the columns of adjacency matrix. * \param use_lhs Whether to use lhs. * \param use_rhs Whether to use rhs. 
*/ template <typename IdType> inline void SpMMCreateBlocks( const CSRMatrix& csr, CSRMatrixInternal<IdType, IdType> *block_csr_array, IdType num_M_blocks, IdType num_K_blocks, IdType M_block_size, IdType K_block_size, bool use_lhs, bool use_rhs) { const IdType M = csr.num_rows; const IdType K = csr.num_cols; IdType* indptr = csr.indptr.Ptr<IdType>(); IdType* indices = csr.indices.Ptr<IdType>(); IdType* edges = csr.data.Ptr<IdType>(); CHECK_NOTNULL(indptr); if (use_lhs) CHECK_NOTNULL(indices); if (use_rhs) CHECK_NOTNULL(edges); if (num_K_blocks > 1) { IdType *indptr_block_buf = reinterpret_cast<IdType *>(aligned_alloc(64, (M_block_size + 1) * num_M_blocks * num_K_blocks * sizeof(IdType))); IdType *indices_block_buf = reinterpret_cast<IdType *>(aligned_alloc(64, indptr[M] * sizeof(IdType))); IdType *edges_block_buf = reinterpret_cast<IdType *>(aligned_alloc(64, indptr[M] * sizeof(IdType))); #pragma omp parallel { IdType *my_cur_col_id = reinterpret_cast<IdType *>(aligned_alloc(64, 2 * M_block_size * sizeof(IdType))); #pragma omp for for (IdType m = 0; m < num_M_blocks; m++) { const IdType M_start = m * M_block_size; const IdType M_end = std::min((m + 1) * M_block_size, M); const IdType nnz = indptr[M_end] - indptr[M_start]; IdType cur_indices_id = 0; IdType *my_indices_block_buf, *my_edges_block_buf; if (use_lhs) my_indices_block_buf = indices_block_buf + indptr[M_start]; if (use_rhs) my_edges_block_buf = edges_block_buf + indptr[M_start]; for (IdType i = M_start; i < M_end; i++) { my_cur_col_id[(i - M_start) * 2] = indptr[i]; my_cur_col_id[(i - M_start) * 2 + 1] = indptr[i + 1]; } for (IdType k = 0; k < num_K_blocks; k++) { const IdType K_start = k * K_block_size; const IdType K_end = std::min((k + 1) * K_block_size, K); CSRMatrixInternal<IdType, IdType> cur_csr; cur_csr.num_rows = M_end - M_start; cur_csr.num_cols = K_end - K_start; // Create csr_ij IdType *cur_csr_indptr = indptr_block_buf + (m * num_K_blocks + k) * (M_block_size + 1); IdType *cur_csr_indices = 
nullptr, *cur_csr_edges = nullptr; if (use_lhs) cur_csr_indices = my_indices_block_buf + cur_indices_id; if (use_rhs) cur_csr_edges = my_edges_block_buf + cur_indices_id; IdType cur_nnz = 0; for (IdType i = M_start; i < M_end; i++) { const IdType row_start = my_cur_col_id[(i - M_start) * 2]; const IdType row_end = my_cur_col_id[(i - M_start) * 2 + 1]; cur_csr_indptr[i - M_start] = cur_nnz; IdType eid; for (eid = row_start; eid < row_end; eid++) { const IdType src = indices[eid]; const IdType edge = edges[eid]; if (src >= K_end) { break; } CHECK_LT(cur_indices_id + cur_nnz, nnz); if (use_lhs) cur_csr_indices[cur_nnz] = src; if (use_rhs) cur_csr_edges[cur_nnz] = edge; cur_nnz++; } my_cur_col_id[(i - M_start) * 2] = eid; } cur_csr_indptr[cur_csr.num_rows] = cur_nnz; cur_indices_id += cur_nnz; cur_csr.indptr = cur_csr_indptr; if (use_lhs) cur_csr.indices = cur_csr_indices; if (use_rhs) cur_csr.data = cur_csr_edges; block_csr_array[m * num_K_blocks + k] = cur_csr; } CHECK_EQ(nnz, cur_indices_id); } free(my_cur_col_id); } } else { #pragma omp for for (IdType m = 0; m < num_M_blocks; m++) { const IdType M_start = m * M_block_size; const IdType M_end = std::min((m + 1) * M_block_size, M); CSRMatrixInternal<IdType, IdType> cur_csr; cur_csr.num_rows = M_end - M_start; cur_csr.num_cols = K; cur_csr.indptr = indptr + M_start; cur_csr.indices = indices; cur_csr.data = edges; block_csr_array[m] = cur_csr; } } } /*! * \brief Create libxsmm kernel. * \param has_idx For the edge features, are there indices available. * \param N Feature size. * \param redop_flag Flag specifying the reduction operation. * \param is_cmp Is the reduction operation a compare operation. * \note libxsmm_dispatch_meltw_opreduce_vecs_idx creates a JIT'ed kernel. * Given a node u, the kernel performs an elementwise "Op" on the * features of the neighbors and/or the edges incident on u. * Subsequently, it performs an elementwise "Redop" on all such * features created and stores into the feature of node u. 
* It uses a SIMD and a cache efficient design and also provides * support to enable software prefetching if needed. For IdType, * it supports INT32 and INT64. For DType, it supports BF16 and FP32. * It supports all the "Ops" and "Redops" supported by DGL. Once a * kernel is generated by libxsmm_dispatch_meltw_opreduce_vecs_idx, * it is cached for the entire duration of the execution of a program * so that subsequently if the kernel is needed again, it just returns * the cached copy. */ template <typename IdType, typename DType, typename Op> inline libxsmm_meltwfunction_opreduce_vecs_idx SpMMCreateLibxsmmKernel( bool has_idx, IdType N, libxsmm_meltw_opreduce_vecs_flags redop_flag, bool is_cmp) { int _ld = N; libxsmm_meltw_opreduce_vecs_flags opredop_flags; // First, set the Op in the opredop_flags if (std::is_same<Op, op::Add<DType>>::value) { opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_ADD; } else if (std::is_same<Op, op::Sub<DType>>::value) { opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_SUB; } else if (std::is_same<Op, op::Mul<DType>>::value) { opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_MUL; } else if (std::is_same<Op, op::Div<DType>>::value) { opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_DIV; } else if (std::is_same<Op, op::CopyLhs<DType>>::value) { opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_COPY; } else if (std::is_same<Op, op::CopyRhs<DType>>::value) { opredop_flags = LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OP_COPY; } // Second, set which of lhs or rhs is considered first and second operand. // This is needed since libxsmm assumes that the copy operation always copies the first operand. // So, if we need to copy rhs, we need to set that as the first operand. // For rhs, we also set whether to use implicit indices or provided indices. 
if (std::is_same<Op, op::CopyLhs<DType>>::value) { opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags | LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OPORDER_VECIDX_VECIN); } else if (std::is_same<Op, op::CopyRhs<DType>>::value) { opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags | LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OPORDER_VECIN_VECIDX); if (!has_idx) { opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags | LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_IMPLICIT_INDEXED_VECIDX); } } else { opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags | LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_OPORDER_VECIDX_VECIN); if (has_idx) { opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags | LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_INDEXED_VEC); } else { opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags | LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_IMPLICIT_INDEXED_VEC); } } // Third, we set the Redop in the opredop_flags opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags | redop_flag); // Fourth, in case of Cmp Redop, set whether to record argmax/argmin for lhs/rhs if (is_cmp) { if (Op::use_lhs) { opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags | LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_RECORD_ARGOP_OFF_VEC_0); } if (Op::use_rhs) { opredop_flags = (libxsmm_meltw_opreduce_vecs_flags)(opredop_flags | LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_RECORD_ARGOP_OFF_VEC_1); } } libxsmm_meltwfunction_opreduce_vecs_idx kernel = nullptr; if (std::is_same<DType, float>::value) { kernel = libxsmm_dispatch_meltw_opreduce_vecs_idx( N, &_ld, &_ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, (sizeof(IdType) == 8) ? LIBXSMM_DATATYPE_I64 : LIBXSMM_DATATYPE_I32, opredop_flags); } if (kernel == nullptr) { LOG(FATAL) << "Failed to generate libxsmm kernel for the SpMM operation!"; } return kernel; } /*! * \brief Use libxsmm to perform SpMM-Sum on all blocks. * \param block_csr_array The array containing csr matrices of all blocks. 
* \param B The feature on source nodes. * \param E The feature on edges. * \param C The result feature on destination nodes. * \param has_idx For the edge features, are there indices available. * \param N Feature size. * \param num_M_blocks Number of blocks to create along the rows of adjacency matrix. * \param num_K_blocks Number of blocks to create along the columns of adjacency matrix. * \param M_block_size block size along the rows of adjacency matrix. * \param kernel The libxsmm kernel. */ template <typename IdType, typename DType> inline void SpMMBlockwiseOpSum( CSRMatrixInternal<IdType, IdType> *block_csr_array, const DType *B, const DType *E, DType *C, bool has_idx, IdType N, IdType num_M_blocks, IdType num_K_blocks, IdType M_block_size, libxsmm_meltwfunction_opreduce_vecs_idx kernel) { DType (*in_matrix1)[N] = (DType (*)[N])B; DType (*in_matrix2)[N] = (DType (*)[N])E; DType (*output)[N] = (DType (*)[N])C; #pragma omp parallel { for (IdType k = 0; k < num_K_blocks; k++) { #pragma omp for schedule(dynamic) for (IdType m = 0; m < num_M_blocks; m++) { CSRMatrixInternal<IdType, IdType> cur_csr = block_csr_array[m * num_K_blocks + k]; const IdType M_start = m * M_block_size; for (IdType i = 0; i < cur_csr.num_rows; i++) { const IdType row_start = cur_csr.indptr[i]; const IdType row_end = cur_csr.indptr[i + 1]; const IdType dst = i + M_start; libxsmm_meltw_opreduce_vecs_idx_param params; params.n = row_end - row_start; params.indices = &cur_csr.indices[row_start]; params.in_matrix = in_matrix1; params.out_vec = &output[dst][0]; params.scale_vals = nullptr; if (has_idx) { params.in_matrix2 = in_matrix2; params.indices2 = &cur_csr.data[row_start]; } else { params.in_matrix2 = &in_matrix2[row_start]; } kernel(&params); } } } } } /*! * \brief Use libxsmm to perform SpMM-Max/Min on all blocks. * \param block_csr_array The array containing csr matrices of all blocks. * \param B The feature on source nodes. * \param E The feature on edges. 
* \param C The result feature on destination nodes. * \param argB Arg-Min/Max on source nodes. * \param argE Arg-Min/Max on edges. * \param has_idx For the edge features, are there indices available. * \param N Feature size. * \param num_M_blocks Number of blocks to create along the rows of adjacency matrix. * \param num_K_blocks Number of blocks to create along the columns of adjacency matrix. * \param M_block_size block size along the rows of adjacency matrix. * \param kernel The libxsmm kernel. */ template <typename IdType, typename DType, typename Op, typename Cmp> inline void SpMMBlockwiseOpCmp( CSRMatrixInternal<IdType, IdType> *block_csr_array, const DType *B, const DType *E, DType *C, IdType *argB, IdType *argE, bool has_idx, IdType N, IdType num_M_blocks, IdType num_K_blocks, IdType M_block_size, libxsmm_meltwfunction_opreduce_vecs_idx kernel) { DType (*in_matrix1)[N] = (DType (*)[N])B; DType (*in_matrix2)[N] = (DType (*)[N])E; DType (*output)[N] = (DType (*)[N])C; IdType (*out_matrix1)[N] = (IdType (*)[N])argB; IdType (*out_matrix2)[N] = (IdType (*)[N])argE; #pragma omp parallel { for (IdType k = 0; k < num_K_blocks; k++) { #pragma omp for schedule(dynamic) for (IdType m = 0; m < num_M_blocks; m++) { CSRMatrixInternal<IdType, IdType> cur_csr = block_csr_array[m * num_K_blocks + k]; const IdType M_start = m * M_block_size; for (IdType i = 0; i < cur_csr.num_rows; i++) { const IdType row_start = cur_csr.indptr[i]; const IdType row_end = cur_csr.indptr[i + 1]; const IdType dst = i + M_start; libxsmm_meltw_opreduce_vecs_idx_param params; params.n = row_end - row_start; params.indices = &cur_csr.indices[row_start]; params.in_matrix = in_matrix1; params.out_vec = &output[dst][0]; params.argop_off_vec_0 = &out_matrix1[dst][0]; params.argop_off_vec_1 = &out_matrix2[dst][0]; params.scale_vals = nullptr; if (has_idx) { params.in_matrix2 = in_matrix2; params.indices2 = &cur_csr.data[row_start]; } else { params.in_matrix2 = &in_matrix2[row_start]; } kernel(&params); 
} } } } } /*! * \brief Free the tiled CSR matrix data. * \param block_csr_array The array containing csr matrices of all blocks. * \param num_M_blocks Number of blocks to create along the rows of adjacency matrix. * \param num_K_blocks Number of blocks to create along the columns of adjacency matrix. * \param use_lhs Whether to use lhs. * \param use_rhs Whether to use rhs. */ template <typename IdType> inline void SpMMFreeBlocks( CSRMatrixInternal<IdType, IdType> *block_csr_array, IdType num_M_blocks, IdType num_K_blocks, bool use_lhs, bool use_rhs) { if (num_K_blocks > 1) { free(block_csr_array[0].indptr); if (use_lhs) free(block_csr_array[0].indices); if (use_rhs) free(block_csr_array[0].data); } free(block_csr_array); } /*! * \brief Optimized CPU kernel of SpMM-Sum/Max/Min on Csr format. * \param bcast Broadcast information. * \param csr The Csr matrix. * \param ufeat The feature on source nodes. * \param efeat The feature on edges. * \param out The result feature on destination nodes. * \param argu Arg-Min/Max on source nodes. * \param arge Arg-Min/Max on edges. * \note it uses libxsmm, blocking and dynamic thread scheduling. 
*/ template <typename IdType, typename DType, typename Op, typename Redop> void SpMMRedopCsrOpt( const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, NDArray argu, NDArray arge) { int32_t llc_size = GetLLCSize(); #ifdef DEBUG uint64_t startTick, endTick; startTick = __rdtsc(); #endif // DEBUG const bool has_idx = !IsNullArray(csr.data); DType* C = out.Ptr<DType>(); const DType* B = ufeat.Ptr<DType>(); const DType* E = efeat.Ptr<DType>(); IdType *argB, *argE; if (std::is_same<Redop, op::Max<DType>>::value || std::is_same<Redop, op::Min<DType>>::value) { argB = argu.Ptr<IdType>(); argE = arge.Ptr<IdType>(); } const int nthreads = omp_get_max_threads(); const IdType M = csr.num_rows; const IdType N = bcast.out_len; const IdType K = csr.num_cols; const IdType* indptr = csr.indptr.Ptr<IdType>(); CHECK_NOTNULL(indptr); const int total_nnz = indptr[M]; if (M <= 0 || K <= 0 || N <= 0 || total_nnz <= 0) return; const float avg_degree = total_nnz * 1.0 / M; const float nnz_prob = avg_degree / K; IdType K_block_size = llc_size / (N * sizeof(DType) * nnz_prob * BLOCKING_HEURISTIC_PARAM); IdType M_block_size = M / (nthreads * NUM_BLOCKS_PER_THREAD); if (M_block_size == 0) M_block_size = 1; if (K_block_size == 0) K_block_size = 1; IdType num_M_blocks = (M + M_block_size - 1) / M_block_size; IdType num_K_blocks = (K + K_block_size - 1) / K_block_size; CSRMatrixInternal<IdType, IdType> *block_csr_array = (CSRMatrixInternal<IdType, IdType> *)aligned_alloc(64, sizeof(CSRMatrixInternal<IdType, IdType>) * num_M_blocks * num_K_blocks); #ifdef DEBUG endTick = __rdtsc(); if (std::is_same<Redop, op::Max<DType>>::value) { LOG(INFO) << "Redop = Max"; } else if (std::is_same<Redop, op::Min<DType>>::value) { LOG(INFO) << "Redop = Min"; } else if (std::is_same<Redop, op::Add<DType>>::value) { LOG(INFO) << "Redop = Add"; } LOG(INFO) << "nthreads = " << nthreads << ", llc_size = " << llc_size; LOG(INFO) << "M = " << M << ", K = " << K << ", N = " << N; 
LOG(INFO) << "use_lhs = " << Op::use_lhs << ", use_rhs = " << Op::use_rhs; LOG(INFO) << "total_nnz = " << total_nnz << ", avg_degree = " << avg_degree; LOG(INFO) << "has_idx = " << has_idx; LOG(INFO) << "nnz_prob = " << nnz_prob; LOG(INFO) << "K_block_size = " << K_block_size << ", M_block_size = " << M_block_size; LOG(INFO) << "num_K_blocks = " << num_K_blocks << ", num_M_blocks = " << num_M_blocks; LOG(INFO) << "stage0 ticks = " << (endTick - startTick); startTick = __rdtsc(); #endif // DEBUG SpMMCreateBlocks(csr, block_csr_array, num_M_blocks, num_K_blocks, M_block_size, K_block_size, Op::use_lhs, Op::use_rhs); #ifdef DEBUG endTick = __rdtsc(); LOG(INFO) << "stage1 ticks = " << (endTick - startTick); startTick = __rdtsc(); #endif // DEBUG libxsmm_meltwfunction_opreduce_vecs_idx kernel = nullptr; if (std::is_same<Redop, op::Max<DType>>::value) { kernel = SpMMCreateLibxsmmKernel<IdType, DType, Op>(has_idx, N, LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_REDOP_MAX, true); } else if (std::is_same<Redop, op::Min<DType>>::value) { kernel = SpMMCreateLibxsmmKernel<IdType, DType, Op>(has_idx, N, LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_REDOP_MIN, true); } else if (std::is_same<Redop, op::Add<DType>>::value) { kernel = SpMMCreateLibxsmmKernel<IdType, DType, Op>(has_idx, N, LIBXSMM_MELTW_FLAG_OPREDUCE_VECS_REDOP_SUM, false); } #ifdef DEBUG endTick = __rdtsc(); LOG(INFO) << "stage2 ticks = " << (endTick - startTick); startTick = __rdtsc(); #endif // DEBUG if (std::is_same<Redop, op::Max<DType>>::value || std::is_same<Redop, op::Min<DType>>::value) { SpMMBlockwiseOpCmp<IdType, DType, Op, Redop>(block_csr_array, B, E, C, argB, argE, has_idx, N, num_M_blocks, num_K_blocks, M_block_size, kernel); } else { SpMMBlockwiseOpSum(block_csr_array, B, E, C, has_idx, N, num_M_blocks, num_K_blocks, M_block_size, kernel); } #ifdef DEBUG endTick = __rdtsc(); LOG(INFO) << "stage3 ticks = " << (endTick - startTick); startTick = __rdtsc(); #endif // DEBUG SpMMFreeBlocks(block_csr_array, num_M_blocks, 
num_K_blocks, Op::use_lhs, Op::use_rhs); #ifdef DEBUG endTick = __rdtsc(); LOG(INFO) << "stage4 ticks = " << (endTick - startTick); #endif // DEBUG } /*! * \brief Optimized CPU kernel of SpMM-Sum on Csr format. * \param bcast Broadcast information. * \param csr The Csr matrix. * \param ufeat The feature on source nodes. * \param efeat The feature on edges. * \param out The result feature on destination nodes. * \note it uses libxsmm, blocking and dynamic thread scheduling. */ template <typename IdType, typename DType, typename Op> void SpMMSumCsrLibxsmm(const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out) { NDArray dummy; SpMMRedopCsrOpt<IdType, DType, Op, op::Add<DType>>(bcast, csr, ufeat, efeat, out, dummy, dummy); } /*! * \brief Optimized CPU kernel of SpMM-Min/Max on Csr format. * \param bcast Broadcast information. * \param csr The Csr matrix. * \param ufeat The feature on source nodes. * \param efeat The feature on edges. * \param out The result feature on destination nodes. * \param argu Arg-Min/Max on source nodes. * \param arge Arg-Min/Max on edges. * \note it uses libxsmm, blocking and dynamic thread scheduling. */ template <typename IdType, typename DType, typename Op, typename Cmp> void SpMMCmpCsrLibxsmm(const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, NDArray argu, NDArray arge) { SpMMRedopCsrOpt<IdType, DType, Op, Cmp>(bcast, csr, ufeat, efeat, out, argu, arge); } } // namespace cpu } // namespace aten } // namespace dgl #endif // USE_LIBXSMM #endif // USE_AVX #endif // _WIN32 #endif // DGL_ARRAY_CPU_SPMM_BLOCKING_LIBXSMM_H_
trsm_c_sky_n_hi_row_conj.c
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include <memory.h>

/*
 * Sparse triangular solve with multiple right-hand sides (TRSM) for a
 * complex skyline (SKY) matrix, non-unit diagonal, "hi" storage, using the
 * conjugated matrix entries: per right-hand-side column j it computes
 *     y[:, j] = conj-op(A)^{-1} * (alpha * x[:, j]).
 *
 * NOTE(review): the substitution below walks rows 0..m-1 forward, and row r
 * only reads already-solved entries y[c] with c < r — consistent with the
 * skyline layout used here, where A->pointers delimits the stored entries
 * of logical row r and the diagonal is stored last. Confirm against the
 * ALPHA_SPMAT_SKY format definition before modifying the traversal order.
 *
 * alpha   - scalar multiplier applied to x.
 * A       - skyline matrix (A->rows x A->rows).
 * x       - dense input, leading dimension ldx.
 * columns - number of right-hand-side columns.
 * y       - dense output, leading dimension ldy.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
    ALPHA_INT m = A->rows;
    // Conjugated diagonal of A, extracted once up front; diag[r] is the
    // divisor for row r in the substitution loop below.
    ALPHA_Complex diag[m];
    memset(diag, '\0', m * sizeof(ALPHA_Complex));
    ALPHA_INT num_thread = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    for (ALPHA_INT r = 1; r < A->rows + 1; r++)
    {
        // pointers[r] is one past the last stored element of row r-1, and
        // the last stored element of a skyline row is its diagonal.
        const ALPHA_INT indx = A->pointers[r] - 1;
        diag[r - 1].real = A->values[indx].real;
        diag[r - 1].imag = -A->values[indx].imag;  // conjugate
    }

#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
    // Right-hand-side columns are independent, so they are solved in
    // parallel; within one column rows must be processed in order because
    // row r consumes previously solved entries y[c] (c < r).
    for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
    {
        for (ALPHA_INT r = 0; r <A->rows; r++)
        {
            ALPHA_Complex temp = {.real = 0.f, .imag = 0.f};
            ALPHA_INT start = A->pointers[r];
            ALPHA_INT end = A->pointers[r + 1];
            ALPHA_INT idx = 1;
            ALPHA_INT eles_num = end - start;
            // Accumulate temp += conj(A[r,c]) * y[c] over the off-diagonal
            // stored entries of the row (all except the last, the diagonal).
            // The first stored entry corresponds to column r - (eles_num - 1).
            for (ALPHA_INT ai = start; ai < end - 1; ++ai)
            {
                ALPHA_INT c = r - eles_num + idx;
                ALPHA_Complex cv = A->values[ai];
                alpha_conj(cv, cv);
                alpha_madde(temp, cv, y[c * ldy + out_y_col]);
                idx ++;
            }
            // y[r] = (alpha * x[r] - temp) / conj(diag of row r)
            ALPHA_Complex t;
            alpha_mul(t, alpha, x[r * ldx + out_y_col]);
            alpha_sub(t, t, temp);
            alpha_div(y[r * ldy + out_y_col], t, diag[r]);
        }
    }
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
opal_test.c
// Copyright 2009-2020 Sandia Corporation. Under the terms
// of Contract DE-NA0003525 with Sandia Corporation, the U.S.
// Government retains certain rights in this software.
//
// Copyright (c) 2009-2020, Sandia Corporation
// All rights reserved.
//
// Portions are copyright of other developers:
// See the file CONTRIBUTORS.TXT in the top level directory
// the distribution for more information.
//
// This file is part of the SST software package. For license
// information, see the LICENSE file in the top level directory of the
// distribution.

#include <stdio.h>
#include <stdlib.h>

// Marker routine for the SST/Ariel tool chain; must have C linkage so the
// tracer can locate it by symbol name. Tracing begins once it is entered.
extern "C" void ariel_enable() { printf("Inside Ariel\n"); }

// Simple triad-style workload used to exercise Ariel tracing: allocate three
// arrays, compute c = 2a + 1.5b with two OpenMP threads, and print the
// reduction of c so the result is observable.
int main(int argc, char* argv[]) {
    const int LENGTH = 2000;

    ariel_enable();

    printf("Allocating arrays of size %d elements.\n", LENGTH);

    double* a = (double*) malloc(sizeof(double) * LENGTH);
    double* b = (double*) malloc(sizeof(double) * LENGTH);
    double* c = (double*) malloc(sizeof(double) * LENGTH);

    printf("Done allocating arrays.\n");

    int i;
    for(i = 0; i < LENGTH; ++i) {
        a[i] = i;
        b[i] = LENGTH - i;
        c[i] = 0;
    }

    printf("Perfoming the fast_c compute loop...\n");

    // BUGFIX: the original used "#pragma omp parallel" (no "for"), which
    // makes every thread execute the WHOLE loop while sharing the induction
    // variable `i` -- a data race on `i` and redundant writes to c[]. The
    // "parallel for" worksharing construct splits the iterations between the
    // two threads and privatizes the loop variable automatically.
    #pragma omp parallel for num_threads(2)
    for(i = 0; i < LENGTH; ++i) {
        //printf("issuing a write to: %llu (fast_c)\n", ((unsigned long long int) &fast_c[i]));
        c[i] = 2.0 * a[i] + 1.5 * b[i];
    }

    double sum = 0;
    for(i = 0; i < LENGTH; ++i) {
        sum += c[i];
    }

    printf("Sum of arrays is: %f\n", sum);
    printf("Freeing arrays...\n");

    free(a);
    free(b);
    free(c);

    printf("Done.\n");

    return 0;
}
dsacstar_derivative.h
/* Based on the DSAC++ and ESAC code. https://github.com/vislearn/LessMore https://github.com/vislearn/esac Copyright (c) 2016, TU Dresden Copyright (c) 2010, Heidelberg University All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the TU Dresden, Heidelberg University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL TU DRESDEN OR HEIDELBERG UNIVERSITY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #define PROB_THRESH 0.001 // ignore hypotheses with low probability for expectations namespace dsacstar { /** * @brief Calculates the Jacobean of the projection function w.r.t the given 3D point, ie. the function has the form 3 -> 1 * @param pt Ground truth 2D location. * @param obj 3D point. 
* @param rot Rotation in axis-angle format (OpenCV convention) * @param trans Translation vector (OpenCV convention). * @param camMat Calibration matrix of the camera. * @param maxReproj Reprojection errors are clamped to this maximum value. * @return 1x3 Jacobean matrix of partial derivatives. */ cv::Mat_<double> dProjectdObj( const cv::Point2f& pt, const cv::Point3f& obj, const cv::Mat& rot, const cv::Mat& trans, const cv::Mat& camMat, float maxReproErr) { double f = camMat.at<float>(0, 0); double ppx = camMat.at<float>(0, 2); double ppy = camMat.at<float>(1, 2); //transform point cv::Mat objMat = cv::Mat(obj); objMat.convertTo(objMat, CV_64F); objMat = rot * objMat + trans; if(std::abs(objMat.at<double>(2, 0)) < EPS) // prevent division by zero return cv::Mat_<double>::zeros(1, 3); // project double px = f * objMat.at<double>(0, 0) / objMat.at<double>(2, 0) + ppx; double py = f * objMat.at<double>(1, 0) / objMat.at<double>(2, 0) + ppy; // calculate error double err = std::sqrt((pt.x - px) * (pt.x - px) + (pt.y - py) * (pt.y - py)); // early out if projection error is above threshold if(err > maxReproErr) return cv::Mat_<double>::zeros(1, 3); err += EPS; // avoid dividing by zero // derivative in x direction of obj coordinate double pxdx = f * rot.at<double>(0, 0) / objMat.at<double>(2, 0) - f * objMat.at<double>(0, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 0); double pydx = f * rot.at<double>(1, 0) / objMat.at<double>(2, 0) - f * objMat.at<double>(1, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 0); double dx = 0.5 / err * (2 * (pt.x - px) * -pxdx + 2 * (pt.y - py) * -pydx); // derivative in y direction of obj coordinate double pxdy = f * rot.at<double>(0, 1) / objMat.at<double>(2, 0) - f * objMat.at<double>(0, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 1); double pydy = f * rot.at<double>(1, 1) / objMat.at<double>(2, 0) - f * objMat.at<double>(1, 0) / 
objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 1); double dy = 0.5 / err * (2 * (pt.x - px) * -pxdy + 2 * (pt.y - py) * -pydy); // derivative in z direction of obj coordinate double pxdz = f * rot.at<double>(0, 2) / objMat.at<double>(2, 0) - f * objMat.at<double>(0, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 2); double pydz = f * rot.at<double>(1, 2) / objMat.at<double>(2, 0) - f * objMat.at<double>(1, 0) / objMat.at<double>(2, 0) / objMat.at<double>(2, 0) * rot.at<double>(2, 2); double dz = 0.5 / err * (2 * (pt.x - px) * -pxdz + 2 * (pt.y - py) * -pydz); cv::Mat_<double> jacobean(1, 3); jacobean(0, 0) = dx; jacobean(0, 1) = dy; jacobean(0, 2) = dz; return jacobean; } /** * @brief Checks whether the given matrix contains NaN entries. * @param m Input matrix. * @return True if m contrains NaN entries. */ inline bool containsNaNs(const cv::Mat& m) { return cv::sum(cv::Mat(m != m))[0] > 0; } /** * @brief Calculates the Jacobean of the PNP function w.r.t. the scene coordinate inputs. * * PNP is treated as a n x 3 -> 6 fnuction, i.e. it takes n 3D coordinates and maps them to a 6D pose. * The Jacobean is therefore 6x3n. * The Jacobean is calculated using central differences, and hence only suitable for small point sets. * For gradients of large points sets, we use an analytical approximaten, see the backward function in dsacstar.cpp. * * @param imgPts List of 2D points. * @param objPts List of corresponding 3D points. * @param camMat Camera calibration matrix. * @param eps Step size for central differences. * @return 6x3n Jacobean matrix of partial derivatives. */ cv::Mat_<double> dPNP( const std::vector<cv::Point2f>& imgPts, std::vector<cv::Point3f> objPts, const cv::Mat& camMat, float eps = 0.001f) { int pnpMethod = (imgPts.size() == 4) ? 
cv::SOLVEPNP_P3P : cv::SOLVEPNP_ITERATIVE; //in case of P3P the 4th point is needed to resolve ambiguities, its derivative is zero int effectiveObjPoints = (pnpMethod == cv::SOLVEPNP_P3P) ? 3 : objPts.size(); cv::Mat_<double> jacobean = cv::Mat_<double>::zeros(6, objPts.size() * 3); bool success; // central differences for(int i = 0; i < effectiveObjPoints; i++) for(unsigned j = 0; j < 3; j++) { if(j == 0) objPts[i].x += eps; else if(j == 1) objPts[i].y += eps; else if(j == 2) objPts[i].z += eps; // forward step dsacstar::pose_t fStep; success = safeSolvePnP(objPts, imgPts, camMat, cv::Mat(), fStep.first, fStep.second, false, pnpMethod); if(!success) return cv::Mat_<double>::zeros(6, objPts.size() * 3); if(j == 0) objPts[i].x -= 2 * eps; else if(j == 1) objPts[i].y -= 2 * eps; else if(j == 2) objPts[i].z -= 2 * eps; // backward step dsacstar::pose_t bStep; success = safeSolvePnP(objPts, imgPts, camMat, cv::Mat(), bStep.first, bStep.second, false, pnpMethod); if(!success) return cv::Mat_<double>::zeros(6, objPts.size() * 3); if(j == 0) objPts[i].x += eps; else if(j == 1) objPts[i].y += eps; else if(j == 2) objPts[i].z += eps; // gradient calculation fStep.first = (fStep.first - bStep.first) / (2 * eps); fStep.second = (fStep.second - bStep.second) / (2 * eps); fStep.first.copyTo(jacobean.col(i * 3 + j).rowRange(0, 3)); fStep.second.copyTo(jacobean.col(i * 3 + j).rowRange(3, 6)); if(containsNaNs(jacobean.col(i * 3 + j))) return cv::Mat_<double>::zeros(6, objPts.size() * 3); } return jacobean; } /** * @brief Calculates the Jacobean matrix of the function that maps n estimated scene coordinates to a score, ie. the function has the form n x 3 -> 1. Returns one Jacobean matrix per hypothesis. * @param sceneCoordinates Scene coordinate prediction (1x3xHxW). * @param sampling Contains original image coordinate for each scene coordinate predicted. * @param sampledPoints Corresponding minimal set for each hypotheses as scene coordinate indices. 
* @param jacobeansScore (output parameter) List of Jacobean matrices. One 1 x 3n matrix per pose hypothesis.
* @param scoreOutputGradients Gradients w.r.t the score i.e. the gradients of the loss up to the soft inlier count.
* @param hyps List of RANSAC hypotheses.
* @param reproErrs Image of reprojection error for each pose hypothesis.
* @param jacobeansHyps List of jacobean matrices with derivatives of the 6D pose wrt. the reprojection errors.
float inlierBeta = 5 / inlierThreshold; // collect 2d-3D correspondences std::vector<std::vector<cv::Point2f>> imgPts(hypCount); std::vector<std::vector<cv::Point3f>> objPts(hypCount); #pragma omp parallel for for(int h = 0; h < hypCount; h++) { if(hypProbs[h] < PROB_THRESH) continue; int batchIdx = 0; // ony batch size = 1 supported atm for(unsigned i = 0; i < sampledPoints[h].size(); i++) { int x = sampledPoints[h][i].x; int y = sampledPoints[h][i].y; imgPts[h].push_back(sampling(y, x)); objPts[h].push_back(cv::Point3f( sceneCoordinates[batchIdx][0][y][x], sceneCoordinates[batchIdx][1][y][x], sceneCoordinates[batchIdx][2][y][x])); } } // derivatives of the soft inlier scores std::vector<cv::Mat_<double>> dReproErrs(reproErrs.size()); #pragma omp parallel for for(int h = 0; h < hypCount; h++) { if(hypProbs[h] < PROB_THRESH) continue; dReproErrs[h] = cv::Mat_<double>::zeros(reproErrs[h].size()); for(int x = 0; x < sampling.cols; x++) for(int y = 0; y < sampling.rows; y++) { double softThreshold = inlierBeta * (reproErrs[h](y, x) - inlierThreshold); softThreshold = 1 / (1+std::exp(-softThreshold)); dReproErrs[h](y, x) = -softThreshold * (1 - softThreshold) * inlierBeta * scoreOutputGradients[h]; } dReproErrs[h] *= inlierAlpha / dReproErrs[h].cols / dReproErrs[h].rows; } jacobeansScore.resize(hypCount); // derivative of the loss wrt the score #pragma omp parallel for for(int h = 0; h < hypCount; h++) { cv::Mat_<double> jacobean = cv::Mat_<double>::zeros(1, sampling.cols * sampling.rows * 3); jacobeansScore[h] = jacobean; if(hypProbs[h] < PROB_THRESH) continue; int batchIdx = 0; // ony batch size = 1 supported atm // accumulate derivate of score wrt the scene coordinates that are used to calculate the pose cv::Mat_<double> supportPointGradients = cv::Mat_<double>::zeros(1, 12); cv::Mat_<double> dHdO = dPNP(imgPts[h], objPts[h], camMat); // 6x12 if(dsacstar::getMax(dHdO) > 10) dHdO = 0; // clamping for stability cv::Mat rot; cv::Rodrigues(hyps[h].first, rot); for(int x 
= 0; x < sampling.cols; x++) for(int y = 0; y < sampling.rows; y++) { int ptIdx = x * dReproErrs[h].rows + y; cv::Point2f pt(sampling(y, x).x, sampling(y, x).y); cv::Point3f obj = cv::Point3f( sceneCoordinates[batchIdx][0][y][x], sceneCoordinates[batchIdx][1][y][x], sceneCoordinates[batchIdx][2][y][x]); // account for the direct influence of all scene coordinates in the score cv::Mat_<double> dPdO = dProjectdObj(pt, obj, rot, hyps[h].second, camMat, maxReproErr); dPdO *= dReproErrs[h](y, x); dPdO.copyTo(jacobean.colRange(x * dReproErrs[h].rows * 3 + y * 3, x * dReproErrs[h].rows * 3 + y * 3 + 3)); // account for the indirect influence of the scene coorindates that are used to calculate the pose cv::Mat_<double> dPdH = jacobeansHyps[h].row(ptIdx); supportPointGradients += dReproErrs[h](y, x) * dPdH * dHdO; } // add the accumulated derivatives for the scene coordinates that are used to calculate the pose for(unsigned i = 0; i < sampledPoints[h].size(); i++) { unsigned x = sampledPoints[h][i].x; unsigned y = sampledPoints[h][i].y; jacobean.colRange(x * dReproErrs[h].rows * 3 + y * 3, x * dReproErrs[h].rows * 3 + y * 3 + 3) += supportPointGradients.colRange(i * 3, i * 3 + 3); } } } /** * @brief Calculates the Jacobean matrix of the function that maps n estimated scene coordinates to a soft max score, ie. the function has the form n x 3 -> 1. Returns one Jacobean matrix per hypothesis. * * This is the Soft maxed version of dScore (see above). * * @param sceneCoordinates Scene coordinate prediction (1x3xHxW). * @param sampling Contains original image coordinate for each scene coordinate predicted. * @param sampledPoints Corresponding minimal set for each hypotheses as scene coordinate indices. * @param losses Loss value for each hypothesis. * @param hypProbs Selection probabilities over all hypotheses. * @paran initHyps List of unrefined hypotheses. * @paran initReproErrs List of reprojection error images of unrefined hypotheses. 
* @param jacobeansHyps List of jacobean matrices with derivatives of the 6D pose wrt. the reprojection errors.
jacobeansScore[i].colRange( x * sampling.rows * 3 + y * 3, x * sampling.rows * 3 + y * 3 + 3); patchGrad.copyTo(reformat.row(y * sampling.cols + x)); } } jacobeansScore[i] = reformat; } return jacobeansScore; } /** * @brief Calculates the Jacobean of the transform function w.r.t the given 3D point, ie. the function has the form 3 -> 1 * @param pt Ground truth 3D location in camera coordinates. * @param obj 3D point. * @param hyp Pose estimate. * @param maxDist Distance errors are clamped to this maximum value. * @return 1x3 Jacobean matrix of partial derivatives. */ cv::Mat_<double> dTransformdObj( const cv::Point3f& pt, const cv::Point3f& obj, const dsacstar::pose_t& hyp, float maxDist) { //transform point cv::Mat objMat = cv::Mat(obj); objMat.convertTo(objMat, CV_64F); cv::Mat rot; cv::Rodrigues(hyp.first, rot); objMat = rot * objMat + hyp.second; cv::Point3d objPt(objMat.at<double>(0, 0), objMat.at<double>(1, 0), objMat.at<double>(2, 0)); // calculate error double err = std::sqrt((pt.x - objPt.x) * (pt.x - objPt.x) + (pt.y - objPt.y) * (pt.y - objPt.y) + (pt.z - objPt.z) * (pt.z - objPt.z)); // early out if projection error is above threshold if(err*100 > maxDist) return cv::Mat_<double>::zeros(1, 3); err += EPS; // avoid dividing by zero // derivative in x direction of obj coordinate double dx = 0.5 / err * (2 * (pt.x - objPt.x) * -rot.at<double>(0, 0) + 2 * (pt.y - objPt.y) * -rot.at<double>(1, 0) + 2 * (pt.z - objPt.z) * -rot.at<double>(2, 0)); // derivative in x direction of obj coordinate double dy = 0.5 / err * (2 * (pt.x - objPt.x) * -rot.at<double>(0, 1) + 2 * (pt.y - objPt.y) * -rot.at<double>(1, 1) + 2 * (pt.z - objPt.z) * -rot.at<double>(2, 1)); // derivative in x direction of obj coordinate double dz = 0.5 / err * (2 * (pt.x - objPt.x) * -rot.at<double>(0, 2) + 2 * (pt.y - objPt.y) * -rot.at<double>(1, 2) + 2 * (pt.z - objPt.z) * -rot.at<double>(2, 2)); cv::Mat_<double> jacobean(1, 3); jacobean(0, 0) = dx; jacobean(0, 1) = dy; jacobean(0, 2) = dz; 
return jacobean; } /** * @brief Calculates the Jacobean of the transform function w.r.t the given 6D pose, ie. the function has the form 6 -> 1 * @param pt Ground truth 3D location in camera coordinate. * @param obj 3D point. * @param hyp Pose estimate. * @param maxDist Distance errors are clamped to this maximum value. * @return 1x6 Jacobean matrix of partial derivatives. */ cv::Mat_<double> dTransformdHyp( const cv::Point3f& pt, const cv::Point3f& obj, const dsacstar::pose_t& hyp, float maxDist) { //transform point cv::Mat objMat = cv::Mat(obj); objMat.convertTo(objMat, CV_64F); cv::Mat rot, dRdH; cv::Rodrigues(hyp.first, rot, dRdH); dRdH = dRdH.t(); cv::Mat eyeMat = rot * objMat + hyp.second; cv::Point3d eyePt(eyeMat.at<double>(0, 0), eyeMat.at<double>(1, 0), eyeMat.at<double>(2, 0)); // calculate error double err = std::sqrt((pt.x - eyePt.x) * (pt.x - eyePt.x) + (pt.y - eyePt.y) * (pt.y - eyePt.y) + (pt.z - eyePt.z) * (pt.z - eyePt.z)); // early out if projection error is above threshold if(err * 100 > maxDist) return cv::Mat_<double>::zeros(1, 6); err += EPS; // avoid dividing by zero // derivative of the error wrt to transformation cv::Mat_<double> dNdTf = cv::Mat_<double>::zeros(1, 3); dNdTf(0, 0) = -1 / err * (pt.x - eyePt.x); dNdTf(0, 1) = -1 / err * (pt.y - eyePt.y); dNdTf(0, 2) = -1 / err * (pt.z - eyePt.z); // derivative of transformation function wrt rotation matrix cv::Mat_<double> dTfdR = cv::Mat_<double>::zeros(3, 9); dTfdR.row(0).colRange(0, 3) = objMat.t(); dTfdR.row(1).colRange(3, 6) = objMat.t(); dTfdR.row(2).colRange(6, 9) = objMat.t(); // combined derivative of the error wrt the rodriguez vector cv::Mat_<double> dNdH = dNdTf * dTfdR * dRdH; // derivative of transformation wrt the translation vector cv::Mat_<double> dTfdT = cv::Mat_<double>::eye(3, 3); // combined derivative of error wrt the translation vector cv::Mat_<double> dNdT = dNdTf * dTfdT; cv::Mat_<double> jacobean(1, 6); dNdH.copyTo(jacobean.colRange(0, 3)); 
dNdT.copyTo(jacobean.colRange(3, 6)); return jacobean; } /** * @brief Calculates the Jacobean matrix of the function that maps n estimated scene coordinates to a score, ie. the function has the form n x 3 -> 1. Returns one Jacobean matrix per hypothesis. RGBD version. * @param sceneCoordinates Scene coordinate prediction (1x3xHxW). * @param cameraCoordinates Camera coordinates calculated from measured depth, same format and size as scene coordinates. * @param validPts A list of valid 2D image positions where camera coordinates / measured depth exists. * @param sampledPoints Corresponding minimal set for each hypotheses as scene coordinate indices. * @param jacobeansScore (output paramter) List of Jacobean matrices. One 1 x 3n matrix per pose hypothesis. * @param scoreOutputGradients Gradients w.r.t the score i.e. the gradients of the loss up to the soft inlier count. * @param hyps List of RANSAC hypotheses. * @param distErrs Image of 3D distance error for each pose hypothesis. * @param hypProbs Selection probabilities over all hypotheses. * @param inlierAlpha Alpha parameter for soft inlier counting. * @param inlierThreshold RANSAC inlier threshold. * @param maxDistErr Distance errors are clamped to this maximum value. */ void dScoreRGBD( dsacstar::coord_t& sceneCoordinates, dsacstar::coord_t& cameraCoordinates, const std::vector<cv::Point2i>& validPts, const std::vector<std::vector<cv::Point2i>>& sampledPoints, std::vector<cv::Mat_<double>>& jacobeansScore, const std::vector<double>& scoreOutputGradients, const std::vector<dsacstar::pose_t>& hyps, const std::vector<cv::Mat_<float>>& distErrs, const std::vector<double>& hypProbs, float inlierAlpha, float inlierThreshold, float maxDistErr) { int imH = sceneCoordinates.size(2); int imW = sceneCoordinates.size(3); int hypCount = sampledPoints.size(); // beta parameter for soft inlier counting. 
float inlierBeta = 5 / inlierThreshold; int batchIdx = 0; // ony batch size = 1 supported atm // collect 2d-3D correspondences std::vector<std::vector<cv::Point3f>> eyePts(hypCount); std::vector<std::vector<cv::Point3f>> objPts(hypCount); #pragma omp parallel for for(int h = 0; h < hypCount; h++) { if(hypProbs[h] < PROB_THRESH) continue; for(unsigned i = 0; i < sampledPoints[h].size(); i++) { int x = sampledPoints[h][i].x; int y = sampledPoints[h][i].y; eyePts[h].push_back(cv::Point3f( cameraCoordinates[batchIdx][0][y][x], cameraCoordinates[batchIdx][1][y][x], cameraCoordinates[batchIdx][2][y][x])); objPts[h].push_back(cv::Point3f( sceneCoordinates[batchIdx][0][y][x], sceneCoordinates[batchIdx][1][y][x], sceneCoordinates[batchIdx][2][y][x])); } } // derivatives of the soft inlier scores std::vector<cv::Mat_<double>> dDistErrs(distErrs.size()); #pragma omp parallel for for(int h = 0; h < hypCount; h++) { if(hypProbs[h] < PROB_THRESH) continue; dDistErrs[h] = cv::Mat_<double>::zeros(distErrs[h].size()); for(unsigned ptIdx = 0; ptIdx < validPts.size(); ptIdx++) { int x = validPts[ptIdx].x; int y = validPts[ptIdx].y; double softThreshold = inlierBeta * (distErrs[h](y, x) - inlierThreshold); softThreshold = 1 / (1+std::exp(-softThreshold)); dDistErrs[h](y, x) = -softThreshold * (1 - softThreshold) * inlierBeta * scoreOutputGradients[h]; } dDistErrs[h] *= inlierAlpha / dDistErrs[h].cols / dDistErrs[h].rows; } jacobeansScore.resize(hypCount); // derivative of the loss wrt the score #pragma omp parallel for for(int h = 0; h < hypCount; h++) { if(hypProbs[h] < PROB_THRESH) continue; cv::Mat_<double> jacobean = cv::Mat_<double>::zeros(1, imW * imH * 3); jacobeansScore[h] = jacobean; // accumulate derivate of score wrt the scene coordinates that are used to calculate the pose cv::Mat_<double> supportPointGradients = cv::Mat_<double>::zeros(1, 9); cv::Mat_<double> dHdO; dsacstar::pose_t cvHyp; kabsch(eyePts[h], objPts[h], cvHyp, dHdO); if (dHdO.empty()) dKabschFD(eyePts[h], 
objPts[h], dHdO); if(dsacstar::getMax(dHdO) > 10) dHdO = 0; // clamping for stability for(unsigned ptIdx = 0; ptIdx < validPts.size(); ptIdx++) { int x = validPts[ptIdx].x; int y = validPts[ptIdx].y; cv::Point3f eye = cv::Point3f( cameraCoordinates[batchIdx][0][y][x], cameraCoordinates[batchIdx][1][y][x], cameraCoordinates[batchIdx][2][y][x]); cv::Point3f obj = cv::Point3f( sceneCoordinates[batchIdx][0][y][x], sceneCoordinates[batchIdx][1][y][x], sceneCoordinates[batchIdx][2][y][x]); // account for the direct influence of all scene coordinates in the score cv::Mat_<double> dPdO = dTransformdObj(eye, obj, hyps[h], maxDistErr); dPdO *= dDistErrs[h](y, x); dPdO.copyTo(jacobean.colRange(x * dDistErrs[h].rows * 3 + y * 3, x * dDistErrs[h].rows * 3 + y * 3 + 3)); // account for the indirect influence of the scene coorindates that are used to calculate the pose cv::Mat_<double> dPdH = dTransformdHyp(eye, obj, hyps[h], maxDistErr); supportPointGradients += dDistErrs[h](y, x) * dPdH * dHdO; } // add the accumulated derivatives for the scene coordinates that are used to calculate the pose for(unsigned i = 0; i < sampledPoints[h].size(); i++) { unsigned x = sampledPoints[h][i].x; unsigned y = sampledPoints[h][i].y; jacobean.colRange(x * dDistErrs[h].rows * 3 + y * 3, x * dDistErrs[h].rows * 3 + y * 3 + 3) += supportPointGradients.colRange(i * 3, i * 3 + 3); } } } /** * @brief Calculates the Jacobean matrix of the function that maps n estimated scene coordinates to a soft max score, ie. the function has the form n x 3 -> 1. Returns one Jacobean matrix per hypothesis. RGB-D version. * * This is the Soft maxed version of dScoreRGBD (see above). * * @param sceneCoordinates Scene coordinate prediction (1x3xHxW). * @param cameraCoordinates Camera coordinates calculated from measured depth, same format and size as scene coordinates. * @param validPts A list of valid 2D image positions where camera coordinates / measured depth exists. 
* @param sampledPoints Corresponding minimal set for each hypotheses as scene coordinate indices. * @param losses Loss value for each hypothesis. * @param hypProbs Selection probabilities over all hypotheses. * @paran initHyps List of unrefined hypotheses. * @paran initDistErrs List of 3D distance error images of unrefined hypotheses. * @param inlierAlpha Alpha parameter for soft inlier counting. * @param inlierThreshold RANSAC inlier threshold. * @param maxDistErr Clamp distance error with this value. * @return List of Jacobean matrices. One 1 x 3n matrix per pose hypothesis. */ std::vector<cv::Mat_<double>> dSMScoreRGBD( dsacstar::coord_t& sceneCoordinates, dsacstar::coord_t& cameraCoordinates, const std::vector<cv::Point2i>& validPts, const std::vector<std::vector<cv::Point2i>>& sampledPoints, const std::vector<double>& losses, const std::vector<double>& hypProbs, const std::vector<dsacstar::pose_t>& initHyps, const std::vector<cv::Mat_<float>>& initDistErrs, float inlierAlpha, float inlierThreshold, float maxDistErr) { int imH = sceneCoordinates.size(2); int imW = sceneCoordinates.size(3); // assemble the gradients wrt the scores, ie the gradients of soft max function std::vector<double> scoreOutputGradients(sampledPoints.size()); #pragma omp parallel for for(unsigned i = 0; i < sampledPoints.size(); i++) { if(hypProbs[i] < PROB_THRESH) continue; scoreOutputGradients[i] = hypProbs[i] * losses[i]; for(unsigned j = 0; j < sampledPoints.size(); j++) scoreOutputGradients[i] -= hypProbs[i] * hypProbs[j] * losses[j]; } // calculate gradients of the score function std::vector<cv::Mat_<double>> jacobeansScore; dScoreRGBD( sceneCoordinates, cameraCoordinates, validPts, sampledPoints, jacobeansScore, scoreOutputGradients, initHyps, initDistErrs, hypProbs, inlierAlpha, inlierThreshold, maxDistErr); // data conversion #pragma omp parallel for for(unsigned i = 0; i < jacobeansScore.size(); i++) { // reorder to points row first into rows cv::Mat_<double> reformat = 
cv::Mat_<double>::zeros(imW * imH, 3); if(hypProbs[i] >= PROB_THRESH) { for(unsigned ptIdx = 0; ptIdx < validPts.size(); ptIdx++) { int x = validPts[ptIdx].x; int y = validPts[ptIdx].y; cv::Mat_<double> patchGrad = jacobeansScore[i].colRange( x * imH * 3 + y * 3, x * imH * 3 + y * 3 + 3); patchGrad.copyTo(reformat.row(y * imW + x)); } } jacobeansScore[i] = reformat; } return jacobeansScore; } }
opencl_krb5pa-sha1_fmt_plug.c
/* * Kerberos 5 "PA ENC TIMESTAMP" by magnum & Dhiru * * Pcap file -> input file: * 1. tshark -r capture.pcapng -T pdml > ~/capture.pdml * 2. krbng2john.py ~/capture.pdml > krb5.in * 3. Run john on krb5.in * * http://www.ietf.org/rfc/rfc4757.txt * http://www.securiteam.com/windowsntfocus/5BP0H0A6KM.html * * Input format is 'user:$krb5pa$etype$user$realm$salt$timestamp+checksum' * * NOTE: Checksum implies last 12 bytes of PA_ENC_TIMESTAMP value in AS-REQ * packet. * * Default Salt: realm + user * * AES-256 encryption & decryption of AS-REQ timestamp in Kerberos v5 * See the following RFC for more details about the crypto & algorithms used: * * RFC3961 - Encryption and Checksum Specifications for Kerberos 5 * RFC3962 - Advanced Encryption Standard (AES) Encryption for Kerberos 5 * * march 09 / kevin devine <wyse101 0x40 gmail.com> * * This software is Copyright (c) 2012 magnum, and it is hereby released to the * general public under the following terms: Redistribution and use in source * and binary forms, with or without modification, are permitted. 
* * This software is Copyright (c) 2012 Dhiru Kholia (dhiru at openwall.com) and * released under same terms as above */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_krb5pa_sha1; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_krb5pa_sha1); #else #include <errno.h> #include <string.h> #include <stdlib.h> #include <ctype.h> #include "arch.h" #include "misc.h" #include "formats.h" #include "options.h" #include "common.h" #include "unicode.h" #include "config.h" #include "aes.h" #include "common-opencl.h" #define OUTLEN 32 #include "opencl_pbkdf2_hmac_sha1.h" #include "hmac_sha.h" #include "loader.h" #define FORMAT_LABEL "krb5pa-sha1-opencl" #define FORMAT_NAME "Kerberos 5 AS-REQ Pre-Auth etype 17/18" /* aes-cts-hmac-sha1-96 */ #define FORMAT_TAG "$krb5pa$" #define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1) #define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL" #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1001 #define BINARY_SIZE 12 #define BINARY_ALIGN 4 #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN 1 #define MAX_SALTLEN 52 #define MAX_REALMLEN MAX_SALTLEN #define MAX_USERLEN MAX_SALTLEN #define TIMESTAMP_SIZE 44 #define CHECKSUM_SIZE BINARY_SIZE #define TOTAL_LENGTH (14 + 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) + MAX_REALMLEN + MAX_USERLEN + MAX_SALTLEN) #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 /* This handles all sizes */ #define GETPOS(i, index) (((index) % ocl_v_width) * 4 + ((i) & ~3U) * ocl_v_width + (((i) & 3) ^ 3) + ((index) / ocl_v_width) * 64 * ocl_v_width) /* This is faster but can't handle size 3 */ //#define GETPOS(i, index) (((index) & (ocl_v_width - 1)) * 4 + ((i) & ~3U) * ocl_v_width + (((i) & 3) ^ 3) + ((index) / ocl_v_width) * 64 * ocl_v_width) static struct fmt_tests tests[] = { {"$krb5pa$18$user1$EXAMPLE.COM$$2a0e68168d1eac344da458599c3a2b33ff326a061449fcbc242b212504e484d45903c6a16e2d593912f56c93883bf697b325193d62a8be9c", "openwall"}, 
{"$krb5pa$18$user1$EXAMPLE.COM$$a3918bd0381107feedec8db0022bdf3ac56e534ed54d13c62a7013a47713cfc31ef4e7e572f912fa4164f76b335e588bf29c2d17b11c5caa", "openwall"}, {"$krb5pa$18$l33t$EXAMPLE.COM$$98f732b309a1d7ef2355a974842a32894d911e97150f5d57f248e1c2632fbd3735c5f156532ccae0341e6a2d779ca83a06021fe57dafa464", "openwall"}, {"$krb5pa$18$aduser$AD.EXAMPLE.COM$$64dfeee04be2b2e0423814e0df4d0f960885aca4efffe6cb5694c4d34690406071c4968abd2c153ee42d258c5e09a41269bbcd7799f478d3", "password@123"}, {"$krb5pa$18$aduser$AD.EXAMPLE.COM$$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"}, {"$krb5pa$18$aduser$AD.EXAMPLE.COM$AD.EXAMPLE.COMaduser$f94f755a8b4493d925094a4eb1cec630ac40411a14c9733a853516fe426637d9daefdedc0567e2bb5a83d4f89a0ad1a4b178662b6106c0ff", "password@12345678"}, /* etype 17 hash obtained using MiTM etype downgrade attack */ {"$krb5pa$17$user1$EXAMPLE.COM$$c5461873dc13665771b98ba80be53939e906d90ae1ba79cf2e21f0395e50ee56379fbef4d0298cfccfd6cf8f907329120048fd05e8ae5df4", "openwall"}, {NULL}, }; static cl_mem mem_in, mem_out, mem_salt, mem_state, pinned_in, pinned_out; static cl_kernel pbkdf2_init, pbkdf2_loop, pbkdf2_final; static struct fmt_main *self; static struct custom_salt { int type; int etype; unsigned char realm[64]; unsigned char user[64]; unsigned char salt[64]; /* realm + user */ unsigned char ct[TIMESTAMP_SIZE]; } *cur_salt; static unsigned char constant[16]; static unsigned char ke_input[16]; static unsigned char ki_input[16]; static size_t key_buf_size; static unsigned int *inbuffer; static pbkdf2_salt currentsalt; static pbkdf2_out *output; static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)]; static int new_keys; #define ITERATIONS (4096 - 1) #define HASH_LOOPS 105 // Must be made from factors 3, 3, 5, 7, 13 #define STEP 0 #define SEED 128 static const char * warn[] = { "P xfer: ", ", init: ", ", loop: ", ", inter: ", ", final: ", ", res xfer: " }; static 
int split_events[] = { 2, -1, -1 };

// This file contains auto-tuning routine(s). Has to be included after
// the format's tables above (warn[], split_events[]) which it references.
#include "opencl-autotune.h"
#include "memdbg.h"

/* ------- Helper functions ------- */

/*
 * Upper bound for the local work-group size: the minimum of what the
 * three PBKDF2 kernels each support on this device.
 */
static size_t get_task_max_work_group_size()
{
	size_t s;

	s = autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_init);
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_loop));
	s = MIN(s, autotune_get_task_max_work_group_size(FALSE, 0, pbkdf2_final));
	return s;
}

#if 0
struct fmt_main *me;
#endif

/*
 * Allocate device buffers and page-locked host mappings sized for 'gws'
 * work items, and bind them to the kernels' argument slots.  Called
 * (possibly repeatedly) by the auto-tune machinery.
 */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	gws *= ocl_v_width;       /* scale to scalar items when kernels are vectorized */
	key_buf_size = 64 * gws;  /* one 64-byte key block per scalar work item */

	/// Allocate memory
	pinned_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_ALLOC_HOST_PTR, key_buf_size, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating pinned in");
	mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, key_buf_size, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem in");
	/* inbuffer is the host-visible window where set_key() deposits candidates */
	inbuffer = clEnqueueMapBuffer(queue[gpu_id], pinned_in, CL_TRUE, CL_MAP_READ | CL_MAP_WRITE, 0, key_buf_size, 0, NULL, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping page-locked memory");

	/* Per-item PBKDF2 intermediate state kept resident on the device */
	mem_state = clCreateBuffer(context[gpu_id], CL_MEM_READ_WRITE, sizeof(pbkdf2_state) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem_state");

	mem_salt = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR, sizeof(pbkdf2_salt), &currentsalt, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem setting");

	pinned_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY | CL_MEM_ALLOC_HOST_PTR, sizeof(pbkdf2_out) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating pinned out");
	mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, sizeof(pbkdf2_out) * gws, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error allocating mem out");
	/* output is where crypt_all() reads the derived PBKDF2 key material back */
	output = clEnqueueMapBuffer(queue[gpu_id], pinned_out, CL_TRUE, CL_MAP_READ, 0, sizeof(pbkdf2_out) * gws, 0, NULL, NULL, &ret_code);
	HANDLE_CLERROR(ret_code, "Error mapping page-locked memory");

	/* Wire the buffers to the kernels' fixed argument slots. */
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 1, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_init, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_loop, 0, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");

	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 0, sizeof(mem_salt), &mem_salt), "Error while setting mem_salt kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(pbkdf2_final, 2, sizeof(mem_state), &mem_state), "Error while setting mem_state kernel argument");

	/* Host-side array holding the final 96-bit HMAC-SHA-1 checksums */
	crypt_out = mem_alloc(sizeof(*crypt_out) * gws);
}

/*
 * Undo create_clobj().  crypt_out doubles as the "was anything allocated"
 * flag, so calling this twice (or before any allocation) is safe.
 */
static void release_clobj(void)
{
	if (crypt_out) {
		HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_in, inbuffer, 0, NULL, NULL), "Error Unmapping mem in");
		HANDLE_CLERROR(clEnqueueUnmapMemObject(queue[gpu_id], pinned_out, output, 0, NULL, NULL), "Error Unmapping mem in");
		/* Unmaps are enqueued asynchronously; wait before releasing the objects */
		HANDLE_CLERROR(clFinish(queue[gpu_id]), "Error releasing memory mappings");

		HANDLE_CLERROR(clReleaseMemObject(pinned_in), "Release pinned_in");
		HANDLE_CLERROR(clReleaseMemObject(pinned_out), "Release pinned_out");
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release pinned_in");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem_out");
		HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem_salt");
		HANDLE_CLERROR(clReleaseMemObject(mem_state), "Release mem state");

		MEM_FREE(crypt_out);
	}
}

/* Format teardown: release buffers, kernels and the program object. */
static void done(void)
{
	if (autotuned) {
		release_clobj();

		HANDLE_CLERROR(clReleaseKernel(pbkdf2_init), "Release Kernel");
		HANDLE_CLERROR(clReleaseKernel(pbkdf2_loop), "Release Kernel");
		HANDLE_CLERROR(clReleaseKernel(pbkdf2_final), "Release Kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");

		autotuned--;
	}
}

/* n-fold(k-bits):
 * l = lcm(n,k)
 * r = l/k
 * s = k-bits | k-bits rot 13 | k-bits rot 13*2 | ... | k-bits rot 13*(r-1)
 * compute the 1's complement sum:
 * n-fold = s[0..n-1]+s[n..2n-1]+s[2n..3n-1]+..+s[(k-1)*n..k*n-1]
 */
/* representation: msb first, assume n and k are multiples of 8, and
 * that k>=16. this is the case of all the cryptosystems which are
 * likely to be used. this function can be replaced if that
 * assumption ever fails.
 */
/* input length is in bits; the n-fold operation is specified in RFC 3961 */
static void nfold(unsigned int inbits, const unsigned char *in, unsigned int outbits,unsigned char *out)
{
	int a,b,c,lcm;
	int byte, i, msbit;

	/* the code below is more readable if I make these bytes
	 * instead of bits */
	inbits >>= 3;
	outbits >>= 3;

	/* first compute lcm(n,k): Euclid's algorithm for gcd, then n*k/gcd */
	a = outbits;
	b = inbits;

	while (b != 0) {
		c = b;
		b = a % b;
		a = c;
	}

	lcm = outbits*inbits/a;

	/* now do the real work */
	memset(out, 0, outbits);
	byte = 0;  /* running one's-complement sum, carries accumulate in high bits */

	/* this will end up cycling through k lcm(k,n)/k times, which
	 * is correct */
	for (i = lcm - 1; i >= 0; i--) {
		/* compute the msbit in k which gets added into this byte */
		msbit = (/* first, start with the msbit in the first, unrotated byte */
		         ((inbits << 3) - 1)
		         /* then, for each byte, shift to the right for each
		          * repetition */
		         +(((inbits << 3) + 13) * (i / inbits))
		         /* last, pick out the correct byte within that
		          * shifted repetition */
		         +((inbits - (i % inbits)) << 3)
		        ) % (inbits << 3);

		/* pull out the byte value itself */
		byte += (((in[((inbits - 1) - (msbit >> 3)) % inbits] << 8)|
		          (in[((inbits) - (msbit>>3)) % inbits]))
		         >>((msbit & 7) + 1)) & 0xff;

		/* do the addition */
		byte += out[i % outbits];
		out[i % outbits] = byte & 0xff;

		/* keep around the carry bit, if any */
		byte >>= 8;
	}

	/* if there's a carry bit left over, add it back in */
	if (byte) {
		for (i = outbits - 1; i >= 0; i--) {
			/* do the addition */
			byte += out[i];
			out[i] = byte & 0xff;

			/* keep around the carry bit, if any */
			byte >>= 8;
		}
	}
}

/*
 * One-time format setup: pick a vector width for the device and
 * precompute the three n-fold constants used by the DK key-derivation
 * steps in crypt_all() (RFC 3961/3962).
 */
static void init(struct fmt_main *_self)
{
	unsigned char usage[5];
	static char valgo[sizeof(ALGORITHM_NAME) + 8] = "";

	self = _self;

	opencl_prepare_dev(gpu_id);
	/* VLIW5 does better with just 2x vectors due to GPR pressure */
	if (!options.v_width && amd_vliw5(device_info[gpu_id]))
		ocl_v_width = 2;
	else
		ocl_v_width = opencl_get_vector_width(gpu_id, sizeof(cl_int));

	if (ocl_v_width > 1) {
		/* Run vectorized kernel; advertise the width in the algorithm name */
		snprintf(valgo, sizeof(valgo),
		         ALGORITHM_NAME " %ux", ocl_v_width);
		self->params.algorithm_name = valgo;
	}

	// generate 128 bits from the 64-bit string "kerberos"
	nfold(8 * 8, (unsigned char*)"kerberos", 128, constant);

	memset(usage,0,sizeof(usage));
	usage[3] = 0x01;  // key number in big-endian format
	usage[4] = 0xAA;  // used to derive Ke
	nfold(sizeof(usage)*8,usage,sizeof(ke_input)*8,ke_input);

	memset(usage,0,sizeof(usage));
	usage[3] = 0x01;  // key number in big-endian format
	usage[4] = 0x55;  // used to derive Ki
	nfold(sizeof(usage)*8,usage,sizeof(ki_input)*8,ki_input);
}

/*
 * Per-run setup: build the kernels with the tuning constants baked in,
 * then let the shared auto-tune code pick work sizes.  Runs only once
 * (guarded by 'autotuned').
 */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[128];

		snprintf(build_opts, sizeof(build_opts),
		         "-DHASH_LOOPS=%u -DITERATIONS=%u -DOUTLEN=%u "
		         "-DPLAINTEXT_LENGTH=%u -DV_WIDTH=%u",
		         HASH_LOOPS, ITERATIONS, OUTLEN, PLAINTEXT_LENGTH, ocl_v_width);
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_kernel.cl", gpu_id, build_opts);

		pbkdf2_init = clCreateKernel(program[gpu_id], "pbkdf2_init", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");
		crypt_kernel = pbkdf2_loop = clCreateKernel(program[gpu_id], "pbkdf2_loop", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");
		pbkdf2_final = clCreateKernel(program[gpu_id], "pbkdf2_final", &ret_code);
		HANDLE_CLERROR(ret_code, "Error creating kernel");

		//Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 2 * HASH_LOOPS, split_events, warn,
		                       2, self, create_clobj, release_clobj,
		                       ocl_v_width * sizeof(pbkdf2_state), 0, db);

		//Auto tune execution from shared/included code.
		autotune_run(self, 4 * ITERATIONS + 4, 0,
		             (cpu(device_info[gpu_id]) ? 1000000000 : 5000000000ULL));
	}
}

/*
 * Syntax check for one candidate hash line:
 *   $krb5pa$<etype>$<user>$<realm>$<salt>$<hex timestamp+checksum>
 * Returns 1 if the line is well-formed and within our length limits.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *p, *data = ciphertext;
	int type, saltlen = 0;

	// tag is mandatory
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
		return 0;
	data += FORMAT_TAG_LEN;

	// etype field, 17 or 18 (aes128/aes256-cts-hmac-sha1-96)
	p = strchr(data, '$');
	if (!p || p - data != 2)
		return 0;
	type = atoi(data);
	if (type < 17 || type > 18)
		return 0;
	data = p + 1;

	// user field
	p = strchr(data, '$');
	if (!p || p - data > MAX_USERLEN)
		return 0;
	saltlen += p - data;
	data = p + 1;

	// realm field
	p = strchr(data, '$');
	if (!p || p - data > MAX_REALMLEN)
		return 0;
	saltlen += p - data;
	data = p + 1;

	// salt field
	p = strchr(data, '$');
	if (!p)
		return 0;
	// if salt is empty, realm.user is used instead
	if (p - data)
		saltlen = p - data;
	data = p + 1;

	// We support a max. total salt length of 52.
	// We could opt to emit a warning if rejected here.
	if(saltlen > MAX_SALTLEN) {
		static int warned = 0;

		/* warn once per run, and never while reading the pot file */
		if (!ldr_in_pot)
		if (!warned++)
			fprintf(stderr, "%s: One or more hashes rejected due to salt length limitation\n", FORMAT_LABEL);

		return 0;
	}

	// 56 bytes (112 hex chars) encrypted timestamp + checksum
	if (strlen(data) != 2 * (TIMESTAMP_SIZE + CHECKSUM_SIZE) ||
	    strspn(data, HEXCHARS_all) != strlen(data))
		return 0;

	return 1;
}

/*
 * Parse a (valid) ciphertext into our struct custom_salt.  The PBKDF2
 * salt is the salt field if present, otherwise realm concatenated with
 * user; the hex timestamp+checksum blob is decoded into cs.ct.
 * Returns a pointer to a static buffer (standard JtR get_salt contract).
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	int i;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN;
	p = strtokm(ctcopy, "$");
	cs.etype = atoi(p);
	p = strtokm(NULL, "$");
	/* NOTE(review): p[-1] == '$' looks like it detects an empty field
	 * skipped by strtokm (the char just before the returned token is a
	 * field separator) — confirm against strtokm() semantics. */
	if (p[-1] == '$')
		cs.user[0] = 0;
	else {
		strcpy((char*)cs.user, p);
		p = strtokm(NULL, "$");
	}
	if (p[-1] == '$')
		cs.realm[0] = 0;
	else {
		strcpy((char*)cs.realm, p);
		p = strtokm(NULL, "$");
	}
	if (p[-1] == '$') {
		/* empty salt field: default salt is realm + user */
		strcpy((char*)cs.salt, (char*)cs.realm);
		strcat((char*)cs.salt, (char*)cs.user);
	} else {
		strcpy((char*)cs.salt, p);
		p = strtokm(NULL, "$");
	}
	/* decode the hex-encoded encrypted timestamp */
	for (i = 0; i < TIMESTAMP_SIZE; i++)
		cs.ct[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}

/* Zero the whole key buffer so stale candidate bytes never leak between runs */
static void clear_keys(void)
{
	memset(inbuffer, 0, key_buf_size);
}

/*
 * Store one candidate password.  GETPOS() scatters the bytes into the
 * interleaved, byte-swapped layout the (possibly vectorized) kernel expects.
 */
static void set_key(char *key, int index)
{
	int i;
	int length = strlen(key);

	for (i = 0; i < length; i++)
		((char*)inbuffer)[GETPOS(i, index)] = key[i];

	new_keys = 1;  /* tell crypt_all() the buffer must be re-uploaded */
}

/* Inverse of set_key(): reassemble the plaintext from the GPU layout */
static char* get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];
	int i = 0;

	while (i < PLAINTEXT_LENGTH &&
	       (ret[i] = ((char*)inbuffer)[GETPOS(i, index)]))
		i++;
	ret[i] = 0;

	return ret;
}

/*
 * Canonicalize a ciphertext: fill in the default salt when the salt
 * field is empty and lower-case the hex blob, so equivalent hashes
 * compare equal in the database.
 */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
	static char out[TOTAL_LENGTH + 1];
	char in[TOTAL_LENGTH + 1];
	char salt[MAX_SALTLEN + 1];
	char *data;
	char *e, *u, *r, *s, *tc;

	strnzcpy(in, ciphertext, sizeof(in));

	/* split the fields from the right: timestamp+checksum, salt, realm, user */
	tc = strrchr(in, '$'); *tc++ = 0;
	s = strrchr(in, '$'); *s++ = 0;
	r = strrchr(in, '$'); *r++ = 0;
	u = strrchr(in, '$'); *u++ = 0;
	e = in + 8;  /* etype digits, just past the "$krb5pa$" tag */

	/* Default salt is user.realm
	 */
	if (!*s) {
		snprintf(salt, sizeof(salt), "%s%s", r, u);
		s = salt;
	}
	snprintf(out, sizeof(out), "%s%s$%s$%s$%s$%s",
	         FORMAT_TAG, e, u, r, s, tc);
	/* lower-case only the trailing hex blob, leaving user/realm/salt intact */
	data = out + strlen(out) - 2 * (CHECKSUM_SIZE + TIMESTAMP_SIZE) - 1;
	strlwr(data);

	return out;
}

/*
 * Decode the 12-byte (96-bit) binary checksum from the tail of the
 * ciphertext.  Returns a pointer to a static buffer.
 */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;  /* forces alignment of c[] */
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '$') + 1 + TIMESTAMP_SIZE * 2; /* skip to checksum field */
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] =
			(atoi16[ARCH_INDEX(*p)] << 4) |
			atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

/* Standard JtR hash-table probes over the first word of the computed checksum */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }

/*
 * Select the current salt and push it to the device (non-blocking write;
 * crypt_all() synchronizes before reading results).
 */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	currentsalt.length = strlen((char*)cur_salt->salt);
	currentsalt.iterations = ITERATIONS;
	memcpy(currentsalt.salt, cur_salt->salt, currentsalt.length);

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0, sizeof(pbkdf2_salt), &currentsalt, 0, NULL, NULL), "Copy setting to gpu");
}

/*
 * AES in CBC mode with ciphertext stealing (CTS), as used by the
 * aes-cts-hmac-sha1-96 Kerberos enctypes: the last two blocks are
 * swapped/truncated so the output length equals the input length.
 * 'encryptp' selects encryption vs decryption.
 */
static void AES_cts_encrypt(const unsigned char *in, unsigned char *out,
                            size_t len, const AES_KEY *key,
                            unsigned char *ivec, const int encryptp)
{
	unsigned char tmp[AES_BLOCK_SIZE];
	unsigned int i;

	if (encryptp) {
		/* plain CBC for all but the final (possibly partial) block */
		while(len > AES_BLOCK_SIZE) {
			for (i = 0; i < AES_BLOCK_SIZE; i++)
				tmp[i] = in[i] ^ ivec[i];
			AES_encrypt(tmp, out, key);
			memcpy(ivec, out, AES_BLOCK_SIZE);
			len -= AES_BLOCK_SIZE;
			in += AES_BLOCK_SIZE;
			out += AES_BLOCK_SIZE;
		}

		for (i = 0; i < len; i++)
			tmp[i]
				= in[i] ^ ivec[i];
		/* pad the final partial block with the IV (equivalent to zero padding
		 * before the CBC xor), then steal: swap the last two ciphertext blocks */
		for (; i < AES_BLOCK_SIZE; i++)
			tmp[i] = 0 ^ ivec[i];

		AES_encrypt(tmp, out - AES_BLOCK_SIZE, key);

		memcpy(out, ivec, len);
		memcpy(ivec, out - AES_BLOCK_SIZE, AES_BLOCK_SIZE);
	} else {
		unsigned char tmp2[AES_BLOCK_SIZE];
		unsigned char tmp3[AES_BLOCK_SIZE];

		/* plain CBC for all but the last two blocks */
		while(len > AES_BLOCK_SIZE * 2) {
			memcpy(tmp, in, AES_BLOCK_SIZE);
			AES_decrypt(in, out, key);
			for (i = 0; i < AES_BLOCK_SIZE; i++)
				out[i] ^= ivec[i];
			memcpy(ivec, tmp, AES_BLOCK_SIZE);
			len -= AES_BLOCK_SIZE;
			in += AES_BLOCK_SIZE;
			out += AES_BLOCK_SIZE;
		}

		len -= AES_BLOCK_SIZE;

		/* undo the ciphertext stealing for the final two blocks */
		memcpy(tmp, in, AES_BLOCK_SIZE); /* save last iv */
		AES_decrypt(in, tmp2, key);
		memcpy(tmp3, in + AES_BLOCK_SIZE, len);
		memcpy(tmp3 + len, tmp2 + len, AES_BLOCK_SIZE - len); /* xor 0 */

		for (i = 0; i < len; i++)
			out[i + AES_BLOCK_SIZE] = tmp2[i] ^ tmp3[i];

		AES_decrypt(tmp3, out, key);
		for (i = 0; i < AES_BLOCK_SIZE; i++)
			out[i] ^= ivec[i];
		memcpy(ivec, tmp, AES_BLOCK_SIZE);
	}
}

// keysize = 32 for 256 bits, 16 for 128 bits
/*
 * DK (key derivation): encrypt the 16-byte derivation constant under
 * key_in with AES-CBC (zero IV) to obtain key_size bytes of derived key.
 */
static void dk(unsigned char key_out[], unsigned char key_in[],
               size_t key_size, unsigned char ptext[], size_t ptext_size)
{
	unsigned char iv[32];
	unsigned char plaintext[32];
	AES_KEY ekey;

	memset(iv,0,sizeof(iv));
	memset(plaintext,0,sizeof(plaintext));
	memcpy(plaintext,ptext,16);

	AES_set_encrypt_key(key_in,key_size*8,&ekey);
	AES_cbc_encrypt(plaintext,key_out,key_size,&ekey,iv,AES_ENCRYPT);
}

/* Decrypt a Kerberos AES-CTS ciphertext blob with a zero IV */
static void krb_decrypt(const unsigned char ciphertext[], size_t ctext_size,
                        unsigned char plaintext[], const unsigned char key[],
                        size_t key_size)
{
	unsigned char iv[32];
	AES_KEY ekey;

	memset(iv,0,sizeof(iv));
	AES_set_decrypt_key(key,key_size*8,&ekey);
	AES_cts_encrypt(ciphertext,plaintext,ctext_size,&ekey,iv,AES_DECRYPT);
}

/*
 * Main work function: run PBKDF2-HMAC-SHA1 for 'count' candidates on the
 * GPU, then finish the Kerberos key derivation and timestamp check on
 * the CPU (optionally OpenMP-parallel).
 */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int i;
	int key_size;
	size_t scalar_gws;
	size_t *lws = local_work_size ?
		&local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER_VW(count, local_work_size);
	scalar_gws = global_work_size * ocl_v_width;

	/* etype 17 = AES-128, etype 18 = AES-256 */
	if (cur_salt->etype == 17)
		key_size = 16;
	else
		key_size = 32;

	/// Copy data to gpu (only when set_key() changed something)
	if (ocl_autotune_running || new_keys) {
		BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, key_buf_size, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu");
		new_keys = 0;
	}

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_init, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run initial kernel");

	/* iterations are split into HASH_LOOPS-sized chunks so the watchdog
	 * timer is not tripped and events can be serviced between chunks */
	for (i = 0; i < (ocl_autotune_running ? 1 : ITERATIONS / HASH_LOOPS); i++) {
		BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_loop, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[2]), "Run loop kernel");
		BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
		opencl_process_event();
	}

	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_final, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[3]), "Run intermediate kernel");

	/* second pass — presumably produces the second PBKDF2 output block,
	 * since OUTLEN (32) exceeds one 20-byte SHA-1 block; see the kernel */
	for (i = 0; i < (ocl_autotune_running ? 1 : ITERATIONS / HASH_LOOPS); i++) {
		BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_loop, 1, NULL, &global_work_size, lws, 0, NULL, NULL), "Run loop kernel (2nd pass)");
		BENCH_CLERROR(clFinish(queue[gpu_id]), "Error running loop kernel");
		opencl_process_event();
	}

	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], pbkdf2_final, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[4]), "Run final kernel (SHA1)");
	BENCH_CLERROR(clFinish(queue[gpu_id]), "Failed running final kernel");

	/// Read the result back
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, sizeof(pbkdf2_out) * scalar_gws, output, 0, NULL, multi_profilingEvent[5]), "Copy result back");

	if (!ocl_autotune_running) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
		for (i = 0; i < count; i++) {
			unsigned char base_key[32];
			unsigned char Ke[32];
			unsigned char plaintext[TIMESTAMP_SIZE];

			//pbkdf2((const unsigned char*)saved_key[i], len, (unsigned char *)cur_salt->salt,strlen((char*)cur_salt->salt), 4096, (unsigned int*)tkey);

			// generate 128 bits from 40 bits of "kerberos" string
			// This is precomputed in init()
			//nfold(8 * 8, (unsigned char*)"kerberos", 128, constant);

			/* base-key = DK(PBKDF2 output, "kerberos" n-fold constant) */
			dk(base_key, (unsigned char*)output[i].dk, key_size, constant, 32);

			/* The "well-known constant" used for the DK function is the key usage number,
			 * expressed as four octets in big-endian order, followed by one octet indicated below.
			 * Kc = DK(base-key, usage | 0x99);
			 * Ke = DK(base-key, usage | 0xAA);
			 * Ki = DK(base-key, usage | 0x55);
			 */

			// derive Ke for decryption/encryption
			// This is precomputed in init()
			//memset(usage,0,sizeof(usage));
			//usage[3] = 0x01;        // key number in big-endian format
			//usage[4] = 0xAA;        // used to derive Ke
			//nfold(sizeof(usage)*8,usage,sizeof(ke_input)*8,ke_input);
			dk(Ke, base_key, key_size, ke_input, 32);

			// decrypt the AS-REQ timestamp encrypted with 256-bit AES
			// here is enough to check the string, further computation below is required
			// to fully verify the checksum
			krb_decrypt(cur_salt->ct, TIMESTAMP_SIZE, plaintext, Ke, key_size);

			// Check a couple bytes from known plain (YYYYMMDDHHMMSSZ) and
			// bail out if we are out of luck.
			if (plaintext[22] == '2' && plaintext[23] == '0' &&
			    plaintext[36] == 'Z') {
				unsigned char Ki[32];
				unsigned char checksum[20];

				// derive Ki used in HMAC-SHA-1 checksum
				// This is precomputed in init()
				//memset(usage,0,sizeof(usage));
				//usage[3] = 0x01;        // key number in big-endian format
				//usage[4] = 0x55;        // used to derive Ki
				//nfold(sizeof(usage)*8,usage,sizeof(ki_input)*8,ki_input);
				dk(Ki, base_key, key_size, ki_input, 32);

				// derive checksum of plaintext (only 96 bits used out of 160)
				hmac_sha1(Ki, key_size, plaintext, TIMESTAMP_SIZE, checksum, 20);
				memcpy(crypt_out[i], checksum, BINARY_SIZE);
			} else {
				/* clearly-wrong candidate: store zeros so cmp_* can't match */
				memset(crypt_out[i], 0, BINARY_SIZE);
			}
		}
	}

	return count;
}

/* Quick reject: compare only the first machine word of each checksum */
static int cmp_all(void *binary, int count)
{
	int index = 0;

	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

/* Full 96-bit comparison for one candidate */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

/* Nothing further to check beyond the 96-bit binary */
static int cmp_exact(char *source, int index)
{
	return 1;
}

/* Format registration: parameters table and method table */
struct fmt_main fmt_opencl_krb5pa_sha1 = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
		{ NULL },
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */

#endif /* HAVE_OPENCL */
ft_single.c
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - FT This benchmark is an OpenMP C version of the NPB FT code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Authors: D. Bailey W. Saphir OpenMP C version: S. Satoh --------------------------------------------------------------------*/ //#include "npb-C.h" /* NAS Parallel Benchmarks 2.3 OpenMP C Versions */ #include <stdio.h> #include <stdlib.h> #include <math.h> #if defined(_OPENMP) #include <omp.h> #endif /* _OPENMP */ typedef int boolean; typedef struct { double real; double imag; } dcomplex; #define TRUE 1 #define FALSE 0 #define max(a,b) (((a) > (b)) ? (a) : (b)) #define min(a,b) (((a) < (b)) ? 
(a) : (b)) #define pow2(a) ((a)*(a)) #define get_real(c) c.real #define get_imag(c) c.imag #define cadd(c,a,b) (c.real = a.real + b.real, c.imag = a.imag + b.imag) #define csub(c,a,b) (c.real = a.real - b.real, c.imag = a.imag - b.imag) #define cmul(c,a,b) (c.real = a.real * b.real - a.imag * b.imag, \ c.imag = a.real * b.imag + a.imag * b.real) #define crmul(c,a,b) (c.real = a.real * b, c.imag = a.imag * b) extern double randlc(double *, double); extern void vranlc(int, double *, double, double *); extern void timer_clear(int); extern void timer_start(int); extern void timer_stop(int); extern double timer_read(int); extern void c_print_results(char *name, char cclass, int n1, int n2, int n3, int niter, int nthreads, double t, double mops, char *optype, int passed_verification, char *npbversion, char *compiletime, char *cc, char *clink, char *c_lib, char *c_inc, char *cflags, char *clinkflags, char *rand); /* global variables */ //#include "global.h" /******************/ /* default values */ /******************/ #ifndef CLASS #define CLASS 'B' #endif #if CLASS == 'S' /* CLASS = S */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the classc of the NPB c in this directory. Do not modify it by hand. */ #define NX 64 #define NY 64 #define NZ 64 #define MAXDIM 64 #define NITER_DEFAULT 6 #define NTOTAL 262144 #define CONVERTDOUBLE FALSE #endif #if CLASS == 'W' /* CLASS = W */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the classc of the NPB c in this directory. Do not modify it by hand. */ #define NX 128 #define NY 128 #define NZ 32 #define MAXDIM 128 #define NITER_DEFAULT 6 #define NTOTAL 524288 #define CONVERTDOUBLE FALSE #endif #if CLASS == 'A' /* CLASS = A */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the classc of the NPB c in this directory. Do not modify it by hand. 
*/ #define NX 256 #define NY 256 #define NZ 128 #define MAXDIM 256 #define NITER_DEFAULT 6 #define NTOTAL 8388608 #define CONVERTDOUBLE FALSE #endif #if CLASS == 'B' /* CLASS = B */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the classc of the NPB c in this directory. Do not modify it by hand. */ #define NX 512 #define NY 256 #define NZ 256 #define MAXDIM 512 #define NITER_DEFAULT 20 #define NTOTAL 33554432 #define CONVERTDOUBLE FALSE #endif #if CLASS == 'C' /* CLASS = C */ /* c This file is generated automatically by the setparams utility. c It sets the number of processors and the classc of the NPB c in this directory. Do not modify it by hand. */ #define NX 512 #define NY 512 #define NZ 512 #define MAXDIM 512 #define NITER_DEFAULT 20 #define NTOTAL 134217728 #define CONVERTDOUBLE FALSE #endif #define COMPILETIME "28 Oct 2014" #define NPBVERSION "2.3" #define CS1 "gcc" #define CS2 "$(CC)" #define CS3 "(none)" #define CS4 "-I../common" #define CS5 "-fopenmp -O2" #define CS6 "-lm -fopenmp" #define CS7 "randdp" /* c If processor array is 1x1 -> 0D grid decomposition c Cache blocking params. These values are good for most c RISC processors. c FFT parameters: c fftblock controls how many ffts are done at a time. c The default is appropriate for most cache-based machines c On vector machines, the FFT can be vectorized with vector c length equal to the block size, so the block size should c be as large as possible. This is the size of the smallest c dimension of the problem: 128 for class A, 256 for class B and c 512 for class C. */ #define FFTBLOCK_DEFAULT 16 #define FFTBLOCKPAD_DEFAULT 18 #define FFTBLOCK FFTBLOCK_DEFAULT #define FFTBLOCKPAD FFTBLOCKPAD_DEFAULT /* COMMON block: blockinfo */ int fftblock; int fftblockpad; /* c we need a bunch of logic to keep track of how c arrays are laid out. c Note: this serial version is the derived from the parallel 0D case c of the ft NPB. 
c The computation proceeds logically as c set up initial conditions c fftx(1) c transpose (1->2) c ffty(2) c transpose (2->3) c fftz(3) c time evolution c fftz(3) c transpose (3->2) c ffty(2) c transpose (2->1) c fftx(1) c compute residual(1) c for the 0D, 1D, 2D strategies, the layouts look like xxx c c 0D 1D 2D c 1: xyz xyz xyz c 2: xyz xyz yxz c 3: xyz zyx zxy c the array dimensions are stored in dims(coord, phase) */ /* COMMON block: layout */ static int dims[3][3]; static int xstart[3]; static int ystart[3]; static int zstart[3]; static int xend[3]; static int yend[3]; static int zend[3]; #define T_TOTAL 0 #define T_SETUP 1 #define T_FFT 2 #define T_EVOLVE 3 #define T_CHECKSUM 4 #define T_FFTLOW 5 #define T_FFTCOPY 6 #define T_MAX 7 #define TIMERS_ENABLED FALSE /* other stuff */ #define SEED 314159265.0 #define A 1220703125.0 #define PI 3.141592653589793238 #define ALPHA 1.0e-6 #define EXPMAX (NITER_DEFAULT*(NX*NX/4+NY*NY/4+NZ*NZ/4)) /* COMMON block: excomm */ static double ex[EXPMAX+1]; /* ex(0:expmax) */ /* c roots of unity array c relies on x being largest dimension? 
*/ /* COMMON block: ucomm */ static dcomplex u[NX]; /* for checksum data */ /* COMMON block: sumcomm */ static dcomplex sums[NITER_DEFAULT+1]; /* sums(0:niter_default) */ /* number of iterations*/ /* COMMON block: iter */ static int niter; /* function declarations */ static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX], int t, int indexmap[NZ][NY][NX], int d[3]); static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]); static void ipow46(double a, int exponent, double *result); static void setup(void); static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]); static void print_timers(void); static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]); static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]); static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]); static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]); static void fft_init (int n); static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]); static void fftz2 (int is, int l, int m, int n, int ny, int ny1, dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]); static int ilog2(int n); static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]); static void verify (int d1, int d2, int d3, int nt, boolean *verified, char *cclass); /*-------------------------------------------------------------------- c FT benchmark c-------------------------------------------------------------------*/ int main(int argc, char **argv) { /*c------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i, ierr; 
/*------------------------------------------------------------------
c u0, u1, u2 are the main arrays in the problem.
c Depending on the decomposition, these arrays will have different
c dimensions. To accomodate all possibilities, we allocate them as
c one-dimensional arrays and pass them to subroutines for different
c views
c  - u0 contains the initial (transformed) initial condition
c  - u1 and u2 are working arrays
c  - indexmap maps i,j,k of u0 to the correct i^2+j^2+k^2 for the
c    time evolution operator.
c-----------------------------------------------------------------*/

/*--------------------------------------------------------------------
c Large arrays are in common so that they are allocated on the
c heap rather than the stack. This common block is not
c referenced directly anywhere else. Padding is to avoid accidental
c cache problems, since all array sizes are powers of two.
c-------------------------------------------------------------------*/
    static dcomplex u0[NZ][NY][NX];
    static dcomplex pad1[3];
    static dcomplex u1[NZ][NY][NX];
    static dcomplex pad2[3];
    static dcomplex u2[NZ][NY][NX];
    static dcomplex pad3[3];
    static int indexmap[NZ][NY][NX];

    int iter;
    int nthreads = 1;
    double total_time, mflops;
    boolean verified;
    char cclass;

/*--------------------------------------------------------------------
c Run the entire problem once to make sure all data is touched.
c This reduces variable startup costs, which is important for such a
c short benchmark. The other NPB 2 implementations are similar.
c-------------------------------------------------------------------*/
    for (i = 0; i < T_MAX; i++) {
	timer_clear(i);
    }
    setup();
#pragma omp parallel
 {
    compute_indexmap(indexmap, dims[2]);
#pragma omp single
   {
    compute_initial_conditions(u1, dims[0]);
    fft_init (dims[0][0]);
   }
    fft(1, u1, u0);
 } /* end parallel */

/*--------------------------------------------------------------------
c Start over from the beginning. Note that all operations must
c be timed, in contrast to other benchmarks.
c-------------------------------------------------------------------*/
    for (i = 0; i < T_MAX; i++) {
	timer_clear(i);
    }
    timer_start(T_TOTAL);
    if (TIMERS_ENABLED == TRUE) timer_start(T_SETUP);

#pragma omp parallel private(iter) firstprivate(niter)
  {
    compute_indexmap(indexmap, dims[2]);

#pragma omp single
    {
    compute_initial_conditions(u1, dims[0]);
    fft_init (dims[0][0]);
    }

    /* T_* timers are global, so only the master thread touches them */
    if (TIMERS_ENABLED == TRUE) {
#pragma omp master
      timer_stop(T_SETUP);
    }
    if (TIMERS_ENABLED == TRUE) {
#pragma omp master
      timer_start(T_FFT);
    }
    fft(1, u1, u0);             /* forward transform of the initial state */
    if (TIMERS_ENABLED == TRUE) {
#pragma omp master
      timer_stop(T_FFT);
    }

    /* main benchmark loop: evolve in Fourier space, inverse-transform,
     * and checksum the result every iteration */
    for (iter = 1; iter <= niter; iter++) {
	if (TIMERS_ENABLED == TRUE) {
#pragma omp master
	    timer_start(T_EVOLVE);
	}

	evolve(u0, u1, iter, indexmap, dims[0]);

	if (TIMERS_ENABLED == TRUE) {
#pragma omp master
	    timer_stop(T_EVOLVE);
	}
	if (TIMERS_ENABLED == TRUE) {
#pragma omp master
	    timer_start(T_FFT);
	}

	fft(-1, u1, u2);

	if (TIMERS_ENABLED == TRUE) {
#pragma omp master
	    timer_stop(T_FFT);
	}
	if (TIMERS_ENABLED == TRUE) {
#pragma omp master
	    timer_start(T_CHECKSUM);
	}

	checksum(iter, u2, dims[0]);

	if (TIMERS_ENABLED == TRUE) {
#pragma omp master
	    timer_stop(T_CHECKSUM);
	}
    }

#pragma omp single
    verify(NX, NY, NZ, niter, &verified, &cclass);

#if defined(_OPENMP)
#pragma omp master
    nthreads = omp_get_num_threads();
#endif /* _OPENMP */
  } /* end parallel */

    timer_stop(T_TOTAL);
    total_time = timer_read(T_TOTAL);

    if( total_time != 0.0) {
	mflops = 1.0e-6*(double)(NTOTAL) *
	    (14.8157+7.19641*log((double)(NTOTAL))
	     +  (5.23518+7.21113*log((double)(NTOTAL)))*niter)
	    /total_time;
    } else {
	mflops = 0.0;
    }
    c_print_results("FT", cclass, NX, NY, NZ, niter, nthreads,
		    total_time, mflops, " floating point", verified,
		    NPBVERSION, COMPILETIME,
		    CS1, CS2, CS3, CS4, CS5, CS6, CS7);
    if (TIMERS_ENABLED == TRUE) print_timers();
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c evolve u0 -> u1 (t time steps) in fourier space: multiply each mode
c by the precomputed exponential factor ex[t * (i^2+j^2+k^2)].
c Must be called from inside a parallel region (uses an orphaned
c "omp for").
c-------------------------------------------------------------------*/
static void evolve(dcomplex u0[NZ][NY][NX], dcomplex u1[NZ][NY][NX], int t,
		   int indexmap[NZ][NY][NX], int d[3]) {
    int i, j, k;

#pragma omp for
    for (k = 0; k < d[2]; k++) {
	for (j = 0; j < d[1]; j++) {
            for (i = 0; i < d[0]; i++) {
	      crmul(u1[k][j][i], u0[k][j][i], ex[t*indexmap[k][j][i]]);
	    }
	}
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Fill in array u0 with initial conditions from
c random number generator
c-------------------------------------------------------------------*/
static void compute_initial_conditions(dcomplex u0[NZ][NY][NX], int d[3]) {
    int k;
    double x0, start, an, dummy;
    static double tmp[NX*2*MAXDIM+1];
    int i,j,t;

    start = SEED;
/*--------------------------------------------------------------------
c Jump to the starting element for our first plane.
c-------------------------------------------------------------------*/
    ipow46(A, (zstart[0]-1)*2*NX*NY + (ystart[0]-1)*2*NX, &an);
    dummy = randlc(&start, an);
    ipow46(A, 2*NX*NY, &an);

/*--------------------------------------------------------------------
c Go through by z planes filling in one square at a time.
c-------------------------------------------------------------------*/
    for (k = 0; k < dims[0][2]; k++) {
	x0 = start;
        vranlc(2*NX*dims[0][1], &x0, A, tmp);

	t = 1;
	for (j = 0; j < dims[0][1]; j++)
	    for (i = 0; i < NX; i++) {
		u0[k][j][i].real = tmp[t++];
		u0[k][j][i].imag = tmp[t++];
	    }

	/* advance the generator to the next plane
	 * (NOTE(review): k never equals dims[0][2] inside this loop, so the
	 * guard is always true — looks like a Fortran 1-based leftover) */
	if (k != dims[0][2]) dummy = randlc(&start, an);
    }
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c compute a^exponent mod 2^46, via repeated squaring through randlc()
c-------------------------------------------------------------------*/
static void ipow46(double a, int exponent, double *result) {
    double dummy, q, r;
    int n, n2;

/*--------------------------------------------------------------------
c Use
c   a^n = a^(n/2)*a^(n/2) if n even else
c   a^n = a*a^(n-1)       if n odd
c-------------------------------------------------------------------*/
    *result = 1;
    if (exponent == 0) return;
    q = a;
    r = 1;
    n = exponent;

    while (n > 1) {
	n2 = n/2;
        if (n2 * 2 == n) {
            dummy = randlc(&q, q);  /* square */
            n = n2;
	} else {
            dummy = randlc(&r, q);  /* multiply into the result */
            n = n-1;
	}
    }
    dummy = randlc(&r, q);
    *result = r;
}

/*--------------------------------------------------------------------
c-------------------------------------------------------------------*/
/*--------------------------------------------------------------------
c Print the banner and set up the (compile-time fixed) problem size
c and iteration count.
c-------------------------------------------------------------------*/
static void setup(void) {
    int ierr, i, j, fstatus;

    printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"
	   " - FT Benchmark\n\n");

    niter = NITER_DEFAULT;

    printf(" Size : %3dx%3dx%3d\n", NX, NY, NZ);
    printf(" Iterations : %7d\n", niter);

/* 1004 format(' Number of processes : ', i7)
 1005 format(' Processor array : ', i3, 'x', i3)
 1006 format(' WARNING: compiled for ', i5, ' processes.
', > ' Will not verify. ')*/ for (i = 0;i < 3 ; i++) { dims[i][0] = NX; dims[i][1] = NY; dims[i][2] = NZ; } for (i = 0; i < 3; i++) { xstart[i] = 1; xend[i] = NX; ystart[i] = 1; yend[i] = NY; zstart[i] = 1; zend[i] = NZ; } /*-------------------------------------------------------------------- c Set up info for blocking of ffts and transposes. This improves c performance on cache-based systems. Blocking involves c working on a chunk of the problem at a time, taking chunks c along the first, second, or third dimension. c c - In cffts1 blocking is on 2nd dimension (with fft on 1st dim) c - In cffts2/3 blocking is on 1st dimension (with fft on 2nd and 3rd dims) c Since 1st dim is always in processor, we'll assume it's long enough c (default blocking factor is 16 so min size for 1st dim is 16) c The only case we have to worry about is cffts1 in a 2d decomposition. c so the blocking factor should not be larger than the 2nd dimension. c-------------------------------------------------------------------*/ fftblock = FFTBLOCK_DEFAULT; fftblockpad = FFTBLOCKPAD_DEFAULT; if (fftblock != FFTBLOCK_DEFAULT) fftblockpad = fftblock+3; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void compute_indexmap(int indexmap[NZ][NY][NX], int d[3]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute function from local (i,j,k) to ibar^2+jbar^2+kbar^2 c for time evolution exponent. 
c-------------------------------------------------------------------*/ int i, j, k, ii, ii2, jj, ij2, kk; double ap; /*-------------------------------------------------------------------- c basically we want to convert the fortran indices c 1 2 3 4 5 6 7 8 c to c 0 1 2 3 -4 -3 -2 -1 c The following magic formula does the trick: c mod(i-1+n/2, n) - n/2 c-------------------------------------------------------------------*/ #pragma omp for for (i = 0; i < dims[2][0]; i++) { ii = (i+1+xstart[2]-2+NX/2)%NX - NX/2; ii2 = ii*ii; for (j = 0; j < dims[2][1]; j++) { jj = (j+1+ystart[2]-2+NY/2)%NY - NY/2; ij2 = jj*jj+ii2; for (k = 0; k < dims[2][2]; k++) { kk = (k+1+zstart[2]-2+NZ/2)%NZ - NZ/2; indexmap[k][j][i] = kk*kk+ij2; } } } /*-------------------------------------------------------------------- c compute array of exponentials for time evolution. c-------------------------------------------------------------------*/ #pragma omp single { ap = - 4.0 * ALPHA * PI * PI; ex[0] = 1.0; ex[1] = exp(ap); for (i = 2; i <= EXPMAX; i++) { ex[i] = ex[i-1]*ex[1]; } } /* end single */ } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void print_timers(void) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int i; char *tstrings[] = { " total ", " setup ", " fft ", " evolve ", " checksum ", " fftlow ", " fftcopy " }; for (i = 0; i < T_MAX; i++) { if (timer_read(i) != 0.0) { printf("timer %2d(%16s( :%10.6f\n", i, tstrings[i], timer_read(i)); } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fft(int dir, dcomplex x1[NZ][NY][NX], dcomplex x2[NZ][NY][NX]) { /*-------------------------------------------------------------------- 
c-------------------------------------------------------------------*/ dcomplex y0[NX][FFTBLOCKPAD]; dcomplex y1[NX][FFTBLOCKPAD]; /*-------------------------------------------------------------------- c note: args x1, x2 must be different arrays c note: args for cfftsx are (direction, layout, xin, xout, scratch) c xin/xout may be the same and it can be somewhat faster c if they are c-------------------------------------------------------------------*/ if (dir == 1) { cffts1(1, dims[0], x1, x1, y0, y1); /* x1 -> x1 */ cffts2(1, dims[1], x1, x1, y0, y1); /* x1 -> x1 */ cffts3(1, dims[2], x1, x2, y0, y1); /* x1 -> x2 */ } else { cffts3(-1, dims[2], x1, x1, y0, y1); /* x1 -> x1 */ cffts2(-1, dims[1], x1, x1, y0, y1); /* x1 -> x1 */ cffts1(-1, dims[0], x1, x2, y0, y1); /* x1 -> x2 */ } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cffts1(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int logd[3]; int i, j, k, jj; for (i = 0; i < 3; i++) { logd[i] = ilog2(d[i]); } #pragma omp for for (k = 0; k < d[2]; k++) { for (jj = 0; jj <= d[1] - fftblock; jj+=fftblock) { /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (j = 0; j < fftblock; j++) { for (i = 0; i < d[0]; i++) { y0[i][j].real = x[k][j+jj][i].real; y0[i][j].imag = x[k][j+jj][i].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */ cfftz (is, logd[0], d[0], y0, y1); /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (j = 0; j < fftblock; j++) { for (i = 0; i < d[0]; i++) { xout[k][j+jj][i].real = y0[i][j].real; xout[k][j+jj][i].imag = 
y0[i][j].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cffts2(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int logd[3]; int i, j, k, ii; for (i = 0; i < 3; i++) { logd[i] = ilog2(d[i]); } #pragma omp for for (k = 0; k < d[2]; k++) { for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) { /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (j = 0; j < d[1]; j++) { for (i = 0; i < fftblock; i++) { y0[j][i].real = x[k][j][i+ii].real; y0[j][i].imag = x[k][j][i+ii].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */ cfftz (is, logd[1], d[1], y0, y1); /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (j = 0; j < d[1]; j++) { for (i = 0; i < fftblock; i++) { xout[k][j][i+ii].real = y0[j][i].real; xout[k][j][i+ii].imag = y0[j][i].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cffts3(int is, int d[3], dcomplex x[NZ][NY][NX], dcomplex xout[NZ][NY][NX], dcomplex y0[NX][FFTBLOCKPAD], dcomplex y1[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int logd[3]; int i, j, k, ii; for (i = 0;i < 3; i++) { logd[i] = ilog2(d[i]); } #pragma omp for for (j = 0; j < d[1]; j++) { for (ii = 0; ii <= d[0] - fftblock; ii+=fftblock) { /* if (TIMERS_ENABLED == 
TRUE) timer_start(T_FFTCOPY); */ for (k = 0; k < d[2]; k++) { for (i = 0; i < fftblock; i++) { y0[k][i].real = x[k][j][i+ii].real; y0[k][i].imag = x[k][j][i+ii].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTLOW); */ cfftz (is, logd[2], d[2], y0, y1); /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTLOW); */ /* if (TIMERS_ENABLED == TRUE) timer_start(T_FFTCOPY); */ for (k = 0; k < d[2]; k++) { for (i = 0; i < fftblock; i++) { xout[k][j][i+ii].real = y0[k][i].real; xout[k][j][i+ii].imag = y0[k][i].imag; } } /* if (TIMERS_ENABLED == TRUE) timer_stop(T_FFTCOPY); */ } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fft_init (int n) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c compute the roots-of-unity array that will be used for subsequent FFTs. c-------------------------------------------------------------------*/ int m,nu,ku,i,j,ln; double t, ti; /*-------------------------------------------------------------------- c Initialize the U array with sines and cosines in a manner that permits c stride one access at each FFT iteration. 
c-------------------------------------------------------------------*/ nu = n; m = ilog2(n); u[0].real = (double)m; u[0].imag = 0.0; ku = 1; ln = 1; for (j = 1; j <= m; j++) { t = PI / ln; for (i = 0; i <= ln - 1; i++) { ti = i * t; u[i+ku].real = cos(ti); u[i+ku].imag = sin(ti); } ku = ku + ln; ln = 2 * ln; } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void cfftz (int is, int m, int n, dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Computes NY N-point complex-to-complex FFTs of X using an algorithm due c to Swarztrauber. X is both the input and the output array, while Y is a c scratch array. It is assumed that N = 2^M. Before calling CFFTZ to c perform FFTs, the array U must be initialized by calling CFFTZ with IS c set to 0 and M set to MX, where MX is the maximum value of M for any c subsequent call. c-------------------------------------------------------------------*/ int i,j,l,mx; /*-------------------------------------------------------------------- c Check if input parameters are invalid. c-------------------------------------------------------------------*/ mx = (int)(u[0].real); if ((is != 1 && is != -1) || m < 1 || m > mx) { printf("CFFTZ: Either U has not been initialized, or else\n" "one of the input parameters is invalid%5d%5d%5d\n", is, m, mx); exit(1); } /*-------------------------------------------------------------------- c Perform one variant of the Stockham FFT. 
c-------------------------------------------------------------------*/ for (l = 1; l <= m; l+=2) { fftz2 (is, l, m, n, fftblock, fftblockpad, u, x, y); if (l == m) break; fftz2 (is, l + 1, m, n, fftblock, fftblockpad, u, y, x); } /*-------------------------------------------------------------------- c Copy Y to X. c-------------------------------------------------------------------*/ if (m % 2 == 1) { for (j = 0; j < n; j++) { for (i = 0; i < fftblock; i++) { x[j][i].real = y[j][i].real; x[j][i].imag = y[j][i].imag; } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void fftz2 (int is, int l, int m, int n, int ny, int ny1, dcomplex u[NX], dcomplex x[NX][FFTBLOCKPAD], dcomplex y[NX][FFTBLOCKPAD]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ /*-------------------------------------------------------------------- c Performs the L-th iteration of the second variant of the Stockham FFT. c-------------------------------------------------------------------*/ int k,n1,li,lj,lk,ku,i,j,i11,i12,i21,i22; dcomplex u1,x11,x21; /*-------------------------------------------------------------------- c Set initial parameters. c-------------------------------------------------------------------*/ n1 = n / 2; if (l-1 == 0) { lk = 1; } else { lk = 2 << ((l - 1)-1); } if (m-l == 0) { li = 1; } else { li = 2 << ((m - l)-1); } lj = 2 * lk; ku = li; for (i = 0; i < li; i++) { i11 = i * lk; i12 = i11 + n1; i21 = i * lj; i22 = i21 + lk; if (is >= 1) { u1.real = u[ku+i].real; u1.imag = u[ku+i].imag; } else { u1.real = u[ku+i].real; u1.imag = -u[ku+i].imag; } /*-------------------------------------------------------------------- c This loop is vectorizable. 
c-------------------------------------------------------------------*/ for (k = 0; k < lk; k++) { for (j = 0; j < ny; j++) { double x11real, x11imag; double x21real, x21imag; x11real = x[i11+k][j].real; x11imag = x[i11+k][j].imag; x21real = x[i12+k][j].real; x21imag = x[i12+k][j].imag; y[i21+k][j].real = x11real + x21real; y[i21+k][j].imag = x11imag + x21imag; y[i22+k][j].real = u1.real * (x11real - x21real) - u1.imag * (x11imag - x21imag); y[i22+k][j].imag = u1.real * (x11imag - x21imag) + u1.imag * (x11real - x21real); } } } } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static int ilog2(int n) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int nn, lg; if (n == 1) { return 0; } lg = 1; nn = 2; while (nn < n) { nn = nn << 1; lg++; } return lg; } /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ static void checksum(int i, dcomplex u1[NZ][NY][NX], int d[3]) { /*-------------------------------------------------------------------- c-------------------------------------------------------------------*/ int j, q,r,s, ierr; dcomplex chk,allchk; chk.real = 0.0; chk.imag = 0.0; #pragma omp for nowait for (j = 1; j <= 1024; j++) { q = j%NX+1; if (q >= xstart[0] && q <= xend[0]) { r = (3*j)%NY+1; if (r >= ystart[0] && r <= yend[0]) { s = (5*j)%NZ+1; if (s >= zstart[0] && s <= zend[0]) { cadd(chk,chk,u1[s-zstart[0]][r-ystart[0]][q-xstart[0]]); } } } } #pragma omp critical { sums[i].real += chk.real; sums[i].imag += chk.imag; } #pragma omp barrier #pragma omp single { /* complex % real */ sums[i].real = sums[i].real/(double)(NTOTAL); sums[i].imag = sums[i].imag/(double)(NTOTAL); printf("T = %5d Checksum = %22.12e %22.12e\n", i, sums[i].real, sums[i].imag); } } 
/*--------------------------------------------------------------------
c Compare the recorded checksums sums[1..nt] against the reference
c values for the recognized problem classes (S, W, A, B, C).
c On return, *cclass is the class letter ('U' when the size/iteration
c combination is unknown) and *verified says whether every checksum
c matched to a relative tolerance of 1e-12.
c
c FIX: the result message used to depend only on the class being
c known -- a known class with mismatching checksums still printed
c "successful", and an unknown class left *verified == TRUE so the
c final report claimed SUCCESSFUL.  Both paths now respect the actual
c verification outcome.
c-------------------------------------------------------------------*/
static void verify(int d1, int d2, int d3, int nt,
                   boolean *verified, char *cclass) {
    int i;
    double err, epsilon;

    /*----------------------------------------------------------------
    c Class S size reference checksums
    c---------------------------------------------------------------*/
    double vdata_real_s[6+1] = { 0.0,
        5.546087004964e+02, 5.546385409189e+02, 5.546148406171e+02,
        5.545423607415e+02, 5.544255039624e+02, 5.542683411902e+02 };
    double vdata_imag_s[6+1] = { 0.0,
        4.845363331978e+02, 4.865304269511e+02, 4.883910722336e+02,
        4.901273169046e+02, 4.917475857993e+02, 4.932597244941e+02 };
    /*----------------------------------------------------------------
    c Class W size reference checksums
    c---------------------------------------------------------------*/
    double vdata_real_w[6+1] = { 0.0,
        5.673612178944e+02, 5.631436885271e+02, 5.594024089970e+02,
        5.560698047020e+02, 5.530898991250e+02, 5.504159734538e+02 };
    double vdata_imag_w[6+1] = { 0.0,
        5.293246849175e+02, 5.282149986629e+02, 5.270996558037e+02,
        5.260027904925e+02, 5.249400845633e+02, 5.239212247086e+02 };
    /*----------------------------------------------------------------
    c Class A size reference checksums
    c---------------------------------------------------------------*/
    double vdata_real_a[6+1] = { 0.0,
        5.046735008193e+02, 5.059412319734e+02, 5.069376896287e+02,
        5.077892868474e+02, 5.085233095391e+02, 5.091487099959e+02 };
    double vdata_imag_a[6+1] = { 0.0,
        5.114047905510e+02, 5.098809666433e+02, 5.098144042213e+02,
        5.101336130759e+02, 5.104914655194e+02, 5.107917842803e+02 };
    /*----------------------------------------------------------------
    c Class B size reference checksums
    c---------------------------------------------------------------*/
    double vdata_real_b[20+1] = { 0.0,
        5.177643571579e+02, 5.154521291263e+02, 5.146409228649e+02,
        5.142378756213e+02, 5.139626667737e+02, 5.137423460082e+02,
        5.135547056878e+02, 5.133910925466e+02, 5.132470705390e+02,
        5.131197729984e+02, 5.130070319283e+02, 5.129070537032e+02,
        5.128182883502e+02, 5.127393733383e+02, 5.126691062020e+02,
        5.126064276004e+02, 5.125504076570e+02, 5.125002331720e+02,
        5.124551951846e+02, 5.124146770029e+02 };
    double vdata_imag_b[20+1] = { 0.0,
        5.077803458597e+02, 5.088249431599e+02, 5.096208912659e+02,
        5.101023387619e+02, 5.103976610617e+02, 5.105948019802e+02,
        5.107404165783e+02, 5.108576573661e+02, 5.109577278523e+02,
        5.110460304483e+02, 5.111252433800e+02, 5.111968077718e+02,
        5.112616233064e+02, 5.113203605551e+02, 5.113735928093e+02,
        5.114218460548e+02, 5.114656139760e+02, 5.115053595966e+02,
        5.115415130407e+02, 5.115744692211e+02 };
    /*----------------------------------------------------------------
    c Class C size reference checksums
    c---------------------------------------------------------------*/
    double vdata_real_c[20+1] = { 0.0,
        5.195078707457e+02, 5.155422171134e+02, 5.144678022222e+02,
        5.140150594328e+02, 5.137550426810e+02, 5.135811056728e+02,
        5.134569343165e+02, 5.133651975661e+02, 5.132955192805e+02,
        5.132410471738e+02, 5.131971141679e+02, 5.131605205716e+02,
        5.131290734194e+02, 5.131012720314e+02, 5.130760908195e+02,
        5.130528295923e+02, 5.130310107773e+02, 5.130103090133e+02,
        5.129905029333e+02, 5.129714421109e+02 };
    double vdata_imag_c[20+1] = { 0.0,
        5.149019699238e+02, 5.127578201997e+02, 5.122251847514e+02,
        5.121090289018e+02, 5.121143685824e+02, 5.121496764568e+02,
        5.121870921893e+02, 5.122193250322e+02, 5.122454735794e+02,
        5.122663649603e+02, 5.122830879827e+02, 5.122965869718e+02,
        5.123075927445e+02, 5.123166486553e+02, 5.123241541685e+02,
        5.123304037599e+02, 5.123356167976e+02, 5.123399592211e+02,
        5.123435588985e+02, 5.123465164008e+02 };

    epsilon = 1.0e-12;
    *verified = TRUE;
    *cclass = 'U';

    if (d1 == 64 && d2 == 64 && d3 == 64 && nt == 6) {
        *cclass = 'S';
        for (i = 1; i <= nt; i++) {
            err = (get_real(sums[i]) - vdata_real_s[i]) / vdata_real_s[i];
            if (fabs(err) > epsilon) { *verified = FALSE; break; }
            err = (get_imag(sums[i]) - vdata_imag_s[i]) / vdata_imag_s[i];
            if (fabs(err) > epsilon) { *verified = FALSE; break; }
        }
    } else if (d1 == 128 && d2 == 128 && d3 == 32 && nt == 6) {
        *cclass = 'W';
        for (i = 1; i <= nt; i++) {
            err = (get_real(sums[i]) - vdata_real_w[i]) / vdata_real_w[i];
            if (fabs(err) > epsilon) { *verified = FALSE; break; }
            err = (get_imag(sums[i]) - vdata_imag_w[i]) / vdata_imag_w[i];
            if (fabs(err) > epsilon) { *verified = FALSE; break; }
        }
    } else if (d1 == 256 && d2 == 256 && d3 == 128 && nt == 6) {
        *cclass = 'A';
        for (i = 1; i <= nt; i++) {
            err = (get_real(sums[i]) - vdata_real_a[i]) / vdata_real_a[i];
            if (fabs(err) > epsilon) { *verified = FALSE; break; }
            err = (get_imag(sums[i]) - vdata_imag_a[i]) / vdata_imag_a[i];
            if (fabs(err) > epsilon) { *verified = FALSE; break; }
        }
    } else if (d1 == 512 && d2 == 256 && d3 == 256 && nt == 20) {
        *cclass = 'B';
        for (i = 1; i <= nt; i++) {
            err = (get_real(sums[i]) - vdata_real_b[i]) / vdata_real_b[i];
            if (fabs(err) > epsilon) { *verified = FALSE; break; }
            err = (get_imag(sums[i]) - vdata_imag_b[i]) / vdata_imag_b[i];
            if (fabs(err) > epsilon) { *verified = FALSE; break; }
        }
    } else if (d1 == 512 && d2 == 512 && d3 == 512 && nt == 20) {
        *cclass = 'C';
        for (i = 1; i <= nt; i++) {
            err = (get_real(sums[i]) - vdata_real_c[i]) / vdata_real_c[i];
            if (fabs(err) > epsilon) { *verified = FALSE; break; }
            err = (get_imag(sums[i]) - vdata_imag_c[i]) / vdata_imag_c[i];
            if (fabs(err) > epsilon) { *verified = FALSE; break; }
        }
    }

    /* An unrecognized size checked nothing, so it is not verified. */
    if (*cclass == 'U') *verified = FALSE;

    if (*verified) {
        printf("Result verification successful\n");
    } else {
        printf("Result verification failed\n");
    }
    printf("cclass = %1c\n", *cclass);
}

/* cat ./common/c_print_results.c */
/*****************************************************************/
/******     C  _  P  R  I  N  T  _  R  E  S  U  L  T  S     ******/
/*****************************************************************/

/*--------------------------------------------------------------------
c Print the standard NPB result summary.  A size of n2 == n3 == 0
c selects the one-dimensional format used by the IS benchmark.
c-------------------------------------------------------------------*/
void c_print_results( char *name,
                      char cclass,
                      int n1,
                      int n2,
                      int n3,
                      int niter,
                      int nthreads,
                      double t,
                      double mops,
                      char *optype,
                      int passed_verification,
                      char *npbversion,
                      char *compiletime,
                      char *cc,
                      char *clink,
                      char *c_lib,
                      char *c_inc,
                      char *cflags,
                      char *clinkflags,
                      char *rand) {
#ifdef SMP
    char *evalue = "1000";   /* fallback when MP_SET_NUMTHREADS is unset */
#endif

    printf( "\n\n %s Benchmark Completed\n", name );
    printf( " Class = %c\n", cclass );

    if( n2 == 0 && n3 == 0 )
        printf( " Size = %12d\n", n1 );   /* as in IS */
    else
        printf( " Size = %3dx%3dx%3d\n", n1,n2,n3 );

    printf( " Iterations = %12d\n", niter );
    printf( " Threads = %12d\n", nthreads );
    printf( " Time in seconds = %12.2f\n", t );
    printf( " Mop/s total = %12.2f\n", mops );
    printf( " Operation type = %24s\n", optype);

    if( passed_verification )
        printf( " Verification = SUCCESSFUL\n" );
    else
        printf( " Verification = UNSUCCESSFUL\n" );

    printf( " Version = %12s\n", npbversion );
    printf( " Compile date = %12s\n", compiletime );

    printf( "\n Compile options:\n" );
    printf( " CC = %s\n", cc );
    printf( " CLINK = %s\n", clink );
    printf( " C_LIB = %s\n", c_lib );
    printf( " C_INC = %s\n", c_inc );
    printf( " CFLAGS = %s\n", cflags );
    printf( " CLINKFLAGS = %s\n", clinkflags );
    printf( " RAND = %s\n", rand );
#ifdef SMP
    {
        /* FIX: guard against a NULL return from getenv -- printing a
           NULL pointer through %s is undefined behavior.  The "1000"
           initializer was clearly intended as the default. */
        char *ev = getenv("MP_SET_NUMTHREADS");
        if (ev != NULL) evalue = ev;
        printf( " MULTICPUS = %s\n", evalue );
    }
#endif
}

/* cat ./common/c_timers.c */

/* Prototype for the platform wall-clock routine defined below. */
void wtime( double * );

/*****************************************************************/
/******            E  L  A  P  S  E  D  _  T  I  M  E      ******/
/*****************************************************************/

/* Seconds elapsed since the first wtime() call. */
double elapsed_time( void ) {
    double t;

    wtime( &t );
    return( t );
}

/* Per-timer bookkeeping; indices 0..63 are available. */
double start[64], elapsed[64];

/*****************************************************************/
/******            T  I  M  E  R  _  C  L  E  A  R         ******/
/*****************************************************************/

/* Reset the accumulated time of timer n. */
void timer_clear( int n ) {
    elapsed[n] = 0.0;
}

/*****************************************************************/
/******            T  I  M  E  R  _  S  T  A  R  T         ******/
/*****************************************************************/

/* Record the start time of timer n. */
void timer_start( int n ) {
    start[n] = elapsed_time();
}

/*****************************************************************/
/******            T  I  M  E  R  _  S  T  O  P            ******/
/*****************************************************************/

/* Add the interval since the matching timer_start to timer n. */
void timer_stop( int n ) {
    double t, now;

    now = elapsed_time();
    t = now - start[n];
    elapsed[n] += t;
}

/*****************************************************************/
/******            T  I  M  E  R  _  R  E  A  D            ******/
/*****************************************************************/

/* Accumulated seconds of timer n. */
double timer_read( int n ) {
    return( elapsed[n] );
}

/* Wall-clock time in seconds since the first call, via gettimeofday.
   NOTE(review): the static `sec` anchor makes the first call the
   epoch and is not thread-safe; confirm all timing calls stay on the
   master thread as in main(). */
void wtime(double *t) {
    static int sec = -1;
    struct timeval tv;

    gettimeofday(&tv, (struct timezone *)0);
    if (sec < 0) sec = tv.tv_sec;
    *t = (tv.tv_sec - sec) + 1.0e-6*tv.tv_usec;
}

/* common/c_randdp.c */

/*--------------------------------------------------------------------
c Constants for the 46-bit linear congruential generator:
c r23 = 2^-23, t23 = 2^23, r46 = 2^-46, t46 = 2^46.
c-------------------------------------------------------------------*/
#if defined(USE_POW)
#define r23 pow(0.5, 23.0)
#define r46 (r23*r23)
#define t23 pow(2.0, 23.0)
#define t46 (t23*t23)
#else
#define r23 (0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5*0.5)
#define r46 (r23*r23)
#define t23 (2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0*2.0)
#define t46 (t23*t23)
#endif

/*c---------------------------------------------------------------------
c randlc: one step of the 46-bit linear congruential generator
c
c     x_{k+1} = a x_k  (mod 2^46)
c
c A and X must be odd double precision integers in (1, 2^46).  The
c seed *x is updated in place and the return value is the new seed
c normalized to (0,1), i.e. 2^-46 * x_{k+1}.  Exact on any machine
c with at least 48 mantissa bits (IEEE double qualifies): 46-bit
c products are formed from 23-bit halves.  (David H. Bailey, 1990.)
c---------------------------------------------------------------------*/
double randlc (double *x, double a) {
    double t1,t2,t3,t4,a1,a2,x1,x2,z;

    /* Break A into two parts such that A = 2^23 * A1 + A2. */
    t1 = r23 * a;
    a1 = (int)t1;
    a2 = a - t23 * a1;

    /* Break X into X = 2^23 * X1 + X2, compute
       Z = A1 * X2 + A2 * X1 (mod 2^23), and then
       X = 2^23 * Z + A2 * X2 (mod 2^46). */
    t1 = r23 * (*x);
    x1 = (int)t1;
    x2 = (*x) - t23 * x1;
    t1 = a1 * x2 + a2 * x1;
    t2 = (int)(r23 * t1);
    z = t1 - t23 * t2;
    t3 = t23 * z + a2 * x2;
    t4 = (int)(r46 * t3);
    (*x) = t3 - t46 * t4;

    return (r46 * (*x));
}

/*c---------------------------------------------------------------------
c vranlc: generate n successive values of the randlc sequence.
c
c Results are normalized to (0,1) and stored 1-BASED, in y[1..n], to
c match the Fortran-style callers (tmp[] in
c compute_initial_conditions is sized and indexed accordingly).
c *x_seed is updated to the new seed.  n == 0 performs no work.
c---------------------------------------------------------------------*/
void vranlc (int n, double *x_seed, double a, double* y) {
    int i;
    double x,t1,t2,t3,t4,a1,a2,x1,x2,z;

    /* Break A into two parts such that A = 2^23 * A1 + A2. */
    t1 = r23 * a;
    a1 = (int)t1;
    a2 = a - t23 * a1;
    x = *x_seed;

    /* Generate N results.  This loop is not vectorizable. */
    for (i = 1; i <= n; i++) {
        /* Same split-multiply step as randlc. */
        t1 = r23 * x;
        x1 = (int)t1;
        x2 = x - t23 * x1;
        t1 = a1 * x2 + a2 * x1;
        t2 = (int)(r23 * t1);
        z = t1 - t23 * t2;
        t3 = t23 * z + a2 * x2;
        t4 = (int)(r46 * t3);
        x = t3 - t46 * t4;
        y[i] = r46 * x;
    }
    *x_seed = x;
}
pp_collision.c
/* Copyright (C) 2017 Atsushi Togo */ /* All rights reserved. */ /* This file is part of phonopy. */ /* Redistribution and use in source and binary forms, with or without */ /* modification, are permitted provided that the following conditions */ /* are met: */ /* * Redistributions of source code must retain the above copyright */ /* notice, this list of conditions and the following disclaimer. */ /* * Redistributions in binary form must reproduce the above copyright */ /* notice, this list of conditions and the following disclaimer in */ /* the documentation and/or other materials provided with the */ /* distribution. */ /* * Neither the name of the phonopy project nor the names of its */ /* contributors may be used to endorse or promote products derived */ /* from this software without specific prior written permission. */ /* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */ /* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */ /* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */ /* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */ /* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */ /* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */ /* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */ /* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */ /* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */ /* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. 
*/

#include "pp_collision.h"

#include <stdio.h>
#include <stdlib.h>

#include "imag_self_energy_with_g.h"
#include "interaction.h"
#include "lapack_wrapper.h"
#include "phonoc_array.h"
#include "phonoc_utils.h"
#include "triplet.h"
#include "triplet_iw.h"

/* Compute the ph-ph interaction and the imaginary self-energy contribution
 * of a single q-point triplet, writing num_temps * num_band0 values into
 * ise.  Defined below. */
static void get_collision(
    double *ise, const long num_band0, const long num_band,
    const long num_temps, const double *temperatures, const double *g,
    const char *g_zero, const double *frequencies,
    const lapack_complex_double *eigenvectors, const long triplet[3],
    const long triplet_weight, const ConstBZGrid *bzgrid, const double *fc3,
    const long is_compact_fc3, const double (*svecs)[3],
    const long multi_dims[2], const long (*multiplicity)[2],
    const double *masses, const long *p2s_map, const long *s2p_map,
    const long *band_indices, const long symmetrize_fc3_q,
    const double cutoff_frequency, const long openmp_per_triplets);
/* Sum the per-triplet results in ise over all triplets into
 * imag_self_energy (optionally split into Normal/Umklapp).  Defined
 * below. */
static void finalize_ise(double *imag_self_energy, const double *ise,
                         const long (*bz_grid_address)[3],
                         const long (*triplets)[3], const long num_triplets,
                         const long num_temps, const long num_band0,
                         const long is_NU);

/* Imaginary self-energy summed over triplets, with the energy-conservation
 * integration weights g obtained from the tetrahedron method
 * (relative_grid_address carries the 24 tetrahedra per grid point).
 *
 * Output: imag_self_energy, length num_temps * num_band0 (doubled when
 * is_NU != 0 — Normal contributions first, then Umklapp; see
 * finalize_ise).
 *
 * Parallelism: when there are more triplets than bands, the triplet loop
 * below is parallelized and inner routines run serially
 * (1 - openmp_per_triplets is passed down); otherwise the inner routines
 * are allowed to use threads instead. */
void ppc_get_pp_collision(
    double *imag_self_energy,
    const long relative_grid_address[24][4][3], /* thm */
    const double *frequencies, const lapack_complex_double *eigenvectors,
    const long (*triplets)[3], const long num_triplets,
    const long *triplet_weights, const ConstBZGrid *bzgrid,
    const double *fc3, const long is_compact_fc3, const double (*svecs)[3],
    const long multi_dims[2], const long (*multiplicity)[2],
    const double *masses, const long *p2s_map, const long *s2p_map,
    const Larray *band_indices, const Darray *temperatures, const long is_NU,
    const long symmetrize_fc3_q, const double cutoff_frequency) {
    long i;
    long num_band, num_band0, num_band_prod, num_temps;
    long openmp_per_triplets;
    double *ise, *freqs_at_gp, *g;
    char *g_zero;
    long tp_relative_grid_address[2][24][4][3];

    ise = NULL;
    freqs_at_gp = NULL;
    g = NULL;
    g_zero = NULL;

    num_band0 = band_indices->dims[0];
    num_band = multi_dims[1] * 3; /* 3 phonon branches per atom */
    num_band_prod = num_band0 * num_band * num_band;
    num_temps = temperatures->dims[0];
    /* NOTE(review): malloc results are not checked here or below; an
       allocation failure would dereference NULL — confirm callers keep
       sizes small enough or add checks upstream. */
    ise = (double *)malloc(sizeof(double) * num_triplets * num_temps *
                           num_band0);
    freqs_at_gp = (double *)malloc(sizeof(double) * num_band0);
    /* Frequencies of the selected bands at the first grid point of the
       triplets (all triplets share grid point triplets[0][0]). */
    for (i = 0; i < num_band0; i++) {
        freqs_at_gp[i] =
            frequencies[triplets[0][0] * num_band + band_indices->data[i]];
    }
    /* Choose the parallelization level: over triplets (outer) when
       triplets outnumber bands, otherwise leave threading to the inner
       routines. */
    if (num_triplets > num_band) {
        openmp_per_triplets = 1;
    } else {
        openmp_per_triplets = 0;
    }
    tpl_set_relative_grid_address(tp_relative_grid_address,
                                  relative_grid_address, 2);

#ifdef _OPENMP
#pragma omp parallel for schedule(guided) private( \
    g, g_zero) if (openmp_per_triplets)
#endif
    for (i = 0; i < num_triplets; i++) {
        /* Per-thread scratch: integration weights g (two sets of
           num_band_prod) and the g == 0 mask. */
        g = (double *)malloc(sizeof(double) * 2 * num_band_prod);
        g_zero = (char *)malloc(sizeof(char) * num_band_prod);
        tpi_get_integration_weight(g, g_zero,
                                   freqs_at_gp, /* used as f0 */
                                   num_band0, tp_relative_grid_address,
                                   triplets[i], 1, bzgrid,
                                   frequencies, /* used as f1 */
                                   num_band,
                                   frequencies, /* used as f2 */
                                   num_band, 2, 1 - openmp_per_triplets);
        get_collision(ise + i * num_temps * num_band0, num_band0, num_band,
                      num_temps, temperatures->data, g, g_zero, frequencies,
                      eigenvectors, triplets[i], triplet_weights[i], bzgrid,
                      fc3, is_compact_fc3, svecs, multi_dims, multiplicity,
                      masses, p2s_map, s2p_map, band_indices->data,
                      symmetrize_fc3_q, cutoff_frequency,
                      openmp_per_triplets);
        free(g_zero);
        g_zero = NULL;
        free(g);
        g = NULL;
    }

    finalize_ise(imag_self_energy, ise, bzgrid->addresses, triplets,
                 num_triplets, num_temps, num_band0, is_NU);

    free(freqs_at_gp);
    freqs_at_gp = NULL;
    free(ise);
    ise = NULL;
}

/* Same as ppc_get_pp_collision, but the energy-conservation delta is
 * smeared with a Gaussian of width sigma instead of the tetrahedron
 * method; sigma_cutoff (in units of sigma) truncates the Gaussian tails
 * via the g_zero mask. */
void ppc_get_pp_collision_with_sigma(
    double *imag_self_energy, const double sigma, const double sigma_cutoff,
    const double *frequencies, const lapack_complex_double *eigenvectors,
    const long (*triplets)[3], const long num_triplets,
    const long *triplet_weights, const ConstBZGrid *bzgrid,
    const double *fc3, const long is_compact_fc3, const double (*svecs)[3],
    const long multi_dims[2], const long (*multiplicity)[2],
    const double *masses, const long *p2s_map, const long *s2p_map,
    const Larray *band_indices, const Darray *temperatures, const long is_NU,
    const long symmetrize_fc3_q, const double cutoff_frequency) {
    long i;
    long num_band, num_band0, num_band_prod, num_temps;
    long openmp_per_triplets, const_adrs_shift;
    double cutoff;
    double *ise, *freqs_at_gp, *g;
    char *g_zero;

    ise = NULL;
    freqs_at_gp = NULL;
    g = NULL;
    g_zero = NULL;

    num_band0 = band_indices->dims[0];
    num_band = multi_dims[1] * 3;
    num_band_prod = num_band0 * num_band * num_band;
    num_temps = temperatures->dims[0];
    const_adrs_shift = num_band_prod;

    /* NOTE(review): malloc results unchecked (same as the thm variant). */
    ise = (double *)malloc(sizeof(double) * num_triplets * num_temps *
                           num_band0);
    freqs_at_gp = (double *)malloc(sizeof(double) * num_band0);
    for (i = 0; i < num_band0; i++) {
        freqs_at_gp[i] =
            frequencies[triplets[0][0] * num_band + band_indices->data[i]];
    }

    if (num_triplets > num_band) {
        openmp_per_triplets = 1;
    } else {
        openmp_per_triplets = 0;
    }

    /* Absolute truncation distance for the smearing function. */
    cutoff = sigma * sigma_cutoff;

#ifdef _OPENMP
#pragma omp parallel for schedule(guided) private( \
    g, g_zero) if (openmp_per_triplets)
#endif
    for (i = 0; i < num_triplets; i++) {
        g = (double *)malloc(sizeof(double) * 2 * num_band_prod);
        g_zero = (char *)malloc(sizeof(char) * num_band_prod);
        tpi_get_integration_weight_with_sigma(
            g, g_zero, sigma, cutoff, freqs_at_gp, num_band0, triplets[i],
            const_adrs_shift, frequencies, num_band, 2, 0);
        get_collision(ise + i * num_temps * num_band0, num_band0, num_band,
                      num_temps, temperatures->data, g, g_zero, frequencies,
                      eigenvectors, triplets[i], triplet_weights[i], bzgrid,
                      fc3, is_compact_fc3, svecs, multi_dims, multiplicity,
                      masses, p2s_map, s2p_map, band_indices->data,
                      symmetrize_fc3_q, cutoff_frequency,
                      openmp_per_triplets);
        free(g_zero);
        g_zero = NULL;
        free(g);
        g = NULL;
    }

    finalize_ise(imag_self_energy, ise, bzgrid->addresses, triplets,
                 num_triplets, num_temps, num_band0, is_NU);

    free(freqs_at_gp);
    freqs_at_gp = NULL;
    free(ise);
    ise = NULL;
}

/* Worker for one triplet: evaluate |Phi3|^2 only where the integration
 * weight is non-zero (g_zero mask -> g_pos index list), then accumulate
 * the imaginary self-energy over the given temperatures. */
static void get_collision(
    double *ise, const long num_band0, const long num_band,
    const long num_temps, const double *temperatures, const double *g,
    const char *g_zero, const double *frequencies,
    const lapack_complex_double *eigenvectors, const long triplet[3],
    const long triplet_weight, const ConstBZGrid *bzgrid, const double *fc3,
    const long is_compact_fc3, const double (*svecs)[3],
    const long multi_dims[2], const long (*multiplicity)[2],
    const double *masses, const long *p2s_map, const long *s2p_map,
    const long *band_indices, const long symmetrize_fc3_q,
    const double cutoff_frequency, const long openmp_per_triplets) {
    long i;
    long num_band_prod, num_g_pos;
    double *fc3_normal_squared;
    long(*g_pos)[4];

    fc3_normal_squared = NULL;
    g_pos = NULL;

    num_band_prod = num_band0 * num_band * num_band;
    fc3_normal_squared = (double *)malloc(sizeof(double) * num_band_prod);
    g_pos = (long(*)[4])malloc(sizeof(long[4]) * num_band_prod);

    /* Zero-fill so band combinations skipped by g_zero contribute
       nothing. */
    for (i = 0; i < num_band_prod; i++) {
        fc3_normal_squared[i] = 0;
    }

    /* Collect the (j, k, l, linear-index) tuples where g is non-zero. */
    num_g_pos = ise_set_g_pos(g_pos, num_band0, num_band, g_zero);

    itr_get_interaction_at_triplet(
        fc3_normal_squared, num_band0, num_band, g_pos, num_g_pos,
        frequencies, eigenvectors, triplet, bzgrid, fc3, is_compact_fc3,
        svecs, multi_dims, multiplicity, masses, p2s_map, s2p_map,
        band_indices, symmetrize_fc3_q, cutoff_frequency, 0, 0,
        1 - openmp_per_triplets);

    /* g and g + num_band_prod are the two weight sets filled above. */
    ise_imag_self_energy_at_triplet(
        ise, num_band0, num_band, fc3_normal_squared, frequencies, triplet,
        triplet_weight, g, g + num_band_prod, g_pos, num_g_pos, temperatures,
        num_temps, cutoff_frequency, 1 - openmp_per_triplets, 0);

    free(fc3_normal_squared);
    fc3_normal_squared = NULL;
    free(g_pos);
    g_pos = NULL;
}

/* Reduce per-triplet results into the caller's output buffer.
 *
 * is_NU == 0: imag_self_energy[num_temps * num_band0] receives the plain
 * sum over triplets.
 * is_NU != 0: the buffer is twice as long; triplets classified as Normal
 * processes (tpl_is_N) accumulate into the first half, Umklapp triplets
 * into the second half. */
static void finalize_ise(double *imag_self_energy, const double *ise,
                         const long (*bz_grid_addresses)[3],
                         const long (*triplets)[3], const long num_triplets,
                         const long num_temps, const long num_band0,
                         const long is_NU) {
    long i, j, k;
    long is_N;

    if (is_NU) {
        for (i = 0; i < 2 * num_temps * num_band0; i++) {
            imag_self_energy[i] = 0;
        }
        for (i = 0; i < num_triplets; i++) {
            is_N = tpl_is_N(triplets[i], bz_grid_addresses);
            for (j = 0; j < num_temps; j++) {
                for (k = 0; k < num_band0; k++) {
                    if (is_N) {
                        imag_self_energy[j * num_band0 + k] +=
                            ise[i * num_temps * num_band0 + j * num_band0 +
                                k];
                    } else {
                        imag_self_energy[num_temps * num_band0 +
                                         j * num_band0 + k] +=
                            ise[i * num_temps * num_band0 + j * num_band0 +
                                k];
                    }
                }
            }
        }
    } else {
        for (i = 0; i < num_temps * num_band0; i++) {
            imag_self_energy[i] = 0;
        }
        for (i = 0; i < num_triplets; i++) {
            for (j = 0; j < num_temps; j++) {
                for (k = 0; k < num_band0; k++) {
                    imag_self_energy[j * num_band0 + k] +=
                        ise[i * num_temps * num_band0 + j * num_band0 + k];
                }
            }
        }
    }
}
profiler_interface.h
/*
# =============================================================================
# Copyright (c) 2016 - 2021 Blue Brain Project/EPFL
#
# See top-level LICENSE file for details.
# =============================================================================
*/

#pragma once

#include <initializer_list>
#include <type_traits>

#if defined(CORENEURON_CALIPER)
#include <caliper/cali.h>
#endif

#if defined(CUDA_PROFILING)
#include <cuda_profiler_api.h>
#endif

#if defined(CRAYPAT)
#include <pat_api.h>
#endif

#if defined(TAU)
#include <TAU.h>
#endif

#if defined(LIKWID_PERFMON)
#include <likwid.h>
#endif

namespace coreneuron {

namespace detail {

/*! \class Instrumentor
 * \brief Instrumentation infrastructure for benchmarking and profiling.
 *
 * The Instrumentor class exposes static methods that can be used to
 * toggle with fine-grained resolution the profiling of specific
 * areas within the code.
 *
 * Each method fans the call out to every backend in the TProfilerImpl
 * parameter pack using the initializer_list pack-expansion idiom
 * (a pre-C++17 substitute for a fold expression); the `(expr, 0)`
 * comma operator gives every expanded element type int.  The clang
 * pragmas silence the resulting unused-value warnings.
 */
template <class... TProfilerImpl>
struct Instrumentor {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunused-value"
    /*! \fn phase_begin
     * \brief Activate the collection of profiling data within a code region.
     *
     * This function semantically defines the beginning of a region
     * of code that the user wishes to profile.
     * Loops through all enabled profilers and calls the relevant
     * `phase_begin` function.
     * This function should have a non-empty implementation only for
     * profilers that allow multiple code regions with different names
     * to be profiled concurrently.
     *
     * @param name the (unique) identifier of the code region to be profiled
     */
    inline static void phase_begin(const char* name) {
        std::initializer_list<int>{(TProfilerImpl::phase_begin(name), 0)...};
    }

    /*! \fn phase_end
     * \brief Deactivate the collection of profiling data within a code region.
     *
     * This function semantically defines the end of a region
     * of code that the user wishes to profile.
     * Loops through all enabled profilers and calls the relevant
     * `phase_end` function.
     * This function should have a non-empty implementation only for
     * profilers that allow multiple code regions with different names
     * to be profiled concurrently.
     *
     * @param name the (unique) identifier of the code region to be profiled
     */
    inline static void phase_end(const char* name) {
        std::initializer_list<int>{(TProfilerImpl::phase_end(name), 0)...};
    }

    /*! \fn start_profile
     * \brief Globally activate the collection of profiling data.
     *
     * Activate the collection of profiler data without defining
     * a region of interest with a given name, as opposed to `phase_begin`.
     * Loops through all enabled profilers and calls the relevant
     * `start_profile` function.
     * This function should have a non-empty implementation only for
     * profilers that expose simply a global begin/end interface, without
     * named regions.
     */
    inline static void start_profile() {
        std::initializer_list<int>{(TProfilerImpl::start_profile(), 0)...};
    }

    /*! \fn stop_profile
     * \brief Globally deactivate the collection of profiling data.
     *
     * Deactivate the collection of profiler data without defining
     * a region of interest with a given name, as opposed to `phase_end`.
     * Loops through all enabled profilers and calls the relevant
     * `stop_profile` function.
     * This function should have a non-empty implementation only for
     * profilers that expose simply a global begin/end interface, without
     * named regions.
     */
    inline static void stop_profile() {
        std::initializer_list<int>{(TProfilerImpl::stop_profile(), 0)...};
    }

    /*! \fn init_profile
     * \brief Initialize the profiler.
     *
     * Initialize a profiler's internal structure, without activating yet
     * any data collection, similar in concept to MPI_Init.
     * Loops through all enabled profilers and calls the relevant
     * `init_profile` function.
     * This function should have a non-empty implementation only for
     * profilers that require special initialization, typically before
     * any memory allocation is done.
     */
    inline static void init_profile() {
        std::initializer_list<int>{(TProfilerImpl::init_profile(), 0)...};
    }

    /*! \fn finalize_profile
     * \brief Finalize the profiler.
     *
     * Finalize a profiler's internal structure, without activating yet
     * any data collection, similar in concept to MPI_Finalize.
     * Loops through all enabled profilers and calls the relevant
     * `finalize_profile` function.
     * This function should have a non-empty implementation only for
     * profilers that require special finalization.
     */
    inline static void finalize_profile() {
        std::initializer_list<int>{(TProfilerImpl::finalize_profile(), 0)...};
    }
#pragma clang diagnostic pop
};

/* Backend adapter for Caliper: named regions only. */
#if defined(CORENEURON_CALIPER)
struct Caliper {
    inline static void phase_begin(const char* name) {
        CALI_MARK_BEGIN(name);
    };

    inline static void phase_end(const char* name) {
        CALI_MARK_END(name);
    };

    inline static void start_profile(){};

    inline static void stop_profile(){};

    inline static void init_profile(){};

    inline static void finalize_profile(){};
};
#endif

/* Backend adapter for the CUDA profiler: global start/stop only. */
#if defined(CUDA_PROFILING)
struct CudaProfiling {
    inline static void phase_begin(const char* name){};

    inline static void phase_end(const char* name){};

    inline static void start_profile() {
        cudaProfilerStart();
    };

    inline static void stop_profile() {
        cudaProfilerStop();
    };

    inline static void init_profile(){};

    inline static void finalize_profile(){};
};
#endif

/* Backend adapter for CrayPat: global record on/off only. */
#if defined(CRAYPAT)
struct CrayPat {
    inline static void phase_begin(const char* name){};

    inline static void phase_end(const char* name){};

    inline static void start_profile() {
        PAT_record(PAT_STATE_ON);
    };

    inline static void stop_profile() {
        PAT_record(PAT_STATE_OFF);
    };

    inline static void init_profile(){};

    inline static void finalize_profile(){};
};
#endif

/* Backend adapter for TAU: global instrumentation enable/disable only. */
#if defined(TAU)
struct Tau {
    inline static void phase_begin(const char* name){};

    inline static void phase_end(const char* name){};

    inline static void start_profile() {
        TAU_ENABLE_INSTRUMENTATION();
    };

    inline static void stop_profile() {
        TAU_DISABLE_INSTRUMENTATION();
    };

    inline static void init_profile(){};

    inline static void finalize_profile(){};
};
#endif

/* Backend adapter for LIKWID: named marker regions; markers must be
 * initialized once per thread, hence the omp parallel in init_profile. */
#if defined(LIKWID_PERFMON)
struct Likwid {
    inline static void phase_begin(const char* name) {
        LIKWID_MARKER_START(name);
    };

    inline static void phase_end(const char* name) {
        LIKWID_MARKER_STOP(name);
    };

    inline static void start_profile(){};

    inline static void stop_profile(){};

    inline static void init_profile() {
        LIKWID_MARKER_INIT;

#pragma omp parallel
        { LIKWID_MARKER_THREADINIT; }
    };

    inline static void finalize_profile() {
        LIKWID_MARKER_CLOSE;
    };
};
#endif

/* No-op backend: keeps the TProfilerImpl pack non-empty so the template
 * always instantiates, even with no real profiler enabled. */
struct NullInstrumentor {
    inline static void phase_begin(const char* name){};
    inline static void phase_end(const char* name){};
    inline static void start_profile(){};
    inline static void stop_profile(){};
    inline static void init_profile(){};
    inline static void finalize_profile(){};
};

/* The concrete instrumentor: the pack is assembled from whichever
 * backends were enabled at configure time, always ending with the
 * no-op backend. */
using InstrumentorImpl = detail::Instrumentor<
#if defined CORENEURON_CALIPER
    detail::Caliper,
#endif
#if defined(CUDA_PROFILING)
    detail::CudaProfiling,
#endif
#if defined(CRAYPAT)
    detail::CrayPat,
#endif
#if defined(TAU)
    detail::Tau,
#endif
#if defined(LIKWID_PERFMON)
    detail::Likwid,
#endif
    detail::NullInstrumentor>;
}  // namespace detail

namespace Instrumentor {
/* RAII scope guard: begins the named phase on construction and ends it
 * on destruction, so a profiled region cannot be left unclosed. */
struct phase {
    const char* phase_name;
    phase(const char* name)
        : phase_name(name) {
        detail::InstrumentorImpl::phase_begin(phase_name);
    }
    ~phase() {
        detail::InstrumentorImpl::phase_end(phase_name);
    }
};

inline static void start_profile() {
    detail::InstrumentorImpl::start_profile();
}

inline static void stop_profile() {
    detail::InstrumentorImpl::stop_profile();
}

inline static void phase_begin(const char* name) {
    detail::InstrumentorImpl::phase_begin(name);
}

inline static void phase_end(const char* name) {
    detail::InstrumentorImpl::phase_end(name);
}

inline static void init_profile() {
    detail::InstrumentorImpl::init_profile();
}

inline static void finalize_profile() {
    detail::InstrumentorImpl::finalize_profile();
}
}  // namespace Instrumentor
}  // namespace coreneuron
window.c
/********************************************************************[libaroma]* * Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *______________________________________________________________________________ * * Filename : window.c * Description : window * * + This is part of libaroma, an embedded ui toolkit. * + 06/04/15 - Author(s): Ahmad Amarullah * */ #ifndef __libaroma_window_c__ #define __libaroma_window_c__ #include <aroma_internal.h> #include "ui_internal.h" #ifdef __cplusplus extern "C" { #endif /* check wm macro */ #define __CHECK_WM(RETVAL) \ if (libaroma_wm()==NULL){ \ ALOGW("window manager uninitialized"); \ return RETVAL; \ } /* * Variable : _libaroma_window_measurement_dp * Type : byte * Descriptions: default measurement */ static byte _libaroma_window_measurement_dp=1; /* * Function : libaroma_window_usedp * Return Value: byte * Descriptions: use dp for measurement */ byte libaroma_window_usedp(byte isdp){ if (isdp==1){ _libaroma_window_measurement_dp=1; } else if (!isdp){ _libaroma_window_measurement_dp=0; } return _libaroma_window_measurement_dp; } /* End of libaroma_window_usedp */ /* * Function : libaroma_window_measure_point * Return Value: int * Descriptions: mesure point */ int libaroma_window_measure_point(int x){ if (_libaroma_window_measurement_dp){ return libaroma_dp(x); } return x; } /* End of libaroma_window_measure_point */ /* * Function : _libaroma_window_measure_save * Return Value: 
void * Descriptions: save measurement value */ void _libaroma_window_measure_save(LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl){ if (win!=NULL){ if (_libaroma_window_measurement_dp){ win->left = libaroma_px(win->x); win->top = libaroma_px(win->y); win->width= libaroma_px(win->w); win->height= libaroma_px(win->h); } else{ win->left = win->x; win->top = win->y; win->width= win->w; win->height= win->h; } } if (ctl!=NULL){ if (_libaroma_window_measurement_dp){ ctl->left = libaroma_px(ctl->x); ctl->top = libaroma_px(ctl->y); ctl->width= libaroma_px(ctl->w); ctl->height= libaroma_px(ctl->h); } else{ ctl->left = ctl->x; ctl->top = ctl->y; ctl->width= ctl->w; ctl->height= ctl->h; } } } /* End of _libaroma_window_measure_save */ /* * Function : libaroma_window_measure_calculate * Return Value: int * Descriptions: calculate measurement */ int libaroma_window_measure_calculate( int cv, int pos, int max, int is_size, int x){ if (is_size){ if (pos<=0){ switch (pos){ case LIBAROMA_POS_HALF: return (max / 2)-x; break; case LIBAROMA_POS_1P3: return (max / 3)-x; break; case LIBAROMA_POS_2P3: return (max * 2 / 3)-x; break; case LIBAROMA_POS_1P4: return (max / 4)-x; break; case LIBAROMA_POS_3P4: return (max * 3 / 4)-x; break; case LIBAROMA_SIZE_FULL: return max; break; case LIBAROMA_SIZE_HALF: return max / 2; break; case LIBAROMA_SIZE_THIRD: return max / 3; break; case LIBAROMA_SIZE_QUARTER: return max / 4; break; default: return abs(pos); } } } else{ if (pos<0){ switch (pos){ case LIBAROMA_POS_HALF: return max / 2; break; case LIBAROMA_POS_1P3: return max / 3; break; case LIBAROMA_POS_2P3: return max * 2 / 3; break; case LIBAROMA_POS_1P4: return max / 4; break; case LIBAROMA_POS_3P4: return max * 3 / 4; break; default: return abs(pos); } } } return cv; } /* End of libaroma_window_measure_calculate */ /* * Function : libaroma_window_measure_size * Return Value: byte * Descriptions: measure window size */ byte libaroma_window_measure_size(LIBAROMA_WINDOWP win){ if (win){ if 
(win->parent!=NULL){ ALOGW("window_resize cannot be used for child window"); return 0; } if (_libaroma_window_measurement_dp){ win->x = libaroma_dp(win->rx); win->y = libaroma_dp(win->ry); win->w = libaroma_dp(win->rw); win->h = libaroma_dp(win->rh); } else{ win->x = win->rx; win->y = win->ry; win->w = win->rw; win->h = win->rh; } win->ax=win->x; win->ay=win->y; win->x=libaroma_window_measure_calculate( win->x, win->rx, libaroma_wm()->w, 0, 0 ); win->y=libaroma_window_measure_calculate( win->y, win->ry, libaroma_wm()->h, 0, 0 ); win->w=libaroma_window_measure_calculate( win->w, win->rw, libaroma_wm()->w, 1, win->x ); win->h=libaroma_window_measure_calculate( win->h, win->rh, libaroma_wm()->h, 1, win->y ); if (win->w+win->x>libaroma_wm()->w){ win->w = libaroma_wm()->w-win->x; } if (win->h+win->y>libaroma_wm()->h){ win->h = libaroma_wm()->h-win->y; } _libaroma_window_measure_save(win,NULL); LIBAROMA_MSG _msg; libaroma_window_process_event(win,libaroma_wm_compose( &_msg, LIBAROMA_MSG_WIN_MEASURED, NULL, 0, 0) ); return 1; } return 0; } /* End of libaroma_window_measure */ /* * Function : _libaroma_window_ui_thread * Return Value: byte * Descriptions: window ui thread */ byte _libaroma_window_ui_thread(LIBAROMA_WINDOWP win) { int i; byte need_sync = 0; if (win->active==1){ LIBAROMA_CONTROLP toast_ctl=NULL; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ LIBAROMA_CONTROLP c=win->childs[i]; if (c->handler->thread!=NULL){ if (!libaroma_ctl_is_toast(c)){ //if not a toast, draw now if (c->handler->thread(c)){ if (libaroma_control_draw(c,0)){ libaroma_wm_updatesync( c->x+win->x, c->y+win->y, c->w, c->h, 0 ); need_sync=1; } } } else toast_ctl=c; //else, save it for draw at last } } if (toast_ctl!=NULL){ if (libaroma_control_draw(toast_ctl, 0)){ libaroma_wm_updatesync( toast_ctl->x+win->x, toast_ctl->y+win->y, toast_ctl->w, toast_ctl->h, 0 ); libaroma_png_save(win->dc, "/tmp/dc.png"); if (!need_sync) need_sync=1; } } } return 
need_sync; } /* End of _libaroma_window_ui_thread */ /* * Function : libaroma_window * Return Value: LIBAROMA_WINDOWP * Descriptions: creates a new window */ LIBAROMA_WINDOWP libaroma_window( char * bg_theme_name, int x, int y, int w, int h ){ __CHECK_WM(NULL); LIBAROMA_WINDOWP win = (LIBAROMA_WINDOWP) calloc(sizeof(LIBAROMA_WINDOW),1); if (!win){ ALOGW("libaroma_window alloc window data failed"); return NULL; } if (bg_theme_name){ snprintf(win->theme_bg,256,"%s",bg_theme_name); } else{ win->theme_bg[0]=0; } win->rx = x; win->ry = y; win->rw = w; win->rh = h; win->onpool=1; win->prev_screen = libaroma_fb_snapshoot_canvas(); win->ui_thread = _libaroma_window_ui_thread; libaroma_window_measure_size(win); return win; } /* End of libaroma_window */ /* * Function : libaroma_window_free * Return Value: byte * Descriptions: free window */ byte libaroma_window_free( LIBAROMA_WINDOWP win ){ __CHECK_WM(0); if (win==NULL){ return 0; } /* inactivate it */ if (win->parent==NULL){ if (libaroma_wm_get_active_window()==win){ /* detach active window from window manager */ libaroma_wm_set_active_window(NULL); } LIBAROMA_MSG _msg; libaroma_window_process_event(win, libaroma_wm_compose(&_msg, LIBAROMA_MSG_WIN_INACTIVE, NULL, 0, 0)); } if (win->handler!=NULL){ if (win->handler->prefree!=NULL){ win->handler->prefree(win); } } /* delete childs */ int i; if (win->childn>0){ #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ libaroma_control_free(win->childs[i]); } free(win->childs); } if (win->bg){ libaroma_canvas_free(win->bg); win->bg=NULL; } if (win->dc){ libaroma_canvas_free(win->dc); win->dc=NULL; } if (win->handler!=NULL){ if (win->handler->postfree!=NULL){ win->handler->postfree(win); } } free(win); return 1; } /* End of libaroma_window_free */ /* * Function : _libaroma_window_updatebg * Return Value: byte * Descriptions: update window background */ byte _libaroma_window_updatebg(LIBAROMA_WINDOWP win){ if (win==NULL){ 
ALOGW("window_recalculate win is NULL"); return 0; } if (win->handler!=NULL){ if (win->handler->updatebg!=NULL){ if (win->handler->updatebg(win)){ if (win->onupdatebg){ win->onupdatebg(win,win->bg); } return 1; } return 0; } } if (win->parent!=NULL){ return 0; } int w = win->w; int h = win->h; /* draw background */ if (win->bg!=NULL){ if ((win->bg->w==w)&&(win->bg->h==h)){ /* not need recreate background */ return 1; } libaroma_canvas_free(win->bg); } win->bg = libaroma_canvas(w,h); /* default canvas color */ libaroma_canvas_setcolor( win->bg, libaroma_colorget(NULL,win)->window_bg, 0xff ); /* from theme canvas */ if (win->theme_bg[0]!=0){ libaroma_wm_draw_theme( win->bg, win->theme_bg, 0, 0, win->bg->w, win->bg->h, NULL ); } /* from updatebg callback */ if (win->onupdatebg!=NULL){ win->onupdatebg(win,win->bg); } return 1; } /* End of _libaroma_window_updatebg */ /* * Function : _libaroma_window_recalculate * Return Value: byte * Descriptions: recalculate client size */ byte _libaroma_window_recalculate(LIBAROMA_WINDOWP win){ if (win==NULL){ ALOGW("window_recalculate win is NULL"); return 0; } if (libaroma_window_isactive(win)){ _libaroma_window_updatebg(win); libaroma_window_invalidate(win, 1); } return 1; } /* End of _libaroma_window_recalculate */ /* * Function : _libaroma_window_ready * Return Value: byte * Descriptions: window is ready */ byte _libaroma_window_ready(LIBAROMA_WINDOWP win){ __CHECK_WM(0); if (win==NULL){ ALOGW("window_resize win is NULL"); return 0; } int x = win->x; int y = win->y; int w = win->w; int h = win->h; if (w==0){ w = libaroma_wm()->w; x = 0; } if (h==0){ h = libaroma_wm()->h; y = 0; } /* set position */ if (win->dc!=NULL){ libaroma_canvas_free(win->dc); win->dc=NULL; } win->dc= libaroma_wm_canvas(x, y, w, h); if (win->dc==NULL){ ALOGW("window_ready cannot allocate workspace drawing canvas"); return 0; }/* if (libaroma_window_isactive(win)){ libaroma_wm_clean_workspace(); }*/ win->x = x; win->y = y; win->w = win->dc->w; win->h = 
win->dc->h; _libaroma_window_measure_save(win,NULL); _libaroma_window_recalculate(win); return 1; } /* End of _libaroma_window_ready */ /* * Function : libaroma_window_resize * Return Value: byte * Descriptions: resize window */ byte libaroma_window_resize( LIBAROMA_WINDOWP win, int x, int y, int w, int h ){ if (!win){ return 0; } if (win->parent!=NULL){ ALOGW("window_resize cannot be used for child window"); return 0; } win->rx = x; win->ry = y; win->rw = w; win->rh = h; if (libaroma_window_measure_size(win)){ return _libaroma_window_ready(win); } return 0; } /* End of libaroma_window_resize */ /* * Function : libaroma_window_isactive * Return Value: byte * Descriptions: check if window is active */ byte libaroma_window_isactive(LIBAROMA_WINDOWP win){ if (win!=NULL){ LIBAROMA_WINDOWP w = win; while(w->parent){ w=w->parent; } return ((w==libaroma_wm_get_active_window())?1:0); } return 0; } /* End of libaroma_window_isactive */ /* * Function : libaroma_window_add * Return Value: byte * Descriptions: add control into window */ byte libaroma_window_add( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl ){ __CHECK_WM(0); if (win==NULL){ ALOGW("window_add win is NULL"); return 0; } if (ctl==NULL){ ALOGW("window_add ctl is NULL"); return 0; } if (ctl->window != NULL){ ALOGW("window_add ctl already have window"); return 0; } libaroma_window_measure(win, ctl); if (win->childn==0){ win->childs = (LIBAROMA_CONTROLP *) malloc(sizeof(LIBAROMA_CONTROLP)); if (!win->childs){ ALOGW("window_add malloc failed"); win->childs=NULL; return 0; } win->childs[0]=ctl; } else{ LIBAROMA_CONTROLP * newchilds = (LIBAROMA_CONTROLP *) realloc(win->childs, sizeof(LIBAROMA_CONTROLP)*(win->childn+1)); if (!newchilds){ ALOGW("window_add realloc failed"); return 0; } win->childs = newchilds; win->childs[win->childn] = ctl; } ctl->window = win; win->childn++; _libaroma_window_recalculate(win); return 1; } /* End of libaroma_window_add */ /* * Function : libaroma_window_del * Return Value: byte * 
Descriptions: delete control from window */ byte libaroma_window_del( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl ){ __CHECK_WM(0); if (ctl==NULL){ ALOGW("window_del ctl is null"); return 0; } if (win==NULL){ ALOGW("window_del win is null"); return 0; } if (win != ctl->window){ return 0; } if (win->childn<=0){ ALOGW("window_del window data corrupt doesn't have childs??"); return 0; } else if (win->childn==1){ if (win->childs[0]==ctl){ ctl->window = NULL; win->childn=0; free(win->childs); win->childs=NULL; _libaroma_window_recalculate(win); return 1; } else{ ALOGW("window_del ctl not found in window"); return 0; } } LIBAROMA_CONTROLP * newchilds = (LIBAROMA_CONTROLP *) malloc(sizeof(LIBAROMA_CONTROLP)*(win->childn-1)); if (!newchilds){ ALOGW("window_del malloc temp childs failed"); return 0; } int j = 0; int i; for (i=0;i<win->childn;i++){ if (win->childs[i]!=ctl){ newchilds[j++]=win->childs[i]; if (j==win->childn-2){ /* current ctl not found */ free(newchilds); ALOGW("window_del ctl not found in window"); return 0; } } } free(win->childs); win->childs=newchilds; win->childn--; _libaroma_window_recalculate(win); return 1; } /* End of libaroma_window_del */ /* * Function : libaroma_window_measure * Return Value: byte * Descriptions: measure control size */ byte libaroma_window_measure(LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl){ if (win&&ctl){ if (_libaroma_window_measurement_dp){ ctl->x = libaroma_dp(ctl->rx); ctl->y = libaroma_dp(ctl->ry); ctl->w = libaroma_dp(ctl->rw); ctl->h = libaroma_dp(ctl->rh); } else{ ctl->x = ctl->rx; ctl->y = ctl->ry; ctl->w = ctl->rw; ctl->h = ctl->rh; } ctl->x=libaroma_window_measure_calculate( ctl->x, ctl->rx, win->w, 0, 0 ); ctl->y=libaroma_window_measure_calculate( ctl->y, ctl->ry, win->h, 0, 0 ); ctl->w=libaroma_window_measure_calculate( ctl->w,ctl->rw, win->w, 1, ctl->x ); ctl->h=libaroma_window_measure_calculate( ctl->h,ctl->rh, win->h, 1, ctl->y ); if (ctl->w+ctl->x>win->w){ ctl->w = win->w-ctl->x; } if (ctl->h+ctl->y>win->h){ 
ctl->h = win->h-ctl->y;
    }
    /* enforce the control's minimum size */
    if (ctl->w<ctl->minw){
      ctl->w=ctl->minw;
    }
    if (ctl->h<ctl->minh){
      ctl->h=ctl->minh;
    }
    _libaroma_window_measure_save(NULL,ctl);
    /* notify the control that it has been (re)measured */
    if (ctl->handler->message){
      LIBAROMA_MSG _msg;
      ctl->handler->message(ctl, libaroma_wm_compose(
        &_msg, LIBAROMA_MSG_WIN_MEASURED, NULL, 0, 0)
      );
      return 1;
    }
  }
  return 0;
} /* End of libaroma_window_measure */

/*
 * Function : libaroma_window_attach
 * Return Value: LIBAROMA_CONTROLP
 * Descriptions: attach control into window
 */
/* Convenience wrapper around libaroma_window_add: on failure the control
 * is freed and NULL is returned; with win==NULL the control is returned
 * untouched. */
LIBAROMA_CONTROLP libaroma_window_attach(
  LIBAROMA_WINDOWP win,
  LIBAROMA_CONTROLP ctl){
  /* attach into window */
  if (win){
    if (libaroma_window_add(win,ctl)){
      return ctl;
    }
    ALOGW("window_attach cannot attach into window");
    libaroma_control_free(ctl);
    return NULL;
  }
  return ctl;
} /* End of libaroma_window_attach */

/*
 * Function : libaroma_window_getid
 * Return Value: LIBAROMA_CONTROLP
 * Descriptions: get control by id
 */
LIBAROMA_CONTROLP libaroma_window_getid(
  LIBAROMA_WINDOWP win, word id){
  __CHECK_WM(NULL);
  if (win==NULL){
    ALOGW("window_control_id win is null");
    return NULL;
  }
  /* linear scan over the child list */
  int i;
  for (i=0;i<win->childn;i++){
    if (win->childs[i]->id==id){
      return win->childs[i];
    }
  }
  return NULL; /* not found */
} /* End of libaroma_window_getid */

/*
 * Function : libaroma_window_setfocus
 * Return Value: LIBAROMA_CONTROLP
 * Descriptions: set control focus
 */
/* With ctl!=NULL: move focus to ctl (unfocusing the previous control).
 * With ctl==NULL: return the currently focused control, or focus the
 * first focusable child. */
LIBAROMA_CONTROLP libaroma_window_setfocus(
  LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl){
  if (win==NULL){
    ALOGW("window_setfocus window is null");
    return NULL;
  }
  if (ctl!=NULL){
    /* set */
    if (win!=ctl->window){
      ALOGW("window_setfocus control is not window child");
      return NULL;
    }
    /* only controls with a focus handler can take focus */
    if (ctl->handler->focus!=NULL){
      if (win->focused==ctl){
        return ctl;
      }
      if (ctl->handler->focus(ctl,1)){
        if (win->focused){
          win->focused->handler->focus(win->focused,0);
        }
        win->focused=ctl;
        return ctl;
      }
    }
    return NULL;
  }
  else{
    /* find focus */
    if (win->focused){
      return win->focused;
    }
    int i;
    for (i=0;i<win->childn;i++){
      if (win->childs[i]->handler->focus!=NULL){
        return
libaroma_window_setfocus(win,win->childs[i]); } } } return NULL; } /* End of libaroma_window_setfocus */ /* * Function : libaroma_window_sync * Return Value: byte * Descriptions: sync window canvas */ byte libaroma_window_sync(LIBAROMA_WINDOWP win, int x, int y, int w, int h){ __CHECK_WM(0); if (win==NULL){ ALOGW("libaroma_window_sync win is null"); return 0; } if (win->handler!=NULL){ if (win->handler->sync!=NULL){ return win->handler->sync(win,x,y,w,h); } } if (win->parent!=NULL){ return 0; } if (!win->lock_sync){ if (!libaroma_window_isactive(win)){ ALOGW("libaroma_window_sync win is not active window"); return 0; } if (win->dc==NULL){ ALOGW("window_invalidate dc is null"); return 0; } /* sync workspace */ libaroma_wm_sync(win->x+x,win->y+y,w,h); } return 1; } /* End of libaroma_window_sync */ /* * Function : libaroma_window_invalidate * Return Value: byte * Descriptions: invalidate window drawing */ byte libaroma_window_invalidate(LIBAROMA_WINDOWP win, byte sync){ __CHECK_WM(0); if (win==NULL){ ALOGW("window_invalidate win is null"); return 0; } if (win->handler!=NULL){ if (win->handler->invalidate!=NULL){ return win->handler->invalidate(win,sync); } } if (win->parent!=NULL){ return 0; } if (!libaroma_window_isactive(win)){ ALOGW("window_invalidate win is not active window"); return 0; } if (win->dc==NULL){ ALOGW("window_invalidate dc is null"); return 0; } if ((!win->lock_sync)||(sync==10)){ /* draw bg */ libaroma_draw( win->dc, win->bg, 0, 0, 1); /* draw childs */ int i; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ /* draw no sync */ libaroma_control_draw(win->childs[i], 0); } /* sync */ if (sync){ libaroma_window_sync(win, 0, 0, win->w, win->h); } } return 1; } /* End of libaroma_window_invalidate */ /* * Function : libaroma_window_hideshow_animated * Return Value: byte * Descriptions: hide/show window - animated */ byte libaroma_window_hideshow_animated(LIBAROMA_WINDOWP win, byte anim, int duration, byte close){ 
if ((!anim)||(duration<50)){
    /* no animation requested (or too short to bother): just switch the
     * active window immediately */
    if (close) {
      byte ret=libaroma_wm_set_active_window(NULL);
      libaroma_window_free(win);
      return ret;
    }
    else return libaroma_wm_set_active_window(win);
  }
  /* lock sync */
  win->lock_sync = 1;
  byte is_active;
  if (close) is_active=1;
  else is_active=libaroma_wm_set_active_window(win);
  if (is_active){
    if (!close) win->active=2;
    /* snapshot of the previous screen, used as animation "from" frame */
    if (win->prev_screen==NULL) win->prev_screen=libaroma_canvas(win->w, win->h);
    //TODO: MOVE THIS TO WM CODE
    //if (!win->prev_screen->alpha && anim==LIBAROMA_WINDOW_SHOW_ANIMATION_CIRCLE)
    //libaroma_canvas_fillalpha(win->prev_screen, 0, 0, win->w, win->h, 0xFF);
    //init alpha for prev screen if needed
    /* draw window into temp canvas */
    LIBAROMA_CANVASP wmc = win->dc; //window had a canvas area of wm, let's grab it
    LIBAROMA_CANVASP tdc = libaroma_canvas(wmc->w,wmc->h);
    if (close) libaroma_draw(tdc,wmc,0,0,0);
    win->dc=tdc; /* switch dc to temporary */
    //if closing, deactivate window (otherwise ripple animations are played
    //while animate-closing)
    if (close) libaroma_wm_set_active_window(NULL);
    else libaroma_window_invalidate(win, 10); //otherwise draw real window image at temp dc
    /* animation loop: state goes 0..1 over `duration` milliseconds */
    long start = libaroma_tick();
    int delta = 0;
    while ((delta=libaroma_tick()-start)<duration){
      float state = (((float) delta)/((float) duration));
      switch (anim){
        case LIBAROMA_WINDOW_SHOW_ANIMATION_PAGE_TOP:
          {
            if (state>=1.0){
              break;
            }
            float swift_out_state = close?
              libaroma_cubic_bezier_easeout(state):
              libaroma_cubic_bezier_easein(state);
            int y;
            if (close) y = (swift_out_state * win->h);
            else y = win->h - (swift_out_state * win->h);
            int h = win->h - y;
            if (h>0){
              /* previous screen above, incoming window below */
              libaroma_draw_ex(
                wmc, win->prev_screen, 0, 0, 0, 0, win->w, win->h-h, 0, 0xFF
              );
              libaroma_draw_ex(
                wmc, win->dc, 0, y, 0, 0, win->w, h, 0, 0xff
              );
              libaroma_wm_sync(win->x,win->y,win->w, win->h);
            }
          }
          break;
        case LIBAROMA_WINDOW_SHOW_ANIMATION_PAGE_LEFT:
          {
            if (state>=1.0){
              break;
            }
            float swift_out_state = close?
              libaroma_cubic_bezier_easeout(state):
              libaroma_cubic_bezier_easein(state);
            int x;
            if (close) x = swift_out_state * win->w;
            else x = win->w - (swift_out_state * win->w);
            int w = win->w - x;
            if (w>0){
              libaroma_draw_ex(
                wmc, win->prev_screen, 0, 0, 0, 0, win->w-w, win->h, 0, 0xFF
              );
              libaroma_draw_ex(
                wmc, win->dc, x, 0, 0, 0, w, win->h, 0, 0xff
              );
              libaroma_wm_sync(win->x,win->y,win->w, win->h);
            }
          }
          break;
        case LIBAROMA_WINDOW_SHOW_ANIMATION_PAGE_RIGHT:
          {
            if (state>=1.0){
              break;
            }
            float swift_out_state = close?
              libaroma_cubic_bezier_easeout(state):
              libaroma_cubic_bezier_easein(state);
            int x;
            if (close) x = swift_out_state * win->w;
            else x = win->w - (swift_out_state * win->w);
            int w = win->w - x;
            //printf("X=%d, W=%d\n", x, w);
            if (w>0){
              //libaroma_canvas_setcolor(wmc, RGB(0), 0xFF);
              libaroma_draw_ex(
                wmc, win->prev_screen, w, 0, w, 0, win->w-w, win->h, 0, 0xFF
              );
              libaroma_draw_ex(
                wmc, win->dc, 0, 0, x, 0, w, win->h, 0, 0xff
              );
              libaroma_wm_sync(win->x,win->y, win->w, win->h);
            }
          }
          break;
        case LIBAROMA_WINDOW_SHOW_ANIMATION_SWAP_LEFT:
          {
            if (state>=1.0){
              break;
            }
            float swift_out_state = close?
              libaroma_cubic_bezier_easeout(state):
              libaroma_cubic_bezier_easein(state);
            int x;
            if (close) x = swift_out_state * win->w;
            else x = win->w - (swift_out_state * win->w);
            int w = win->w - x;
            //printf("X=%d, W=%d\n", x, w);
            if (w>0){
              libaroma_canvas_setcolor(wmc, RGB(0), 0xFF);
              libaroma_draw_ex(
                wmc, win->prev_screen, 0, 0, w, 0, win->w-w, win->h, 0, 0xFF
              );
              libaroma_draw_ex(
                wmc, win->dc, 0, 0, x, 0, w, win->h, 0, 0xff
              );
              libaroma_wm_sync(win->x,win->y, win->w, win->h);
            }
          }
          break;
        case LIBAROMA_WINDOW_SHOW_ANIMATION_SWAP_RIGHT:
          {
            if (state>=1.0){
              break;
            }
            float swift_out_state = close?
              libaroma_cubic_bezier_easeout(state):
              libaroma_cubic_bezier_easein(state);
            int x;
            if (close) x = swift_out_state * win->w;
            else x = win->w - (swift_out_state * win->w);
            int w = win->w - x;
            //printf("X=%d, W=%d\n", x, w);
            if (w>0){
              libaroma_canvas_setcolor(wmc, RGB(0), 0xFF);
              libaroma_draw_ex(
                wmc, win->prev_screen, w, 0, 0, 0, win->w-w, win->h, 0, 0xFF
              );
              libaroma_draw_ex(
                wmc, win->dc, x, 0, 0, 0, w, win->h, 0, 0xff
              );
              libaroma_wm_sync(win->x,win->y, win->w, win->h);
            }
          }
          break;
        default:
          {
            /* delegate to the art switch-animation engine */
            state = close?
              libaroma_cubic_bezier_easeout(state):
              libaroma_cubic_bezier_easein(state);
            if (close) state = (1.0 - state);
            if ((!close && state>=1.0) || (close && state <=0.0)){
              break;
            }
            //ALOGV("Playing %s animation with state %1.2f", close?"close":"open", state);
            libaroma_art_draw_switch_animation(libaroma_ani_win_to_art(anim),
              wmc, win->prev_screen, win->dc,
              //this is needed because snapshots are taken
              //using fb size, not wm workspace size
              libaroma_wm()->x, libaroma_wm()->y,
              win->prev_screen->w, win->prev_screen->h,
              0, 0, win->w, win->h, state);
            libaroma_wm_sync(win->x, win->y, win->w, win->h);
          }
          break;
      }
      libaroma_sleep(12);
    }
    if (!close) libaroma_draw(wmc,win->dc,0,0,0); //copy real window image to original canvas
    win->dc=wmc; /* switch dc to wm canvas area */
    libaroma_canvas_free(tdc);
  }
  win->lock_sync = 0;
  /* sync view now */
  if (close){
    //libaroma_wm_set_active_window(NULL);
    libaroma_wm_sync(win->x,win->y,win->w,win->h);
    libaroma_window_free(win);
  }
  else {
    win->active=1;
    libaroma_wm_sync(win->x,win->y,win->w,win->h);
    /* send activate */
    /* x=10 sentinel: tells WIN_ACTIVE handling to skip _ready/lock_sync */
    LIBAROMA_MSG _msg;
    libaroma_window_process_event(win,libaroma_wm_compose(
      &_msg, LIBAROMA_MSG_WIN_ACTIVE, NULL, 10, 0)
    );
  }
  return 1;
}

/*
 * Function : libaroma_window_calculate_pos
 * Return Value: void
 * Descriptions: calculate screen position to window/control position
 */
/* Translates *x,*y in-place from screen space into window- or
 * control-local space. */
void libaroma_window_calculate_pos(
  LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl,
  int * x, int * y
){
  if (win!=NULL){
    *x-=win->x;
    *y-=win->y;
  }
  else if
((ctl!=NULL)&&(ctl->window!=NULL)){
    /* no window given: fall back to the control's owning window */
    *x-=ctl->window->x;
    *y-=ctl->window->y;
  }
  if (ctl!=NULL){
    *x-=ctl->x;
    *y-=ctl->y;
  }
  /*
  *x-=libaroma_wm()->x;
  *y-=libaroma_wm()->y;
  */
} /* End of libaroma_window_calculate_pos */

/*
 * Function : libaroma_window_calculate_pos_abs
 * Return Value: void
 * Descriptions: calculate absolute screen position to top window position
 */
/* Like calculate_pos, but walks the whole parent chain using the
 * absolute (ax/ay) offsets. */
void libaroma_window_calculate_pos_abs(
  LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl,
  int * x, int * y
){
  if (ctl!=NULL){
    *x-=ctl->x;
    *y-=ctl->y;
    win=ctl->window;
  }
  while (win!=NULL){
    *x-=win->ax;
    *y-=win->ay;
    win=win->parent;
  }
} /* End of libaroma_window_calculate_pos_abs */

/*
 * Function : _libaroma_window_is_inside
 * Return Value: byte
 * Descriptions: check position coordinate
 */
/* Hit test: 1 if (x,y) is inside ctl's rectangle (right/bottom edges
 * exclusive). */
byte _libaroma_window_is_inside(LIBAROMA_CONTROLP ctl, int x, int y) {
  int wx = ctl->x;
  int wx2 = wx + ctl->w;
  int wy = ctl->y;
  int wy2 = wy + ctl->h;
  if ((x >= wx) && (x < wx2) && (y >= wy) && (y < wy2)) {
    return 1;
  }
  return 0;
} /* End of _libaroma_window_is_inside */

/*
 * Function : libaroma_window_post_command
 * Return Value: byte
 * Descriptions: post direct command
 */
byte libaroma_window_post_command(dword cmd){
  return libaroma_msg_post(
    LIBAROMA_MSG_WIN_DIRECTMSG,
    0, 0, (int) cmd, 0, NULL
  );
} /* End of libaroma_window_post_command */

/*
 * Function : libaroma_window_post_command_ex
 * Return Value: byte
 * Descriptions: post direct command extended
 */
byte libaroma_window_post_command_ex(dword cmd, byte state, int key, int y,
  voidp d){
  return libaroma_msg_post(
    LIBAROMA_MSG_WIN_DIRECTMSG,
    state, key, (int) cmd, y, d
  );
} /* End of libaroma_window_post_command */

/*
 * Function : libaroma_window_process_event
 * Return Value: dword
 * Descriptions: process message
 */
/* Root-window message dispatcher; returns a command dword (0 if none). */
dword libaroma_window_process_event(LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg){
  __CHECK_WM(0);
  if (win==NULL){
    ALOGW("window_event win is null");
    return 0;
  }
  if (win->parent!=NULL){
    ALOGW("window_event cannot used for child window...");
    return 0;
  }
  dword ret = 0;
  if
(win->handler){
    /* a handler's message hooker may fully consume the message */
    if (win->handler->message_hooker){
      if (win->handler->message_hooker(win,msg,&ret)){
        return ret;
      }
    }
  }
  switch (msg->msg){
    case LIBAROMA_MSG_WIN_ACTIVE:
      {
        /* set current window size */
        win->focused=NULL;
        win->touched=NULL;
        /* msg->x==10 is the sentinel sent by hideshow_animated:
         * skip the _ready pass and the lock_sync gate */
        if (msg->x!=10){
          _libaroma_window_ready(win);
        }
        if ((!win->lock_sync)||(msg->x==10)){
          if ((!win->active)||(msg->x==10)){
            int i;
            win->active=1;
            /* signal child */
            for (i=0;i<win->childn;i++){
              if (win->childs[i]->handler->message){
                win->childs[i]->handler->message(win->childs[i], msg);
              }
            }
          }
        }
      }
      break;
    case LIBAROMA_MSG_WIN_RESIZE:
      {
        /* re-run layout, then forward to all children */
        int i;
        _libaroma_window_ready(win);
        for (i=0;i<win->childn;i++){
          if (win->childs[i]->handler->message){
            win->childs[i]->handler->message(win->childs[i], msg);
          }
        }
      }
      break;
    case LIBAROMA_MSG_WIN_INACTIVE:
      {
        if (win->active){
          /* stop thread manager */
          win->active=0;
          /* send inactive message to child */
          int i;
          for (i=0;i<win->childn;i++){
            if (win->childs[i]->handler->message){
              win->childs[i]->handler->message(win->childs[i], msg);
            }
          }
          win->focused=NULL;
          win->touched=NULL;
        }
      }
      break;
    case LIBAROMA_MSG_WIN_MEASURED:
      {
        /* remeasured all childs */
        int i;
        for (i=0;i<win->childn;i++){
          libaroma_window_measure(win,win->childs[i]);
        }
      }
      break;
    case LIBAROMA_MSG_WIN_DIRECTMSG:
      {
        /* direct command: payload travels in msg->x */
        return (dword) msg->x;
      }
      break;
    case LIBAROMA_MSG_WIN_INVALIDATE:
      {
        libaroma_window_invalidate(win, 1);
      }
      break;
    case LIBAROMA_MSG_TOUCH:
      {
        /* touch handler */
        if (msg->state==LIBAROMA_HID_EV_STATE_DOWN){
          /* hit-test children and latch the touched control */
          win->touched = NULL;
          int x = msg->x;
          int y = msg->y;
          libaroma_window_calculate_pos(win,NULL,&x,&y);
          int i;
          for (i=0;i<win->childn;i++){
            if (_libaroma_window_is_inside(win->childs[i],x,y)){
              win->touched = win->childs[i];
              break;
            }
          }
          if (win->touched!=NULL){
            if (win->touched->handler->message){
              ret=win->touched->handler->message(win->touched, msg);
            }
          }
        }
        else if (win->touched!=NULL){
          /* MOVE/UP go to the control latched on DOWN */
          if (msg->state==LIBAROMA_HID_EV_STATE_MOVE){
            if (win->touched->handler->message){
              ret=win->touched->handler->message(win->touched, msg);
            }
          }
          else if (msg->state==LIBAROMA_HID_EV_STATE_UP){
            if (win->touched->handler->message){
              ret=win->touched->handler->message(win->touched, msg);
            }
            win->touched=NULL;
          }
        }
      }
      break;
  }
  return ret;
} /* End of libaroma_window_process_event */

/*
 * Function : libaroma_window_pool
 * Return Value: dword
 * Descriptions: poll window messages
 */
/* Pump one message from the WM queue (msg==NULL uses a stack-local one),
 * dispatch it, and invoke onclick/onhold callbacks for CLICK/HOLD
 * commands. Returns the command dword, 0 when no message was pending. */
dword libaroma_window_pool(
  LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg){
  if (!win){
    return 0;
  }
  if (win->parent!=NULL){
    ALOGW("cannot pool child window...");
    return 0;
  }
  LIBAROMA_MSG _msg;
  LIBAROMA_MSGP cmsg=(msg!=NULL)?msg:&_msg;
  byte ret = libaroma_wm_getmessage(cmsg);
  if (ret){
    dword command = libaroma_window_process_event(win,cmsg);
    if (command && cmsg->d){
      byte cmd = LIBAROMA_CMD(command);
      if (cmd == LIBAROMA_CMD_CLICK || cmd == LIBAROMA_CMD_HOLD){
        /* cmsg->d carries the originating control */
        LIBAROMA_CONTROLP ctl = (LIBAROMA_CONTROLP) cmsg->d;
        if (!ctl) {
          return command;
        }
        if (cmd == LIBAROMA_CMD_CLICK && ctl->onclick){
          ctl->onclick(ctl);
        }
        else if (cmd == LIBAROMA_CMD_HOLD && ctl->onhold){
          ctl->onhold(ctl);
        }
      }
    }
    return command;
  }
  return 0;
} /* End of libaroma_window_pool */

#undef __CHECK_WM

#ifdef __cplusplus
}
#endif
#endif /* __libaroma_window_c__ */
// sageInterface.h
#ifndef ROSE_SAGE_INTERFACE #define ROSE_SAGE_INTERFACE #include "sage3basic.hhh" #include <stdint.h> #include <utility> #include "rosePublicConfig.h" // for ROSE_BUILD_JAVA_LANGUAGE_SUPPORT #include "OmpAttribute.h" #if 0 // FMZ(07/07/2010): the argument "nextErrorCode" should be call-by-reference SgFile* determineFileType ( std::vector<std::string> argv, int nextErrorCode, SgProject* project ); #else SgFile* determineFileType ( std::vector<std::string> argv, int& nextErrorCode, SgProject* project ); #endif #ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT #include "rewrite.h" #endif // DQ (7/20/2008): Added support for unparsing abitrary strings in the unparser. #include "astUnparseAttribute.h" #include <set> #ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT #include "LivenessAnalysis.h" #include "abstract_handle.h" #include "ClassHierarchyGraph.h" #endif // DQ (8/19/2004): Moved from ROSE/src/midend/astRewriteMechanism/rewrite.h //! A global function for getting the string associated with an enum (which is defined in global scope) ROSE_DLL_API std::string getVariantName (VariantT v); // DQ (12/9/2004): Qing, Rich and Dan have decided to start this namespace within ROSE // This namespace is specific to interface functions that operate on the Sage III AST. // The name was chosen so as not to conflict with other classes within ROSE. // This will become the future home of many interface functions which operate on // the AST and which are generally useful to users. As a namespace multiple files can be used // to represent the compete interface and different developers may contribute interface // functions easily. // Constructor handling: (We have sageBuilder.h now for this purpose, Liao 2/1/2008) // We could add simpler layers of support for construction of IR nodes by // hiding many details in "makeSg***()" functions. 
Such functions would // return pointers to the associated Sg*** objects and would be able to hide // many IR specific details, including: // memory handling // optional parameter settings not often required // use of Sg_File_Info objects (and setting them as transformations) // // namespace AST_Interface (this name is taken already by some of Qing's work :-) //! An alias for Sg_File_Info::generateDefaultFileInfoForTransformationNode() #define TRANS_FILE Sg_File_Info::generateDefaultFileInfoForTransformationNode() /** Functions that are useful when operating on the AST. * * The Sage III IR design attempts to be minimalist. Thus additional functionality is intended to be presented using separate * higher level interfaces which work with the IR. This namespace collects functions that operate on the IR and support * numerous types of operations that are common to general analysis and transformation of the AST. */ namespace SageInterface { // Liao 6/22/2016: keep records of loop init-stmt normalization, later help undo it to support autoPar. struct Transformation_Record { // a lookup table to check if a for loop has been normalized for its c99-style init-stmt std::map <SgForStatement* , bool > forLoopInitNormalizationTable; // Detailed record about the original declaration (1st in the pair) and the normalization generated new declaration (2nd in the pair) std::map <SgForStatement* , std::pair<SgVariableDeclaration*, SgVariableDeclaration*> > forLoopInitNormalizationRecord; } ; ROSE_DLL_API extern Transformation_Record trans_records; // DQ (4/3/2014): Added general AST support separate from the AST. // Container and API for analysis information that is outside of the AST and as a result // prevents frequent modification of the IR. class DeclarationSets { // DQ (4/3/2014): This stores all associated declarations as a map of sets. 
// the key to the map is the first nondefining declaration and the elements of the set are // all of the associated declarations (including the defining declaration). private: //! Map of first-nondefining declaration to all other associated declarations. std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > declarationMap; public: void addDeclaration(SgDeclarationStatement* decl); const std::set<SgDeclarationStatement*>* getDeclarations(SgDeclarationStatement* decl); std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > & getDeclarationMap(); bool isLocatedInDefiningScope(SgDeclarationStatement* decl); }; // DQ (4/3/2014): This constructs a data structure that holds analysis information about // the AST that is separate from the AST. This is intended to be a general mechanism // to support analysis information without constantly modifying the IR. DeclarationSets* buildDeclarationSets(SgNode*); //! An internal counter for generating unique SgName ROSE_DLL_API extern int gensym_counter; #ifdef ROSE_ENABLE_BINARY_ANALYSIS //! Find the main interpretation. SgAsmInterpretation* getMainInterpretation(SgAsmGenericFile* file); //! Get the unsigned value of a disassembled constant. uint64_t getAsmConstant(SgAsmValueExpression* e); //! Get the signed value of a disassembled constant. int64_t getAsmSignedConstant(SgAsmValueExpression *e); #endif //! Function to add "C" style comment to statement. void addMessageStatement( SgStatement* stmt, std::string message ); //! A persistent attribute to represent a unique name for an expression class UniqueNameAttribute : public AstAttribute { private: std::string name; public: UniqueNameAttribute(std::string n="") {name =n; }; void set_name (std::string n) {name = n;}; std::string get_name () {return name;}; }; //------------------------------------------------------------------------ //@{ /*! 
@name Symbol tables \brief utility functions for symbol tables */ // DQ (8/5/2020): the "using namespace" directive will not hide existing visability of symbols in resolving visability. // So we need to test if a symbol is visible exclusing matching alises due to using direectives before we can decide to // persue name space qualification. This is best demonstrated by Cxx_tests/test2020_18.C, test2020_19.C, test2020_20.C, // and test2020_21.C. ROSE_DLL_API SgSymbol *lookupSymbolInParentScopesIgnoringAliasSymbols (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL); // DQ (8/21/2013): Modified to make newest function parameters be default arguments. // DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments. //! Find a symbol in current and ancestor scopes for a given variable name, starting from top of ScopeStack if currentscope is not given or NULL. // SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL); // SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList); ROSE_DLL_API SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL); // Liao 1/22/2008, used for get symbols for generating variable reference nodes // ! Find a variable symbol in current and ancestor scopes for a given name ROSE_DLL_API SgVariableSymbol *lookupVariableSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL); // DQ (11/24/2007): Functions moved from the Fortran support so that they could be called from within astPostProcessing. 
//!look up the first matched function symbol in parent scopes given only a function name, starting from top of ScopeStack if currentscope is not given or NULL ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName, SgScopeStatement *currentScope=NULL); // Liao, 1/24/2008, find exact match for a function //!look up function symbol in parent scopes given both name and function type, starting from top of ScopeStack if currentscope is not given or NULL ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName, const SgType* t, SgScopeStatement *currentScope=NULL); ROSE_DLL_API SgFunctionSymbol *lookupTemplateFunctionSymbolInParentScopes (const SgName & functionName, SgFunctionType * ftype, SgTemplateParameterPtrList * tplparams, SgScopeStatement *currentScope=NULL); ROSE_DLL_API SgFunctionSymbol *lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & functionName, SgFunctionType * ftype, SgTemplateParameterPtrList * tplparams, SgScopeStatement *currentScope=NULL); ROSE_DLL_API SgTemplateVariableSymbol * lookupTemplateVariableSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList * tplparams, SgTemplateArgumentPtrList* tplargs, SgScopeStatement *currentScope=NULL); // DQ (8/21/2013): Modified to make newest function parameters be default arguments. // DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments. // DQ (5/7/2011): Added support for SgClassSymbol (used in name qualification support). 
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL); ROSE_DLL_API SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL); ROSE_DLL_API SgTypedefSymbol* lookupTypedefSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL); ROSE_DLL_API SgNonrealSymbol* lookupNonrealSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL); #if 0 // DQ (8/13/2013): This function does not make since any more, now that we have made the symbol // table handling more precise and we have to provide template parameters for any template lookup. // We also have to know if we want to lookup template classes, template functions, or template // member functions (since each have specific requirements). SgTemplateSymbol* lookupTemplateSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL); #endif #if 0 // DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes. // Where these are called we might not know enough information about the template parameters or function // types, for example. 
SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL); SgTemplateFunctionSymbol* lookupTemplateFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL); SgTemplateMemberFunctionSymbol* lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL); #endif // DQ (8/21/2013): Modified to make some of the newest function parameters be default arguments. // DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes. ROSE_DLL_API SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList, SgScopeStatement *cscope = NULL); ROSE_DLL_API SgEnumSymbol* lookupEnumSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL); ROSE_DLL_API SgNamespaceSymbol* lookupNamespaceSymbolInParentScopes(const SgName & name, SgScopeStatement *currentScope = NULL); // DQ (7/17/2011): Added function from cxx branch that I need here for the Java support. // SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *cscope); /*! \brief set_name of symbol in symbol table. This function extracts the symbol from the relavant symbol table, changes the name (at the declaration) and reinserts it into the symbol table. \internal I think this is what this function does, I need to double check. */ // DQ (12/9/2004): Moved this function (by Alin Jula) from being a member of SgInitializedName // to this location where it can be a part of the interface for the Sage III AST. 
ROSE_DLL_API int set_name (SgInitializedName * initializedNameNode, SgName new_name); /*! \brief Output function type symbols in global function type symbol table. */ void outputGlobalFunctionTypeSymbolTable (); // DQ (6/27/2005): /*! \brief Output the local symbol tables. \implementation Each symbol table is output with the file infor where it is located in the source code. */ ROSE_DLL_API void outputLocalSymbolTables (SgNode * node); class OutputLocalSymbolTables:public AstSimpleProcessing { public: void visit (SgNode * node); }; /*! \brief Regenerate the symbol table. \implementation current symbol table must be NULL pointer before calling this function (for safety, but is this a good idea?) */ // DQ (9/28/2005): void rebuildSymbolTable (SgScopeStatement * scope); /*! \brief Clear those variable symbols with unknown type (together with initialized names) which are also not referenced by any variable references or declarations under root. If root is NULL, all symbols with unknown type will be deleted. */ void clearUnusedVariableSymbols (SgNode* root = NULL); // DQ (3/1/2009): //! All the symbol table references in the copied AST need to be reset after rebuilding the copied scope's symbol table. void fixupReferencesToSymbols( const SgScopeStatement* this_scope, SgScopeStatement* copy_scope, SgCopyHelp & help ); //@} //------------------------------------------------------------------------ //@{ /*! @name Stringify \brief Generate a useful string (name) to describe a SgNode */ /*! \brief Generate a useful name to describe the SgNode \internal default names are used for SgNode objects that can not be associated with a name. */ // DQ (9/21/2005): General function for extracting the name of declarations (when they have names) std::string get_name (const SgNode * node); /*! \brief Generate a useful name to describe the declaration \internal default names are used for declarations that can not be associated with a name. 
*/ // DQ (6/13/2005): General function for extracting the name of declarations (when they have names) std::string get_name (const SgStatement * stmt); /*! \brief Generate a useful name to describe the expression \internal default names are used for expressions that can not be associated with a name. */ std::string get_name (const SgExpression * expr); /*! \brief Generate a useful name to describe the declaration \internal default names are used for declarations that can not be associated with a name. */ // DQ (6/13/2005): General function for extracting the name of declarations (when they have names) std::string get_name (const SgDeclarationStatement * declaration); /*! \brief Generate a useful name to describe the scope \internal default names are used for scope that cannot be associated with a name. */ // DQ (6/13/2005): General function for extracting the name of declarations (when they have names) std::string get_name (const SgScopeStatement * scope); /*! \brief Generate a useful name to describe the SgSymbol \internal default names are used for SgSymbol objects that cannot be associated with a name. */ // DQ (2/11/2007): Added this function to make debugging support more complete (useful for symbol table debugging support). std::string get_name (const SgSymbol * symbol); /*! \brief Generate a useful name to describe the SgType \internal default names are used for SgType objects that cannot be associated with a name. */ std::string get_name (const SgType * type); /*! \brief Generate a useful name to describe the SgSupport IR node */ std::string get_name (const SgSupport * node); /*! \brief Generate a useful name to describe the SgLocatedNodeSupport IR node */ std::string get_name (const SgLocatedNodeSupport * node); /*! \brief Generate a useful name to describe the SgC_PreprocessorDirectiveStatement IR node */ std::string get_name ( const SgC_PreprocessorDirectiveStatement* directive ); /*! 
\brief Generate a useful name to describe the SgToken IR node */ std::string get_name ( const SgToken* token ); // DQ (3/20/2016): Added to refactor some of the DSL infrastructure support. /*! \brief Generate a useful name to support construction of identifiers from declarations. This function permits names to be generated that will be unique across translation units (a specific requirement different from the context of the get_name() functions above). \internal This supports only a restricted set of declarations presently. */ std::string generateUniqueNameForUseAsIdentifier ( SgDeclarationStatement* declaration ); std::string generateUniqueNameForUseAsIdentifier_support ( SgDeclarationStatement* declaration ); /*! \brief Global map of name collisions to support generateUniqueNameForUseAsIdentifier() function. */ extern std::map<std::string,int> local_name_collision_map; extern std::map<std::string,SgNode*> local_name_to_node_map; extern std::map<SgNode*,std::string> local_node_to_name_map; /*! \brief Traversal to set the global map of names to node and node to names.collisions to support generateUniqueNameForUseAsIdentifier() function. */ void computeUniqueNameForUseAsIdentifier( SgNode* astNode ); /*! \brief Reset map variables used to support generateUniqueNameForUseAsIdentifier() function. */ void reset_name_collision_map(); //@} //------------------------------------------------------------------------ //@{ /*! @name Class utilities \brief */ /*! \brief Get the default destructor from the class declaration */ // DQ (6/21/2005): Get the default destructor from the class declaration ROSE_DLL_API SgMemberFunctionDeclaration *getDefaultDestructor (SgClassDeclaration* classDeclaration); /*! \brief Get the default constructor from the class declaration */ // DQ (6/22/2005): Get the default constructor from the class declaration ROSE_DLL_API SgMemberFunctionDeclaration *getDefaultConstructor (SgClassDeclaration* classDeclaration); /*! 
\brief Return true if template definition is in the class, false if outside of class. */ // DQ (8/27/2005): ROSE_DLL_API bool templateDefinitionIsInClass (SgTemplateInstantiationMemberFunctionDecl* memberFunctionDeclaration); /*! \brief Generate a non-defining (forward) declaration from a defining function declaration. \internal should put into sageBuilder ? */ // DQ (9/17/2005): ROSE_DLL_API SgTemplateInstantiationMemberFunctionDecl* buildForwardFunctionDeclaration (SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation); //! Check if a SgNode is a declaration for a structure ROSE_DLL_API bool isStructDeclaration(SgNode * node); //! Check if a SgNode is a declaration for a union ROSE_DLL_API bool isUnionDeclaration(SgNode * node); #if 0 // DQ (8/28/2005): This is already a member function of the SgFunctionDeclaration // (so that it can handle template functions and member functions) /*! \brief Return true if member function of a template member function, of false if a non-template member function in a templated class. */ // DQ (8/27/2005): bool isTemplateMemberFunction (SgTemplateInstantiationMemberFunctionDecl* memberFunctionDeclaration); #endif // DQ (11/9/2020): Added function to support adding a default constructor definition to a class // if it does not have a default constructor, but has any other constructor that would prevend // a compiler generated default constructor from being generated by the compiler. // Note the physical_file_id is so that it can be marked to be unparsed when header file unparsing is active. ROSE_DLL_API bool addDefaultConstructorIfRequired ( SgClassType* classType, int physical_file_id = Sg_File_Info::TRANSFORMATION_FILE_ID ); //@} //------------------------------------------------------------------------ //@{ /*! @name Misc. \brief Not sure the classifications right now */ //! Recursively print current and parent nodes. used within gdb to probe the context of a node. 
void recursivePrintCurrentAndParent (SgNode* n) ; //! Save AST into a pdf file. Start from a node to find its enclosing file node. The entire file's AST will be saved into a pdf. void saveToPDF(SgNode* node, std::string filename); void saveToPDF(SgNode* node); // enable calling from gdb //! Pretty print AST horizontally, output to std output void printAST (SgNode* node); //! Pretty print AST horizontally, output to a specified text file. void printAST2TextFile (SgNode* node, const char* filename); void printAST2TextFile (SgNode* node, std::string filename); // DQ (2/12/2012): Added some diagnostic support. //! Diagnostic function for tracing back through the parent list to understand at runtime where in the AST a failure happened. void whereAmI(SgNode* node); //! Extract a SgPragmaDeclaration's leading keyword . For example "#pragma omp parallel" has a keyword of "omp". std::string extractPragmaKeyword(const SgPragmaDeclaration *); //! Check if a node is SgOmp*Statement ROSE_DLL_API bool isOmpStatement(SgNode* ); /*! \brief Return true if function is overloaded. */ // DQ (8/27/2005): bool isOverloaded (SgFunctionDeclaration * functionDeclaration); // DQ (2/14/2012): Added support function used for variable declarations in conditionals. //! Support function used for variable declarations in conditionals void initializeIfStmt(SgIfStmt *ifstmt, SgStatement* conditional, SgStatement * true_body, SgStatement * false_body); //! Support function used for variable declarations in conditionals void initializeSwitchStatement(SgSwitchStatement* switchStatement,SgStatement *item_selector,SgStatement *body); //! Support function used for variable declarations in conditionals void initializeWhileStatement(SgWhileStmt* whileStatement, SgStatement * condition, SgStatement *body, SgStatement *else_body); //! Generate unique names for expressions and attach the names as persistent attributes ("UniqueNameAttribute") void annotateExpressionsWithUniqueNames (SgProject* project); //! 
Check if a SgNode is a main() function declaration ROSE_DLL_API bool isMain (const SgNode* node); // DQ (6/22/2005): /*! \brief Generate unique name from C and C++ constructs. The name may contain space. This is support for the AST merge, but is generally useful as a more general mechanism than name mangling which is more closely ties to the generation of names to support link-time function name resolution. This is more general than common name mangling in that it resolves more relevant differences between C and C++ declarations. (e.g. the type within the declaration: "struct { int:8; } foo;"). \implementation current work does not support expressions. */ std::string generateUniqueName ( const SgNode * node, bool ignoreDifferenceBetweenDefiningAndNondefiningDeclarations); /** Generate a name like __temp#__ that is unique in the current scope and any parent and children scopes. # is a unique integer counter. * @param baseName the word to be included in the variable names. */ std::string generateUniqueVariableName(SgScopeStatement* scope, std::string baseName = "temp"); // DQ (8/10/2010): Added const to first parameter. // DQ (3/10/2007): //! Generate a unique string from the source file position information std::string declarationPositionString (const SgDeclarationStatement * declaration); // DQ (1/20/2007): //! Added mechanism to generate project name from list of file names ROSE_DLL_API std::string generateProjectName (const SgProject * project, bool supressSuffix = false ); //! Given a SgExpression that represents a named function (or bound member //! function), return the mentioned function SgFunctionDeclaration* getDeclarationOfNamedFunction(SgExpression* func); //! Get the mask expression from the header of a SgForAllStatement SgExpression* forallMaskExpression(SgForAllStatement* stmt); //! 
Find all SgPntrArrRefExp under astNode, then add SgVarRefExp (if any) of SgPntrArrRefExp's dim_info into NodeList_t void addVarRefExpFromArrayDimInfo(SgNode * astNode, Rose_STL_Container<SgNode *>& NodeList_t); // DQ (10/6/2006): Added support for faster mangled name generation (caching avoids recomputation). /*! \brief Support for faster mangled name generation (caching avoids recomputation). */ #ifndef SWIG // DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time). void clearMangledNameCache (SgGlobal * globalScope); void resetMangledNameCache (SgGlobal * globalScope); #endif std::string getMangledNameFromCache (SgNode * astNode); std::string addMangledNameToCache (SgNode * astNode, const std::string & mangledName); SgDeclarationStatement * getNonInstantiatonDeclarationForClass (SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation); //! a better version for SgVariableDeclaration::set_baseTypeDefininingDeclaration(), handling all side effects automatically //! Used to have a struct declaration embedded into a variable declaration void setBaseTypeDefiningDeclaration(SgVariableDeclaration* var_decl, SgDeclarationStatement *base_decl); // DQ (10/14/2006): This function tests the AST to see if for a non-defining declaration, the // bool declarationPreceedsDefinition ( SgClassDeclaration* classNonDefiningDeclaration, SgClassDeclaration* classDefiningDeclaration ); //! Check if a defining declaration comes before of after the non-defining declaration. bool declarationPreceedsDefinition (SgDeclarationStatement *nonDefiningDeclaration, SgDeclarationStatement *definingDeclaration); // DQ (10/19/2006): Function calls have interesting context dependent rules to determine if // they are output with a global qualifier or not. Were this is true we have to avoid global // qualifiers, since the function's scope has not been defined. 
This is an example of where // qualification of function names in function calls are context dependent; an interesting // example of where the C++ language is not friendly to source-to-source processing :-). bool functionCallExpressionPreceedsDeclarationWhichAssociatesScope (SgFunctionCallExp * functionCall); /*! \brief Compute the intersection set for two ASTs. This is part of a test done by the copy function to compute those IR nodes in the copy that still reference the original AST. */ ROSE_DLL_API std::vector < SgNode * >astIntersection (SgNode * original, SgNode * copy, SgCopyHelp * help = NULL); //! Deep copy an arbitrary subtree ROSE_DLL_API SgNode* deepCopyNode (const SgNode* subtree); //! A template function for deep copying a subtree. It is also used to create deepcopy functions with specialized parameter and return types. e.g SgExpression* copyExpression(SgExpression* e); template <typename NodeType> NodeType* deepCopy (const NodeType* subtree) { return dynamic_cast<NodeType*>(deepCopyNode(subtree)); } //! Deep copy an expression ROSE_DLL_API SgExpression* copyExpression(SgExpression* e); //!Deep copy a statement ROSE_DLL_API SgStatement* copyStatement(SgStatement* s); // from VarSym.cc in src/midend/astOutlining/src/ASTtools //! Get the variable symbol for the first initialized name of a declaration stmt. ROSE_DLL_API SgVariableSymbol* getFirstVarSym (SgVariableDeclaration* decl); //! Get the first initialized name of a declaration statement ROSE_DLL_API SgInitializedName* getFirstInitializedName (SgVariableDeclaration* decl); //! A special purpose statement removal function, originally from inlinerSupport.h, Need Jeremiah's attention to refine it. Please don't use it for now. 
ROSE_DLL_API void myRemoveStatement(SgStatement* stmt); ROSE_DLL_API bool isConstantTrue(SgExpression* e); ROSE_DLL_API bool isConstantFalse(SgExpression* e); ROSE_DLL_API bool isCallToParticularFunction(SgFunctionDeclaration* decl, SgExpression* e); ROSE_DLL_API bool isCallToParticularFunction(const std::string& qualifiedName, size_t arity, SgExpression* e); //! Check if a declaration has a "static' modifier bool ROSE_DLL_API isStatic(SgDeclarationStatement* stmt); //! Set a declaration as static ROSE_DLL_API void setStatic(SgDeclarationStatement* stmt); //! Check if a declaration has an "extern" modifier ROSE_DLL_API bool isExtern(SgDeclarationStatement* stmt); //! Set a declaration as extern ROSE_DLL_API void setExtern(SgDeclarationStatement* stmt); //! True if an SgInitializedName is "mutable' (has storage modifier set) bool ROSE_DLL_API isMutable(SgInitializedName* name); //! True if a parameter name is a Jovial output parameter bool ROSE_DLL_API isJovialOutParam(SgInitializedName* name); //! Get a vector of Jovial input parameters from the function parameter list (may work for Fortran in the future) std::vector<SgInitializedName*> getInParameters(const SgInitializedNamePtrList &params); //! Get a vector of Jovial output parameters from the function parameter list (may work for Fortran in the future) std::vector<SgInitializedName*> getOutParameters(const SgInitializedNamePtrList &params); //! Interface for creating a statement whose computation writes its answer into //! a given variable. class StatementGenerator { public: virtual ~StatementGenerator() {}; virtual SgStatement* generate(SgExpression* where_to_write_answer) = 0; }; //! Check if a SgNode _s is an assignment statement (any of =,+=,-=,&=,/=, ^=, etc) //! //! Return the left hand, right hand expressions and if the left hand variable is also being read bool isAssignmentStatement(SgNode* _s, SgExpression** lhs=NULL, SgExpression** rhs=NULL, bool* readlhs=NULL); //! 
Variable references can be introduced by SgVarRef, SgPntrArrRefExp, SgInitializedName, SgMemberFunctionRef etc. For Dot and Arrow Expressions, their lhs is used to obtain SgInitializedName (coarse grain) by default. Otherwise, fine-grain rhs is used. ROSE_DLL_API SgInitializedName* convertRefToInitializedName(SgNode* current, bool coarseGrain=true); //! Build an abstract handle from an AST node, reuse previously built handle when possible ROSE_DLL_API AbstractHandle::abstract_handle* buildAbstractHandle(SgNode*); //! Obtain a matching SgNode from an abstract handle string ROSE_DLL_API SgNode* getSgNodeFromAbstractHandleString(const std::string& input_string); //! Dump information about a SgNode for debugging ROSE_DLL_API void dumpInfo(SgNode* node, std::string desc=""); //! Reorder a list of declaration statements based on their appearance order in source files ROSE_DLL_API std::vector<SgDeclarationStatement*> sortSgNodeListBasedOnAppearanceOrderInSource(const std::vector<SgDeclarationStatement*>& nodevec); // DQ (4/13/2013): We need these to support the unparing of operators defined by operator syntax or member function names. //! Is an overloaded operator a prefix operator (e.g. address operator X * operator&(), dereference operator X & operator*(), unary plus operator X & operator+(), etc. // bool isPrefixOperator( const SgMemberFunctionRefExp* memberFunctionRefExp ); bool isPrefixOperator( SgExpression* exp ); //! Check for proper names of possible prefix operators (used in isPrefixOperator()). bool isPrefixOperatorName( const SgName & functionName ); //! Is an overloaded operator a postfix operator. (e.g. ). bool isPostfixOperator( SgExpression* exp ); //! Is an overloaded operator an index operator (also referred to as call or subscript operators). (e.g. X & operator()() or X & operator[]()). bool isIndexOperator( SgExpression* exp ); // DQ (1/10/2014): Adding more general support for token based unparsing. //! 
Used to support token unparsing (when the output the trailing token sequence). SgStatement* lastStatementOfScopeWithTokenInfo (SgScopeStatement* scope, std::map<SgNode*,TokenStreamSequenceToNodeMapping*> & tokenStreamSequenceMap); // DQ (8/12/2020): Check the access permissions of all defining and nodefining declarations. void checkAccessPermissions ( SgNode* ); // DQ (8/14/2020): Check the symbol tables for specific scopes (debugging support). void checkSymbolTables ( SgNode* ); // DQ (11/9/2020): Added support for makring IR nodes and subtrees of the AST to be unparsed (physical_file_id // is required when unparsing header files is true or support multiple files and shared IR nodes). void markSubtreeToBeUnparsed(SgNode* root, int physical_file_id); void markNodeToBeUnparsed(SgNode* node, int physical_file_id); //@} //------------------------------------------------------------------------ //@{ /*! @name AST properties \brief version, language properties of current AST. */ // DQ (11/25/2020): Add support to set this as a specific language kind file (there is at least one language kind file processed by ROSE). // The value of 0 allows the old implementation to be tested, and the value of 1 allows the new optimized implementation to be tested. // However to get all of the functions to be inlined, we have to recompile all of ROSE. #define INLINE_OPTIMIZED_IS_LANGUAGE_KIND_FUNCTIONS 1 // std::string version(); // utility_functions.h, version number /*! Brief These traverse the memory pool of SgFile IR nodes and determine what languages are in use! 
*/ #if INLINE_OPTIMIZED_IS_LANGUAGE_KIND_FUNCTIONS ROSE_DLL_API inline bool is_Ada_language () { return Rose::is_Ada_language; } ROSE_DLL_API inline bool is_C_language () { return Rose::is_C_language; } ROSE_DLL_API inline bool is_Cobol_language () { return Rose::is_Cobol_language; } ROSE_DLL_API inline bool is_OpenMP_language () { return Rose::is_OpenMP_language; } ROSE_DLL_API inline bool is_UPC_language () { return Rose::is_UPC_language; } ROSE_DLL_API inline bool is_UPC_dynamic_threads() { return Rose::is_UPC_dynamic_threads; } ROSE_DLL_API inline bool is_C99_language () { return Rose::is_C99_language; } ROSE_DLL_API inline bool is_Cxx_language () { return Rose::is_Cxx_language; } ROSE_DLL_API inline bool is_Java_language () { return Rose::is_Java_language; } ROSE_DLL_API inline bool is_Jovial_language () { return Rose::is_Jovial_language; } ROSE_DLL_API inline bool is_Fortran_language () { return Rose::is_Fortran_language; } ROSE_DLL_API inline bool is_CAF_language () { return Rose::is_CAF_language; } ROSE_DLL_API inline bool is_PHP_language() { return Rose::is_PHP_language; } ROSE_DLL_API inline bool is_Python_language() { return Rose::is_Python_language; } ROSE_DLL_API inline bool is_Cuda_language() { return Rose::is_Cuda_language; } ROSE_DLL_API inline bool is_OpenCL_language() { return Rose::is_OpenCL_language; } ROSE_DLL_API inline bool is_X10_language() { return Rose::is_X10_language; } ROSE_DLL_API inline bool is_binary_executable() { return Rose::is_binary_executable; } #else ROSE_DLL_API bool is_Ada_language (); ROSE_DLL_API bool is_C_language (); ROSE_DLL_API bool is_Cobol_language (); ROSE_DLL_API bool is_OpenMP_language (); ROSE_DLL_API bool is_UPC_language (); //! 
Check if dynamic threads compilation is used for UPC programs ROSE_DLL_API bool is_UPC_dynamic_threads(); ROSE_DLL_API bool is_C99_language (); ROSE_DLL_API bool is_Cxx_language (); ROSE_DLL_API bool is_Java_language (); ROSE_DLL_API bool is_Jovial_language (); ROSE_DLL_API bool is_Fortran_language (); ROSE_DLL_API bool is_CAF_language (); ROSE_DLL_API bool is_PHP_language(); ROSE_DLL_API bool is_Python_language(); ROSE_DLL_API bool is_Cuda_language(); ROSE_DLL_API bool is_OpenCL_language(); ROSE_DLL_API bool is_X10_language(); ROSE_DLL_API bool is_binary_executable(); #endif ROSE_DLL_API bool is_mixed_C_and_Cxx_language (); ROSE_DLL_API bool is_mixed_Fortran_and_C_language (); ROSE_DLL_API bool is_mixed_Fortran_and_Cxx_language (); ROSE_DLL_API bool is_mixed_Fortran_and_C_and_Cxx_language (); ROSE_DLL_API bool is_language_case_insensitive (); ROSE_DLL_API bool language_may_contain_nondeclarations_in_scope (); //@} //------------------------------------------------------------------------ //@{ /*! @name Scope \brief */ // DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique // labels for scopes in a function (as required for name mangling). /*! \brief Assigns unique numbers to each SgScopeStatement of a function. This is used to provide unique names for variables and types defined is different nested scopes of a function (used in mangled name generation). */ void resetScopeNumbers (SgFunctionDefinition * functionDeclaration); // DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique // labels for scopes in a function (as required for name mangling). /*! \brief Clears the cache of scope,integer pairs for the input function. This is used to clear the cache of computed unique labels for scopes in a function. This function should be called after any transformation on a function that might effect the allocation of scopes and cause the existing unique numbers to be incorrect. 
This is part of support to provide unique names for variables and types defined in different nested scopes
Insert #include "filename" or #include <filename> (system header) onto the global scope of a source file ROSE_DLL_API PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader = false, PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before); //! Insert #include "filename" or #include <filename> (system header) into the global scope containing the current scope, right after other #include XXX. ROSE_DLL_API PreprocessingInfo* insertHeader(const std::string& filename, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::after, bool isSystemHeader=false, SgScopeStatement* scope=NULL); //! Identical to movePreprocessingInfo(), except for the stale name and confusing order of parameters. It will be deprecated soon. ROSE_DLL_API void moveUpPreprocessingInfo (SgStatement* stmt_dst, SgStatement* stmt_src, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef, PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false); //! Move preprocessing information of stmt_src to stmt_dst, Only move preprocessing information from the specified source-relative position to a specified target position, otherwise move all preprocessing information with position information intact. The preprocessing information is appended to the existing preprocessing information list of the target node by default. Prepending is used if usePreprend is set to true. Optionally, the relative position can be adjust after the moving using dst_position. ROSE_DLL_API void movePreprocessingInfo (SgStatement* stmt_src, SgStatement* stmt_dst, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef, PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false); //!Cut preprocessing information from a source node and save it into a buffer. Used in combination of pastePreprocessingInfo(). 
The cut-paste operation is similar to moveUpPreprocessingInfo() but it is more flexible in that the destination node can be unknown during the cut operation. ROSE_DLL_API void cutPreprocessingInfo (SgLocatedNode* src_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& save_buf); //!Paste preprocessing information from a buffer to a destination node. Used in combination of cutPreprocessingInfo() ROSE_DLL_API void pastePreprocessingInfo (SgLocatedNode* dst_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& saved_buf); //! Attach an arbitrary string to a located node. A workaround to insert irregular statements or vendor-specific attributes. ROSE_DLL_API PreprocessingInfo* attachArbitraryText(SgLocatedNode* target, const std::string & text, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before); //!Check if a pragma declaration node has macro calls attached, if yes, replace macro calls within the pragma string with expanded strings. This only works if -rose:wave is turned on. ROSE_DLL_API void replaceMacroCallsWithExpandedStrings(SgPragmaDeclaration* target); //@} //! Build and attach comment onto the global scope of a source file PreprocessingInfo* attachComment( SgSourceFile * source_file, const std::string & content, PreprocessingInfo::DirectiveType directive_type = PreprocessingInfo::C_StyleComment, PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before ); //! Build and attach comment, comment style is inferred from the language type of the target node if not provided ROSE_DLL_API PreprocessingInfo* attachComment(SgLocatedNode* target, const std::string & content, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before, PreprocessingInfo::DirectiveType dtype= PreprocessingInfo::CpreprocessorUnknownDeclaration); // DQ (7/20/2008): I am not clear were I should put this function, candidates include: SgLocatedNode or SgInterface //! 
Add a string to be unparsed to support code generation for back-end specific tools or compilers. ROSE_DLL_API void addTextForUnparser ( SgNode* astNode, std::string s, AstUnparseAttribute::RelativePositionType inputlocation ); /** * Add preproccessor guard around a given node. * It surrounds the node with "#if guard" and "#endif" */ void guardNode(SgLocatedNode * target, std::string guard); //@} //------------------------------------------------------------------------ //@{ /*! @name Source File Position \brief set Sg_File_Info for a SgNode */ // ************************************************************************ // Newer versions of now depricated functions // ************************************************************************ // DQ (5/1/2012): This function queries the SageBuilder::SourcePositionClassification mode (stored in the SageBuilder // interface) and used the specified mode to initialize the source position data (Sg_File_Info objects). This // function is the only function that should be called directly (though in a namespace we can't define permissions). //! Set the source code positon for the current (input) node. ROSE_DLL_API void setSourcePosition(SgNode* node); // A better name might be "setSourcePositionForSubTree" //! Set the source code positon for the subtree (including the root). ROSE_DLL_API void setSourcePositionAtRootAndAllChildren(SgNode *root); //! DQ (5/1/2012): New function with improved name. void setSourcePositionAsTransformation(SgNode *node); // DQ (5/1/2012): Newly renamed function (previous name preserved for backward compatability). void setSourcePositionPointersToNull(SgNode *node); // ************************************************************************ // ************************************************************************ // Older deprecated functions // ************************************************************************ // Liao, 1/8/2007, set file info. for a whole subtree as transformation generated //! 
Set current node's source position as transformation generated ROSE_DLL_API void setOneSourcePositionForTransformation(SgNode *node); //! Set current node's source position as NULL ROSE_DLL_API void setOneSourcePositionNull(SgNode *node); //! Recursively set source position info(Sg_File_Info) as transformation generated ROSE_DLL_API void setSourcePositionForTransformation (SgNode * root); //! Set source position info(Sg_File_Info) as transformation generated for all SgNodes in memory pool // ROSE_DLL_API void setSourcePositionForTransformation_memoryPool(); //! Check if a node is from a system header file ROSE_DLL_API bool insideSystemHeader (SgLocatedNode* node); // DQ (2/27/2021): Adding support to detect if a SgLocatedNode is located in a header file. //! Check if a node is from a header file ROSE_DLL_API bool insideHeader (SgLocatedNode* node); //! Set the source position of SgLocatedNode to Sg_File_Info::generateDefaultFileInfo(). These nodes WILL be unparsed. Not for transformation usage. // ROSE_DLL_API void setSourcePosition (SgLocatedNode * locatedNode); // ************************************************************************ //@} //------------------------------------------------------------------------ //@{ /*! @name Data types \brief */ // from src/midend/astInlining/typeTraits.h // src/midend/astUtil/astInterface/AstInterface.h //! Get the right bool type according to C or C++ language input SgType* getBoolType(SgNode* n); //! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long. ////! ////! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool to be treated as integer types ROSE_DLL_API bool isStrictIntegerType(SgType* t); //!Get the data type of the first initialized name of a declaration statement ROSE_DLL_API SgType* getFirstVarType(SgVariableDeclaration* decl); //! Is a type default constructible? This may not quite work properly. 
ROSE_DLL_API bool isDefaultConstructible(SgType* type); //! Is a type copy constructible? This may not quite work properly. ROSE_DLL_API bool isCopyConstructible(SgType* type); //! Is a type assignable? This may not quite work properly. ROSE_DLL_API bool isAssignable(SgType* type); #ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT //! Check if a class type is a pure virtual class. True means that there is at least //! one pure virtual function that has not been overridden. //! In the case of an incomplete class type (forward declaration), this function returns false. ROSE_DLL_API bool isPureVirtualClass(SgType* type, const ClassHierarchyWrapper& classHierarchy); #endif //! Does a type have a trivial (built-in) destructor? ROSE_DLL_API bool hasTrivialDestructor(SgType* t); //! Is this type a non-constant reference type? (Handles typedefs correctly) ROSE_DLL_API bool isNonconstReference(SgType* t); //! Is this type a const or non-const reference type? (Handles typedefs correctly) ROSE_DLL_API bool isReferenceType(SgType* t); //! Is this type a pointer type? (Handles typedefs correctly) ROSE_DLL_API bool isPointerType(SgType* t); //! Is this a pointer to a non-const type? Note that this function will return true for const pointers pointing to //! non-const types. For example, (int* const y) points to a modifiable int, so this function returns true. Meanwhile, //! it returns false for (int const * x) and (int const * const x) because these types point to a const int. //! Also, only the outer layer of nested pointers is unwrapped. So the function returns true for (const int ** y), but returns //! false for const (int * const * x) ROSE_DLL_API bool isPointerToNonConstType(SgType* type); //! Is this a const type? /* const char* p = "aa"; is not treated as having a const type. It is a pointer to const char. 
* Similarly, neither for const int b[10]; or const int & c =10; * The standard says, "A compound type is not cv-qualified by the cv-qualifiers (if any) of the types from which it is compounded. Any cv-qualifiers applied to an array type affect the array element type, not the array type". */ ROSE_DLL_API bool isConstType(SgType* t); //! Remove const (if present) from a type. stripType() cannot do this because it removes all modifiers. SgType* removeConst(SgType* t); //! Is this a volatile type? ROSE_DLL_API bool isVolatileType(SgType* t); //! Is this a restrict type? ROSE_DLL_API bool isRestrictType(SgType* t); //! Is this a scalar type? /*! We define the following SgType as scalar types: char, short, int, long , void, Wchar, Float, double, long long, string, bool, complex, imaginary */ ROSE_DLL_API bool isScalarType(SgType* t); //! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long. //! //! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool. ROSE_DLL_API bool isStrictIntegerType(SgType* t); //! Check if a type is a struct type (a special SgClassType in ROSE) ROSE_DLL_API bool isStructType(SgType* t); //! Generate a mangled string for a given type based on Itanium C++ ABI ROSE_DLL_API std::string mangleType(SgType* type); //! Generate mangled scalar type names according to Itanium C++ ABI, the input type should pass isScalarType() in ROSE ROSE_DLL_API std::string mangleScalarType(SgType* type); //! Generated mangled modifier types, include const, volatile,according to Itanium C++ ABI, with extension to handle UPC shared types. ROSE_DLL_API std::string mangleModifierType(SgModifierType* type); //! Calculate the number of elements of an array type: dim1* dim2*... , assume element count is 1 for int a[]; Strip off THREADS if it is a UPC array. ROSE_DLL_API size_t getArrayElementCount(SgArrayType* t); //! 
Get the number of dimensions of an array type ROSE_DLL_API int getDimensionCount(SgType* t); //! Get the element type of an array. It recursively find the base type for multi-dimension array types ROSE_DLL_API SgType* getArrayElementType(SgType* t); //! Get the element type of an array, pointer or string, or NULL if not applicable. This function only check one level base type. No recursion. ROSE_DLL_API SgType* getElementType(SgType* t); /// \brief returns the array dimensions in an array as defined for arrtype /// \param arrtype the type of a C/C++ array /// \return an array that contains an expression indicating each dimension's size. /// OWNERSHIP of the expressions is TRANSFERED TO the CALLER (which /// becomes responsible for freeing the expressions). /// Note, the first entry of the array is a SgNullExpression, iff the /// first array dimension was not specified. /// \code /// int x[] = { 1, 2, 3 }; /// \endcode /// note, the expression does not have to be a constant /// \code /// int x[i*5]; /// \endcode /// \post return-value.empty() == false /// \post return-value[*] != NULL (no nullptr in the returned vector) std::vector<SgExpression*> get_C_array_dimensions(const SgArrayType& arrtype); /// \brief returns the array dimensions in an array as defined for arrtype /// \param arrtype the type of a C/C++ array /// \param varref a reference to an array variable (the variable of type arrtype) /// \return an array that contains an expression indicating each dimension's size. /// OWNERSHIP of the expressions is TRANSFERED TO the CALLER (which /// becomes responsible for freeing the expressions). /// If the first array dimension was not specified an expression /// that indicates that size is generated. /// \code /// int x[][3] = { 1, 2, 3, 4, 5, 6 }; /// \endcode /// the entry for the first dimension will be: /// \code /// // 3 ... 
size of 2nd dimension /// sizeof(x) / (sizeof(int) * 3) /// \endcode /// \pre arrtype is the array-type of varref /// \post return-value.empty() == false /// \post return-value[*] != NULL (no nullptr in the returned vector) /// \post !isSgNullExpression(return-value[*]) std::vector<SgExpression*> get_C_array_dimensions(const SgArrayType& arrtype, const SgVarRefExp& varref); /// \overload /// \note see get_C_array_dimensions for SgVarRefExp for details. /// \todo make initname const std::vector<SgExpression*> get_C_array_dimensions(const SgArrayType& arrtype, SgInitializedName& initname); //! Check if an expression is an array access (SgPntrArrRefExp). If so, return its name expression and subscripts if requested. Users can use convertRefToInitializedName() to get the possible name. It does not check if the expression is a top level SgPntrArrRefExp. ROSE_DLL_API bool isArrayReference(SgExpression* ref, SgExpression** arrayNameExp=NULL, std::vector<SgExpression*>** subscripts=NULL); //! Collect variable references in array types. The default NodeQuery::querySubTree() will miss variables referenced in array type's index list. e.g. double *buffer = new double[numItems] ; ROSE_DLL_API int collectVariableReferencesInArrayTypes (SgLocatedNode* root, Rose_STL_Container<SgNode*> & currentVarRefList); //! Has a UPC shared type of any kinds (shared-to-shared, private-to-shared, shared-to-private, shared scalar/array)? An optional parameter, mod_type_out, stores the first SgModifierType with UPC access information. /*! * Note: we classify private-to-shared as 'has shared' type for convenience here. It is indeed a private type in strict sense. 
AST graph for some examples: - shared scalar: SgModifierType -->base type - shared array: SgArrayType --> SgModiferType --> base type - shared to shared: SgModifierType --> SgPointerType --> SgModifierType ->SgTypeInt - shared to private: SgModifierType --> SgPointerType --> base type - private to shared: SgPointerType --> SgModifierType --> base type */ ROSE_DLL_API bool hasUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL ); //! Check if a type is a UPC shared type, including shared array, shared pointers etc. Exclude private pointers to shared types. Optionally return the modifier type with the UPC shared property. /*! * ROSE uses SgArrayType of SgModifierType to represent shared arrays, not SgModifierType points to SgArrayType. Also typedef may cause a chain of nodes before reach the actual SgModifierType with UPC shared property. */ ROSE_DLL_API bool isUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL); //! Check if a modifier type is a UPC shared type. ROSE_DLL_API bool isUpcSharedModifierType (SgModifierType* mod_type); //! Check if an array type is a UPC shared type. ROSE AST represents a UPC shared array as regular array of elements of UPC shared Modifier Type. Not directly a UPC shared Modifier Type of an array. ROSE_DLL_API bool isUpcSharedArrayType (SgArrayType* array_type); //! Check if a shared UPC type is strict memory consistency or not. Return false if it is relaxed. (So isUpcRelaxedSharedModifierType() is not necessary.) ROSE_DLL_API bool isUpcStrictSharedModifierType(SgModifierType* mode_type); //! Get the block size of a UPC shared modifier type ROSE_DLL_API size_t getUpcSharedBlockSize(SgModifierType* mod_type); //! Get the block size of a UPC shared type, including Modifier types and array of modifier types (shared arrays) ROSE_DLL_API size_t getUpcSharedBlockSize(SgType* t); //! Is UPC phase-less shared type? Phase-less means block size of the first SgModifierType with UPC information is 1 or 0/unspecified. 
Also return false if the type is not a UPC shared type. ROSE_DLL_API bool isUpcPhaseLessSharedType (SgType* t); //! Is a UPC private-to-shared pointer? SgPointerType comes first compared to SgModifierType with UPC information. Input type must be any of UPC shared types first. ROSE_DLL_API bool isUpcPrivateToSharedType(SgType* t); //! Is a UPC array with dimension of X*THREADS ROSE_DLL_API bool isUpcArrayWithThreads(SgArrayType* t); //! Lookup a named type based on its name, bottomup searching from a specified scope. Note name collison might be allowed for c (not C++) between typedef and enum/struct. Only the first matched named type will be returned in this case. typedef is returned as it is, not the base type it actually refers to. ROSE_DLL_API SgType* lookupNamedTypeInParentScopes(const std::string& type_name, SgScopeStatement* scope=NULL); // DQ (7/22/2014): Added support for comparing expression types in actual arguments with those expected from the formal function parameter types. //! Get the type of the associated argument expression from the function type. ROSE_DLL_API SgType* getAssociatedTypeFromFunctionTypeList(SgExpression* actual_argument_expression); //! Verify that 2 SgTemplateArgument are equivalent (same type, same expression, or same template declaration) ROSE_DLL_API bool templateArgumentEquivalence(SgTemplateArgument * arg1, SgTemplateArgument * arg2); //! Verify that 2 SgTemplateArgumentPtrList are equivalent. ROSE_DLL_API bool templateArgumentListEquivalence(const SgTemplateArgumentPtrList & list1, const SgTemplateArgumentPtrList & list2); //! Test for equivalence of types independent of access permissions (private or protected modes for members of classes). ROSE_DLL_API bool isEquivalentType (const SgType* lhs, const SgType* rhs); //! Find the function type matching a function signature plus a given return type ROSE_DLL_API SgFunctionType* findFunctionType (SgType* return_type, SgFunctionParameterTypeList* typeList); //! 
Test if two types are equivalent SgFunctionType nodes. This is necessary for template function types //! They may differ in one SgTemplateType pointer but identical otherwise. ROSE_DLL_API bool isEquivalentFunctionType (const SgFunctionType* lhs, const SgFunctionType* rhs); //@} //------------------------------------------------------------------------ //@{ /*! @name Loop handling \brief */ // by Jeremiah //! Add a step statement to the end of a loop body //! Add a new label to the end of the loop, with the step statement after //! it; then change all continue statements in the old loop body into //! jumps to the label //! //! For example: //! while (a < 5) {if (a < -3) continue;} (adding "a++" to end) becomes //! while (a < 5) {if (a < -3) goto label; label: a++;} ROSE_DLL_API void addStepToLoopBody(SgScopeStatement* loopStmt, SgStatement* step); ROSE_DLL_API void moveForStatementIncrementIntoBody(SgForStatement* f); ROSE_DLL_API void convertForToWhile(SgForStatement* f); ROSE_DLL_API void convertAllForsToWhiles(SgNode* top); //! Change continue statements in a given block of code to gotos to a label ROSE_DLL_API void changeContinuesToGotos(SgStatement* stmt, SgLabelStatement* label); //!Return the loop index variable for a for loop ROSE_DLL_API SgInitializedName* getLoopIndexVariable(SgNode* loop); //!Check if a SgInitializedName is used as a loop index within a AST subtree //! This function will use a bottom-up traverse starting from the subtree_root to find all enclosing loops and check if ivar is used as an index for either of them. ROSE_DLL_API bool isLoopIndexVariable(SgInitializedName* ivar, SgNode* subtree_root); //! Check if a for loop uses C99 style initialization statement with multiple expressions like for (int i=0, j=0; ..) or for (i=0,j=0;...) /*! for (int i=0, j=0; ..) is stored as two variable declarations under SgForInitStatement's init_stmt member for (i=0,j=0;...) is stored as a single expression statement, with comma expression (i=0,j=0). 
*/ ROSE_DLL_API bool hasMultipleInitStatmentsOrExpressions (SgForStatement* for_loop); //! Routines to get and set the body of a loop ROSE_DLL_API SgStatement* getLoopBody(SgScopeStatement* loop); ROSE_DLL_API void setLoopBody(SgScopeStatement* loop, SgStatement* body); //! Routines to get the condition of a loop. It recognize While-loop, For-loop, and Do-While-loop ROSE_DLL_API SgStatement* getLoopCondition(SgScopeStatement* loop); //! Set the condition statement of a loop, including While-loop, For-loop, and Do-While-loop. ROSE_DLL_API void setLoopCondition(SgScopeStatement* loop, SgStatement* cond); //! Check if a for-loop has a canonical form, return loop index, bounds, step, and body if requested //! //! A canonical form is defined as : one initialization statement, a test expression, and an increment expression , loop index variable should be of an integer type. IsInclusiveUpperBound is true when <= or >= is used for loop condition ROSE_DLL_API bool isCanonicalForLoop(SgNode* loop, SgInitializedName** ivar=NULL, SgExpression** lb=NULL, SgExpression** ub=NULL, SgExpression** step=NULL, SgStatement** body=NULL, bool *hasIncrementalIterationSpace = NULL, bool* isInclusiveUpperBound = NULL); //! Check if a Fortran Do loop has a complete canonical form: Do I=1, 10, 1 ROSE_DLL_API bool isCanonicalDoLoop(SgFortranDo* loop,SgInitializedName** ivar/*=NULL*/, SgExpression** lb/*=NULL*/, SgExpression** ub/*=NULL*/, SgExpression** step/*=NULL*/, SgStatement** body/*=NULL*/, bool *hasIncrementalIterationSpace/*= NULL*/, bool* isInclusiveUpperBound/*=NULL*/); //! Set the lower bound of a loop header for (i=lb; ...) ROSE_DLL_API void setLoopLowerBound(SgNode* loop, SgExpression* lb); //! Set the upper bound of a loop header,regardless the condition expression type. for (i=lb; i op up, ...) ROSE_DLL_API void setLoopUpperBound(SgNode* loop, SgExpression* ub); //! 
Set the stride(step) of a loop 's incremental expression, regardless the expression types (i+=s; i= i+s, etc) ROSE_DLL_API void setLoopStride(SgNode* loop, SgExpression* stride); //! Normalize loop init stmt by promoting the single variable declaration statement outside of the for loop header's init statement, e.g. for (int i=0;) becomes int i_x; for (i_x=0;..) and rewrite the loop with the new index variable, if necessary ROSE_DLL_API bool normalizeForLoopInitDeclaration(SgForStatement* loop); //! Undo the normalization of for loop's C99 init declaration. Previous record of normalization is used to ease the reverse transformation. ROSE_DLL_API bool unnormalizeForLoopInitDeclaration(SgForStatement* loop); //! Normalize a for loop, return true if successful. Generated constants will be fold by default. //! //! Translations are : //! For the init statement: for (int i=0;... ) becomes int i; for (i=0;..) //! For test expression: //! i<x is normalized to i<= (x-1) and //! i>x is normalized to i>= (x+1) //! For increment expression: //! i++ is normalized to i+=1 and //! i-- is normalized to i+=-1 //! i-=s is normalized to i+= -s ROSE_DLL_API bool forLoopNormalization(SgForStatement* loop, bool foldConstant = true); //! Normalize a for loop's test expression //! i<x is normalized to i<= (x-1) and //! i>x is normalized to i>= (x+1) ROSE_DLL_API bool normalizeForLoopTest(SgForStatement* loop); ROSE_DLL_API bool normalizeForLoopIncrement(SgForStatement* loop); //!Normalize a Fortran Do loop. Make the default increment expression (1) explicit ROSE_DLL_API bool doLoopNormalization(SgFortranDo* loop); //! Unroll a target loop with a specified unrolling factor. It handles steps larger than 1 and adds a fringe loop if the iteration count is not evenly divisible by the unrolling factor. ROSE_DLL_API bool loopUnrolling(SgForStatement* loop, size_t unrolling_factor); //! 
Interchange/permutate a n-level perfectly-nested loop rooted at 'loop' using a lexicographical order number within (0,depth!). ROSE_DLL_API bool loopInterchange(SgForStatement* loop, size_t depth, size_t lexicoOrder); //! Tile the n-level (starting from 1) loop of a perfectly nested loop nest using tiling size s ROSE_DLL_API bool loopTiling(SgForStatement* loopNest, size_t targetLevel, size_t tileSize); //Winnie Loop Collapsing SgExprListExp * loopCollapsing(SgForStatement* target_loop, size_t collapsing_factor); bool getForLoopInformations( SgForStatement * for_loop, SgVariableSymbol * & iterator, SgExpression * & lower_bound, SgExpression * & upper_bound, SgExpression * & stride ); //@} //------------------------------------------------------------------------ //@{ /*! @name Topdown search \brief Top-down traversal from current node to find a node of a specified type */ //! Query a subtree to get all nodes of a given type, with an appropriate downcast. template <typename NodeType> std::vector<NodeType*> querySubTree(SgNode* top, VariantT variant = (VariantT)NodeType::static_variant) { #if 0 printf ("Top of SageInterface::querySubTree() \n"); #endif Rose_STL_Container<SgNode*> nodes = NodeQuery::querySubTree(top,variant); std::vector<NodeType*> result(nodes.size(), NULL); int count = 0; #if 0 printf ("In SageInterface::querySubTree(): before initialization loop \n"); #endif for (Rose_STL_Container<SgNode*>::const_iterator i = nodes.begin(); i != nodes.end(); ++i, ++count) { #if 0 printf ("In SageInterface::querySubTree(): in loop: count = %d \n",count); #endif NodeType* node = dynamic_cast<NodeType*>(*i); ROSE_ASSERT (node); result[count] = node; } #if 0 printf ("Leaving SageInterface::querySubTree(): after initialization loop \n"); #endif return result; } /*! \brief Returns STL vector of SgFile IR node pointers. Demonstrates use of restricted traversal over just SgFile IR nodes. 
*/ std::vector < SgFile * >generateFileList (); /** Get the current SgProject IR Node. * * The library should never have more than one project and it asserts such. If no project has been created yet then this * function returns the null pointer. */ ROSE_DLL_API SgProject * getProject(); //! \return the project associated with a node SgProject * getProject(const SgNode * node); //! Query memory pools to grab SgNode of a specified type template <typename NodeType> static std::vector<NodeType*> getSgNodeListFromMemoryPool() { // This function uses a memory pool traversal specific to the SgFile IR nodes class MyTraversal : public ROSE_VisitTraversal { public: std::vector<NodeType*> resultlist; void visit ( SgNode* node) { NodeType* result = dynamic_cast<NodeType* > (node); ROSE_ASSERT(result!= NULL); if (result!= NULL) { resultlist.push_back(result); } }; virtual ~MyTraversal() {} }; MyTraversal my_traversal; NodeType::traverseMemoryPoolNodes(my_traversal); return my_traversal.resultlist; } /*! \brief top-down traversal from current node to find the main() function declaration */ ROSE_DLL_API SgFunctionDeclaration* findMain(SgNode* currentNode); //! Find the last declaration statement within a scope (if any). This is often useful to decide where to insert another variable declaration statement. Pragma declarations are not treated as a declaration by default in this context. SgStatement* findLastDeclarationStatement(SgScopeStatement * scope, bool includePragma = false); //midend/programTransformation/partialRedundancyElimination/pre.h //! Find referenced symbols within an expression std::vector<SgVariableSymbol*> getSymbolsUsedInExpression(SgExpression* expr); //! Find break statements inside a particular statement, stopping at nested loops or switches /*! loops or switch statements defines their own contexts for break statements. The function will stop immediately if run on a loop or switch statement. 
If fortranLabel is non-empty, breaks (EXITs) to that label within nested loops are included in the returned list. */ std::vector<SgBreakStmt*> findBreakStmts(SgStatement* code, const std::string& fortranLabel = ""); //! Find all continue statements inside a particular statement, stopping at nested loops /*! Nested loops define their own contexts for continue statements. The function will stop immediately if run on a loop statement. If fortranLabel is non-empty, continues (CYCLEs) to that label within nested loops are included in the returned list. */ std::vector<SgContinueStmt*> findContinueStmts(SgStatement* code, const std::string& fortranLabel = ""); std::vector<SgGotoStatement*> findGotoStmts(SgStatement* scope, SgLabelStatement* l); std::vector<SgStatement*> getSwitchCases(SgSwitchStatement* sw); //! Collect all variable references in a subtree void collectVarRefs(SgLocatedNode* root, std::vector<SgVarRefExp* >& result); //! Topdown traverse a subtree from root to find the first declaration given its name, scope (optional, can be NULL), and defining or nondefining flag. template <typename T> T* findDeclarationStatement(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining) { bool found = false; #if 0 printf ("In findDeclarationStatement(): root = %p \n",root); printf ("In findDeclarationStatement(): name = %s \n",name.c_str()); printf ("In findDeclarationStatement(): scope = %p \n",scope); printf ("In findDeclarationStatement(): isDefining = %s \n",isDefining ? "true" : "false"); #endif // Do we really want a NULL pointer to be acceptable input to this function? // Maybe we should have an assertion that it is non-null? if (!root) return NULL; T* decl = dynamic_cast<T*>(root); #if 0 printf ("In findDeclarationStatement(): decl = %p \n",decl); #endif if (decl != NULL) { if (scope) { if ((decl->get_scope() == scope) && (decl->search_for_symbol_from_symbol_table()->get_name() == name)) { found = true; } } else // Liao 2/9/2010. 
We should allow NULL scope { #if 0 // DQ (12/6/2016): Include this into the debugging code to aboid compiler warning about unused variable. SgSymbol* symbol = decl->search_for_symbol_from_symbol_table(); printf ("In findDeclarationStatement(): decl->search_for_symbol_from_symbol_table() = %p \n",symbol); printf ("In findDeclarationStatement(): decl->search_for_symbol_from_symbol_table()->get_name() = %s \n",symbol->get_name().str()); #endif if (decl->search_for_symbol_from_symbol_table()->get_name() == name) { found = true; } } } if (found) { if (isDefining) { #if 0 printf ("In findDeclarationStatement(): decl->get_firstNondefiningDeclaration() = %p \n",decl->get_firstNondefiningDeclaration()); printf ("In findDeclarationStatement(): decl->get_definingDeclaration() = %p \n",decl->get_definingDeclaration()); #endif ROSE_ASSERT (decl->get_definingDeclaration() != NULL); #if 0 printf ("In findDeclarationStatement(): returing decl->get_definingDeclaration() = %p \n",decl->get_definingDeclaration()); #endif return dynamic_cast<T*> (decl->get_definingDeclaration()); } else { #if 0 printf ("In findDeclarationStatement(): returing decl = %p \n",decl); #endif return decl; } } std::vector<SgNode*> children = root->get_traversalSuccessorContainer(); #if 0 printf ("In findDeclarationStatement(): children.size() = %zu \n",children.size()); #endif // DQ (4/10/2016): Note that if we are searching for a function member that has it's defining // declaration defined outside of the class then it will not be found in the child list. for (std::vector<SgNode*>::const_iterator i = children.begin(); i != children.end(); ++i) { T* target = findDeclarationStatement<T> (*i,name,scope,isDefining); if (target) { return target; } } return NULL; } //! Topdown traverse a subtree from root to find the first function declaration matching the given name, scope (optional, can be NULL), and defining or nondefining flag. This is an instantiation of findDeclarationStatement<T>. 
SgFunctionDeclaration* findFunctionDeclaration(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining); #if 0 //TODO // 1. preorder traversal from current SgNode till find next SgNode of type V_SgXXX // until reach the end node SgNode* getNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL); // 2. return all nodes of type VariantT following the source node std::vector<SgNode*> getAllNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL); #endif //@} //------------------------------------------------------------------------ //@{ /*! @name Bottom up search \brief Backwards traverse through the AST to find a node, findEnclosingXXX() */ // remember to put const to all arguments. /** Find a node by type using upward traversal. * * Traverse backward through a specified node's ancestors, starting with the node's parent and progressing to more distant * ancestors, to find the first node matching the specified or derived type. If @p includingSelf is true then the * starting node, @p astNode, is returned if its type matches, otherwise the search starts at the parent of @p astNode. * * For the purposes of this function, the parent (P) of an SgDeclarationStatement node (N) is considered to be the first * non-defining declaration of N if N has both a defining declaration and a first non-defining declaration and the defining * declaration is different than the first non-defining declaration. * * If no ancestor of the requisite type of subtypes is found then this function returns a null pointer. * * If @p astNode is the null pointer, then the return value is a null pointer. That is, if there is no node, then there cannot * be an enclosing node of the specified type. 
 */
template <typename NodeType>
NodeType* getEnclosingNode(const SgNode* astNode, const bool includingSelf = false)
{
// Set to 1 to enable the debug printf output guarded below.
#define DEBUG_GET_ENCLOSING_NODE 0

#if 1 /* TOP_LEVEL_IF */
  // DQ (12/31/2019): This version does not detect a cycle that Robb's version detects in processing Cxx11_tests/test2016_23.C.
  // This will have to be investigated separately from the issue I am working on currently.

  // DQ (10/20/2012): This is the older version of this implementation. Until I am sure that
  // the newer version (below) is what we want to use I will resolve this conflict by keeping
  // the previous version in place.

  // A null starting node has no enclosing node of any type.
  if (NULL == astNode)
  {
    return NULL;
  }

  // Optionally accept the starting node itself when it already has the requested type.
  if ( (includingSelf ) && (dynamic_cast<const NodeType*>(astNode)) )
  {
    return const_cast<NodeType*>(dynamic_cast<const NodeType*> (astNode));
  }

  // DQ (3/5/2012): Check for reference to self...
  ROSE_ASSERT(astNode->get_parent() != astNode);

  SgNode* parent = astNode->get_parent();

  // DQ (3/5/2012): Check for loops that will cause infinite loops.
  SgNode* previouslySeenParent = parent;
  bool foundCycle = false;
  int counter = 0;

#if DEBUG_GET_ENCLOSING_NODE
  printf ("In getEnclosingNode(): previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif

  // First pass: chase parent pointers until a match, NULL, or a cycle is detected.
  while ( (foundCycle == false) && (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
  {
    ROSE_ASSERT(parent->get_parent() != parent);
#if DEBUG_GET_ENCLOSING_NODE
    printf (" --- parent = %p = %s \n",parent,parent->class_name().c_str());
    printf (" --- --- parent->get_parent() = %p = %s \n",parent->get_parent(),parent->get_parent()->class_name().c_str());
#endif
#if 1
    // DQ (1/8/2020): ROSE-82 (on RZ) This limit needs to be larger and increasing it to 500 was enough
    // for a specific code with a long chain of if-then-else nesting, So to make this sufficient for more
    // general code we have increased the limit to 100,000. Note that 50 was not enough for real code,
    // but was enough for our regression tests.
    // DQ (12/30/2019): This is added to support detection of infinite loops over parent pointers.
    // if (counter >= 500)
    if (counter >= 100000)
    {
      printf ("Exiting: In getEnclosingNode(): loop limit exceeded: counter = %d \n",counter);
      ROSE_ABORT();
    }
#endif
    parent = parent->get_parent();

    // DQ (3/5/2012): Check for loops that will cause infinite loops.
    // ROSE_ASSERT(parent != previouslySeenParent);
    if (parent == previouslySeenParent)
    {
      foundCycle = true;
    }

    counter++;
  }

#if DEBUG_GET_ENCLOSING_NODE
  printf ("previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif

  // Restart from the first parent seen; declaration statements may be redirected below.
  parent = previouslySeenParent;

  SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
  if (declarationStatement != NULL)
  {
#if 0
    printf ("Found a SgDeclarationStatement \n");
#endif
    SgDeclarationStatement* definingDeclaration          = declarationStatement->get_definingDeclaration();
    SgDeclarationStatement* firstNondefiningDeclaration  = declarationStatement->get_firstNondefiningDeclaration();
#if 0
    printf (" --- declarationStatement = %p \n",declarationStatement);
    printf (" --- definingDeclaration = %p \n",definingDeclaration);
    if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
      printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
    printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
    if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
      printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
    if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
    {
#if 0
      printf ("Found a nondefining declaration so use the non-defining declaration instead \n");
#endif
      // DQ (10/19/2012): Use the defining declaration instead.
      // parent = firstNondefiningDeclaration;
      parent = definingDeclaration;
    }
  }

#if 0
  printf ("reset: previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif

  // DQ (10/19/2012): This branch is just to document the cycle that was previously detected, it is for
  // debugging only. Thus it only makes sense for it to be executed when "(foundCycle == true)". However,
  // this will have to be revisited later since it appears clear that it is a problem for the binary analysis
  // work when it is visited for this case. Since the cycle is detected, but there is no assertion on the
  // cycle, we don't exit when a cycle is identified (which is the point of the code below).

  // Note also that I have fixed the code (above and below) to only chase pointers through defining
  // declarations (where they exist), this is important since non-defining declarations can be almost
  // anywhere (and thus chasing them can make it appear that there are cycles where there are none
  // (I think); test2012_234.C demonstrates an example of this.

  // DQ (10/9/2012): Robb has suggested this change to fix the binary analysis work.
  // if (foundCycle == true)
  if (foundCycle == false)
  {
    // Second pass: walk parents again, redirecting through defining declarations.
    while ( (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
    {
      ROSE_ASSERT(parent->get_parent() != parent);
#if 0
      printf ("In getEnclosingNode() (2nd try): parent = %p = %s \n",parent,parent->class_name().c_str());
      if (parent->get_file_info() != NULL)
        parent->get_file_info()->display("In getEnclosingNode() (2nd try): debug");
#endif
      SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
      if (declarationStatement != NULL)
      {
#if DEBUG_GET_ENCLOSING_NODE
        printf ("Found a SgDeclarationStatement \n");
#endif
        SgDeclarationStatement* definingDeclaration         = declarationStatement->get_definingDeclaration();
        SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
        printf (" --- declarationStatement = %p = %s \n",declarationStatement,(declarationStatement != NULL) ? declarationStatement->class_name().c_str() : "null");
        printf (" --- definingDeclaration = %p \n",definingDeclaration);
        if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
          printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
        printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
        if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
          printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
        if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
        {
#if 0
          printf ("Found a nondefining declaration so use the firstNondefining declaration instead \n");
#endif
          // DQ (10/19/2012): Use the defining declaration instead.
          // parent = firstNondefiningDeclaration;
          parent = definingDeclaration;
        }
      }

      parent = parent->get_parent();

#if 1
      // DQ (3/5/2012): Check for loops that will cause infinite loops.
      ROSE_ASSERT(parent != previouslySeenParent);
#else
      printf ("WARNING::WARNING::WARNING commented out assertion for parent != previouslySeenParent \n");
      if (parent == previouslySeenParent)
        break;
#endif
    }
  }

  return const_cast<NodeType*>(dynamic_cast<const NodeType*> (parent));

#else /* TOP_LEVEL_IF */
  // DQ (10/20/2012): Using Robb's newer version with my modification to use the definingDeclaration rather than firstNondefiningDeclaration (below).

  // Find the parent of specified type, but watch out for cycles in the ancestry (which would cause an infinite loop).
  // Cast away const because isSg* functions aren't defined for const node pointers; and our return is not const.
  SgNode *node = const_cast<SgNode*>(!astNode || includingSelf ? astNode : astNode->get_parent());
  std::set<const SgNode*> seen; // nodes we've seen, in order to detect cycles
  while (node)
  {
    if (NodeType *found = dynamic_cast<NodeType*>(node))
      return found;

    // FIXME: Cycle detection could be moved elsewhere so we don't need to do it on every call. [RPM 2012-10-09]
    // DQ (12/30/2019): Provide more detail in error message.
    if (seen.insert(node).second == false)
    {
      printf ("Error: node is already in set and defines a cycle: node = %p = %s \n",node,node->class_name().c_str());
      std::set<const SgNode*>::const_iterator i = seen.begin();
      while (i != seen.end())
      {
        const SgNode* element = *i;
        printf (" --- seen element: element = %p = %s \n",element,element->class_name().c_str());
        i++;
      }
      printf ("Exiting after error! \n");
      ROSE_ABORT();
    }
    // ROSE_ASSERT(seen.insert(node).second);

    // Traverse to parent (declaration statements are a special case)
    if (SgDeclarationStatement *declarationStatement = isSgDeclarationStatement(node))
    {
      SgDeclarationStatement *definingDeclaration = declarationStatement->get_definingDeclaration();
      SgDeclarationStatement *firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
      if (definingDeclaration && firstNondefiningDeclaration && declarationStatement != firstNondefiningDeclaration)
      {
        // DQ (10/19/2012): Use the defining declaration instead.
        // node = firstNondefiningDeclaration;
        node = definingDeclaration;
      }
    }
    else
    {
      node = node->get_parent();
    }
  }
  return NULL;
#endif /* TOP_LEVEL_IF */
}

//! Find enclosing source file node
ROSE_DLL_API SgSourceFile* getEnclosingSourceFile(SgNode* n, const bool includingSelf=false);

//! Get the closest scope from astNode. Return astNode if it is already a scope.
ROSE_DLL_API SgScopeStatement* getScope(const SgNode* astNode);

//! Get the enclosing scope from a node n
ROSE_DLL_API SgScopeStatement* getEnclosingScope(SgNode* n, const bool includingSelf=false);

//! Traverse back through a node's parents to find the enclosing global scope
ROSE_DLL_API SgGlobal* getGlobalScope( const SgNode* astNode);

// DQ (12/7/2020): This is supporting the recognition of functions in header files from two different AST.
//! This is supporting the recognition of functions in header files from two different ASTs
ROSE_DLL_API bool hasSameGlobalScope ( SgStatement* statement_1, SgStatement* statement_2 );

//! Find the function definition
ROSE_DLL_API SgFunctionDefinition* getEnclosingProcedure(SgNode* n, const bool includingSelf=false);

ROSE_DLL_API SgFunctionDefinition* getEnclosingFunctionDefinition(SgNode* astNode, const bool includingSelf=false);

//! Find the closest enclosing statement, including the given node
ROSE_DLL_API SgStatement* getEnclosingStatement(SgNode* n);

//!
Find the closest switch outside a given statement (normally used for case and default statements) ROSE_DLL_API SgSwitchStatement* findEnclosingSwitch(SgStatement* s); //! Find enclosing OpenMP clause body statement from s. If s is already one, return it directly. ROSE_DLL_API SgOmpClauseBodyStatement* findEnclosingOmpClauseBodyStatement(SgStatement* s); //! Find the closest loop outside the given statement; if fortranLabel is not empty, the Fortran label of the loop must be equal to it ROSE_DLL_API SgScopeStatement* findEnclosingLoop(SgStatement* s, const std::string& fortranLabel = "", bool stopOnSwitches = false); //! Find the enclosing function declaration, including its derived instances like isSgProcedureHeaderStatement, isSgProgramHeaderStatement, and isSgMemberFunctionDeclaration. ROSE_DLL_API SgFunctionDeclaration * getEnclosingFunctionDeclaration (SgNode * astNode, const bool includingSelf=false); //roseSupport/utility_functions.h //! get the SgFile node from current node ROSE_DLL_API SgFile* getEnclosingFileNode (SgNode* astNode ); //! Get the initializer containing an expression if it is within an initializer. ROSE_DLL_API SgInitializer* getInitializerOfExpression(SgExpression* n); //! Get the closest class definition enclosing the specified AST node, ROSE_DLL_API SgClassDefinition* getEnclosingClassDefinition(SgNode* astnode, const bool includingSelf=false); //! Get the closest class declaration enclosing the specified AST node, ROSE_DLL_API SgClassDeclaration* getEnclosingClassDeclaration( SgNode* astNode ); // DQ (2/7/2019): Adding support for name qualification of variable references associated with SgPointerMemberType function parameters. //! Get the enclosing SgExprListExp (used as part of function argument index evaluation in subexpressions). ROSE_DLL_API SgExprListExp* getEnclosingExprListExp(SgNode* astNode, const bool includingSelf = false); // DQ (2/7/2019): Need a function to return when an expression is in an expression subtree. 
// This is part of index evaluation for expressions in function argument lists, but likely useful elsewhere as well.
ROSE_DLL_API bool isInSubTree(SgExpression* subtree, SgExpression* exp);

// DQ (2/7/2019): Need a function to return the SgFunctionDeclaration from a SgFunctionCallExp.
ROSE_DLL_API SgFunctionDeclaration* getFunctionDeclaration ( SgFunctionCallExp* functionCallExp );

// DQ (2/17/2019): Generalizing this support for SgVarRefExp and SgMemberFunctionRefExp nodes.
// DQ (2/8/2019): Adding support for detecting when to use added name qualification for pointer-to-member expressions.
ROSE_DLL_API bool isDataMemberReference(SgVarRefExp* varRefExp);
// ROSE_DLL_API bool isAddressTaken(SgVarRefExp* varRefExp);
ROSE_DLL_API bool isAddressTaken(SgExpression* refExp);

// DQ (2/17/2019): Adding support for detecting when to use added name qualification for member function references.
ROSE_DLL_API bool isMemberFunctionMemberReference(SgMemberFunctionRefExp* memberFunctionRefExp);

// DQ (2/15/2019): Adding support for detecting which class a member reference is being made from.
// ROSE_DLL_API SgClassType* getClassTypeForDataMemberReference(SgVarRefExp* varRefExp);
// ROSE_DLL_API std::list<SgClassType*> getClassTypeChainForDataMemberReference(SgVarRefExp* varRefExp);
ROSE_DLL_API std::list<SgClassType*> getClassTypeChainForMemberReference(SgExpression* refExp);

ROSE_DLL_API std::set<SgNode*> getFrontendSpecificNodes();

// DQ (2/17/2019): Display the shared nodes in the AST for debugging.
ROSE_DLL_API void outputSharedNodes( SgNode* node );

// DQ (10/31/2020): Added function to help debug edits to statements in scopes.
ROSE_DLL_API void displayScope(SgScopeStatement* scope); // TODO #if 0 SgNode * getEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL); std::vector<SgNode *> getAllEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL); SgVariableDeclaration* findVariableDeclaratin( const string& varname) SgClassDeclaration* getEnclosingClassDeclaration( const SgNode* astNode); // e.g. for some expression, find its parent statement SgStatement* getEnclosingStatement(const SgNode* astNode); SgSwitchStatement* getEnclosingSwitch(SgStatement* s); SgModuleStatement* getEnclosingModuleStatement( const SgNode* astNode); // used to build a variable reference for compiler generated code in current scope SgSymbol * findReachingDefinition (SgScopeStatement* startScope, SgName &name); #endif //@} //------------------------------------------------------------------------ //@{ /*! @name AST Walk and Traversal \brief */ // Liao, 1/9/2008 /*! \brief return the first global scope under current project */ ROSE_DLL_API SgGlobal * getFirstGlobalScope(SgProject *project); /*! \brief get the last statement within a scope, return NULL if it does not exit */ ROSE_DLL_API SgStatement* getLastStatement(SgScopeStatement *scope); //! Get the first statement within a scope, return NULL if it does not exist. Skip compiler-generated statement by default. Count transformation-generated ones, but excluding those which are not to be outputted in unparsers. ROSE_DLL_API SgStatement* getFirstStatement(SgScopeStatement *scope,bool includingCompilerGenerated=false); //!Find the first defining function declaration statement in a scope ROSE_DLL_API SgFunctionDeclaration* findFirstDefiningFunctionDecl(SgScopeStatement* scope); //! Get next statement within the same scope of current statement ROSE_DLL_API SgStatement* getNextStatement(SgStatement * currentStmt); //! Get previous statement of the current statement. 
It may return a previous statement of a parent scope by default (climbOutScope is true), otherwise only a previous statement of the same scope is returned. ROSE_DLL_API SgStatement* getPreviousStatement(SgStatement * currentStmt, bool climbOutScope = true); #if 0 //TODO // preorder traversal from current SgNode till find next SgNode of type V_SgXXX SgNode* getNextSgNode( const SgNode* currentNode, VariantT=V_SgNode); #endif // DQ (11/15/2018): Adding support for traversals over the include file tree. //! return path prefix for subtree of include files. void listHeaderFiles ( SgIncludeFile* includeFile ); //@} //------------------------------------------------------------------------ //@{ /*! @name AST Comparison \brief Compare AST nodes, subtree, etc */ //! Check if a SgIntVal node has a given value ROSE_DLL_API bool isEqualToIntConst(SgExpression* e, int value); //! Check if two function declarations refer to the same one. Two function declarations are the same when they are a) identical, b) same name in C c) same qualified named and mangled name in C++. A nondefining (prototype) declaration and a defining declaration of a same function are treated as the same. /*! * There is a similar function bool compareFunctionDeclarations(SgFunctionDeclaration *f1, SgFunctionDeclaration *f2) from Classhierarchy.C */ ROSE_DLL_API bool isSameFunction(SgFunctionDeclaration* func1, SgFunctionDeclaration* func2); //! Check if a statement is the last statement within its closed scope ROSE_DLL_API bool isLastStatement(SgStatement* stmt); //@} //------------------------------------------------------------------------ //@{ /*! @name AST insert, removal, and replacement \brief Add, remove,and replace AST scope->append_statement(), exprListExp->append_expression() etc. are not enough to handle side effect of parent pointers, symbol tables, preprocessing info, defining/nondefining pointers etc. 
*/ #if 1 struct DeferredTransformation { // DQ (11/19/2020): We need to expand the use of this to cover deffered transformations of common SageInterface transformations (e.g. replaceStatement). // So I needed to move this out of being specific to the outliner and make it more generally data structure in the SageInterface. // DQ (11/15/2020): Need to add the concept of deffered transformation to cover replaceStatement operations. // DQ (8/7/2019): Store data required to support defering the transformation to insert the outlined function prototypes // into class declaration (when this is required to support the outlined function's access to protected or private data members). // This is part of an optimization to support the optimization of header file unparsing (limiting the overhead of supporting any // header file to just focus on the few (typically one) header file that would have to be unparsed. enum TransformationKind { // DQ (11/22/2020): Might need to also add SageInterface::addDefaultConstructorIfRequired() and SageStatement::insert_statment() // to support the processStatements.C transforamtions to pre-process the AST (return expressions and variable initializations). e_error, e_default, e_outliner, e_replaceStatement, e_removeStatement, e_replaceDefiningFunctionDeclarationWithFunctionPrototype, e_last }; TransformationKind deferredTransformationKind; // DQ (12/12/2020): Adding a string label so that we can name the different kinds of transformations. // E.g. moving pattern matched function from header file to dynamic library, vs. replacing function // definitions in the dynamic library file with function prototypes. std::string transformationLabel; // Remove sets statementToRemove, replace sets statementToRemove and StatementToAdd. 
SgStatement* statementToRemove; SgStatement* statementToAdd; SgClassDefinition* class_definition; SgDeclarationStatement* target_class_member; SgDeclarationStatement* new_function_prototype; typedef std::set<SgClassDefinition *> ClassDefSet_t; ClassDefSet_t targetClasses; typedef std::vector<SgFunctionDeclaration *> FuncDeclList_t; FuncDeclList_t targetFriends; // DQ (2/28/2021): Adding support for outlining where it involves building up pre-transformations. // For example, in the code segregation, we build a conditiona around the interval of statements // that we are outlining. This conditional is used to overwrite the first statement in the interval // list. Because we don't want to transform the AST until after the outlining, we need so save the // whole interval so that we, after the outlining, remove the statements in the interval after that // first statement. typedef std::vector<SgStatement*> IntervalType; IntervalType statementInterval; SgStatement* locationToOverwriteWithTransformation; SgStatement* transformationToOverwriteFirstStatementInInterval; SgBasicBlock* blockOfStatementsToOutline; // DQ (12/5/2019): Added ROSE_DLL_API prefix for Windows support (too all of these functions). ROSE_DLL_API DeferredTransformation(); ROSE_DLL_API DeferredTransformation(SgClassDefinition* class_definition, SgDeclarationStatement* target_class_member, SgDeclarationStatement* new_function_prototype); ROSE_DLL_API DeferredTransformation (const DeferredTransformation& X); //! Copy constructor. ROSE_DLL_API ~DeferredTransformation (void); //! Shallow; does not delete fields. ROSE_DLL_API DeferredTransformation & operator= (const DeferredTransformation& X); //! operator=() // DQ (11/20/20): static function to generate specialized version of deferred transformation object. 
static ROSE_DLL_API DeferredTransformation replaceDefiningFunctionDeclarationWithFunctionPrototype( SgFunctionDeclaration* functionDeclaration ); static ROSE_DLL_API DeferredTransformation replaceStatement(SgStatement* oldStmt, SgStatement* newStmt, bool movePreprocessinInfo = false); static ROSE_DLL_API std::string outputDeferredTransformationKind(const TransformationKind & kind); ROSE_DLL_API void display ( std::string label ) const; }; #endif // DQ (2/24/2009): Simple function to delete an AST subtree (used in outlining). //! Function to delete AST subtree's nodes only, users must take care of any dangling pointers, symbols or types that result. ROSE_DLL_API void deleteAST(SgNode* node); //! Special purpose function for deleting AST expression tress containing valid original expression trees in constant folded expressions (for internal use only). ROSE_DLL_API void deleteExpressionTreeWithOriginalExpressionSubtrees(SgNode* root); // DQ (2/25/2009): Added new function to support outliner. //! Move statements in first block to the second block (preserves order and rebuilds the symbol table). ROSE_DLL_API void moveStatementsBetweenBlocks ( SgBasicBlock* sourceBlock, SgBasicBlock* targetBlock ); //! Move statements in Ada's package spec into C++ namespace's definition ROSE_DLL_API void moveStatementsBetweenBlocks ( SgAdaPackageSpec * sourceBlock, SgNamespaceDefinitionStatement* targetBlock ); //! Move statements in Ada's package body into C++ namespace's definition ROSE_DLL_API void moveStatementsBetweenBlocks ( SgAdaPackageBody* sourceBlock, SgNamespaceDefinitionStatement* targetBlock ); //! Move statements between C++ namespace's definitions ROSE_DLL_API void moveStatementsBetweenBlocks ( SgNamespaceDefinitionStatement* sourceBlock, SgNamespaceDefinitionStatement* targetBlock ); //! Move a variable declaration to a new scope, handle symbol, special scopes like For loop, etc. 
ROSE_DLL_API void moveVariableDeclaration(SgVariableDeclaration* decl, SgScopeStatement* target_scope); //! Append a statement to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc. ROSE_DLL_API void appendStatement(SgStatement *stmt, SgScopeStatement* scope=NULL); //! Append a statement to the end of SgForInitStatement ROSE_DLL_API void appendStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt); //! Append a list of statements to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc. ROSE_DLL_API void appendStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL); // DQ (2/6/2009): Added function to support outlining into separate file. //! Append a copy ('decl') of a function ('original_statement') into a 'scope', include any referenced declarations required if the scope is within a compiler generated file. All referenced declarations, including those from headers, are inserted if excludeHeaderFiles is set to true (the new file will not have any headers). ROSE_DLL_API void appendStatementWithDependentDeclaration( SgDeclarationStatement* decl, SgGlobal* scope, SgStatement* original_statement, bool excludeHeaderFiles ); //! Prepend a statement to the beginning of the current scope, handling side //! effects as appropriate ROSE_DLL_API void prependStatement(SgStatement *stmt, SgScopeStatement* scope=NULL); //! Prepend a statement to the beginning of SgForInitStatement ROSE_DLL_API void prependStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt); //! prepend a list of statements to the beginning of the current scope, //! handling side effects as appropriate ROSE_DLL_API void prependStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL); //! Check if a scope statement has a simple children statement list //! 
so insert additional statements under the scope is straightforward and unambiguous . //! for example, SgBasicBlock has a simple statement list while IfStmt does not. ROSE_DLL_API bool hasSimpleChildrenList (SgScopeStatement* scope); //! Insert a statement before or after the target statement within the target's scope. Move around preprocessing info automatically ROSE_DLL_API void insertStatement(SgStatement *targetStmt, SgStatement* newStmt, bool insertBefore= true, bool autoMovePreprocessingInfo = true); //! Insert a list of statements before or after the target statement within the //target's scope ROSE_DLL_API void insertStatementList(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts, bool insertBefore= true); //! Insert a statement before a target statement ROSE_DLL_API void insertStatementBefore(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true); //! Insert a list of statements before a target statement ROSE_DLL_API void insertStatementListBefore(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts); //! Insert a statement after a target statement, Move around preprocessing info automatically by default ROSE_DLL_API void insertStatementAfter(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true); //! Insert a list of statements after a target statement ROSE_DLL_API void insertStatementListAfter(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmt); //! Insert a statement after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found ROSE_DLL_API void insertStatementAfterLastDeclaration(SgStatement* stmt, SgScopeStatement* scope); //! Insert a list of statements after the last declaration within a scope. 
The statement will be prepended to the scope if there is no declaration statement found ROSE_DLL_API void insertStatementAfterLastDeclaration(std::vector<SgStatement*> stmt_list, SgScopeStatement* scope); //! Insert a statement before the first non-declaration statement in a scope. If the scope has no non-declaration statements // then the statement is inserted at the end of the scope. ROSE_DLL_API void insertStatementBeforeFirstNonDeclaration(SgStatement *newStmt, SgScopeStatement *scope, bool movePreprocessingInfo=true); //! Insert statements before the first non-declaration statement in a scope. If the scope has no non-declaration statements //then the new statements are inserted at the end of the scope. ROSE_DLL_API void insertStatementListBeforeFirstNonDeclaration(const std::vector<SgStatement*> &newStmts, SgScopeStatement *scope); // DQ (11/21/2018): We need to sometimes insert something after the last statement of the collection from rose_edg_required_macros_and_functions.h. ROSE_DLL_API SgStatement* lastFrontEndSpecificStatement( SgGlobal* globalScope ); //! Remove a statement from its attach point of the AST. Automatically keep its associated preprocessing information at the original place after the removal. The statement is still in memory and it is up to the users to decide if the removed one will be inserted somewhere else or released from memory (deleteAST()). ROSE_DLL_API void removeStatement(SgStatement* stmt, bool autoRelocatePreprocessingInfo = true); //! Deep delete a sub AST tree. It uses postorder traversal to delete each child node. Users must take care of any dangling pointers, symbols or types that result. This is identical to deleteAST() ROSE_DLL_API void deepDelete(SgNode* root); //! Replace a statement with another. Move preprocessing information from oldStmt to newStmt if requested. ROSE_DLL_API void replaceStatement(SgStatement* oldStmt, SgStatement* newStmt, bool movePreprocessinInfo = false); //! 
Replace an anchor node with a specified pattern subtree with optional SgVariantExpression. All SgVariantExpression in the pattern will be replaced with copies of the anchor node. ROSE_DLL_API SgNode* replaceWithPattern (SgNode * anchor, SgNode* new_pattern); //! Replace all variable references to an old symbol in a scope to being references to a new symbol. // Essentially replace variable a with b. ROSE_DLL_API void replaceVariableReferences(SgVariableSymbol* old_sym, SgVariableSymbol* new_sym, SgScopeStatement * scope ); // DQ (11/12/2018): Adding test to avoid issues that we can't test for in the unparsing of header files using the token based unparsing. //! If header file unparsing and token-based unparsing are used, then some statements in header files //! used with the same name and different include syntax can't be transformed. This is currently because //! there is no way to generally test the resulting transformed code generated by ROSE. ROSE_DLL_API bool statementCanBeTransformed(SgStatement* stmt); /** Given an expression, generates a temporary variable whose initializer optionally evaluates * that expression. Then, the var reference expression returned can be used instead of the original * expression. The temporary variable created can be reassigned to the expression by the returned SgAssignOp; * this can be used when the expression the variable represents needs to be evaluated. NOTE: This handles * reference types correctly by using pointer types for the temporary. * @param expression Expression which will be replaced by a variable * @param scope scope in which the temporary variable will be generated * @param reEvaluate an assignment op to reevaluate the expression. Leave NULL if not needed * @return declaration of the temporary variable, and a a variable reference expression to use instead of * the original expression. 
*/ std::pair<SgVariableDeclaration*, SgExpression* > createTempVariableForExpression(SgExpression* expression, SgScopeStatement* scope, bool initializeInDeclaration, SgAssignOp** reEvaluate = NULL); /* This function creates a temporary variable for a given expression in the given scope This is different from SageInterface::createTempVariableForExpression in that it does not try to be smart to create pointers to reference types and so on. The tempt is initialized to expression. The caller is responsible for setting the parent of SgVariableDeclaration since buildVariableDeclaration may not set_parent() when the scope stack is empty. See programTransformation/extractFunctionArgumentsNormalization/ExtractFunctionArguments.C for sample usage. @param expression Expression which will be replaced by a variable @param scope scope in which the temporary variable will be generated */ std::pair<SgVariableDeclaration*, SgExpression*> createTempVariableAndReferenceForExpression (SgExpression* expression, SgScopeStatement* scope); //! Append an argument to SgFunctionParameterList, transparently set parent,scope, and symbols for arguments when possible /*! We recommend to build SgFunctionParameterList before building a function declaration However, it is still allowed to append new arguments for existing function declarations. \todo function type , function symbol also need attention. */ ROSE_DLL_API SgVariableSymbol* appendArg(SgFunctionParameterList *, SgInitializedName*); //!Prepend an argument to SgFunctionParameterList ROSE_DLL_API SgVariableSymbol* prependArg(SgFunctionParameterList *, SgInitializedName*); //! Append an expression to a SgExprListExp, set the parent pointer also ROSE_DLL_API void appendExpression(SgExprListExp *, SgExpression*); //! Append an expression list to a SgExprListExp, set the parent pointers also ROSE_DLL_API void appendExpressionList(SgExprListExp *, const std::vector<SgExpression*>&); //! 
Set parameter list for a function declaration, considering existing parameter list etc. template <class actualFunction> void setParameterList(actualFunction *func,SgFunctionParameterList *paralist) { // TODO consider the difference between C++ and Fortran // fixup the scope of arguments,no symbols for nondefining function declaration's arguments // DQ (11/25/2011): templated function so that we can handle both // SgFunctionDeclaration and SgTemplateFunctionDeclaration (and their associated member // function derived classes). ROSE_ASSERT(func != NULL); ROSE_ASSERT(paralist != NULL); #if 0 // At this point we don't have cerr and endl defined, so comment this code out. // Warn to users if a paralist is being shared if (paralist->get_parent() !=NULL) { cerr << "Waring! Setting a used SgFunctionParameterList to function: " << (func->get_name()).getString()<<endl << " Sharing parameter lists can corrupt symbol tables!"<<endl << " Please use deepCopy() to get an exclusive parameter list for each function declaration!"<<endl; // ROSE_ASSERT(false); } #endif // Liao,2/5/2008 constructor of SgFunctionDeclaration will automatically generate SgFunctionParameterList, so be cautious when set new paralist!! if (func->get_parameterList() != NULL) { if (func->get_parameterList() != paralist) { delete func->get_parameterList(); } } func->set_parameterList(paralist); paralist->set_parent(func); if (SageInterface::is_Ada_language()) { // Ada stores variable declarations in the function parameter scope (for functions) // and in a discriminantScope (for discriminated declarations). // ==> just make sure that these are set. SgInitializedNamePtrList& args = paralist->get_args(); for (SgInitializedNamePtrList::iterator i = args.begin(); i != args.end(); ++i) { ROSE_ASSERT(*i && isSgVariableDeclaration((*i)->get_declptr())); } } else { // DQ (5/15/2012): Need to set the declptr in each SgInitializedName IR node. // This is needed to support the AST Copy mechanism (at least). 
The files: test2005_150.C, // test2012_81.C and testcode2012_82.C demonstrate this problem. SgInitializedNamePtrList & args = paralist->get_args(); for (SgInitializedNamePtrList::iterator i = args.begin(); i != args.end(); i++) { (*i)->set_declptr(func); } } } //! Set a pragma of a pragma declaration. handle memory release for preexisting pragma, and set parent pointer. ROSE_DLL_API void setPragma(SgPragmaDeclaration* decl, SgPragma *pragma); //! Replace an expression with another, used for variable reference substitution and others. the old expression can be deleted (default case) or kept. ROSE_DLL_API void replaceExpression(SgExpression* oldExp, SgExpression* newExp, bool keepOldExp=false); //! Replace a given expression with a list of statements produced by a generator ROSE_DLL_API void replaceExpressionWithStatement(SgExpression* from, SageInterface::StatementGenerator* to); //! Similar to replaceExpressionWithStatement, but with more restrictions. //! Assumptions: from is not within the test of a loop or ifStmt, not currently traversing from or the statement it is in ROSE_DLL_API void replaceSubexpressionWithStatement(SgExpression* from, SageInterface::StatementGenerator* to); //! Set operands for expressions with single operand, such as unary expressions. handle file info, lvalue, pointer downcasting, parent pointer etc. ROSE_DLL_API void setOperand(SgExpression* target, SgExpression* operand); //!set left hand operand for binary expressions, transparently downcasting target expressions when necessary ROSE_DLL_API void setLhsOperand(SgExpression* target, SgExpression* lhs); //!set left hand operand for binary expression ROSE_DLL_API void setRhsOperand(SgExpression* target, SgExpression* rhs); //! Set original expression trees to NULL for SgValueExp or SgCastExp expressions, so you can change the value and have it unparsed correctly. ROSE_DLL_API void removeAllOriginalExpressionTrees(SgNode* top); // DQ (1/25/2010): Added support for directories //! 
Move file to be generated in a subdirectory (will be generated by the unparser). ROSE_DLL_API void moveToSubdirectory ( std::string directoryName, SgFile* file ); //! Supporting function to comment relocation in insertStatement() and removeStatement(). ROSE_DLL_API SgStatement* findSurroundingStatementFromSameFile(SgStatement* targetStmt, bool & surroundingStatementPreceedsTargetStatement); //! Relocate comments and CPP directives from one statement to another. ROSE_DLL_API void moveCommentsToNewStatement(SgStatement* sourceStatement, const std::vector<int> & indexList, SgStatement* targetStatement, bool surroundingStatementPreceedsTargetStatement); // DQ (7/19/2015): This is required to support general unparsing of template instantations for the GNU g++ // compiler which does not permit name qualification to be used to support the expression of the namespace // where a template instantiatoon would be places. Such name qualification would also sometimes require // global qualification which is also not allowed by the GNU g++ compiler. These issues appear to be // specific to the GNU compiler versions, at least versions 4.4 through 4.8. //! Relocate the declaration to be explicitly represented in its associated namespace (required for some backend compilers to process template instantiations). ROSE_DLL_API void moveDeclarationToAssociatedNamespace ( SgDeclarationStatement* declarationStatement ); ROSE_DLL_API bool isTemplateInstantiationNode(SgNode* node); ROSE_DLL_API void wrapAllTemplateInstantiationsInAssociatedNamespaces(SgProject* root); // DQ (12/1/2015): Adding support for fixup internal data struuctures that have references to statements (e.g. macro expansions). ROSE_DLL_API void resetInternalMapsForTargetStatement(SgStatement* sourceStatement); // DQ (6/7/2019): Add support for transforming function definitions to function prototypes in a subtree. // We might have to make this specific to a file (only traversing the functions in that file). 
/*!\brief XXX * This function operates on the new file used to support outlined function definitions. * We use a copy of the file where the code will be outlined FROM, so that if there are references to * declarations in the outlined code we can support the outpiled code with those references. This * approach has the added advantage of also supporting the same include file tree as the original * file where the outlined code is being taken from. */ ROSE_DLL_API void convertFunctionDefinitionsToFunctionPrototypes(SgNode* node); // DQ (11/10/2019): Lower level support for convertFunctionDefinitionsToFunctionPrototypes(). // DQ (10/27/2020): Need to return the generated function prototype (incase we want to mark it for output or template unparsing from the AST). // ROSE_DLL_API void replaceDefiningFunctionDeclarationWithFunctionPrototype ( SgFunctionDeclaration* functionDeclaration ); // ROSE_DLL_API SgDeclarationStatement* replaceDefiningFunctionDeclarationWithFunctionPrototype ( SgFunctionDeclaration* functionDeclaration ); ROSE_DLL_API SgFunctionDeclaration* replaceDefiningFunctionDeclarationWithFunctionPrototype ( SgFunctionDeclaration* functionDeclaration ); ROSE_DLL_API std::vector<SgFunctionDeclaration*> generateFunctionDefinitionsList(SgNode* node); // DQ (10/29/2020): build a function prototype for all but member functions outside of the class (except for template instantiations). // The reason why member functions outside of the class are an exception is because they can not be used except in a class and there // would already be one present for the code to compile. ROSE_DLL_API SgFunctionDeclaration* buildFunctionPrototype ( SgFunctionDeclaration* functionDeclaration ); //@} //------------------------------------------------------------------------ //@{ /*! @name AST repair, fix, and postprocessing. \brief Mostly used internally when some AST pieces are built without knowing their target scope/parent, especially during bottom-up construction of AST. 
The associated symbols, parent and scope pointers cannot be set on construction then. A set of utility functions are provided to patch up scope, parent, symbol for them when the target scope/parent become know. */ //! Connect variable reference to the right variable symbols when feasible, return the number of references being fixed. /*! In AST translation, it is possible to build a variable reference before the variable is being declared. buildVarRefExp() will use fake initialized name and symbol as placeholders to get the work done. Users should call fixVariableReference() when AST is complete and all variable declarations are in place. */ ROSE_DLL_API int fixVariableReferences(SgNode* root, bool cleanUnusedSymbol=true); //!Patch up symbol, scope, and parent information when a SgVariableDeclaration's scope is known. /*! It is possible to build a variable declaration without knowing its scope information during bottom-up construction of AST, though top-down construction is recommended in general. In this case, we have to patch up symbol table, scope and parent information when the scope is known. This function is usually used internally within appendStatment(), insertStatement(). */ ROSE_DLL_API void fixVariableDeclaration(SgVariableDeclaration* varDecl, SgScopeStatement* scope); //! Fix symbols, parent and scope pointers. Used internally within appendStatment(), insertStatement() etc when a struct declaration was built without knowing its target scope. ROSE_DLL_API void fixStructDeclaration(SgClassDeclaration* structDecl, SgScopeStatement* scope); //! Fix symbols, parent and scope pointers. Used internally within appendStatment(), insertStatement() etc when a class declaration was built without knowing its target scope. ROSE_DLL_API void fixClassDeclaration(SgClassDeclaration* classDecl, SgScopeStatement* scope); //! Fix symbols, parent and scope pointers. 
Used internally within appendStatment(), insertStatement() etc when a namespace declaration was built without knowing its target scope. ROSE_DLL_API void fixNamespaceDeclaration(SgNamespaceDeclarationStatement* structDecl, SgScopeStatement* scope); //! Fix symbol table for SgLabelStatement. Used Internally when the label is built without knowing its target scope. Both parameters cannot be NULL. ROSE_DLL_API void fixLabelStatement(SgLabelStatement* label_stmt, SgScopeStatement* scope); //! Set a numerical label for a Fortran statement. The statement should have a enclosing function definition already. SgLabelSymbol and SgLabelRefExp are created transparently as needed. ROSE_DLL_API void setFortranNumericLabel(SgStatement* stmt, int label_value, SgLabelSymbol::label_type_enum label_type=SgLabelSymbol::e_start_label_type, SgScopeStatement* label_scope=NULL); //! Suggest next usable (non-conflicting) numeric label value for a Fortran function definition scope ROSE_DLL_API int suggestNextNumericLabel(SgFunctionDefinition* func_def); //! Fix the symbol table and set scope (only if scope in declaration is not already set). ROSE_DLL_API void fixFunctionDeclaration(SgFunctionDeclaration* stmt, SgScopeStatement* scope); //! Fix the symbol table and set scope (only if scope in declaration is not already set). ROSE_DLL_API void fixTemplateDeclaration(SgTemplateDeclaration* stmt, SgScopeStatement* scope); //! A wrapper containing fixes (fixVariableDeclaration(),fixStructDeclaration(), fixLabelStatement(), etc) for all kinds statements. Should be used before attaching the statement into AST. ROSE_DLL_API void fixStatement(SgStatement* stmt, SgScopeStatement* scope); // DQ (6/11/2015): This reports the statements that are marked as transformed (used to debug the token-based unparsing). //! This collects the statements that are marked as transformed (useful in debugging). ROSE_DLL_API std::set<SgStatement*> collectTransformedStatements( SgNode* node ); //! 
This collects the statements that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging). ROSE_DLL_API std::set<SgStatement*> collectModifiedStatements( SgNode* node ); //! This collects the SgLocatedNodes that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging). ROSE_DLL_API std::set<SgLocatedNode*> collectModifiedLocatedNodes( SgNode* node ); // DQ (6/5/2019): Use the previously constructed set (above) to reset the IR nodes to be marked as isModified. //! Use the set of IR nodes and set the isModified flag in each IR node to true. ROSE_DLL_API void resetModifiedLocatedNodes(const std::set<SgLocatedNode*> & modifiedNodeSet); // DQ (10/23/2018): Report nodes that are marked as modified. ROSE_DLL_API void reportModifiedStatements(const std::string & label, SgNode* node); // DQ (3/22/2019): Translate CPP directives from attached preprocessor information to CPP Directive Declaration IR nodes. ROSE_DLL_API void translateToUseCppDeclarations( SgNode* n ); ROSE_DLL_API void translateScopeToUseCppDeclarations( SgScopeStatement* scope ); ROSE_DLL_API std::vector<SgC_PreprocessorDirectiveStatement*> translateStatementToUseCppDeclarations( SgStatement* statement, SgScopeStatement* scope); ROSE_DLL_API void printOutComments ( SgLocatedNode* locatedNode ); ROSE_DLL_API bool skipTranslateToUseCppDeclaration( PreprocessingInfo* currentPreprocessingInfo ); // DQ (12/2/2019): Debugging support. ROSE_DLL_API void outputFileIds( SgNode* node ); //@} //! Update defining and nondefining links due to a newly introduced function declaration. Should be used after inserting the function into a scope. /*! This function not only set the defining and nondefining links of the newly introduced * function declaration inside a scope, but also update other same function declarations' links * accordingly if there are any. 
* Assumption: The function has already inserted/appended/prepended into the scope before calling this function. */ ROSE_DLL_API void updateDefiningNondefiningLinks(SgFunctionDeclaration* func, SgScopeStatement* scope); //------------------------------------------------------------------------ //@{ /*! @name Advanced AST transformations, analyses, and optimizations \brief Some complex but commonly used AST transformations. */ //! Collect all read and write references within stmt, which can be a function, a scope statement, or a single statement. Note that a reference can be both read and written, like i++ ROSE_DLL_API bool collectReadWriteRefs(SgStatement* stmt, std::vector<SgNode*>& readRefs, std::vector<SgNode*>& writeRefs, bool useCachedDefUse=false); //!Collect unique variables which are read or written within a statement. Note that a variable can be both read and written. The statement can be either of a function, a scope, or a single line statement. For accesses to members of aggregate data, we return the coarse grain aggregate mem obj by default. ROSE_DLL_API bool collectReadWriteVariables(SgStatement* stmt, std::set<SgInitializedName*>& readVars, std::set<SgInitializedName*>& writeVars, bool coarseGrain=true); //!Collect read only variables within a statement. The statement can be either of a function, a scope, or a single line statement. For accesses to members of aggregate data, we return the coarse grain aggregate mem obj by default. ROSE_DLL_API void collectReadOnlyVariables(SgStatement* stmt, std::set<SgInitializedName*>& readOnlyVars, bool coarseGrain=true); //!Collect read only variable symbols within a statement. The statement can be either of a function, a scope, or a single line statement. For accesses to members of aggregate data, we return the coarse grain aggregate mem obj by default. ROSE_DLL_API void collectReadOnlySymbols(SgStatement* stmt, std::set<SgVariableSymbol*>& readOnlySymbols, bool coarseGrain=true); //! 
Check if a variable reference is used by its address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++ ROSE_DLL_API bool isUseByAddressVariableRef(SgVarRefExp* ref); //! Collect variable references involving use by address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++ ROSE_DLL_API void collectUseByAddressVariableRefs (const SgStatement* s, std::set<SgVarRefExp* >& varSetB); #ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT //!Call liveness analysis on an entire project ROSE_DLL_API LivenessAnalysis * call_liveness_analysis(SgProject* project, bool debug=false); //!get liveIn and liveOut variables for a for loop from liveness analysis result liv. ROSE_DLL_API void getLiveVariables(LivenessAnalysis * liv, SgForStatement* loop, std::set<SgInitializedName*>& liveIns, std::set<SgInitializedName*> & liveOuts); #endif //!Recognize and collect reduction variables and operations within a C/C++ loop, following OpenMP 3.0 specification for allowed reduction variable types and operation types. ROSE_DLL_API void ReductionRecognition(SgForStatement* loop, std::set< std::pair <SgInitializedName*, OmpSupport::omp_construct_enum> > & results); //! Constant folding an AST subtree rooted at 'r' (replacing its children with their constant values, if applicable). Please be advised that constant folding on floating point computation may decrease the accuracy of floating point computations! /*! It is a wrapper function for ConstantFolding::constantFoldingOptimization(). Note that only r's children are replaced with their corresponding constant values, not the input SgNode r itself. You have to call this upon an expression's parent node if you want to fold the expression. */ ROSE_DLL_API void constantFolding(SgNode* r); //!Instrument(Add a statement, often a function call) into a function right before the return points, handle multiple return statements (with duplicated statement s) and return expressions with side effects. 
Return the number of statements inserted. /*! Useful when adding a runtime library call to terminate the runtime system right before the end of a program, especially for OpenMP and UPC runtime systems. Return with complex expressions with side effects are rewritten using an additional assignment statement. */ ROSE_DLL_API int instrumentEndOfFunction(SgFunctionDeclaration * func, SgStatement* s); //! Remove jumps whose label is immediately after the jump. Used to clean up inlined code fragments. ROSE_DLL_API void removeJumpsToNextStatement(SgNode*); //! Remove labels which are not targets of any goto statements: its child statement is also removed by default. ROSE_DLL_API void removeUnusedLabels(SgNode* top, bool keepChild =false); //! Find unused labels which are not targets of any goto statements ROSE_DLL_API std::set<SgLabelStatement*> findUnusedLabels (SgNode* top); //! Remove consecutive labels ROSE_DLL_API void removeConsecutiveLabels(SgNode* top); //! Merge a variable assignment statement into a matching variable declaration statement. Callers should make sure the merge is semantically correct (by not introducing compilation errors). This function simply does the merge transformation, without eligibility check. /*! * e.g. int i; i=10; becomes int i=10; the original i=10 will be deleted after the merge * if success, return true, otherwise return false (e.g. variable declaration does not match or already has an initializer) * The original assignment stmt will be removed by default * This function is a bit ambiguous about the merge direction, to be phased out. */ ROSE_DLL_API bool mergeDeclarationAndAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt, bool removeAssignStmt = true); //! Merge an assignment into its upstream declaration statement. Callers should make sure the merge is semantically correct. 
ROSE_DLL_API bool mergeAssignmentWithDeclaration (SgExprStatement* assign_stmt, SgVariableDeclaration* decl, bool removeAssignStmt = true); //! Merge a declaration statement into a matching followed variable assignment. Callers should make sure the merge is semantically correct (by not introducing compilation errors). This function simply does the merge transformation, without eligibility check. /*! * e.g. int i; i=10; becomes int i=10; the original int i; will be deleted after the merge */ ROSE_DLL_API bool mergeDeclarationWithAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt); //! Split a variable declaration with an rhs assignment into two statements: a declaration and an assignment. /*! Return the generated assignment statement, if any * e.g. int i =10; becomes int i; i=10; * This can be seen as a normalization of declarations */ ROSE_DLL_API SgExprStatement* splitVariableDeclaration (SgVariableDeclaration* decl); //! Split declarations within a scope into declarations and assignment statements, by default only top level declarations are considered. Return the number of declarations split. ROSE_DLL_API int splitVariableDeclaration (SgScopeStatement* scope, bool topLevelOnly = true); //! Replace an expression with a temporary variable and an assignment statement /*! Add a new temporary variable to contain the value of 'from'. Change reference to 'from' to use this new variable. Assumptions: (1)'from' is not within the test of a loop or 'if'; (2)not currently traversing 'from' or the statement it is in. Return value: the new temp variable declaration's assign initializer containing the from expression. */ ROSE_DLL_API SgAssignInitializer* splitExpression(SgExpression* from, std::string newName = ""); //! Split long expressions into blocks of statements ROSE_DLL_API void splitExpressionIntoBasicBlock(SgExpression* expr); //! Remove labeled goto statements ROSE_DLL_API void removeLabeledGotos(SgNode* top); //! 
If the given statement contains any break statements in its body, add a new label below the statement and change the breaks into gotos to that new label. ROSE_DLL_API void changeBreakStatementsToGotos(SgStatement* loopOrSwitch); //! Check if the body of a 'for' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfFor(SgForStatement* fs); //! Check if the body of a 'upc_forall' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfUpcForAll(SgUpcForAllStatement* fs); //! Check if the body of a 'while' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfWhile(SgWhileStmt* ws); //! Check if the body of a 'do .. while' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfDoWhile(SgDoWhileStmt* ws); //! Check if the body of a 'switch' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfSwitch(SgSwitchStatement* ws); //! Check if the body of a 'case option' statement is a SgBasicBlock, create one if not. SgBasicBlock* ensureBasicBlockAsBodyOfCaseOption(SgCaseOptionStmt* cs); //! Check if the body of a 'default option' statement is a SgBasicBlock, create one if not. SgBasicBlock* ensureBasicBlockAsBodyOfDefaultOption(SgDefaultOptionStmt * cs); //! Check if the true body of a 'if' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsTrueBodyOfIf(SgIfStmt* ifs); //! Check if the false body of a 'if' statement is a SgBasicBlock, create one if not when the flag is true. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsFalseBodyOfIf(SgIfStmt* ifs, bool createEmptyBody = true); //! Check if the body of a 'catch' statement is a SgBasicBlock, create one if not. ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfCatch(SgCatchOptionStmt* cos); //! 
Check if the body of a SgOmpBodyStatement is a SgBasicBlock, create one if not ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfOmpBodyStmt(SgOmpBodyStatement* ompbodyStmt); // DQ (1/18/2015): This is added to support better quality token-based unparsing. //! Remove unused basic block IR nodes added as part of normalization. ROSE_DLL_API void cleanupNontransformedBasicBlockNode(); // DQ (1/18/2015): This is added to support better quality token-based unparsing. //! Record where normalization have been done so that we can preform denormalizations as required for the token-based unparsing to generate minimal diffs. ROSE_DLL_API void recordNormalizations(SgStatement* s); //! Check if a statement is a (true or false) body of a container-like parent, such as For, Upc_forall, Do-while, //! switch, If, Catch, OmpBodyStmt, etc bool isBodyStatement (SgStatement* s); //! Fix up ifs, loops, while, switch, Catch, OmpBodyStatement, etc. to have blocks as body components. It also adds an empty else body to if statements that don't have them. void changeAllBodiesToBlocks(SgNode* top, bool createEmptyBody = true); // The same as changeAllBodiesToBlocks(SgNode* top). Phased out. //void changeAllLoopBodiesToBlocks(SgNode* top); //! Make a single statement body to be a basic block. Its parent is if, while, catch, or upc_forall etc. SgBasicBlock * makeSingleStatementBodyToBlock(SgStatement* singleStmt); #if 0 /** If s is the body of a loop, catch, or if statement and is already a basic block, * s is returned unmodified. Otherwise generate a SgBasicBlock between s and its parent * (a loop, catch, or if statement, etc). */ SgLocatedNode* ensureBasicBlockAsParent(SgStatement* s); #endif //! Get the constant value from a constant integer expression; abort on //! everything else. Note that signed long longs are converted to unsigned. unsigned long long getIntegerConstantValue(SgValueExp* expr); //! Get a statement's dependent declarations which declares the types used in the statement. 
The returned vector of declaration statements are sorted according to their appearance order in the original AST. Any reference to a class or template class from a namespace will treated as a reference to the enclosing namespace. std::vector<SgDeclarationStatement*> getDependentDeclarations (SgStatement* stmt ); //! Insert an expression (new_exp )before another expression (anchor_exp) has possible side effects, without changing the original semantics. This is achieved by using a comma operator: (new_exp, anchor_exp). The comma operator is returned. SgCommaOpExp *insertBeforeUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp); //! Insert an expression (new_exp ) after another expression (anchor_exp) has possible side effects, without changing the original semantics. This is done by using two comma operators: type T1; ... ((T1 = anchor_exp, new_exp),T1) )... , where T1 is a temp variable saving the possible side effect of anchor_exp. The top level comma op exp is returned. The reference to T1 in T1 = anchor_exp is saved in temp_ref. SgCommaOpExp *insertAfterUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp, SgStatement** temp_decl = NULL, SgVarRefExp** temp_ref = NULL); /// \brief moves the body of a function f to a new function f`; /// f's body is replaced with code that forwards the call to f`. /// \return a pair indicating the statement containing the call of f` /// and an initialized name refering to the temporary variable /// holding the result of f`. In case f returns void /// the initialized name is NULL. /// \param definingDeclaration the defining function declaration of f /// \param newName the name of function f` /// \details f's new body becomes { f`(...); } and { int res = f`(...); return res; } /// for functions returning void and a value, respectively. /// two function declarations are inserted in f's enclosing scope /// \code /// result_type f`(...); <--- (1) /// result_type f (...) { forward call to f` } /// result_type f`(...) 
{ original code } <--- (2) /// \endcode /// Calls to f are not updated, thus in the transformed code all /// calls will continue calling f (this is also true for /// recursive function calls from within the body of f`). /// After the function has created the wrapper, /// definingDeclaration becomes the wrapper function /// The definition of f` is the next entry in the /// statement list; the forward declaration of f` is the previous /// entry in the statement list. /// \pre definingDeclaration must be a defining declaration of a /// free standing function. /// typeid(SgFunctionDeclaration) == typeid(definingDeclaration) /// i.e., this function is NOT implemented for class member functions, /// template functions, procedures, etc. std::pair<SgStatement*, SgInitializedName*> wrapFunction(SgFunctionDeclaration& definingDeclaration, SgName newName); /// \overload /// \tparam NameGen functor that generates a new name based on the old name. /// interface: SgName nameGen(const SgName&) /// \param nameGen name generator /// \brief see wrapFunction for details template <class NameGen> std::pair<SgStatement*, SgInitializedName*> wrapFunction(SgFunctionDeclaration& definingDeclaration, NameGen nameGen) { return wrapFunction(definingDeclaration, nameGen(definingDeclaration.get_name())); } /// \brief convenience function that returns the first initialized name in a /// list of variable declarations. SgInitializedName& getFirstVariable(SgVariableDeclaration& vardecl); //@} // DQ (6/7/2012): Unclear where this function should go... bool hasTemplateSyntax( const SgName & name ); #if 0 //------------------------AST dump, stringify----------------------------- //------------------------------------------------------------------------ std::string buildOperatorString ( SgNode* astNode ); //transformationSupport.h // do we need these? 
std::string dump_node(const SgNode* astNode); std::string dump_tree(const SgNode* astNode); // or a friendly version of unparseToString(), as a memeber function std::string SgNode::toString(bool asSubTree=true); // dump node or subtree //----------------------------AST comparison------------------------------ //------------------------------------------------------------------------ // How to get generic functions for comparison? bool isNodeEqual(SgNode* node1, SgNode* node2); //? bool isTreeEqual(SgNode* tree1, SgNode* tree2); //! Are two expressions equal (using a deep comparison)? bool expressionTreeEqual(SgExpression*, SgExpression*); //! Are corresponding expressions in two lists equal (using a deep comparison)? bool expressionTreeEqualStar(const SgExpressionPtrList&, const SgExpressionPtrList&); //----------------------AST verfication/repair---------------------------- //------------------------------------------------------------------------ // sanity check of AST subtree, any suggestions? // TODO verifySgNode(SgNode* node, bool subTree=true); //src/midend/astDiagnostics/AstConsistencyTests.h // AstTests::runAllTests(SgProject * ) //src/midend/astUtil/astInterface/AstInterface.h.C //FixSgProject(SgProject &project) //FixSgTree(SgNode* r) //src/frontend/SageIII/astPostProcessing //AstPostProcessing(SgNode * node) //--------------------------AST modification------------------------------ //------------------------------------------------------------------------ // any operations changing AST tree, including // insert, copy, delete(remove), replace // insert before or after some point, argument list is consistent with LowLevelRewrite void insertAst(SgNode* targetPosition, SgNode* newNode, bool insertBefore=true); // previous examples //void myStatementInsert(SgStatement* target,...) 
// void AstInterfaceBase::InsertStmt(AstNodePtr const & orig, AstNodePtr const &n, bool insertbefore, bool extractfromBasicBlock) // copy // copy children of one basic block to another basic block //void appendStatementCopy (const SgBasicBlock* a, SgBasicBlock* b); void copyStatements (const SgBasicBlock* src, SgBasicBlock* dst); // delete (remove) a node or a whole subtree void removeSgNode(SgNode* targetNode); // need this? void removeSgNodeTree(SgNode* subtree); // need this? void removeStatement( SgStatement* targetStmt); //Move = delete + insert void moveAst (SgNode* src, SgNode* target); // need this? // similar to void moveStatements (SgBasicBlock* src, SgBasicBlock* target); // replace= delete old + insert new (via building or copying) // DQ (1/25/2010): This does not appear to exist as a definition anywhere in ROSE. // void replaceAst(SgNode* oldNode, SgNode* newNode); //void replaceChild(SgNode* parent, SgNode* from, SgNode* to); //bool AstInterface::ReplaceAst( const AstNodePtr& orig, const AstNodePtr& n) //--------------------------AST transformations--------------------------- //------------------------------------------------------------------------ // Advanced AST modifications through basic AST modifications // Might not be included in AST utitlity list, but listed here for the record. // extract statements/content from a scope void flattenBlocks(SgNode* n); //src/midend/astInlining/inlinerSupport.h void renameVariables(SgNode* n); void renameLabels(SgNode* n, SgFunctionDefinition* enclosingFunctionDefinition); void simpleCopyAndConstantPropagation(SgNode* top); void changeAllMembersToPublic(SgNode* n); void removeVariableDeclaration(SgInitializedName* initname); //! Convert something like "int a = foo();" into "int a; a = foo();" SgAssignOp* convertInitializerIntoAssignment(SgAssignInitializer* init); //! Rewrites a while or for loop so that the official test is changed to //! "true" and what had previously been the test is now an if-break //! 
combination (with an inverted condition) at the beginning of the loop //! body void pushTestIntoBody(LoopStatement* loopStmt); //programTransformation/finiteDifferencing/finiteDifferencing.h //! Move variables declared in a for statement to just outside that statement. void moveForDeclaredVariables(SgNode* root); //------------------------ Is/Has functions ------------------------------ //------------------------------------------------------------------------ // misc. boolean functions // some of them could moved to SgXXX class as a member function bool isOverloaded (SgFunctionDeclaration * functionDeclaration); bool isSwitchCond (const SgStatement* s); bool isIfCond (const SgStatement* s); bool isWhileCond (const SgStatement* s); bool isStdNamespace (const SgScopeStatement* scope); bool isTemplateInst (const SgDeclarationStatement* decl); bool isCtor (const SgFunctionDeclaration* func); bool isDtor (const SgFunctionDeclaration* func); // src/midend/astInlining/typeTraits.h bool hasTrivialDestructor(SgType* t); ROSE_DLL_API bool isNonconstReference(SgType* t); ROSE_DLL_API bool isReferenceType(SgType* t); // generic ones, or move to the SgXXX class as a member function bool isConst(SgNode* node); // const type, variable, function, etc. // .... 
and more bool isConstType (const SgType* type); bool isConstFunction (const SgFunctionDeclaration* decl); bool isMemberVariable(const SgInitializedName & var); //bool isMemberVariable(const SgNode& in); bool isPrototypeInScope (SgScopeStatement * scope, SgFunctionDeclaration * functionDeclaration, SgDeclarationStatement * startingAtDeclaration); bool MayRedefined(SgExpression* expr, SgNode* root); // bool isPotentiallyModified(SgExpression* expr, SgNode* root); // inlinderSupport.h bool hasAddressTaken(SgExpression* expr, SgNode* root); //src/midend/astInlining/inlinerSupport.C // can also classified as topdown search bool containsVariableReference(SgNode* root, SgInitializedName* var); bool isDeclarationOf(SgVariableDeclaration* decl, SgInitializedName* var); bool isPotentiallyModifiedDuringLifeOf(SgBasicBlock* sc, SgInitializedName* toCheck, SgInitializedName* lifetime) //src/midend/programTransformation/partialRedundancyElimination/pre.h bool anyOfListPotentiallyModifiedIn(const std::vector<SgVariableSymbol*>& syms, SgNode* n); //------------------------ loop handling --------------------------------- //------------------------------------------------------------------------ //get and set loop control expressions // 0: init expr, 1: condition expr, 2: stride expr SgExpression* getForLoopTripleValues(int valuetype,SgForStatement* forstmt ); int setForLoopTripleValues(int valuetype,SgForStatement* forstmt, SgExpression* exp); bool isLoopIndexVarRef(SgForStatement* forstmt, SgVarRefExp *varref); SgInitializedName * getLoopIndexVar(SgForStatement* forstmt); //------------------------expressions------------------------------------- //------------------------------------------------------------------------ //src/midend/programTransformation/partialRedundancyElimination/pre.h int countComputationsOfExpressionIn(SgExpression* expr, SgNode* root); //src/midend/astInlining/replaceExpressionWithStatement.h void replaceAssignmentStmtWithStatement(SgExprStatement* from, 
StatementGenerator* to); void replaceSubexpressionWithStatement(SgExpression* from, StatementGenerator* to); SgExpression* getRootOfExpression(SgExpression* n); //--------------------------preprocessing info. ------------------------- //------------------------------------------------------------------------ //! Removes all preprocessing information at a given position. void cutPreprocInfo (SgBasicBlock* b, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& save_buf); //! Pastes preprocessing information at the front of a statement. void pastePreprocInfoFront (AttachedPreprocessingInfoType& save_buf, SgStatement* s); //! Pastes preprocessing information at the back of a statement. void pastePreprocInfoBack (AttachedPreprocessingInfoType& save_buf, SgStatement* s); /*! * \brief Moves 'before' preprocessing information. * Moves all preprocessing information attached 'before' the source * statement to the front of the destination statement. */ // a generic one for all /// void movePreprocessingInfo(src, dest, RelativePositionType); void moveBeforePreprocInfo (SgStatement* src, SgStatement* dest); void moveInsidePreprocInfo (SgBasicBlock* src, SgBasicBlock* dest); void moveAfterPreprocInfo (SgStatement* src, SgStatement* dest); //--------------------------------operator-------------------------------- //------------------------------------------------------------------------ from transformationSupport.h, not sure if they should be included here /* return enum code for SAGE operators */ operatorCodeType classifyOverloadedOperator(); // transformationSupport.h /*! \brief generates a source code string from operator name. This function returns a string representing the elementwise operator (for primative types) that would be match that associated with the overloaded operator for a user-defined abstractions (e.g. identifyOperator("operator+()") returns "+"). 
*/ std::string stringifyOperator (std::string name); //--------------------------------macro ---------------------------------- //------------------------------------------------------------------------ std::string buildMacro ( std::string s ); //transformationSupport.h //--------------------------------access functions--------------------------- //----------------------------------get/set sth.----------------------------- // several categories: * get/set a direct child/grandchild node or fields * get/set a property flag value * get a descendent child node using preorder searching * get an ancestor node using bottomup/reverse searching // SgName or string? std::string getFunctionName (SgFunctionCallExp* functionCallExp); std::string getFunctionTypeName ( SgFunctionCallExp* functionCallExpression ); // do we need them anymore? or existing member functions are enought? // a generic one: std::string get_name (const SgNode* node); std::string get_name (const SgDeclarationStatement * declaration); // get/set some property: should moved to SgXXX as an inherent memeber function? // access modifier void setExtern (SgFunctionDeclartion*) void clearExtern() // similarly for other declarations and other properties void setExtern (SgVariableDeclaration*) void setPublic() void setPrivate() #endif // DQ (1/23/2013): Added support for generated a set of source sequence entries. 
std::set<unsigned int> collectSourceSequenceNumbers( SgNode* astNode );

//--------------------------------Type Traits (C++)---------------------------
// Predicates reporting whether the queried C++ type trait holds for the given
// SgType — presumably mirroring the compiler intrinsics (__has_nothrow_assign,
// __is_class, ...); TODO confirm against the implementation.
bool HasNoThrowAssign(const SgType * const inputType);
bool HasNoThrowCopy(const SgType * const inputType);
bool HasNoThrowConstructor(const SgType * const inputType);
bool HasTrivialAssign(const SgType * const inputType);
bool HasTrivialCopy(const SgType * const inputType);
bool HasTrivialConstructor(const SgType * const inputType);
bool HasTrivialDestructor(const SgType * const inputType);
bool HasVirtualDestructor(const SgType * const inputType);
bool IsBaseOf(const SgType * const inputBaseType, const SgType * const inputDerivedType);
bool IsAbstract(const SgType * const inputType);
bool IsClass(const SgType * const inputType);
bool IsEmpty(const SgType * const inputType);
bool IsEnum(const SgType * const inputType);
bool IsPod(const SgType * const inputType);
bool IsPolymorphic(const SgType * const inputType);
bool IsStandardLayout(const SgType * const inputType);
bool IsLiteralType(const SgType * const inputType);
bool IsTrivial(const SgType * const inputType);
bool IsUnion(const SgType * const inputType);
SgType * UnderlyingType(SgType *type);

// DQ (3/2/2014): Added a new interface function (used in the snippet insertion support).
// void supportForInitializedNameLists ( SgScopeStatement* scope, SgInitializedNamePtrList & variableList );

// DQ (3/4/2014): Added support for testing two trees for equivalents using the AST iterators.
bool isStructurallyEquivalentAST( SgNode* tree1, SgNode* tree2 );

// JP (10/14/24): Moved code to evaluate a const integer expression (like in array size definitions) to SageInterface
/*! The datastructure is used as the return type for
    SageInterface::evaluateConstIntegerExpression(). One needs to always check
    whether hasValue_ is true before accessing value_ */
struct const_int_expr_t {
  size_t value_;   // the evaluated constant; meaningful only when hasValue_ is true
  bool hasValue_;  // true iff the expression could be reduced to a constant
};
/*!
\brief The function tries to evaluate const integer expressions (such as are used in array dimension sizes). It follows variable symbols, and requires constness. */ struct const_int_expr_t evaluateConstIntegerExpression(SgExpression *expr); // JP (9/17/14): Added function to test whether two SgType* are equivalent or not bool checkTypesAreEqual(SgType *typeA, SgType *typeB); //--------------------------------Java interface functions --------------------- #ifdef ROSE_BUILD_JAVA_LANGUAGE_SUPPORT ROSE_DLL_API std::string getTempDirectory(SgProject *project); ROSE_DLL_API void destroyTempDirectory(std::string); ROSE_DLL_API SgFile *processFile(SgProject *, std::string, bool unparse = false); ROSE_DLL_API std::string preprocessPackage(SgProject *, std::string); ROSE_DLL_API std::string preprocessImport(SgProject *, std::string); ROSE_DLL_API SgFile* preprocessCompilationUnit(SgProject *, std::string, std::string, bool unparse = true); ROSE_DLL_API SgClassDefinition *findJavaPackage(SgScopeStatement *, std::string); ROSE_DLL_API SgClassDefinition *findOrInsertJavaPackage(SgProject *, std::string, bool create_directory = false); ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassDefinition *package_definition, std::string); ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, std::string, std::string); ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassType *); ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassDefinition *); ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassType *); #endif // ROSE_BUILD_JAVA_LANGUAGE_SUPPORT // DQ (8/31/2016): Making this a template function so that we can have it work with user defined filters. //! This function detects template instantiations that are relevant when filters are used. /*! EDG normalizes some in-class template functions and member functions to be redefined outside of a class. 
this causes the associated template instantiations to be declared outside of the class, and to be marked as compiler generated (since the compiler generated form outside of the class declaration). ROSE captures the function definitions, but in the new location (defined outside of the class declaration). This can confuse some simple tests for template instantiations that are a part of definitions in a file, thus we have this function to detect this specific normalization. */ template < class T > bool isTemplateInstantiationFromTemplateDeclarationSatisfyingFilter (SgFunctionDeclaration* function, T* filter ) { // DQ (9/1/2016): This function is called in the Call graph generation to avoid filtering out EDG normalized // function template instnatiations (which come from normalized template functions and member functions). // Note that because of the EDG normailzation the membr function is moved outside of the class, and // thus marked as compiler generated. However the template instantiations are always marked as compiler // generated (if not specializations) and so we want to include a template instantiation that is marked // as compiler generated, but is from a template declaration that satisfyied a specific user defined filter. // The complexity of this detection is isolated here, but knowing that it must be called is more complex. // This function is call in the CG.C file of tests/nonsmoke/functional/roseTests/programAnalysisTests/testCallGraphAnalysis. bool retval = false; #define DEBUG_TEMPLATE_NORMALIZATION_DETECTION 0 #if DEBUG_TEMPLATE_NORMALIZATION_DETECTION printf ("In isNormalizedTemplateInstantiation(): function = %p = %s = %s \n",function,function->class_name().c_str(),function->get_name().str()); #endif // Test for this to be a template instantation (in which case it was marked as // compiler generated but we may want to allow it to be used in the call graph, // if it's template was a part was defined in the current directory). 
SgTemplateInstantiationFunctionDecl* templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(function); SgTemplateInstantiationMemberFunctionDecl* templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(function); if (templateInstantiationFunction != NULL) { // When the defining function has been normalized by EDG, only the non-defining declaration will have a source position. templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(templateInstantiationFunction->get_firstNondefiningDeclaration()); SgTemplateFunctionDeclaration* templateFunctionDeclaration = templateInstantiationFunction->get_templateDeclaration(); if (templateFunctionDeclaration != NULL) { retval = filter->operator()(templateFunctionDeclaration); } else { // Assume false. } #if DEBUG_TEMPLATE_NORMALIZATION_DETECTION printf (" --- case of templateInstantiationFunction: retval = %s \n",retval ? "true" : "false"); #endif } else { if (templateInstantiationMemberFunction != NULL) { // When the defining function has been normalized by EDG, only the non-defining declaration will have a source position. templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(templateInstantiationMemberFunction->get_firstNondefiningDeclaration()); SgTemplateMemberFunctionDeclaration* templateMemberFunctionDeclaration = templateInstantiationMemberFunction->get_templateDeclaration(); if (templateMemberFunctionDeclaration != NULL) { retval = filter->operator()(templateMemberFunctionDeclaration); } else { // Assume false. } #if DEBUG_TEMPLATE_NORMALIZATION_DETECTION printf (" --- case of templateInstantiationMemberFunction: retval = %s \n",retval ? "true" : "false"); #endif } } return retval; } void detectCycleInType(SgType * type, const std::string & from); // DQ (7/14/2020): Debugging support. void checkForInitializers( SgNode* node ); }// end of namespace #endif
/* bug_proxy_task_dep_waiting.c */
// RUN: %libomp-compile-and-run // The runtime currently does not get dependency information from GCC. // UNSUPPORTED: gcc, icc-16 // REQUIRES: !abt // Very flaky on openmp-clang-x86_64-linux-debian. // https://bugs.llvm.org/show_bug.cgi?id=45397 // UNSUPPORTED: linux #include <stdio.h> #include <omp.h> #include <pthread.h> #include "omp_my_sleep.h" /* An explicit task can have a dependency on a target task. If it is not directly satisfied, the runtime should not wait but resume execution. */ // Compiler-generated code (emulation) typedef long kmp_intptr_t; typedef int kmp_int32; typedef char bool; typedef struct ident { kmp_int32 reserved_1; /**< might be used in Fortran; see above */ kmp_int32 flags; /**< also f.flags; KMP_IDENT_xxx flags; KMP_IDENT_KMPC identifies this union member */ kmp_int32 reserved_2; /**< not really used in Fortran any more; see above */ #if USE_ITT_BUILD /* but currently used for storing region-specific ITT */ /* contextual information. */ #endif /* USE_ITT_BUILD */ kmp_int32 reserved_3; /**< source[4] in Fortran, do not use for C++ */ char const *psource; /**< String describing the source location. The string is composed of semi-colon separated fields which describe the source file, the function and a pair of line numbers that delimit the construct. */ } ident_t; typedef struct kmp_depend_info { kmp_intptr_t base_addr; size_t len; struct { bool in:1; bool out:1; } flags; } kmp_depend_info_t; struct kmp_task; typedef kmp_int32 (* kmp_routine_entry_t)( kmp_int32, struct kmp_task * ); typedef struct kmp_task { /* GEH: Shouldn't this be aligned somehow? 
*/ void * shareds; /**< pointer to block of pointers to shared vars */ kmp_routine_entry_t routine; /**< pointer to routine to call for executing task */ kmp_int32 part_id; /**< part id for the task */ } kmp_task_t; #ifdef __cplusplus extern "C" { #endif kmp_int32 __kmpc_global_thread_num ( ident_t * ); kmp_task_t* __kmpc_omp_task_alloc( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, kmp_routine_entry_t task_entry ); void __kmpc_proxy_task_completed_ooo ( kmp_task_t *ptask ); kmp_int32 __kmpc_omp_task_with_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list ); kmp_int32 __kmpc_omp_task( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task ); #ifdef __cplusplus } #endif void *target(void *task) { my_sleep( 0.1 ); __kmpc_proxy_task_completed_ooo((kmp_task_t*) task); return NULL; } pthread_t target_thread; // User's code int task_entry(kmp_int32 gtid, kmp_task_t *task) { pthread_create(&target_thread, NULL, &target, task); return 0; } int main() { int dep; /* * Corresponds to: #pragma omp target nowait depend(out: dep) { my_sleep( 0.1 ); } */ kmp_depend_info_t dep_info; dep_info.base_addr = (long) &dep; dep_info.len = sizeof(int); // out = inout per spec and runtime expects this dep_info.flags.in = 1; dep_info.flags.out = 1; kmp_int32 gtid = __kmpc_global_thread_num(NULL); kmp_task_t *proxy_task = __kmpc_omp_task_alloc(NULL,gtid,17,sizeof(kmp_task_t),0,&task_entry); __kmpc_omp_task_with_deps(NULL,gtid,proxy_task,1,&dep_info,0,NULL); int first_task_finished = 0; #pragma omp task shared(first_task_finished) depend(inout: dep) { first_task_finished = 1; } int second_task_finished = 0; #pragma omp task shared(second_task_finished) depend(in: dep) { second_task_finished = 1; } // check that execution has been resumed and the runtime has not waited // for the dependencies to be satisfied. 
int error = (first_task_finished == 1); error += (second_task_finished == 1); #pragma omp taskwait // by now all tasks should have finished error += (first_task_finished != 1); error += (second_task_finished != 1); return error; }
GB_resize.c
//------------------------------------------------------------------------------ // GB_resize: change the size of a matrix //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ #include "GB_select.h" #define GB_FREE_ALL \ { \ GB_FREE (&Ax_new, Ax_new_size) ; \ GB_FREE (&Ab_new, Ab_new_size) ; \ GB_phbix_free (A) ; \ } //------------------------------------------------------------------------------ // GB_resize: resize a GrB_Matrix //------------------------------------------------------------------------------ GrB_Info GB_resize // change the size of a matrix ( GrB_Matrix A, // matrix to modify const GrB_Index nrows_new, // new number of rows in matrix const GrB_Index ncols_new, // new number of columns in matrix GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- GrB_Info info ; GB_void *restrict Ax_new = NULL ; size_t Ax_new_size = 0 ; int8_t *restrict Ab_new = NULL ; size_t Ab_new_size = 0 ; ASSERT_MATRIX_OK (A, "A to resize", GB0) ; //-------------------------------------------------------------------------- // handle the CSR/CSC format //-------------------------------------------------------------------------- int64_t vdim_old = A->vdim ; int64_t vlen_old = A->vlen ; int64_t vlen_new, vdim_new ; if (A->is_csc) { vlen_new = nrows_new ; vdim_new = ncols_new ; } else { vlen_new = ncols_new ; vdim_new = nrows_new ; } if (vdim_new == vdim_old && vlen_new == vlen_old) { // nothing to do return (GrB_SUCCESS) ; } //-------------------------------------------------------------------------- // delete any lingering zombies and assemble any pending tuples 
//-------------------------------------------------------------------------- // only do so if either dimension is shrinking, or if pending tuples exist // and vdim_old <= 1 and vdim_new > 1, since in that case, Pending->j has // not been allocated yet, but would be required in the resized matrix. // If A is jumbled, it must be sorted. if (vdim_new < vdim_old || vlen_new < vlen_old || A->jumbled || (GB_PENDING (A) && vdim_old <= 1 && vdim_new > 1)) { GB_MATRIX_WAIT (A) ; ASSERT_MATRIX_OK (A, "A to resize, wait", GB0) ; } ASSERT (!GB_JUMBLED (A)) ; //-------------------------------------------------------------------------- // resize the matrix //-------------------------------------------------------------------------- bool A_is_bitmap = GB_IS_BITMAP (A) ; bool A_is_full = GB_IS_FULL (A) ; bool A_is_shrinking = (vdim_new <= vdim_old && vlen_new <= vlen_old) ; if ((A_is_full || A_is_bitmap) && A_is_shrinking) { //---------------------------------------------------------------------- // A is full or bitmap //---------------------------------------------------------------------- // get the old and new dimensions int64_t anz_old = vlen_old * vdim_old ; int64_t anz_new = vlen_new * vdim_new ; size_t nzmax_new = GB_IMAX (anz_new, 1) ; size_t nzmax_old = A->nzmax ; bool in_place = A_is_full && (vlen_new == vlen_old || vdim_new <= 1) ; size_t asize = A->type->size ; //---------------------------------------------------------------------- // allocate or reallocate A->x and A->b //---------------------------------------------------------------------- bool ok = true ; if (in_place) { // reallocate A->x in-place; no data movement needed GB_REALLOC (A->x, nzmax_new*asize, nzmax_old*asize, GB_void, &(A->x_size), &ok, Context) ; } else { // allocate new space for A->x Ax_new = GB_MALLOC (nzmax_new*asize, GB_void, &Ax_new_size) ; ok = (Ax_new != NULL) ; if (A_is_bitmap) { // allocate new space for A->b Ab_new = GB_MALLOC (nzmax_new*asize, int8_t, &Ab_new_size) ; ok = ok && (Ab_new 
!= NULL) ; } } if (!ok) { // out of memory GB_FREE_ALL ; return (GrB_OUT_OF_MEMORY) ; } //---------------------------------------------------------------------- // move data if not in-place //---------------------------------------------------------------------- if (!in_place) { //------------------------------------------------------------------ // determine number of threads to use //------------------------------------------------------------------ GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (anz_new, chunk, nthreads_max) ; //------------------------------------------------------------------ // resize Ax //------------------------------------------------------------------ GB_void *restrict Ax_old = (GB_void *) A->x ; int64_t j ; if (vdim_new <= 4*nthreads) { // use all threads for each vector for (j = 0 ; j < vdim_new ; j++) { GB_void *pdest = Ax_new + j * vlen_new * asize ; GB_void *psrc = Ax_old + j * vlen_old * asize ; GB_memcpy (pdest, psrc, vlen_new * asize, nthreads) ; } } else { // use a single thread for each vector #pragma omp parallel for num_threads(nthreads) schedule(static) for (j = 0 ; j < vdim_new ; j++) { GB_void *pdest = Ax_new + j * vlen_new * asize ; GB_void *psrc = Ax_old + j * vlen_old * asize ; memcpy (pdest, psrc, vlen_new * asize) ; } } GB_FREE (&Ax_old, A->x_size) ; A->x = Ax_new ; A->x_size = Ax_new_size ; //------------------------------------------------------------------ // resize Ab if A is bitmap, and count the # of entries //------------------------------------------------------------------ if (A_is_bitmap) { int8_t *restrict Ab_old = A->b ; int64_t pnew ; int64_t anvals = 0 ; #pragma omp parallel for num_threads(nthreads) \ schedule(static) reduction(+:anvals) for (pnew = 0 ; pnew < anz_new ; pnew++) { int64_t i = pnew % vlen_new ; int64_t j = pnew / vlen_new ; int64_t pold = i + j * vlen_old ; int8_t ab = Ab_old [pold] ; Ab_new [pnew] = ab ; anvals += ab ; } A->nvals = anvals ; GB_FREE 
(&Ab_old, A->b_size) ; A->b = Ab_new ; A->b_size = Ab_new_size ; } } //---------------------------------------------------------------------- // adjust dimensions and return result //---------------------------------------------------------------------- A->vdim = vdim_new ; A->vlen = vlen_new ; A->nzmax = nzmax_new ; A->nvec = vdim_new ; A->nvec_nonempty = (vlen_new == 0) ? 0 : vdim_new ; ASSERT_MATRIX_OK (A, "A bitmap/full shrunk", GB0) ; return (GrB_SUCCESS) ; } else { //---------------------------------------------------------------------- // convert A to hypersparse and resize it //---------------------------------------------------------------------- // convert to hypersparse GB_OK (GB_convert_any_to_hyper (A, Context)) ; ASSERT (GB_IS_HYPERSPARSE (A)) ; // resize the number of sparse vectors int64_t *restrict Ah = A->h ; int64_t *restrict Ap = A->p ; A->vdim = vdim_new ; if (vdim_new < A->plen) { // reduce the size of A->p and A->h; this cannot fail info = GB_hyper_realloc (A, vdim_new, Context) ; ASSERT (info == GrB_SUCCESS) ; Ap = A->p ; Ah = A->h ; } if (vdim_new < vdim_old) { // descrease A->nvec to delete the vectors outside the range // 0...vdim_new-1. int64_t pleft = 0 ; int64_t pright = GB_IMIN (A->nvec, vdim_new) - 1 ; bool found ; GB_SPLIT_BINARY_SEARCH (vdim_new, Ah, pleft, pright, found) ; A->nvec = pleft ; } if (vdim_new < vdim_old) { // number of vectors is decreasing, need to count the new number of // non-empty vectors: done during pruning or by selector, below. 
A->nvec_nonempty = -1 ; // recomputed just below } //---------------------------------------------------------------------- // resize the length of each vector //---------------------------------------------------------------------- // if vlen is shrinking, delete entries outside the new matrix if (vlen_new < vlen_old) { GB_OK (GB_selector (NULL /* A in-place */, GB_RESIZE_opcode, NULL, false, A, vlen_new-1, NULL, Context)) ; } //---------------------------------------------------------------------- // vlen has been resized //---------------------------------------------------------------------- A->vlen = vlen_new ; ASSERT_MATRIX_OK (A, "A vlen resized", GB0) ; //---------------------------------------------------------------------- // conform the matrix to its desired sparsity structure //---------------------------------------------------------------------- return (GB_conform (A, Context)) ; } }
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 4; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
specden.c
/************************************************************************ * This program takes time-data and calculates the * powerspectrum/fourier transform of the autocorrelation function. * This is a re-write of the fourier.x code written by Volker Kleinschmidt * and Harald Forbert as a tcl plugin for VMD by Axel Kohlmeyer. * (c) 2002-2005 Harald Forbert, Volker Kleinschmidt (c) 2002-2008 Axel Kohlmeyer. * * usage: calc_specden(<ndat>,<input>,<output>,<deltat>,<maxfreq>,<temp>,<specr>); * <ndat> number of data sets. * <input> time series data. * <output> power spectrum. * <normtype> normalization correction type (fourier, classic, kubo, harmonic, schofield) * <deltat> time difference between data sets (in atomic units). * <maxfreq> max fequency (in wavenumbers). * <temp> temperature (in kelvin) * <specr> resolution of spectrum (1 gives maximal resolution and noise). * * the various corrections are: * fourier: is the plain power spectrum of the input data (normalized to * unity in the output frequency range. * classical: is the power spectrum with a prefactor of * \omega ( 1 - \exp(-\beta \hbar \omega) ) * corresponding to the classical/Gordon limit. * kubo: is the power spectrum with a prefactor of * \omega \tanh(\beta \hbar \omega/2) * corresponding to the Kubo correction * harmonic: is the power spectrum with a prefactor of * \omega \beta \hbar \omega * corresponding to the high temperature / harmonic limit * NOTE: this is the _recommended_ correction factor. * schofield: is the power spectrum with a prefactor of * \omega ( 1 - \exp(-\beta \hbar \omega) ) * * \exp(\beta \hbar \omega /2) * corresponding to Schofield's correction * * All spectra with their corresponding prefactor are separately normalized * in the output range to sum up to unity. * * Note: the index of refraction of the medium is set to unity. 
*************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <math.h> #include "specden.h" typedef union {double re; double im;} cmplx; /* helper function: * * calculate f_sum = cos_sum**2 + sin_sum**2, * with cos_sum = sum_j cos(j*w) input_j * and sin_sum = sum_j sin(j*w) input_j * * sums start at 0, but indices of input start at 1, e.g. * cos_sum = sum_{j=1}^n cos((j-1)*w) input_j */ static cmplx fourier_sum (const int n, const double *input, const double omega) { int k; double lambda, duk, uk, cs, ss, cf, sf; cmplx result; /* in order to be able to sum up the input_j in ascending order * use above algorithm with inverse data ordering and correct * omega -> - omega and the new origin at the end */ uk = 0.0; duk = 0.0; if (cos(omega) > 0.0) { lambda = -4.0*sin(0.5*omega)*sin(0.5*omega); for (k=1; k <= n; ++k) { uk = uk + duk; duk = lambda*uk + duk + input[3*k]; } } else { /* cos(omega) <= 0.0_dbl */ lambda = 4.0*cos(0.5*omega)*cos(0.5*omega); for (k=1; k <= n; ++k) { uk = duk - uk; duk = lambda*uk - duk + input[3*k]; } } cs = duk - 0.5 * lambda * uk; ss = uk * sin(omega); /* now correct for ordering: */ cf = cos(omega*(n-1)); sf = sin(omega*(n-1)); result.re = cf*cs+sf*ss; /* cos_sum */ result.im = sf*cs-cf*ss; /* sin_sum */ return result; /* return cf*cs*cf*cs+sf*ss*sf*ss+sf*cs*sf*cs+cf*ss*cf*ss; */ } /* main function */ int calc_specden(const int ndat, double *input, double *output, const int normtype, const int specr, const double maxfreq, const double deltat, const double temp) { int nn, i, k; double wave_fac, bh, dt, t, c, f, s, e; double *ftrans, *wtrans; double norm_fourier, norm_classic, norm_kubo, norm_harmonic, norm_schofield; wave_fac = 219474.0/deltat; bh = 1.05459e-34/1.38066e-23/2.41889e-17/deltat/temp; if (specr < 1) { fprintf(stderr, "\nspecden spectrum resolution factor must be bigger or equal 1.\n"); return -20; } /* number of frequencies */ nn = (int) 
((double)ndat)*maxfreq/wave_fac/(2.0*M_PI); if (nn+1 > ndat) { fprintf(stderr, "Maximum frequency too large\n"); return -40; } nn = nn/specr; ftrans = malloc((nn+2)*sizeof(double)); if (ftrans == NULL) { fprintf(stderr, "Out of memory, while trying to allocate array 'ftrans'.\n"); return -50; } wtrans = malloc((nn+2)*sizeof(double)); if (ftrans == NULL) { fprintf(stderr, "Out of memory, while trying to allocate array 'wtrans'.\n"); return -60; } /* read data and apply windowing function */ #if defined(_OPENMP) #pragma omp parallel for private(i) schedule(static) #endif for (i=1; i<ndat+1; ++i) { double win; win=((double)(2*i-ndat-1))/((double)(ndat+1)); win=1.0-win*win; input[3*i] *=win; input[3*i+1] *=win; input[3*i+2] *=win; } input[3*ndat+3] = 0.0; input[3*ndat+4] = 0.0; input[3*ndat+5] = 0.0; dt = 2.0*specr*M_PI/((ndat+1)*specr); #if defined(_OPENMP) #pragma omp parallel for private(i,k,t,c,f,s,e) schedule(static) #endif for (i=0; i<nn+1; ++i) { cmplx f1,f2,f3; t = 2.0*((double)(i*specr))*M_PI/((double)(ndat+1)); c = 0.0; for (k=0; k < specr; ++k) { /* sum over all three dimensions */ f1 = fourier_sum(ndat,(input+0), t+(double)k*dt); f2 = fourier_sum(ndat,(input+1), t+(double)k*dt); f3 = fourier_sum(ndat,(input+2), t+(double)k*dt); f = f1.re*f1.re; f += f1.im*f1.im; f += f2.re*f2.re; f += f2.im*f2.im; f += f3.re*f3.re; f += f3.im*f3.im; /* input data should have zero mean... 
*/ if (i+k == 0) f=0.0; /* apply cubic spline correction for input data */ s=0.5*(t+k*dt); if (s>0.1) { e=pow(sin(s)/s,4.0); } else { e=pow(1.0-(s*s)/6.0+(s*s*s*s)/120.0,4.0); } e = e*3.0/(1.0+2.0*cos(s)*cos(s)); c = c+e*e*f; } wtrans[1+i] = t+0.5*dt*((double)(specr-1)); ftrans[1+i] = c; } /* compute norm */ norm_fourier=norm_classic=norm_kubo=norm_harmonic=norm_schofield=0.0; for (i=0; i<=nn; ++i) { t = wtrans[1+i]; f = ftrans[1+i]; e = t*(1.0 - exp(-bh*t)); norm_fourier += f; norm_classic += f*e; norm_kubo += f*e/(1.0+exp(-bh*t)); norm_harmonic += f*t*t; norm_schofield += f*e*exp(0.5*bh*t); } norm_fourier = 1.0/norm_fourier; norm_classic = 1.0/norm_classic; norm_kubo = 1.0/norm_kubo; norm_harmonic = 1.0/norm_harmonic; norm_schofield = 1.0/norm_schofield; /* output */ for (i=0; i<=nn; ++i) { t = wtrans[1+i]; f = ftrans[1+i]; e = t*(1.0 - exp(-bh*t)); output[2*i] = wave_fac*t; switch (normtype) { case NORM_FOURIER: output[2*i+1] = norm_fourier*f; break; case NORM_CLASSIC: output[2*i+1] = norm_classic *f*e; break; case NORM_KUBO: output[2*i+1] = norm_kubo*f*e/(1.0+exp(-bh*t)); break; case NORM_HARMONIC: output[2*i+1] = norm_harmonic*f*t*t; break; case NORM_SCHOFIELD: output[2*i+1] = norm_schofield*f*e*exp(0.5*bh*t); break; default: fprintf(stderr, "specden: unknown normalization. %d\n", normtype); return -200; } } return nn; }
mlp.c
#include <stdio.h> #include <stdlib.h> #include "popc.h" #include "activation.h" #include "loss.h" #include "optimization.h" #include "ann.h" #include "cell.h" #include "mlp.h" #include "training.h" mlpSingleton ptr mlpSingletonNew () { static mlpSingleton ptr np = NULL; if (np == NULL) { np = (mlpSingleton ptr) malloc (sizeof (mlpSingleton)); dbcEnsure (np != NULL, "Memory Allocation Error!"); np -> mlpNew = mlpNew; np -> mlpDel = mlpDel; np -> addLayer = mlpAddLayer; np -> train = mlpTrain; np -> trainTuple = mlpTrainTuple; np -> predict = mlpPredict; np -> predictTuple = mlpPredictTuple; np -> setInput = mlpSetInput; np -> setTarget = mlpSetTarget; np -> propagateForward = mlpPropagateForward; np -> propagateBackward = mlpPropagateBackward; } return np; } mlp ptr mlpNew () { mlp ptr nw = (mlp ptr) malloc (sizeof (mlp)); dbcEnsure (nw != NULL, "Memory Allocation Error!"); nw -> annt = anntMultiLayerPerceptron; nw -> layer2d = NULL; nw -> inputLayerCount = 0; nw -> hiddenLayerCount = 0; nw -> outputLayerCount = 0; nw -> totalLayerCount = 0; nw -> onTrainEpochBegin = NULL; nw -> onTrainEpochEnd = NULL; nw -> onTrainTupleBegin = NULL; nw -> onTrainTupleEnd = NULL; return nw; } void mlpDel (mlp ptr nw) { // PENDING! 
need to clean-up inner objects free (nw); nw = NULL; } void mlpAddInputLayer () { } void mlpAddHiddenLayer () { } void mlpAddOutputLayer () { } void mlpAddLayer ( mlp ptr nw, int cellCount, layerType lt, cellType ct, activationFunctionType aft, lossFunctionType lft, optimizationFunctionType oft ) { /* subject mlpLayer { // header layerIndex index; layerCellCount cellCount; layerType lt; cellType ct; layerError error; layerDeltaError deltaErrorGradient; activationFunctionType aft; lossFunctionType lft; optimizationFunctionType oft; double learningRate; double learningMomentumRate; // /header // cell vector cellConnectionCount ptr connectionCount1d; cellInput ptr2d input2d; cellWeight ptr2d weight2d; cellOutput ptr output1d; cellTarget ptr target1d; cellError ptr outputError1d; cellDeltaError ptr outputDeltaErrorGradient1d; cellBias ptr bias1d; cellDeltaBias ptr biasDeltaGradient1d; activation ptr2d actFn2d; loss ptr2d lossFn2d; optimization ptr2d optFn2d; // /cell vector } mlpLayer; */ //mlpLayer ptr currentLayer; mlpLayerAppend (nw, cellCount, lt, ct, aft, lft, oft); } // Propagation/ void mlpPropagateForward (mlp ptr nw) { mlpLayer ptr previousLayer, ptr currentLayer //ptr nextLayer ; /* printf ("SL\t\ II:0\tII:1\tIO:0\tIO:1\t\ HW:00\tHW:01\tHW:10\tHW:11\tHO:0\tHO:1\t\ OW:00\tOW:01\tOO:0\n" ); int sl = 0; */ //#pragma omp parallel for for (int index = 1; index < nw -> totalLayerCount; index ++) { previousLayer = index == 0 ? NULL : nw -> layer2d [index - 1]; currentLayer = nw -> layer2d [index]; //nextLayer = index == nw -> totalLayerCount - 1 ? 
NULL : nw -> layer2d [index + 1]; mlpLayerPropagateForward (previousLayer, currentLayer); } /* printf ("%d\t\ %lf\t%lf\t%lf\t%lf\t\ %lf\t%lf\t%lf\t%lf\t%lf\t%lf\t\ %lf\t%lf\t%lf\n", sl ++, nw -> layer2d [0] -> input2d [0][0], nw -> layer2d [0] -> input2d [0][1], nw -> layer2d [0] -> output1d [0], nw -> layer2d [0] -> output1d [1], nw -> layer2d [1] -> weight2d [0][0], nw -> layer2d [1] -> weight2d [0][1], nw -> layer2d [1] -> weight2d [1][0], nw -> layer2d [1] -> weight2d [1][1], nw -> layer2d [1] -> output1d [0], nw -> layer2d [1] -> output1d [1], nw -> layer2d [2] -> weight2d [0][0], nw -> layer2d [2] -> weight2d [0][1], nw -> layer2d [2] -> output1d [0] ); */ } // Back Propagation/ void mlpPropagateBackward (mlp ptr nw, double learningRate, double learningMomentumRate) { mlpLayer ptr previousLayer, ptr currentLayer, ptr nextLayer ; //#pragma omp parallel for for (int index = nw -> totalLayerCount - 1; index > 0; index --) { previousLayer = index == 0 ? NULL : nw -> layer2d [index - 1]; currentLayer = nw -> layer2d [index]; nextLayer = index == nw -> totalLayerCount - 1 ? NULL : nw -> layer2d [index + 1]; mlpLayerPropagateBackward (previousLayer, currentLayer, nextLayer, learningRate, learningMomentumRate); } } void mlpTrain (mlp ptr nn, double ptr1d inputTable1d, int inputTupleMax, int inputColumnMax, double ptr1d targetTable1d, int targetTupleMax, int targetColumnMax, trainingType tt, int epochMax, int batchSize, double learningRate, double learningMomentumRate) { dbcRequire (tt == ttOnlineTraining, "Only Online Training Type is allowed."); dbcRequire (batchSize == 1, "Only batch size of 1 is allowed, due to the Online Training Type implementation."); // reset epoch statistics fn ()/ nn -> epochTally = 0; for (int epoch = 0; epoch < epochMax; epoch ++) { if (nn -> onTrainEpochBegin != NULL && nn -> onTrainEpochBegin (nn, inputTable1d, inputTupleMax, inputColumnMax, targetTable1d, targetTupleMax, targetColumnMax, epoch) == /* should we skip? 
*/ true) continue ; // reset table statistics fn ()/ nn -> tupleTally = 0; //#pragma omp parallel for for (int r = 0; r < inputTupleMax; r ++) { //mlp ptr nw, double ptr inputTuple1d, int inputColumnMax, double ptr targetTuple1d, int targetColumnMax if (nn -> onTrainTupleBegin != NULL && nn -> onTrainTupleBegin (nn, addr inputTable1d [r * inputColumnMax], inputColumnMax, addr targetTable1d[r * targetColumnMax], targetColumnMax) == /* should we skip? */ true) continue ; mlpTrainTuple (nn, tt, addr inputTable1d [r * inputColumnMax], /*(double ptr)*/ addr targetTable1d [r * targetColumnMax], learningRate, learningMomentumRate); // update table statistics fn ()/ nn -> tupleTally ++; if (nn -> onTrainTupleEnd != NULL) nn -> onTrainTupleEnd (nn, addr inputTable1d [r * inputColumnMax], inputColumnMax, addr targetTable1d[r * targetColumnMax], targetColumnMax) ; } // update epoch statistics fn ()/ nn -> epochTally ++; if (nn -> onTrainEpochEnd != NULL) nn -> onTrainEpochEnd (nn, inputTable1d, inputTupleMax, inputColumnMax, targetTable1d, targetTupleMax, targetColumnMax, epoch) ; } } void mlpTrainTuple (mlp ptr nw, trainingType tt, double ptr input1d, double ptr target1d, double learningRate, double learningMomentumRate) { dbcRequire (tt == ttOnlineTraining, "Given Training Type not implemented yet."); mlpSetInput (nw, input1d); mlpSetTarget (nw, target1d); mlpPropagateForward (nw); mlpPropagateBackward (nw, learningRate, learningMomentumRate); } void mlpPredict (mlp ptr nn, double ptr2d input2d) { int inputEndTuple = sizeof (input2d); //#pragma omp parallel for for (int r = 0; r < inputEndTuple; r ++) { mlpPredictTuple (nn, input2d [r]); //printf ("%f XOR %f = %f. Predicted: %f\n", dataIn [r][0], dataIn [r][1], dataOut [r][0], ((layer ptr) nw -> layerLinkedList -> tail -> obj) -> neuron1d [0] -> axon -> output ); //printf ("%f XOR %f = %f. 
Target: %f; Predicted: %f.\n", input2d [r][0], input2d [r][1], dataOut [r][0], nw -> layer2d [nw -> totalLayerCount - 1] -> target1d [0], nw -> layer2d [nw -> totalLayerCount - 1] -> output1d [0] ); } } void mlpPredictTuple (mlp ptr nw, double ptr input1d) { mlpSetInput (nw, input1d); mlpPropagateForward (nw); } void mlpSetInput (mlp ptr nw, double ptr input1d) { mlpLayer ptr inputLayer = nw -> layer2d [0]; //#pragma omp parallel for for (int c = 0; c < inputLayer -> cellCount; c ++) { // <output = input> //inputLayer -> input2d [c][0] = input1d [c]; // We could omit this line. Kept for brevity. inputLayer -> output1d [c] = input1d [c]; // </output = input> } } void mlpSetTarget (mlp ptr nw, double ptr target1d) { mlpLayer ptr outputLayer = nw -> layer2d [nw -> totalLayerCount - 1]; //#pragma omp parallel for for (int c = 0; c < outputLayer -> cellCount; c ++) { outputLayer -> target1d [c] = target1d [c]; } }
header.h
/*--------------------------------------------------------------------
c---------------------------------------------------------------------
c
c  header.h
c
c---------------------------------------------------------------------
c-------------------------------------------------------------------*/

/*--------------------------------------------------------------------
c The following include file is generated automatically by the
c "setparams" utility. It defines
c      maxcells:      the square root of the maximum number of processors
c      problem_size:  12, 64, 102, 162 (for class T, A, B, C)
c      dt_default:    default time step for this problem size if no
c                     config file
c      niter_default: default number of iterations for this problem size
--------------------------------------------------------------------*/

#include "npbparams.h"

/* Matrix identifiers used to index the block-tridiagonal system. */
#define AA 0
#define BB 1
#define CC 2
/* Size of the dense sub-blocks of the system. */
#define BLOCK_SIZE 5

/* COMMON block: global */
/* Number of grid points in each of the three dimensions. */
static int grid_points[3]; /* grid_ponts(1:3) */

/* COMMON block: constants */
/* Scalar solver coefficients; set elsewhere (not in this header). */
static double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3;
static double dx1, dx2, dx3, dx4, dx5;
static double dy1, dy2, dy3, dy4, dy5;
static double dz1, dz2, dz3, dz4, dz5;
static double dssp, dt;
static double ce[5][13]; /* ce(5,13) */
static double dxmax, dymax, dzmax;
static double xxcon1, xxcon2, xxcon3, xxcon4, xxcon5;
static double dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1;
static double yycon1, yycon2, yycon3, yycon4, yycon5;
static double dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1;
static double zzcon1, zzcon2, zzcon3, zzcon4, zzcon5;
static double dz1tz1, dz2tz1, dz3tz1, dz4tz1, dz5tz1;
static double dnxm1, dnym1, dnzm1, c1c2, c1c5, c3c4, c1345;
static double conz1, c1, c2, c3, c4, c5, c4dssp, c5dssp, dtdssp;
static double dttx1, dttx2, dtty1, dtty2, dttz1, dttz2;
static double c2dttx1, c2dtty1, c2dttz1, comz1, comz4, comz5, comz6;
static double c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16;

#define IMAX PROBLEM_SIZE
#define JMAX PROBLEM_SIZE
#define KMAX PROBLEM_SIZE

/*
c to improve cache performance, grid dimensions padded by 1
c for even number sizes only.
*/
/* COMMON block: fields */
/* Field arrays over the (padded) 3-D grid; the "/2*2+1" idiom rounds an
 * even dimension up by one to avoid power-of-two cache conflicts. */
static double us[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double vs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double ws[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double qs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double rho_i[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double square[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1];
static double forcing[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5+1];
static double u[(IMAX+1)/2*2+1][(JMAX+1)/2*2+1][(KMAX+1)/2*2+1][5];
static double rhs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][5];
static double lhs[IMAX/2*2+1][JMAX/2*2+1][KMAX/2*2+1][3][5][5];

/* COMMON block: work_1d */
/* Per-thread 1-D scratch arrays (threadprivate below). */
static double cuf[PROBLEM_SIZE];
static double q[PROBLEM_SIZE];
static double ue[PROBLEM_SIZE][5];
static double buf[PROBLEM_SIZE][5];
#pragma omp threadprivate(cuf, q, ue, buf)

/*
c to improve cache performance, grid dimensions (first two for these
c to arrays) padded by 1 for even number sizes only.
*/
/* COMMON block: work_lhs */
static double fjac[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5]; /* fjac(5, 5, 0:IMAX/2*2, 0:JMAX/2*2, 0:KMAX-1) */
static double njac[IMAX/2*2+1][JMAX/2*2+1][KMAX-1+1][5][5]; /* njac(5, 5, 0:IMAX/2*2, 0:JMAX/2*2, 0:KMAX-1) */
/* NOTE(review): tmp1/tmp2/tmp3 are shared scratch scalars, unlike the
 * threadprivate work_1d arrays above -- confirm against the parallel
 * regions that use them that this is intentional. */
static double tmp1, tmp2, tmp3;
private-clause.c
/* gcc -fopenmp -O2 private-clause.c -o private-clause

export OMP_DYNAMIC=FALSE
export OMP_NUM_THREADS=2

$ ./private-clause
thread 0 suma a[0] / thread 0 suma a[1] / thread 0 suma a[2] / thread 0 suma a[3] / thread 1 suma a[4] / thread 1 suma a[5] / thread 1 suma a[6] /
* thread 0 suma= 6
* thread 1 suma= 15
*/

#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif

/*
 * Demonstrates the OpenMP private() clause: each thread receives its own
 * UNINITIALIZED copy of `suma` inside the parallel region, and the shared
 * `suma` (18 here) is untouched by the region.
 *
 * Returns 0 on success.
 *
 * Fix vs. original: `main()` was declared with implicit int (removed from
 * the language in C99) and had no return statement; now `int main(void)`
 * with an explicit `return 0;`.
 */
int main(void)
{
    int i, n = 7;
    int a[n], suma = 18;      /* VLA (C99); suma starts at 18 to show it survives */

    for (i = 0; i < n; i++)
        a[i] = i;

    printf("-- suma_antes_de_parallel = %i\n", suma);

    /* Using private() is dangerous: the moment a shared variable is made
       private, each thread's copy starts out as garbage and the previous
       value is lost inside the region. */
    #pragma omp parallel private(suma)
    {
        /* If this initialization is removed, each thread adds onto garbage. */
        suma = 0;

        #pragma omp for
        for (i = 0; i < n; i++) {
            suma = suma + a[i];
            printf("thread %d suma a[%d] / ", omp_get_thread_num(), i);
        }

        /* Each thread reports only its own partial sum. */
        printf("\n* thread %d suma= %d", omp_get_thread_num(), suma);
    }
    printf("\n");

    /* The shared suma is unchanged (still 18): private copies vanished. */
    printf("-- suma = %i\n", suma);
    return 0;
}
gsrb.ca.c
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
//#define GSRB_STRIDE2
//#define GSRB_FP
//------------------------------------------------------------------------------------------------------------------------------
// This implements a communication avoiding (aggregation) smoother
// It assumes...
//   in-place updates (no ping pong)
//   stencil radius==1
//------------------------------------------------------------------------------------------------------------------------------
// Gauss-Seidel Red-Black (GSRB) smoother for one multigrid level.
//   level  : the grid level to smooth (boxes, vectors, timers)
//   phi_id : vector id of the unknown being smoothed (updated in place)
//   rhs_id : vector id of the right-hand side
//   a, b   : operator coefficients consumed by the apply_op_ijk macro
// Communication avoidance: when the boxes carry more than one ghost zone
// (ghosts > stencil radius), each boundary exchange is amortized over
// `ghosts` consecutive sub-sweeps, each operating on a region that shrinks
// by one cell per sweep (ghostsToOperateOn counts down).
void smooth(level_type * level, int phi_id, int rhs_id, double a, double b){
  int box,s;
  int ghosts = level->box_ghosts;
  int communicationAvoiding = ghosts > stencil_get_radius();
  // hard requirement of this kernel; bail out otherwise
  if(stencil_get_radius()>1){fprintf(stderr,"CA GSRB requires a stencil radius of 1\n");exit(0);}

  // if communication-avoiding, need updated RHS for stencils in ghost zones
  if(communicationAvoiding)exchange_boundary(level,rhs_id,STENCIL_SHAPE_BOX);

  for(s=0;s<2*NUM_SMOOTHS;s+=ghosts){ // there are two sweeps per GSRB smooth
    // refresh ghost zones once per `ghosts` sub-sweeps (full box shape when CA)
    exchange_boundary(level,phi_id,communicationAvoiding ? STENCIL_SHAPE_BOX: stencil_get_shape());
            apply_BCs(level,phi_id,communicationAvoiding ? STENCIL_SHAPE_BOX: stencil_get_shape());

    // now do ghosts communication-avoiding smooths on each box...
    uint64_t _timeStart = CycleTime();
    for(box=0;box<level->num_my_boxes;box++){
      int i,j,k,ss;
      int color000 = (level->my_boxes[box].low.i^level->my_boxes[box].low.j^level->my_boxes[box].low.k)&1; // is element 000 red or black ???  (should only be an issue if box dimension is odd)
      const int jStride = level->my_boxes[box].jStride;
      const int kStride = level->my_boxes[box].kStride;
      const int     dim = level->my_boxes[box].dim;
      const double h2inv = 1.0/(level->h*level->h); // consumed by apply_op_ijk / Dinv_ijk macros
      // All vector pointers are shifted so index [0] is the first non-ghost-zone point.
      const double * __restrict__ phi     = level->my_boxes[box].vectors[ phi_id] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point
            double * __restrict__ phi_new = level->my_boxes[box].vectors[ phi_id] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point
      const double * __restrict__ rhs     = level->my_boxes[box].vectors[ rhs_id] + ghosts*(1+jStride+kStride);
      const double * __restrict__ alpha   = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride);
      const double * __restrict__ beta_i  = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride);
      const double * __restrict__ beta_j  = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride);
      const double * __restrict__ beta_k  = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride);
      const double * __restrict__ Dinv    = level->my_boxes[box].vectors[VECTOR_DINV  ] + ghosts*(1+jStride+kStride);
      const double * __restrict__ valid   = level->my_boxes[box].vectors[VECTOR_VALID ] + ghosts*(1+jStride+kStride); // cell is inside the domain
      // precomputed 1.0/0.0 masks selecting red vs black cells (GSRB_FP variant)
      const double * __restrict__ RedBlack[2] = {level->RedBlack_FP[0] + ghosts*(1+jStride),
                                                 level->RedBlack_FP[1] + ghosts*(1+jStride)};

      // each sub-sweep ss touches one fewer layer of ghost cells than the previous
      int ghostsToOperateOn=ghosts-1;
      for(ss=s;ss<s+ghosts;ss++,ghostsToOperateOn--){
        #if defined(GSRB_FP)
        #warning GSRB using pre-computed 1.0/0.0 FP array for Red-Black to facilitate vectorization...
        #pragma omp parallel for private(i,j,k) collapse(2)
        for(k=0-ghostsToOperateOn;k<dim+ghostsToOperateOn;k++){
        for(j=0-ghostsToOperateOn;j<dim+ghostsToOperateOn;j++){
        for(i=0-ghostsToOperateOn;i<dim+ghostsToOperateOn;i++){
          // select the red/black mask plane for this k-plane and sub-sweep
          int EvenOdd = (k^ss^color000)&1;
          int ij  = i + j*jStride;
          int ijk = i + j*jStride + k*kStride;
          double Ax     = apply_op_ijk(phi);
          double lambda =     Dinv_ijk();
          // mask is 1.0 on the active color and 0.0 elsewhere, so every cell is "updated"
          phi_new[ijk] = phi[ijk] + RedBlack[EvenOdd][ij]*lambda*(rhs[ijk]-Ax); // compiler seems to get confused unless there are disjoint read/write pointers
        }}}
        #elif defined(GSRB_STRIDE2)
        #warning GSRB using stride-2 accesses to minimie the number of flop's
        #pragma omp parallel for private(i,j,k) collapse(2)
        for(k=0-ghostsToOperateOn;k<dim+ghostsToOperateOn;k++){
        for(j=0-ghostsToOperateOn;j<dim+ghostsToOperateOn;j++){
        // starting index parity picks the active color; i advances by 2
        for(i=((j^k^ss^color000)&1)+1-ghosts;i<dim+ghostsToOperateOn;i+=2){ // stride-2 GSRB
          int ijk = i + j*jStride + k*kStride;
          double Ax     = apply_op_ijk(phi);
          double lambda =     Dinv_ijk();
          phi_new[ijk] = phi[ijk] + lambda*(rhs[ijk]-Ax);
        }}}
        #else
        #warning GSRB using if-then-else on loop indices for Red-Black because its easy to read...
        #pragma omp parallel for private(i,j,k) collapse(2)
        for(k=0-ghostsToOperateOn;k<dim+ghostsToOperateOn;k++){
        for(j=0-ghostsToOperateOn;j<dim+ghostsToOperateOn;j++){
        for(i=0-ghostsToOperateOn;i<dim+ghostsToOperateOn;i++){
          // parity test selects the active color each sub-sweep
          if((i^j^k^ss^color000^1)&1){ // looks very clean when [0] is i,j,k=0,0,0
            int ijk = i + j*jStride + k*kStride;
            double Ax     = apply_op_ijk(phi);
            double lambda =     Dinv_ijk();
            phi_new[ijk] = phi[ijk] + lambda*(rhs[ijk]-Ax);
        }}}}
        #endif
      } // ss-loop
    } // boxes
    // accumulate only the stencil time; exchanges/BCs are timed elsewhere
    level->cycles.smooth += (uint64_t)(CycleTime()-_timeStart);
  } // s-loop
}
//------------------------------------------------------------------------------------------------------------------------------
SynchronizerMPI.h
/* * SynchronizerMPI.h * Cubism * * Copyright 2018 ETH Zurich. All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #pragma once #include <map> #include <vector> #include <cmath> #include <algorithm> #include "mpi.h" #ifdef _OPENMP #include <omp.h> #endif #include "BlockInfo.h" #include "StencilInfo.h" #include "PUPkernelsMPI.h" #include "DependencyCubeMPI.h" class SynchronizerMPI { struct I3 { int ix, iy, iz; I3(int ix, int iy, int iz):ix(ix), iy(iy), iz(iz){} I3(const I3& c): ix(c.ix), iy(c.iy), iz(c.iz){} bool operator<(const I3& a) const { return ix<a.ix || (ix==a.ix && iy<a.iy) || (ix==a.ix && iy==a.iy && iz<a.iz); } }; struct PackInfo { Real * block, * pack; int sx, sy, sz, ex, ey, ez; }; struct SubpackInfo { Real * block, * pack; int sx, sy, sz, ex, ey, ez; int x0, y0, z0, xpacklenght, ypacklenght; }; DependencyCubeMPI<MPI_Request> cube; const int synchID; bool isroot; int send_thickness[3][2], recv_thickness[3][2]; int blockinfo_counter; StencilInfo stencil; std::vector<PackInfo> send_packinfos; std::map<Real *, std::vector<PackInfo> > recv_packinfos; std::map<Real *, std::vector<SubpackInfo> > recv_subpackinfos; std::vector<Real *> all_mallocs; std::vector<BlockInfo> globalinfos; std::map<Region, std::vector<BlockInfo> > region2infos; //?static? 
MPI_Comm cartcomm; int blocksize[3]; int mypeindex[3], pesize[3], mybpd[3]; int periodic[3]; int neighborsrank[3][3][3]; std::map<I3,int> c2i; struct CommData { Real * faces[3][2], * edges[3][2][2], * corners[2][2][2]; std::set<MPI_Request> pending; } send, recv; bool _face_needed(const int d) const { return periodic[d] || (mypeindex[d] > 0 && mypeindex[d] < pesize[d] - 1); } bool _myself(const int indx[3]) { return (indx[0]+pesize[0]) % pesize[0] == mypeindex[0] && (indx[1]+pesize[1]) % pesize[1] == mypeindex[1] && (indx[2]+pesize[2]) % pesize[2] == mypeindex[2]; } int _rank(const int indx[3]) { int indx_final[3]={indx[0],indx[1],indx[2]}; for(int i=0; i<3; ++i) { if (pesize[i]==1) continue; const int d=indx[i]- mypeindex[i]; indx_final[i]=d-pesize[i]*(int)((double)d/(pesize[i]-1))+mypeindex[i]; } #if !defined(NDEBUG) for(int i=0;i<3;++i) assert(indx_final[i]>=-1+mypeindex[i] && indx_final[i]<2+mypeindex[i]); #endif return neighborsrank[indx_final[2]+1-mypeindex[2]][indx_final[1]+1-mypeindex[1]][indx_final[0]+1-mypeindex[0]]; } template <bool computesubregions> std::map<Real *, std::vector<SubpackInfo> > _setup(CommData& data, const int thickness[3][2], const int blockstart[3], const int blockend[3], const int origin[3], std::vector<PackInfo>& packinfos) { std::map<Real *, std::vector<SubpackInfo> > retval; const int NC = stencil.selcomponents.size(); const int bpd[3] = { mybpd[0], mybpd[1], mybpd[2] }; //faces for(int d=0; d<3; ++d) { const int dim_other1 = (d+1)%3; const int dim_other2 = (d+2)%3; for(int s=0; s<2; ++s) { const int NFACEBLOCK = NC * thickness[d][s] * blocksize[dim_other1] * blocksize[dim_other2]; const int NFACE = NFACEBLOCK * mybpd[dim_other1] * mybpd[dim_other2]; const bool needed = _face_needed(d) || NFACE == 0; data.faces[d][s] = needed ? 
_myalloc(sizeof(Real)*NFACE, 16) : NULL; if (!needed) continue; int neighbor_index[3]; neighbor_index[d] = (mypeindex[d] + 2*s-1 + pesize[d])%pesize[d]; neighbor_index[dim_other1] = mypeindex[dim_other1]; neighbor_index[dim_other2] = mypeindex[dim_other2]; if (_myself(neighbor_index)) continue; int start[3]; start[d] = (1-s)*blockstart[d] + s*(blockend[d]-thickness[d][s]); start[dim_other1] = 0; start[dim_other2] = 0; int end[3]; end[d] = (1-s)*(blockstart[d] + thickness[d][s]) + s*blockend[d]; end[dim_other1] = blocksize[dim_other1]; end[dim_other2] = blocksize[dim_other2]; const int n1 = bpd[dim_other1]; const int n2 = bpd[dim_other2]; for(int b=0; b<n2; ++b) for(int a=0; a<n1; ++a) { int index[3]; index[d] = s*(bpd[d]-1); index[dim_other1] = a; index[dim_other2] = b; assert(c2i.find(I3(origin[0] + index[0], origin[1] + index[1], origin[2] + index[2]))!=c2i.end()); const int blockid = c2i[I3(origin[0] + index[0], origin[1] + index[1], origin[2] + index[2])]; PackInfo info = {(Real *)globalinfos[blockid].ptrBlock, data.faces[d][s] + NFACEBLOCK*(a + n1*b), start[0], start[1], start[2], end[0], end[1], end[2]}; const bool nonempty = end[0]>start[0] && end[1]>start[1] && end[2]>start[2]; if (nonempty) packinfos.push_back(info); } } } if (!stencil.tensorial) return retval; //edges for(int d=0; d<3; ++d) { const int dim_other1 = (d+1)%3; const int dim_other2 = (d+2)%3; for(int b=0; b<2; ++b) for(int a=0; a<2; ++a) { const int NEDGEBLOCK = NC * blocksize[d] * thickness[dim_other2][b] * thickness[dim_other1][a]; const int NEDGE = NEDGEBLOCK * mybpd[d]; const bool needed = NEDGE > 0; data.edges[d][b][a] = needed ? 
_myalloc(sizeof(Real)*NEDGE, 16) : NULL; if (!needed) continue; int neighbor_index[3]; neighbor_index[d] = mypeindex[d]; neighbor_index[dim_other1] = (mypeindex[dim_other1] + 2*a-1 + pesize[dim_other1])%pesize[dim_other1]; neighbor_index[dim_other2] = (mypeindex[dim_other2] + 2*b-1 + pesize[dim_other2])%pesize[dim_other2]; if (_myself(neighbor_index)) continue; int start[3]; start[d] = 0; start[dim_other1] = blockstart[dim_other1]*(1-a) + a*(blockend[dim_other1]-thickness[dim_other1][1]); start[dim_other2] = blockstart[dim_other2]*(1-b) + b*(blockend[dim_other2]-thickness[dim_other2][1]); int end[3]; end[d] = blocksize[d]; end[dim_other1] = a*blockend[dim_other1] + (1-a)*(blockstart[dim_other1] + thickness[dim_other1][0]); end[dim_other2] = b*blockend[dim_other2] + (1-b)*(blockstart[dim_other2] + thickness[dim_other2][0]); const int n = bpd[d]; for(int c=0; c<n; ++c) { int index[3]; index[d] = c; index[dim_other1] = a*(bpd[dim_other1]-1); index[dim_other2] = b*(bpd[dim_other2]-1); assert(c2i.find(I3(origin[0] + index[0], origin[1] + index[1], origin[2] + index[2]))!=c2i.end()); const int blockid = c2i[I3(origin[0] + index[0], origin[1] + index[1], origin[2] + index[2])]; PackInfo info = {(Real *)globalinfos[blockid].ptrBlock, data.edges[d][b][a] + NEDGEBLOCK*c, start[0], start[1], start[2], end[0], end[1], end[2]}; const bool nonempty = end[0]>start[0] && end[1]>start[1] && end[2]>start[2]; if (nonempty) packinfos.push_back(info); } } } //new part if (computesubregions) { for(int dface=0; dface<3; ++dface) { const int dim_other1face = (dface+1)%3; const int dim_other2face = (dface+2)%3; for(int s=0; s<2; ++s) { { int neighbor_pe[3]; neighbor_pe[dface] = (mypeindex[dface] + 2*s-1 + pesize[dface])%pesize[dface]; neighbor_pe[dim_other1face] = mypeindex[dim_other1face]; neighbor_pe[dim_other2face] = mypeindex[dim_other2face]; if (_myself(neighbor_pe)) continue; } const int n1 = mybpd[dim_other1face]; const int n2 = mybpd[dim_other2face]; const int NFACEBLOCK = NC * 
thickness[dface][s] * blocksize[dim_other1face] * blocksize[dim_other2face]; int face_start[3]; face_start[dface] = (1-s)*blockstart[dface] + s*(blockend[dface]-thickness[dface][s]); face_start[dim_other1face] = 0; face_start[dim_other2face] = 0; int face_end[3]; face_end[dface] = (1-s)*(blockstart[dface] + thickness[dface][s]) + s*blockend[dface]; face_end[dim_other1face] = blocksize[dim_other1face]; face_end[dim_other2face] = blocksize[dim_other2face]; assert(NFACEBLOCK == NC*(face_end[0]-face_start[0])*(face_end[1]-face_start[1])*(face_end[2]-face_start[2])); for(int p2=0; p2<n2; ++p2) for(int p1=0; p1<n1; ++p1) //iterate over inner face blocks { int index[3]; index[dface] = s*(mybpd[dface]-1); index[dim_other1face] = p1 ; index[dim_other2face] = p2; assert(c2i.find(I3(origin[0] + index[0], origin[1] + index[1], origin[2] + index[2]))!=c2i.end()); const int blockID = c2i[I3(origin[0] + index[0], origin[1] + index[1], origin[2] + index[2])]; Real * const ptrBlock = (Real*)globalinfos[blockID].ptrBlock; for(int dedge=0; dedge<3; ++dedge) //iterate over edges { const int dim_other1edge = (dedge+1)%3; const int dim_other2edge = (dedge+2)%3; for(int b=0; b<2; ++b) for(int a=0; a<2; ++a) { { int asd[3]; asd[dedge] = 0; asd[dim_other1edge] = a; asd[dim_other2edge] = b; if (dedge==dface || asd[dface] != s) continue; } int start[3]; start[dedge] = 0; start[dim_other1edge] = blockstart[dim_other1edge]*(1-a) + a*(blockend[dim_other1edge]-thickness[dim_other1edge][1]); start[dim_other2edge] = blockstart[dim_other2edge]*(1-b) + b*(blockend[dim_other2edge]-thickness[dim_other2edge][1]); int end[3]; end[dedge] = blocksize[dedge]; end[dim_other1edge] = a*blockend[dim_other1edge] + (1-a)*(blockstart[dim_other1edge] + thickness[dim_other1edge][0]); end[dim_other2edge] = b*blockend[dim_other2edge] + (1-b)*(blockstart[dim_other2edge] + thickness[dim_other2edge][0]); const int vol = std::max(0, end[2]-start[2])*std::max(0, end[1]-start[1])*std::max(0, end[0]-start[0]); if (vol == 0) 
continue; int xxx[3]; xxx[dedge] = 0; xxx[dim_other1edge] = 2*a-1; xxx[dim_other2edge] = 2*b-1; int neighbor[3]; neighbor[dface] = index[dface]; neighbor[dedge] = index[dedge]; neighbor[3-dface-dedge] = index[3-dface-dedge] + xxx[3-dface-dedge]; if(c2i.find(I3(origin[0] + neighbor[0], origin[1] + neighbor[1], origin[2] + neighbor[2]))==c2i.end()) continue; assert(n1 > neighbor[dim_other1face]); assert(n2 > neighbor[dim_other2face]); assert(0 <= neighbor[dim_other1face]); assert(0 <= neighbor[dim_other2face]); { const int sregion[3] = { start[0] + (index[0] - neighbor[0])*blocksize[0] - face_start[0], start[1] + (index[1] - neighbor[1])*blocksize[1] - face_start[1], start[2] + (index[2] - neighbor[2])*blocksize[2] - face_start[2] }; const int L[3] = { face_end[0] - face_start[0], face_end[1] - face_start[1], face_end[2] - face_start[2] }; //if (isroot) // { // printf("-----EDGE ---------------> index: %d %d %d\n", index[0], index[1], index[2]); // printf("neighbor: %d %d %d\n", neighbor[0], neighbor[1], neighbor[2]); // printf("face: %d %d\n", dface, s); // printf("edge: %d %d %d\n", dedge, a, b); // printf("facestart: %d %d %d\n", face_start[0], face_start[1], face_start[2]); // printf("mystart-end: %d %d %d , %d %d %d\n", start[0], start[1], start[2], end[0], end[1], end[2]); // printf("s: %d %d %d\n",sregion[0], sregion[1], sregion[2]); // printf("L: %d %d %d\n",L[0], L[1], L[2]); // printf("neighbor p1, p2: %d %d\n", neighbor[dim_other1face], neighbor[dim_other2face]); // } assert(sregion[0]>= 0); assert(sregion[1]>= 0); assert(sregion[2]>= 0); assert(sregion[0]< L[0]); assert(sregion[1]< L[1]); assert(sregion[2]< L[2]); Real * src_base = data.faces[dface][s] + NFACEBLOCK*(neighbor[dim_other1face] + n1*neighbor[dim_other2face]); SubpackInfo subinfo = { ptrBlock, src_base, start[0], start[1], start[2], end[0], end[1], end[2], sregion[0], sregion[1], sregion[2], L[0], L[1]}; retval[ptrBlock].push_back(subinfo); } } } //iterate over corners for(int z=0; z<2; ++z) 
for(int y=0; y<2; ++y) for(int x=0; x<2; ++x) { int xxx[3] = {x,y,z}; if (xxx[dface] != s) continue; const int start[3] = { x*(blockend[0] - thickness[0][1]) + (1-x)*blockstart[0], y*(blockend[1] - thickness[1][1]) + (1-y)*blockstart[1], z*(blockend[2] - thickness[2][1]) + (1-z)*blockstart[2] }; const int end[3] = { x*blockend[0] + (1-x)*(thickness[0][0] + blockstart[0]), y*blockend[1] + (1-y)*(thickness[1][0] + blockstart[1]), z*blockend[2] + (1-z)*(thickness[2][0] + blockstart[2]) }; const int vol = std::max(0, end[2]-start[2])*std::max(0, end[1]-start[1])*std::max(0, end[0]-start[0]); if (vol == 0) continue; int neighbor[3]; neighbor[0] = index[0] + 2*x-1; neighbor[1] = index[1] + 2*y-1; neighbor[2] = index[2] + 2*z-1; neighbor[dface] = index[dface]; if(c2i.find(I3(origin[0] + neighbor[0], origin[1] + neighbor[1], origin[2] + neighbor[2]))==c2i.end()) continue; assert(n1 > neighbor[dim_other1face]); assert(n2 > neighbor[dim_other2face]); assert(0 <= neighbor[dim_other1face]); assert(0 <= neighbor[dim_other2face]); { const int sregion[3] = { start[0] + (index[0] - neighbor[0])*blocksize[0] - face_start[0], start[1] + (index[1] - neighbor[1])*blocksize[1] - face_start[1], start[2] + (index[2] - neighbor[2])*blocksize[2] - face_start[2] }; const int L[3] = { face_end[0] - face_start[0], face_end[1] - face_start[1], face_end[2] - face_start[2] }; assert(c2i.find(I3(origin[0] + neighbor[0], origin[1] + neighbor[1], origin[2] + neighbor[2]))!=c2i.end()); assert(sregion[0]>= 0); assert(sregion[1]>= 0); assert(sregion[2]>= 0); assert(sregion[0]< L[0]); assert(sregion[1]< L[1]); assert(sregion[2]< L[2]); Real * src_base = data.faces[dface][s] + NFACEBLOCK*(neighbor[dim_other1face] + n1*neighbor[dim_other2face]); SubpackInfo subinfo = { ptrBlock, src_base, start[0], start[1], start[2], end[0], end[1], end[2], sregion[0], sregion[1], sregion[2], L[0], L[1]}; retval[ptrBlock].push_back(subinfo); } } } } } for(int d=0; d<3; ++d) { const int dim_other1 = (d+1)%3; const int 
dim_other2 = (d+2)%3; for(int b=0; b<2; ++b) for(int a=0; a<2; ++a) { { int neighbor_pe[3]; neighbor_pe[d] = mypeindex[d]; neighbor_pe[dim_other1] = (mypeindex[dim_other1] + 2*a-1 + pesize[dim_other1])%pesize[dim_other1]; neighbor_pe[dim_other2] = (mypeindex[dim_other2] + 2*b-1 + pesize[dim_other2])%pesize[dim_other2]; if (_myself(neighbor_pe)) continue; } const int n = bpd[d]; const int NEDGEBLOCK = NC * blocksize[d] * thickness[dim_other2][b] * thickness[dim_other1][a]; int edge_start[3]; edge_start[d] = 0; edge_start[dim_other1] = blockstart[dim_other1]*(1-a) + a*(blockend[dim_other1]-thickness[dim_other1][1]); edge_start[dim_other2] = blockstart[dim_other2]*(1-b) + b*(blockend[dim_other2]-thickness[dim_other2][1]); int edge_end[3]; edge_end[d] = blocksize[d]; edge_end[dim_other1] = a*blockend[dim_other1] + (1-a)*(blockstart[dim_other1] + thickness[dim_other1][0]); edge_end[dim_other2] = b*blockend[dim_other2] + (1-b)*(blockstart[dim_other2] + thickness[dim_other2][0]); assert(NEDGEBLOCK == NC*(edge_end[0]-edge_start[0])*(edge_end[1]-edge_start[1])*(edge_end[2]-edge_start[2])); for(int p1=0; p1<n; ++p1) //iterate over inner edge blocks { int index[3]; index[d] = p1; index[dim_other1] = a*(bpd[dim_other1]-1); index[dim_other2] = b*(bpd[dim_other2]-1); assert(c2i.find(I3(origin[0] + index[0], origin[1] + index[1], origin[2] + index[2]))!=c2i.end()); const int blockID = c2i[I3(origin[0] + index[0], origin[1] + index[1], origin[2] + index[2])]; Real * const ptrBlock = (Real*)globalinfos[blockID].ptrBlock; for(int z=0; z<2; ++z) //iterate over corners for(int y=0; y<2; ++y) for(int x=0; x<2; ++x) { int xxx[3] = {x,y,z}; if (xxx[dim_other1] != a || xxx[dim_other2] != b) continue; const int start[3] = { x*(blockend[0] - thickness[0][1]) + (1-x)*blockstart[0], y*(blockend[1] - thickness[1][1]) + (1-y)*blockstart[1], z*(blockend[2] - thickness[2][1]) + (1-z)*blockstart[2] }; const int end[3] = { x*blockend[0] + (1-x)*(thickness[0][0] + blockstart[0]), y*blockend[1] + 
(1-y)*(thickness[1][0] + blockstart[1]), z*blockend[2] + (1-z)*(thickness[2][0] + blockstart[2]) }; const int vol = std::max(0, end[2]-start[2])*std::max(0, end[1]-start[1])*std::max(0, end[0]-start[0]); if (vol == 0) continue; int neighbor[3]; neighbor[0] = index[0]; neighbor[1] = index[1]; neighbor[2] = index[2]; neighbor[d] = index[d] + xxx[d]*2-1; if(c2i.find(I3(origin[0] + neighbor[0], origin[1] + neighbor[1], origin[2] + neighbor[2]))==c2i.end()) continue; assert(n > neighbor[d]); assert(0 <= neighbor[d]); { const int sregion[3] = { start[0] + (index[0] - neighbor[0])*blocksize[0] - edge_start[0], start[1] + (index[1] - neighbor[1])*blocksize[1] - edge_start[1], start[2] + (index[2] - neighbor[2])*blocksize[2] - edge_start[2] }; const int L[3] = { edge_end[0] - edge_start[0], edge_end[1] - edge_start[1], edge_end[2] - edge_start[2] }; // if (isroot) // { // printf("---CORNER (from edge) -----------------> index: %d %d %d\n", index[0], index[1], index[2]); // printf("neighbor: %d %d %d\n", neighbor[0], neighbor[1], neighbor[2]); // printf("edge: %d %d %d\n", d, a, b); // printf("corner: %d %d %d\n", x, y, z); // printf("edgestart: %d %d %d\n", edge_start[0], edge_start[1], edge_start[2]); // printf("mystart: %d %d %d\n", start[0], start[1], start[2]); // printf("s: %d %d %d\n",sregion[0], sregion[1], sregion[2]); // printf("L: %d %d %d\n",L[0], L[1], L[2]); // printf("neighbor p1: %d\n", neighbor[d]); // } assert(c2i.find(I3(origin[0] + neighbor[0], origin[1] + neighbor[1], origin[2] + neighbor[2]))!=c2i.end()); assert(sregion[0]>= 0); assert(sregion[1]>= 0); assert(sregion[2]>= 0); assert(sregion[0]< L[0]); assert(sregion[1]< L[1]); assert(sregion[2]< L[2]); assert(vol <NEDGEBLOCK); //Real * src_base = data.faces[dface][s] + NFACEBLOCK*(neighbor[dim_other1face] + n1*neighbor[dim_other2face]); Real * src_base = data.edges[d][b][a] + NEDGEBLOCK*neighbor[d]; SubpackInfo subinfo = { ptrBlock, src_base, start[0], start[1], start[2], end[0], end[1], end[2], 
sregion[0], sregion[1], sregion[2], L[0], L[1]}; retval[ptrBlock].push_back(subinfo); } } } } } } //corners for(int z=0; z<2; ++z) for(int y=0; y<2; ++y) for(int x=0; x<2; ++x) { const int NCORNERBLOCK = NC * thickness[0][x]*thickness[1][y]*thickness[2][z]; const bool needed = NCORNERBLOCK > 0; data.corners[z][y][x] = needed ? _myalloc(sizeof(Real)*NCORNERBLOCK, 16) : NULL; if (!needed) continue; int neighbor_index[3]; neighbor_index[0] = (mypeindex[0] + 2*x-1 + pesize[0])%pesize[0]; neighbor_index[1] = (mypeindex[1] + 2*y-1 + pesize[1])%pesize[1]; neighbor_index[2] = (mypeindex[2] + 2*z-1 + pesize[2])%pesize[2]; if (_myself(neighbor_index)) continue; const int start[3] = { x*(blockend[0] - thickness[0][1]) + (1-x)*blockstart[0], y*(blockend[1] - thickness[1][1]) + (1-y)*blockstart[1], z*(blockend[2] - thickness[2][1]) + (1-z)*blockstart[2] }; const int end[3] = { x*blockend[0] + (1-x)*(thickness[0][0] + blockstart[0]), y*blockend[1] + (1-y)*(thickness[1][0] + blockstart[1]), z*blockend[2] + (1-z)*(thickness[2][0] + blockstart[2]) }; const int index[3] = { x*(bpd[0]-1), y*(bpd[1]-1), z*(bpd[2]-1), }; assert(c2i.find(I3(origin[0] + index[0], origin[1] + index[1], origin[2] + index[2]))!=c2i.end()); const int blockid = c2i[I3(origin[0] + index[0], origin[1] + index[1], origin[2] + index[2])]; PackInfo info = {(Real *)globalinfos[blockid].ptrBlock, data.corners[z][y][x], start[0], start[1], start[2], end[0], end[1], end[2]}; const bool nonempty = end[0]>start[0] && end[1]>start[1] && end[2]>start[2]; if (nonempty) packinfos.push_back(info); } return retval; } Real * _myalloc(const int NBYTES, const int ALIGN) { if (NBYTES>0) { //Real * ret_val = (Real *)_mm_malloc(NBYTES, ALIGN); Real * ret_val = NULL; int error = posix_memalign((void**)&ret_val, std::max(8, ALIGN), NBYTES); assert(error == 0); all_mallocs.push_back(ret_val); return ret_val; } return NULL; } void _myfree(Real *& ptr) {if (ptr!=NULL) { free(ptr); ptr=NULL;} } //forbidden methods SynchronizerMPI(const 
SynchronizerMPI& c):cube(-1,-1,-1), synchID(-1), isroot(true){ abort(); } void operator=(const SynchronizerMPI& c){ abort(); } public: SynchronizerMPI(const int synchID, StencilInfo stencil, std::vector<BlockInfo> globalinfos, MPI_Comm cartcomm, const int mybpd[3], const int blocksize[3]): cube(mybpd[0], mybpd[1], mybpd[2]), synchID(synchID), stencil(stencil), globalinfos(globalinfos), cartcomm(cartcomm) { int myrank; MPI_Comm_rank(cartcomm, &myrank); isroot = (myrank == 0); MPI_Cart_get(cartcomm, 3, pesize, periodic, mypeindex); MPI_Cart_coords(cartcomm, myrank, 3, mypeindex); for(int iz=0; iz<3; iz++) for(int iy=0; iy<3; iy++) for(int ix=0; ix<3; ix++) { int s[3] = { ix-1+mypeindex[0], iy-1+mypeindex[1], iz-1+mypeindex[2]}; int nbrRank; MPI_Cart_rank(cartcomm, s, &nbrRank); neighborsrank[iz][iy][ix] = nbrRank; } for(int i=0; i<3; ++i) this->mybpd[i]=mybpd[i]; for(int i=0; i<3; ++i) this->blocksize[i]=blocksize[i]; for(int i=0; i< globalinfos.size(); ++i) { I3 coord(globalinfos[i].index[0], globalinfos[i].index[1], globalinfos[i].index[2]); c2i[coord] = i; } const int origin[3] = { mypeindex[0]*mybpd[0], mypeindex[1]*mybpd[1], mypeindex[2]*mybpd[2] }; const int s[3] = {stencil.sx, stencil.sy, stencil.sz}; const int e[3] = {stencil.ex, stencil.ey, stencil.ez}; const int z[3] = {0, 0, 0}; send_thickness[0][0] = e[0] - 1; send_thickness[0][1] = -s[0]; send_thickness[1][0] = e[1] - 1; send_thickness[1][1] = -s[1]; send_thickness[2][0] = e[2] - 1; send_thickness[2][1] = -s[2]; _setup<false>(send, send_thickness, z, blocksize, origin, send_packinfos); recv_thickness[0][0] = -s[0]; recv_thickness[0][1] = e[0] - 1; recv_thickness[1][0] = -s[1]; recv_thickness[1][1] = e[1] - 1; recv_thickness[2][0] = -s[2]; recv_thickness[2][1] = e[2] - 1; { const int blockstart[3] = { stencil.sx , stencil.sy , stencil.sz }; const int blockend[3] = { stencil.ex + blocksize[0]-1, stencil.ey + blocksize[1]-1, stencil.ez + blocksize[2]-1 }; std::vector<PackInfo> packinfos; recv_subpackinfos = 
_setup<true>(recv, recv_thickness, blockstart, blockend, origin, packinfos); for(std::vector<PackInfo>::const_iterator it = packinfos.begin(); it<packinfos.end(); ++it) recv_packinfos[it->block].push_back(*it); } assert(recv.pending.size() == 0); assert(send.pending.size() == 0); } virtual ~SynchronizerMPI() { for(int i=0;i<all_mallocs.size();++i) _myfree(all_mallocs[i]); } virtual void sync(unsigned int gptfloats, MPI_Datatype MPIREAL, const int timestamp) { //0. wait for pending sends, couple of checks //1. pack all stuff //2. perform send/receive requests //3. setup the dependency //0. { const int NPENDINGSENDS = send.pending.size(); if (NPENDINGSENDS > 0) { std::vector<MPI_Request> pending(NPENDINGSENDS); std::copy(send.pending.begin(), send.pending.end(), pending.begin()); #if 1 MPI_Waitall(NPENDINGSENDS, &pending.front(), MPI_STATUSES_IGNORE); #else int done = false; while (1) { MPI_Testall(NPENDINGSENDS, &pending.front(), &done, MPI_STATUSES_IGNORE); if (done) break; sched_yield(); }; #endif send.pending.clear(); } } assert(recv.pending.size() == 0); assert(send.pending.size() == 0); cube.prepare(); blockinfo_counter = globalinfos.size(); const int NC = stencil.selcomponents.size(); //1. 
pack { const int N = send_packinfos.size(); std::vector<int> selcomponents = stencil.selcomponents; std::sort(selcomponents.begin(), selcomponents.end()); const bool contiguous = false;//selcomponents.back()+1-selcomponents.front() == selcomponents.size(); if (!contiguous) { #pragma omp parallel for schedule(runtime) for(int i=0; i<N; ++i) { PackInfo info = send_packinfos[i]; pack(info.block, info.pack, gptfloats, &selcomponents.front(), NC, info.sx, info.sy, info.sz, info.ex, info.ey, info.ez); } } else { const int selstart = selcomponents.front(); const int selend = selcomponents.back()+1; #pragma omp parallel for schedule(runtime) for(int i=0; i<N; ++i) { PackInfo info = send_packinfos[i]; pack_stripes(info.block, info.pack, gptfloats, selstart, selend, info.sx, info.sy, info.sz, info.ex, info.ey, info.ez); } } } //2. send requests { //faces for(int d=0; d<3; ++d) { if (!_face_needed(d)) continue; const int dim_other1 = (d+1)%3; const int dim_other2 = (d+2)%3; for(int s=0; s<2; ++s) { const int NFACEBLOCK_SEND = NC * send_thickness[d][s] * blocksize[dim_other1] * blocksize[dim_other2]; const int NFACEBLOCK_RECV = NC * recv_thickness[d][s] * blocksize[dim_other1] * blocksize[dim_other2]; const int NFACE_SEND = NFACEBLOCK_SEND * mybpd[dim_other1] * mybpd[dim_other2]; const int NFACE_RECV = NFACEBLOCK_RECV * mybpd[dim_other1] * mybpd[dim_other2]; int neighbor_index[3]; neighbor_index[d] = (mypeindex[d] + 2*s-1 + pesize[d])%pesize[d]; neighbor_index[dim_other1] = mypeindex[dim_other1]; neighbor_index[dim_other2] = mypeindex[dim_other2]; if (_myself(neighbor_index)) continue; if (NFACE_SEND > 0) { MPI_Request req; MPI_Isend(send.faces[d][s], NFACE_SEND, MPIREAL, _rank(neighbor_index), 6*timestamp + 2*d + 1-s, cartcomm, &req); send.pending.insert( req ); } if (NFACE_RECV > 0) { MPI_Request rc; MPI_Irecv(recv.faces[d][s], NFACE_RECV, MPIREAL, _rank(neighbor_index), 6*timestamp + 2*d + s, cartcomm, &rc); recv.pending.insert(rc); cube.face(rc, d, s); } } } if 
(stencil.tensorial) { //edges for(int d=0; d<3; ++d) { const int dim_other1 = (d+1)%3; const int dim_other2 = (d+2)%3; for(int b=0; b<2; ++b) for(int a=0; a<2; ++a) { const int NEDGEBLOCK_SEND = NC * blocksize[d] * send_thickness[dim_other2][b] * send_thickness[dim_other1][a]; const int NEDGEBLOCK_RECV = NC * blocksize[d] * recv_thickness[dim_other2][b] * recv_thickness[dim_other1][a]; const int NEDGE_SEND = NEDGEBLOCK_SEND * mybpd[d]; const int NEDGE_RECV = NEDGEBLOCK_RECV * mybpd[d]; int neighbor_index[3]; neighbor_index[d] = mypeindex[d]; neighbor_index[dim_other1] = (mypeindex[dim_other1] + 2*a-1 + pesize[dim_other1])%pesize[dim_other1]; neighbor_index[dim_other2] = (mypeindex[dim_other2] + 2*b-1 + pesize[dim_other2])%pesize[dim_other2]; if (_myself(neighbor_index)) continue; if (NEDGE_RECV > 0) { MPI_Request rc; MPI_Irecv(recv.edges[d][b][a], NEDGE_RECV, MPIREAL, _rank(neighbor_index), 12*timestamp + 4*d + 2*b + a, cartcomm, &rc); recv.pending.insert(rc); cube.edge(rc, d, a, b); } if (NEDGE_SEND > 0) { MPI_Request req; MPI_Isend(send.edges[d][b][a], NEDGE_SEND, MPIREAL, _rank(neighbor_index), 12*timestamp + 4*d + 2*(1-b) + (1-a), cartcomm, &req); send.pending.insert(req); } } } //corners { for(int z=0; z<2; ++z) for(int y=0; y<2; ++y) for(int x=0; x<2; ++x) { const int NCORNERBLOCK_SEND = NC * send_thickness[0][x]*send_thickness[1][y]*send_thickness[2][z]; const int NCORNERBLOCK_RECV = NC * recv_thickness[0][x]*recv_thickness[1][y]*recv_thickness[2][z]; int neighbor_index[3]; neighbor_index[0] = (mypeindex[0] + 2*x-1 + pesize[0])%pesize[0]; neighbor_index[1] = (mypeindex[1] + 2*y-1 + pesize[1])%pesize[1]; neighbor_index[2] = (mypeindex[2] + 2*z-1 + pesize[2])%pesize[2]; if (_myself(neighbor_index)) continue; if (NCORNERBLOCK_RECV) { MPI_Request rc; MPI_Irecv(recv.corners[z][y][x], NCORNERBLOCK_RECV, MPIREAL, _rank(neighbor_index), 8*timestamp + 4*z + 2*y + x, cartcomm, &rc); recv.pending.insert(rc); cube.corner(rc, x, y, z); } if (NCORNERBLOCK_SEND) { 
MPI_Request req; MPI_Isend(send.corners[z][y][x], NCORNERBLOCK_SEND, MPIREAL, _rank(neighbor_index), 8*timestamp + 4*(1-z) + 2*(1-y) + (1-x), cartcomm, &req); send.pending.insert(req); } } } } } //3. cube.make_dependencies(isroot); } std::vector<BlockInfo> avail_inner() { std::vector<BlockInfo> retval; const int xorigin = mypeindex[0]*mybpd[0]; const int yorigin = mypeindex[1]*mybpd[1]; const int zorigin = mypeindex[2]*mybpd[2]; std::vector<Region> regions = cube.avail(); for(std::vector<Region>::const_iterator it=regions.begin(); it!=regions.end(); ++it) { std::map<Region, std::vector<BlockInfo> >::const_iterator r2v = region2infos.find(*it); if(r2v!=region2infos.end()) { retval.insert(retval.end(), r2v->second.begin(), r2v->second.end()); blockinfo_counter -= r2v->second.size(); } else { std::vector<BlockInfo> entry; const int sx = it->s[0]; const int sy = it->s[1]; const int sz = it->s[2]; const int ex = it->e[0]; const int ey = it->e[1]; const int ez = it->e[2]; for(int iz=sz; iz<ez; ++iz) for(int iy=sy; iy<ey; ++iy) for(int ix=sx; ix<ex; ++ix, blockinfo_counter--) { assert(c2i.find(I3(ix + xorigin, iy + yorigin, iz + zorigin)) != c2i.end()); entry.push_back(globalinfos[ c2i[I3(ix + xorigin, iy + yorigin, iz + zorigin)] ]); } retval.insert(retval.end(), entry.begin(), entry.end()); region2infos[*it] = entry; } } assert(cube.pendingcount() != 0 || blockinfo_counter == cube.pendingcount()); assert(blockinfo_counter != 0 || blockinfo_counter == cube.pendingcount()); assert(blockinfo_counter != 0 || recv.pending.size() == 0); return retval; } std::vector<BlockInfo> avail_halo() { std::vector<BlockInfo> retval; const int NPENDING = recv.pending.size(); std::vector<MPI_Request> pending(NPENDING); std::copy(recv.pending.begin(), recv.pending.end(), pending.begin()); std::vector<MPI_Request> old = pending; #if 1 MPI_Waitall(NPENDING, &pending.front(), MPI_STATUSES_IGNORE); #else int done = false; while (1) { MPI_Testall(NPENDING, &pending.front(), &done, 
MPI_STATUSES_IGNORE); if (done) break; pthread_yield(); }; #endif for(int i=0; i<NPENDING; ++i) { cube.received(old[i]); recv.pending.erase(old[i]); } const int xorigin = mypeindex[0]*mybpd[0]; const int yorigin = mypeindex[1]*mybpd[1]; const int zorigin = mypeindex[2]*mybpd[2]; std::vector<Region> regions = cube.avail(); for(std::vector<Region>::const_iterator it=regions.begin(); it!=regions.end(); ++it) { std::map<Region, std::vector<BlockInfo> >::const_iterator r2v = region2infos.find(*it); if(r2v!=region2infos.end()) { retval.insert(retval.end(), r2v->second.begin(), r2v->second.end()); blockinfo_counter -= r2v->second.size(); } else { std::vector<BlockInfo> entry; const int sx = it->s[0]; const int sy = it->s[1]; const int sz = it->s[2]; const int ex = it->e[0]; const int ey = it->e[1]; const int ez = it->e[2]; for(int iz=sz; iz<ez; ++iz) for(int iy=sy; iy<ey; ++iy) for(int ix=sx; ix<ex; ++ix, blockinfo_counter--) { assert(c2i.find(I3(ix + xorigin, iy + yorigin, iz + zorigin)) != c2i.end()); entry.push_back(globalinfos[ c2i[I3(ix + xorigin, iy + yorigin, iz + zorigin)] ]); } retval.insert(retval.end(), entry.begin(), entry.end()); region2infos[*it] = entry; } } assert(cube.pendingcount() != 0 || blockinfo_counter == cube.pendingcount()); assert(blockinfo_counter != 0 || blockinfo_counter == cube.pendingcount()); assert(blockinfo_counter != 0 || recv.pending.size() == 0); return retval; } bool test_halo() { std::vector<BlockInfo> retval; const int NPENDING = recv.pending.size(); if (NPENDING == 0) return true; std::vector<MPI_Request> pending(NPENDING); std::copy(recv.pending.begin(), recv.pending.end(), pending.begin()); int done = false; MPI_Testall(NPENDING, &pending.front(), &done, MPI_STATUSES_IGNORE); return done; } std::vector<BlockInfo> avail() { std::vector<BlockInfo> retval; const int NPENDING = recv.pending.size(); std::vector<MPI_Request> pending(NPENDING); std::copy(recv.pending.begin(), recv.pending.end(), pending.begin()); 
std::vector<MPI_Request> old = pending; if(NPENDING > 0) { if(mybpd[0]==1 || mybpd[1]==1 || mybpd[2] == 1) //IS THERE SOMETHING MORE INTELLIGENT?! { MPI_Waitall(NPENDING, &pending.front(), MPI_STATUSES_IGNORE); for(int i=0; i<NPENDING; ++i) { cube.received(old[i]); recv.pending.erase(old[i]); } } else { std::vector<int> indices(NPENDING); int NSOLVED = 0; if (blockinfo_counter == globalinfos.size()) MPI_Testsome(NPENDING, &pending.front(), &NSOLVED, &indices.front(), MPI_STATUSES_IGNORE); else { MPI_Waitsome(NPENDING, &pending.front(), &NSOLVED, &indices.front(), MPI_STATUSES_IGNORE); assert(NSOLVED > 0); } for(int i=0; i<NSOLVED; ++i) { cube.received(old[indices[i]]); recv.pending.erase(old[indices[i]]); } } } const int xorigin = mypeindex[0]*mybpd[0]; const int yorigin = mypeindex[1]*mybpd[1]; const int zorigin = mypeindex[2]*mybpd[2]; std::vector<Region> regions = cube.avail(); for(std::vector<Region>::const_iterator it=regions.begin(); it!=regions.end(); ++it) { std::map<Region, std::vector<BlockInfo> >::const_iterator r2v = region2infos.find(*it); if(r2v!=region2infos.end()) { retval.insert(retval.end(), r2v->second.begin(), r2v->second.end()); blockinfo_counter -= r2v->second.size(); } else { std::vector<BlockInfo> entry; const int sx = it->s[0]; const int sy = it->s[1]; const int sz = it->s[2]; const int ex = it->e[0]; const int ey = it->e[1]; const int ez = it->e[2]; for(int iz=sz; iz<ez; ++iz) for(int iy=sy; iy<ey; ++iy) for(int ix=sx; ix<ex; ++ix, blockinfo_counter--) { assert(c2i.find(I3(ix + xorigin, iy + yorigin, iz + zorigin)) != c2i.end()); entry.push_back(globalinfos[ c2i[I3(ix + xorigin, iy + yorigin, iz + zorigin)] ]); } retval.insert(retval.end(), entry.begin(), entry.end()); region2infos[*it] = entry; } } assert(cube.pendingcount() != 0 || blockinfo_counter == cube.pendingcount()); assert(blockinfo_counter != 0 || blockinfo_counter == cube.pendingcount()); assert(blockinfo_counter != 0 || recv.pending.size() == 0); return retval; } 
std::vector<BlockInfo> avail(const int smallest) { std::vector<BlockInfo> accumulator; while(accumulator.size()<smallest && !done()) { const std::vector<BlockInfo> r = avail(); accumulator.insert(accumulator.end(), r.begin(), r.end()); } return accumulator; } bool done() const { assert(!(blockinfo_counter == 0) || recv.pending.size() == 0); return blockinfo_counter == 0; } StencilInfo getstencil() const { return stencil; } void getpedata(int mypeindex[3], int pesize[3], int mybpd[3]) const { for(int i=0; i<3; ++i) mypeindex[i] = this->mypeindex[i]; for(int i=0; i<3; ++i) pesize[i] = this->pesize[i]; for(int i=0; i<3; ++i) mybpd[i] = this->mybpd[i]; } class MyRange { const int sx, sy, sz, ex, ey, ez; public: MyRange(const int sx, const int ex, const int sy, const int ey, const int sz, const int ez): sx(sx), sy(sy), sz(sz), ex(ex), ey(ey), ez(ez) { } bool outside(MyRange range) const { const int x0 = std::max(sx, range.sx); const int y0 = std::max(sy, range.sy); const int z0 = std::max(sz, range.sz); const int x1 = std::min(ex, range.ex); const int y1 = std::min(ey, range.ey); const int z1 = std::min(ez, range.ez); return (x0 >= x1) || (y0 >= y1) || (z0 >= z1); } }; void fetch(const Real * const ptrBlock, Real * const ptrLab, const int x0, const int y0, const int z0, const int xsize, const int ysize, const int zsize, const int gptfloats, const int rsx, const int rex, const int rsy, const int rey, const int rsz, const int rez) const { //build range MyRange myrange(rsx, rex, rsy, rey, rsz, rez); //packs { std::map<Real *, std::vector<PackInfo> >::const_iterator it = recv_packinfos.find(const_cast<Real *>(ptrBlock)); if( it!=recv_packinfos.end() ) { std::vector<PackInfo> packs = it->second; //assert(!stencil.tensorial || packs.size() <= 7 || mybpd[0]*mybpd[1]*mybpd[2] == 1); //assert(stencil.tensorial || packs.size()<=3 || mybpd[0]*mybpd[1]*mybpd[2] == 1); for(std::vector<PackInfo>::const_iterator itpack=packs.begin(); itpack!=packs.end(); ++itpack) { MyRange 
packrange(itpack->sx, itpack->ex, itpack->sy, itpack->ey, itpack->sz, itpack->ez); if (myrange.outside(packrange)) continue; const int nsrc = (itpack->ex-itpack->sx)*(itpack->ey-itpack->sy)*(itpack->ez-itpack->sz); unpack(itpack->pack, ptrLab, gptfloats, &stencil.selcomponents.front(), stencil.selcomponents.size(), nsrc, itpack->sx-x0, itpack->sy-y0, itpack->sz-z0, itpack->ex-x0, itpack->ey-y0, itpack->ez-z0, xsize, ysize, zsize); } } } //subregions inside packs if (stencil.tensorial) { std::map<Real *, std::vector<SubpackInfo> >::const_iterator it = recv_subpackinfos.find(const_cast<Real *>(ptrBlock)); assert(stencil.tensorial || it==recv_subpackinfos.end()); if( it!=recv_subpackinfos.end() ) { std::vector<SubpackInfo> subpacks = it->second; // assert(subpacks.size()<=12+8); for(std::vector<SubpackInfo>::const_iterator itsubpack=subpacks.begin(); itsubpack!=subpacks.end(); ++itsubpack) { MyRange packrange(itsubpack->sx, itsubpack->ex, itsubpack->sy, itsubpack->ey, itsubpack->sz, itsubpack->ez); if (myrange.outside(packrange)) continue; unpack_subregion(itsubpack->pack, ptrLab, gptfloats, &stencil.selcomponents.front(), stencil.selcomponents.size(), itsubpack->x0, itsubpack->y0, itsubpack->z0, itsubpack->xpacklenght, itsubpack->ypacklenght, itsubpack->sx-x0, itsubpack->sy-y0, itsubpack->sz-z0, itsubpack->ex-x0, itsubpack->ey-y0, itsubpack->ez-z0, xsize, ysize, zsize); } } } } };
GB_unop__identity_uint32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint32_fp32) // op(A') function: GB (_unop_tran__identity_uint32_fp32) // C type: uint32_t // A type: float // cast: uint32_t cij = GB_cast_to_uint32_t ((double) (aij)) // unaryop: cij = aij #define GB_ATYPE \ float #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint32_fp32) ( uint32_t *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; uint32_t z = GB_cast_to_uint32_t ((double) (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint32_fp32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
Matrix.h
#ifndef MATRIX_H_ #define MATRIX_H_ #include "../Toolbox.h" #include "../Serializable.h" #include "KTuple.h" #include <string.h> #include <stdio.h> #include <string> #include <sys/mman.h> #include <cmath> #include <type_traits> #ifdef LINUX #include <clocale> #endif #ifdef OSX #include <locale.h> #endif namespace Lazarus { // we are using n x m matrices ! //row major format (AS IT SHOULD BE) //transforms 2-dim coordinates into 1-dim memory position //with offset all 2-dim coordinates start at 1 /* In case indices starts from 1 i : row index j : column index ld : size of a row */ #define IDX2ARRAYCOORDINATE_RM_OFFSET_DIM2(i,j,lr) ( (((i)-1)*(lr)) + ((j)-1) ) /* In case indices starts from 0 (AS IT SHOULD BE!) i,j,ld : Same as above */ #define IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,lr) ( ((i)*(lr))+(j) ) //column major format (just as CUDA desires) /* In case indices starts from 1 i : row index j : column index ld : size of a column */ #define IDX2ARRAYCOORDINATE_CM_OFFSET_DIM2(i,j,lc) ( (((j)-1)*(lc)) + ((i)-1) ) /* In case indices starts from 0 i,j,ld : Same as above */ #define IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,lc) ( ((j)*(lc))+(i) ) //analogously for 3 dims #define IDX2ARRAYCOORDINATE_RM_OFFSET_DIM3(i,j,k,lr,lc) ( (((k)-1)*(lr*lc)) + (((i)-1)*(lr)) + ((j)-1) ) #define IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,lr,lc) ( (((k) )*(lr*lc)) + (((i) )*(lr)) + ((j) ) ) #define IDX2ARRAYCOORDINATE_CM_OFFSET_DIM3(i,j,k,lr,lc) ( (((k)-1)*(lr*lc)) + (((j)-1)*(lc)) + ((i)-1) ) #define IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,lr,lc) ( (((k) )*(lr*lc)) + (((j) )*(lc)) + ((i) ) ) //******************* generic base class ************************ class Matrix : public Serializable { public: /*enum MATRIX_DATA_TYPE {MATRIX_DATA_TYPE_DOUBLE, MATRIX_DATA_TYPE_FLOAT, MATRIX_DATA_TYPE_LONG_DOUBLE, MATRIX_DATA_TYPE_INT, MATRIX_DATA_TYPE_LONGLONG, MATRIX_DATA_TYPE_ULONGLONG, MATRIX_DATA_TYPE_LONG, MATRIX_DATA_TYPE_ULONG,MATRIX_DATA_TYPE_CHAR, 
MATRIX_DATA_TYPE_SHORT,MATRIX_DATA_TYPE_USHORT, MATRIX_DATA_TYPE_UINT, MATRIX_DATA_TYPE_UCHAR, MATRIX_DATA_TYPE_ABSTRACT, MATRIX_DATA_TYPE_UNSPECIFIED};*/ enum MATRIX_DATA_ALIGNMENT {MATRIX_DATA_ALIGNMENT_ROW_MAJOR,MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR}; Matrix( enum MATRIX_DATA_ALIGNMENT data_alignment = MATRIX_DATA_ALIGNMENT_ROW_MAJOR, enum SH_DATA_TYPE data_type = SH_DATA_TYPE_FLOAT ) { this->data_alignment = data_alignment; this->data_type = data_type; } virtual ~Matrix() { } CLASS_RTTI(Lazarus::Matrix) protected: enum MATRIX_DATA_ALIGNMENT data_alignment; enum SH_DATA_TYPE data_type; }; //******************* 2 dims ******************************* template<class T> class Matrix2 : public Matrix { public: /** * This class is merely a state container in order to use the [][] * operator on a matrix, i.e. mat[] returns a row pointer object X * which has the desired row >index< temporarily saved. * X[] will finally deliver the value in this row,column element. * */ class Matrix2RowPointer { friend class Matrix2<T>; public: T& operator[](unsigned int col) { if(m_row>=mp_matrix->m_rows) { printf("ERROR: getData row %d not available\n",m_row); return mp_matrix->data[0]; } if(col>=mp_matrix->m_columns) { printf("ERROR: getData column %d not available\n",col); return mp_matrix->data[0]; } if(mp_matrix->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,this->rows)]; return mp_matrix->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(m_row,col,mp_matrix->m_rows)]; } if(mp_matrix->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,this->columns)]; return mp_matrix->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(m_row,col,mp_matrix->m_columns)]; } return mp_matrix->data[0]; } const T& operator[](unsigned int col) const { if(m_row>=mp_matrix->m_rows) { printf("ERROR: getData row %d not available\n",m_row); return mp_matrix->data[0]; } 
if(col>=mp_matrix->m_columns) { printf("ERROR: getData column %d not available\n",col); return mp_matrix->data[0]; } if(mp_matrix->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,this->rows)]; return mp_matrix->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(m_row,col,mp_matrix->m_rows)]; } if(mp_matrix->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,this->columns)]; return mp_matrix->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(m_row,col,mp_matrix->m_columns)]; } return mp_matrix->data[0]; } private: Matrix2RowPointer(Matrix2<T>* mat, int row) { mp_matrix = mat; m_row = row; } Matrix2<T>* mp_matrix;//external ref unsigned int m_row; }; Matrix2(enum MATRIX_DATA_ALIGNMENT data_alignment = MATRIX_DATA_ALIGNMENT_ROW_MAJOR, bool pinned = false) { this->m_columns = 0; this->m_rows = 0; this->data_alignment = data_alignment; this->data = NULL; this->pinned = pinned; //determine the type if(std::is_same<T,int>::value == true) { this->data_type = SH_DATA_TYPE_INT; } else if(std::is_same<T,unsigned int>::value == true) { this->data_type = SH_DATA_TYPE_UINT; } else if(std::is_same<T,char>::value == true) { this->data_type = SH_DATA_TYPE_CHAR; } else if(std::is_same<T,unsigned char>::value == true) { this->data_type = SH_DATA_TYPE_UCHAR; } else if(std::is_same<T,long int>::value == true) { this->data_type = SH_DATA_TYPE_LONG; } else if(std::is_same<T,long long int>::value == true) { this->data_type = SH_DATA_TYPE_LONGLONG; } else if(std::is_same<T,unsigned long>::value == true) { this->data_type = SH_DATA_TYPE_ULONG; } else if(std::is_same<T,unsigned long long>::value == true) { this->data_type = SH_DATA_TYPE_ULONGLONG; } else if(std::is_same<T,float>::value == true) { this->data_type = SH_DATA_TYPE_FLOAT; } else if(std::is_same<T,double>::value == true) { this->data_type = SH_DATA_TYPE_DOUBLE; } else if(std::is_same<T,long double>::value == 
true) { this->data_type = SH_DATA_TYPE_LONG_DOUBLE; } else if(std::is_same<T,unsigned short>::value == true) { this->data_type = SH_DATA_TYPE_USHORT; } else if(std::is_same<T,short>::value == true) { this->data_type = SH_DATA_TYPE_SHORT; } else { this->data_type = SH_DATA_TYPE_ABSTRACT; } } Matrix2(const Matrix2<T>& matrix) { this->m_columns = 0; this->m_rows = 0; this->data = NULL; this->data_type = matrix.getDataType(); this->data_alignment = matrix.getDataAlignment(); this->pinned = matrix.getPinned(); this->initMatrix(matrix.getRowCount(), matrix.getColumnCount()); //copy data //memcpy(this->data, matrix.getDataPtr(), matrix.getRowCount()* matrix.getColumnCount() * sizeof(T)); //copy data for(unsigned int i=0;i<this->getRowCount();i++) { for(unsigned int j=0;j<this->getColumnCount();j++) { this->setData(i,j, matrix.getData(i,j) ); } } } Matrix2(Matrix2<T>* matrix) { this->m_columns = 0; this->m_rows = 0; this->data = NULL; this->data_type = matrix->getDataType(); this->data_alignment = matrix->getDataAlignment(); this->pinned = matrix->getPinned(); this->initMatrix(matrix->getRowCount(), matrix->getColumnCount()); //copy data //memcpy(this->data, matrix->getDataPtr(), matrix->getRowCount()* matrix->getColumnCount() * sizeof(T)); //copy data for(unsigned int i=0;i<this->getRowCount();i++) { for(unsigned int j=0;j<this->getColumnCount();j++) { this->setData(i,j, matrix->getData(i,j) ); } } } Matrix2(Matrix2<T>* matrix, enum MATRIX_DATA_ALIGNMENT data_alignment, bool pinned = false) { this->m_columns = 0; this->m_rows = 0; this->data = NULL; this->data_type = matrix->getDataType(); this->data_alignment = data_alignment; this->pinned = pinned; this->initMatrix(matrix->getRowCount(), matrix->getColumnCount()); //copy data for(unsigned int i=0;i<this->getRowCount();i++) { for(unsigned int j=0;j<this->getColumnCount();j++) { this->setData(i,j, matrix->getData(i,j) ); } } } virtual ~Matrix2() { if(this->pinned == false) { DELETE_ARRAY_NULL_CHECKING(this->data); } else 
{ if(this->data != NULL) { munlock(this->data,sizeof(T)*this->m_columns*this->m_rows); delete this->data; } } } /** * Keep in mind that the [][] approach induces an overhead since for each * request a new object will be instantiated. **/ inline Matrix2RowPointer operator[](unsigned int row) { return Matrix2RowPointer(this,row); } /** * Keep in mind that the [][] approach induces an overhead since for each * request a new object will be instantiated. **/ inline const Matrix2RowPointer operator[](unsigned int row) const { return Matrix2RowPointer(this,row); } Matrix2<T>& operator =(const Matrix2<T>& matrix) { this->m_columns = 0; this->m_rows = 0; DELETE_ARRAY_NULL_CHECKING(this->data); this->data_type = matrix.getDataType(); this->data_alignment = matrix.getDataAlignment(); this->pinned = matrix.getPinned(); this->initMatrix(matrix.getRowCount(), matrix.getColumnCount()); //copy data //memcpy(this->data, matrix.getDataPtr(), matrix.getRowCount()* matrix.getColumnCount() * sizeof(T)); for(unsigned int i=0;i<this->m_rows;++i) { for(unsigned int j=0;j<this->m_columns;++j) { this->setData(i,j,const_cast<const T&>(matrix.getData(i,j)) ); } } return *this; } Matrix2<T>& operator =(const Matrix2<T>* matrix) { this->m_columns = 0; this->m_rows = 0; DELETE_ARRAY_NULL_CHECKING(this->data); this->data_type = matrix->getDataType(); this->data_alignment = matrix->getDataAlignment(); this->pinned = matrix->getPinned(); this->initMatrix(matrix->getRowCount(), matrix->getColumnCount()); //copy data //memcpy(this->data, matrix->getDataPtr(), matrix->getRowCount()* matrix->getColumnCount() * sizeof(T)); for(unsigned int i=0;i<this->m_rows;++i) { for(unsigned int j=0;j<this->m_columns;++j) { this->setData(i,j,const_cast<const T&>(matrix->getData(i,j)) ); } } return *this; } bool getPinned() const { return this->pinned; } CLASS_RTTI(Lazarus::Matrix2) void serialize() { registerElement<unsigned int>(2); registerElement<enum MATRIX_DATA_ALIGNMENT>(1); registerElement<enum 
SH_DATA_TYPE>(1); registerUCharA(m_rows*m_columns*sizeof(T)); allocateBuffer(); addElement<enum MATRIX_DATA_ALIGNMENT>(data_alignment); addElement<enum SH_DATA_TYPE>(data_type); addUCharA((unsigned char*)data,m_rows*m_columns*sizeof(T)); addElement<unsigned int>(m_rows); addElement<unsigned int>(m_columns); } void deserialize() { //free any data DELETE_NULL_CHECKING(data); m_columns = getElement<unsigned int>(); m_rows = getElement<unsigned int>(); unsigned long long size; data = (T*)getUCharA(size); data_type = getElement<enum SH_DATA_TYPE>(); data_alignment = getElement<enum MATRIX_DATA_ALIGNMENT>(); resetBuffer(); } /** * On a call to this method the matrix will be resized to the specified dimensions. Any previously * allocated internal memory will be deleted, keep in mind that in case of pointers the corresponding * objects won't be deleted.For truly large matrices set 'parallel' true. */ void initMatrix(unsigned int rows, unsigned int columns, bool parallel = false) { if(rows == 0 || columns == 0) { return; } if(this->pinned == false) { // release memory if already allocated DELETE_ARRAY_NULL_CHECKING(this->data); // reserve memory this->data = new T[rows*columns]; this->m_rows = rows; this->m_columns = columns; } else { // release memory if already allocated if(this->data != NULL) { munlock(this->data,sizeof(T)*this->m_columns*this->m_rows); delete this->data; } // reserve memory and lock it this->data = new T[rows*columns]; mlock(this->data,sizeof(T)*rows*columns); this->m_rows = rows; this->m_columns = columns; } resetMatrix(parallel); } /** * On a call to this method the matrix will be resized to the specified dimensions. Any previously * allocated internal memory will be deleted, keep in mind that in case of pointers the corresponding * objects won't be deleted. The matrix will be initialized to all ZERO! If 'large' is set an optimized * subroutine will be used to initialize the entries, use this only for LARGE dims! 
For truly large matrices * set 'parallel' true. */ void initMatrixZERO(unsigned int rows, unsigned int columns, bool large = false, bool parallel = false) { if(rows == 0 || columns == 0) { return; } if(this->pinned == false) { // release memory if already allocated DELETE_ARRAY_NULL_CHECKING(this->data); // reserve memory this->data = new T[rows*columns]; this->m_rows = rows; this->m_columns = columns; } else { // release memory if already allocated if(this->data != NULL) { munlock(this->data,sizeof(T)*this->m_columns*this->m_rows); delete this->data; } // reserve memory and lock it this->data = new T[rows*columns]; mlock(this->data,sizeof(T)*rows*columns); this->m_rows = rows; this->m_columns = columns; } globalSetMatrix(0,large,parallel); } /** * Will iterate over all entries and set them to T(). Use parallel for large matrices! * */ void resetMatrix( bool parallel = false ) { globalSetMatrixVal(T(), parallel); } /** * This is a fast method for setting the initial value, if 'large' is set an optimized * subroutine will be used to initialize the entries! For truly large matrices set 'parallel' * true. The optimization heuristic works best if the amount of columns largely exceeds the amount of * rows or vice versa. * Keep in mind that this routine will simply iterate in byte-wise manner over the underlying * array and set each byte to 'val'!! 
*/ void globalSetMatrix(unsigned char val, bool large = false, bool parallel = false) { if(large == true)//set each row via memcpy if(parallel == true) if(m_columns >= m_rows) Toolbox::setArrayValParallel(val, (unsigned char*)this->data, sizeof(T)*m_rows*m_columns, sizeof(T)*m_columns); else Toolbox::setArrayValParallel(val, (unsigned char*)this->data, sizeof(T)*m_rows*m_columns, sizeof(T)*m_rows); else if(m_columns >= m_rows) Toolbox::setArrayVal(val, (unsigned char*)this->data, sizeof(T)*m_rows*m_columns, sizeof(T)*m_columns); else Toolbox::setArrayVal(val, (unsigned char*)this->data, sizeof(T)*m_rows*m_columns, sizeof(T)*m_rows); else memset(this->data,val,sizeof(T)*m_rows*m_columns); } /** * This is a relatively slow method for setting the initial value, yet it should be used * for non-primitive datatypes or primitive types which exceed char size. * Use parallel for large matrices! */ void globalSetMatrixVal(const T& val, bool parallel = false) { if(parallel == true) for(unsigned int i=0; i < this->m_columns * this->m_rows; ++i) { this->data[i] = val; } else #pragma omp parallel for for(unsigned int i=0; i < this->m_columns * this->m_rows; ++i) { this->data[i] = val; } } /** * Returns the specified submatrix. * */ Matrix2<T>* getSubMatrix(unsigned int start_row, unsigned int start_column, unsigned int rows, unsigned int columns) const { Matrix2<T>* output = new Matrix2<T>(this->data_alignment); output->initMatrix(rows,columns); //copy the data if(this->data_alignment == Matrix::MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { for(unsigned int i=0;i<rows;++i) { for(unsigned int j=0;j<columns;++j) { output->setData(i,j, this->getData(start_row+i,start_column+j)); } } } else { for(unsigned int j=0;j<columns;++j) { for(unsigned int i=0;i<rows;++i) { output->setData(i,j, this->getData(start_row+i,start_column+j)); } } } return output; } /** * Returns a copy of the matrix with added rows and columns, each matrix element is set to val. 
* */ Matrix2<T>* getPaddedMatrix(unsigned int rows, unsigned int columns, const T& val) const { Matrix2<T>* output = new Matrix2<T>(this->data_alignment); output->initMatrix(this->m_rows + rows,this->m_columns + columns); output->globalSetMatrixVal(val); //copy the data if(this->data_alignment == Matrix::MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { for(unsigned int i=0;i<this->m_rows;++i) { for(unsigned int j=0;j<this->m_columns;++j) { output->setData(i, j, this->getData(i,j)); } } } else//column major { for(unsigned int j=0;j<this->m_columns;++j) { for(unsigned int i=0;i<this->m_rows;++i) { output->setData(i, j, this->getData(i,j)); } } } return output; } /** * returns a submatrix by removing row 'row' and column 'column' from the current matrix. * */ Matrix2<T>* removeRowColumn(unsigned int row, unsigned int column) { Matrix2<T>* output = new Matrix2<T>(this->data_alignment); output->initMatrix(m_rows-1,m_columns-1); int v_row = 0; int v_column = 0; for(unsigned int a=0; a < m_rows; a++) { if(a==row) { v_row = -1; continue; } for(unsigned int b=0; b < m_columns; b++) { if(b==column) { v_column = -1; continue; } output->setData(a+v_row, b+v_column, this->getData(a, b)); } v_column = 0; } return output; } /** * Calculates the matrix determinant. Returns 0 in case of abstract matrices. * */ template<typename U> U determinant() { U det = U(); if(this->data_type != SH_DATA_TYPE_ABSTRACT) { det = determinant_<U>(this); } else { printf("ERROR: can not calculate determinant of abstract matrix\n"); } return det; } /** * Calculates the inverse of the matrix. Returns null if matrix is not invertible or of abstract type. 
* */ template<typename U> Matrix2<T>* getInverse() { Matrix2<T>* inv = NULL; U det = this->determinant<U>(); //abstract matrix if(this->data_type == SH_DATA_TYPE_ABSTRACT) { printf("ERROR: can not calculate inverse of an abstract matrix\n"); return inv; } //determinant = 0 if(det == (U)0) { printf("ERROR: matrix is not invertible\n"); return inv; } //not quadratic if(m_rows != m_columns) { printf("ERROR: matrix is not invertible\n"); return inv; } for(int i=0;i<m_rows;++i) { for(int j=0;j<m_columns;++j) { Matrix2<T>* minor_ij = this->removeRowColumn(i, j); U det_minor = minor_ij->determinant<U>(); inv->setData(j, i, (int)( std::pow(-1, i+1+j+1)*(det_minor/det) ) );//implicit transpose, thus inverse delete minor_ij; } } return inv; } /** * Scales entries with a, only applicable for non-abstract matrices. Leaves matrix unchanged otherwise. * */ void scale(T a) { //abstract matrix if(this->data_type == SH_DATA_TYPE_ABSTRACT) { printf("ERROR: can not scale an abstract matrix\n"); return; } for(int i=0;i<m_rows;++i) { for(int j=0;j<m_columns;++j) { this->setData(i, j, this->getData(i, j)*a); } } } /** * A pointwise multiplication of the matrix with matrix A. * */ void pointMultiply(Lazarus::Matrix2<T>* A) { //abstract matrix if(this->data_type == SH_DATA_TYPE_ABSTRACT) { printf("ERROR: can not pointwise multiply an abstract matrix\n"); return; } if(this->m_rows != A->getRowCount() || this->m_columns != A->getColumnCount()) { printf("ERROR: can not pointwise multiply with matrix of different size\n"); return; } for(unsigned int i=0;i<m_rows;++i) { for(unsigned int j=0;j<m_columns;++j) { this->setData(i,j, this->getData(i,j) * A->getData(i,j) ); } } } /** * A pointwise multiplication of the matrix with matrix A. 
* */ void pointMultiply(const Lazarus::Matrix2<T>& A) { //abstract matrix if(this->data_type == SH_DATA_TYPE_ABSTRACT) { printf("ERROR: can not pointwise multiply an abstract matrix\n"); return; } if(this->m_rows != A.getRowCount() || this->m_columns != A.getColumnCount()) { printf("ERROR: can not pointwise multiply with matrix of different size\n"); return; } for(unsigned int i=0;i<m_rows;++i) { for(unsigned int j=0;j<m_columns;++j) { this->setData(i,j, this->getData(i,j) * A.getData(i,j) ); } } } /** * A pointwise division of the matrix with matrix A. * */ void pointDivide(Lazarus::Matrix2<T>* A) { //abstract matrix if(this->data_type == SH_DATA_TYPE_ABSTRACT) { printf("ERROR: can not pointwise divide an abstract matrix\n"); return; } if(this->m_rows != A->getRowCount() || this->m_columns != A->getColumnCount()) { printf("ERROR: can not pointwise divide with matrix of different size\n"); return; } for(unsigned int i=0;i<m_rows;++i) { for(unsigned int j=0;j<m_columns;++j) { this->setData(i,j, this->getData(i,j) / A->getData(i,j) ); } } } /** * A pointwise division of the matrix with matrix A. * */ void pointDivide(const Lazarus::Matrix2<T>& A) { //abstract matrix if(this->data_type == SH_DATA_TYPE_ABSTRACT) { printf("ERROR: can not pointwise divide an abstract matrix\n"); return; } if(this->m_rows != A.getRowCount() || this->m_columns != A.getColumnCount()) { printf("ERROR: can not pointwise divide with matrix of different size\n"); return; } for(unsigned int i=0;i<m_rows;++i) { for(unsigned int j=0;j<m_columns;++j) { this->setData(i,j, this->getData(i,j) / A.getData(i,j) ); } } } /** * this should only be used for debug purposes and especially for small matrices, thus we don't care * about the switch within each iteration of the inner most for loop. 
*/
	virtual void printData() const
	{
		for(unsigned int i=0;i<m_rows;++i)
		{
			for(unsigned int j=0;j<m_columns;++j)
			{
				//reinterpret the stored slot according to the runtime data type tag
				switch(this->data_type)
				{
					case SH_DATA_TYPE_DOUBLE:
						printf("%F ", *((double*)&getData(i,j)));
						break;

					case SH_DATA_TYPE_FLOAT:
						printf("%f ", *((float*)&getData(i,j)));
						break;

					case SH_DATA_TYPE_LONG_DOUBLE:
						printf("%Lf ", *((long double*)&getData(i,j)));
						break;

					case SH_DATA_TYPE_INT:
						printf("%d ", *((int*)&getData(i,j)));
						break;

					case SH_DATA_TYPE_LONGLONG:
						printf("%lld ", *((long long int*)&getData(i,j)));
						break;

					case SH_DATA_TYPE_ULONGLONG:
						printf("%llu ", *((unsigned long long*)&getData(i,j)));
						break;

					case SH_DATA_TYPE_LONG:
						printf("%ld ", *(( long int*)&getData(i,j)));
						break;

					case SH_DATA_TYPE_ULONG:
						printf("%lu ", *((unsigned long*)&getData(i,j)));
						break;

					case SH_DATA_TYPE_UINT:
						printf("%u ", *((unsigned int*)&getData(i,j)));
						break;

					case SH_DATA_TYPE_UCHAR:
						printf("%u ", *((unsigned char*)&getData(i,j)));
						break;

					case SH_DATA_TYPE_CHAR:
						printf("%d ", *((char*)&getData(i,j)));
						break;

					case SH_DATA_TYPE_SHORT:
						printf("%hd ", *((short*)&getData(i,j)));
						break;

					case SH_DATA_TYPE_USHORT:
						printf("%hu ", *((unsigned short*)&getData(i,j)));
						break;

					case SH_DATA_TYPE_ABSTRACT:
						printf("ABSTRACT ");
						break;

					case SH_DATA_TYPE_UNSPECIFIED:
						printf(" UNKNOWN ");
						break;
				}
			}
			printf("\n");
		}
	}

	// Bounds-checked element write; out-of-range indices are reported and ignored.
	void setData(unsigned int i, unsigned int j, const T& value)
	{
		if(i>=m_rows)
		{
			printf("ERROR: setData row %d not available\n",i);
			return;
		}

		if(j>=m_columns)
		{
			printf("ERROR: setData column %d not available\n",j);
			return;
		}

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR)
		{
			this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,this->m_rows)] = value;
		}

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR)
		{
			this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,this->m_columns)] = value;
		}
	}

	// Bounds-checked element access; falls back to data[0] on any out-of-range index.
	inline T& getData(unsigned int i, unsigned int j) const
	{
		//T output=0;

		if(i>=m_rows)
		{
			printf("ERROR: getData row %d not available\n",i);
			return this->data[0];
		}

		if(j>=m_columns)
		{
			printf("ERROR: getData column %d not available\n",j);
			return this->data[0];
		}

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR)
		{
			//output = this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,this->m_rows)];
			return this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,this->m_rows)];
		}

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR)
		{
			//output = this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,this->m_columns)];
			return this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,this->m_columns)];
		}

		return this->data[0];
		//return output;
	}

	// Unchecked element access; indices are taken by reference and NOT bounds-checked.
	T& operator ()(unsigned int& i, unsigned int& j)
	{
		//T output=0;

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR)
		{
			//output = this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,this->m_rows)];
			return this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,this->m_rows)];
		}

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR)
		{
			//output = this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,this->m_columns)];
			return this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,this->m_columns)];
		}

		return this->data[0];
		//return output;
	}

	// Pointer to the addressed slot; NULL only if the alignment is unset. No bounds check.
	inline T* getDataRef(unsigned int i, unsigned int j)
	{
		T* output=0;

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR)
		{
			output = this->data + IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,this->m_rows);
		}

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR)
		{
			output = this->data + IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,this->m_columns);
		}

		return output;
	}

	/*
	 * This method returns a pointer to row i of the underlying data array.
	 * I.e.
one can return a pointer to the i-th row of a row-major matrix */
	inline T* getRawDataRow(unsigned int i)
	{
		T* output=0;

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR)
		{
			printf("ERROR: can not return row reference as the matrix is in column major format\n");
		}

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR)
		{
			output = &(this->data[ i*this->m_columns ] );
		}

		return output;
	}

	/*
	 * This method returns a pointer to column i of the underlying data array.
	 * I.e. one can return a pointer to the i-th column of a column-major matrix
	 */
	inline T* getRawDataColumn(unsigned int i)
	{
		T* output=0;

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR)
		{
			printf("ERROR: can not return column reference as the matrix is in row major format\n");
		}

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR)
		{
			output = &(this->data[ i*this->m_rows ] );
		}

		return output;
	}

	//raw pointer to the underlying storage
	T* getDataPtr() const
	{
		return this->data;
	}

	unsigned int getRowCount() const
	{
		return this->m_rows;
	}

	unsigned int getColumnCount() const
	{
		return this->m_columns;
	}

	//alias for getRowCount()
	unsigned int rows() const
	{
		return this->m_rows;
	}

	//alias for getColumnCount()
	unsigned int columns() const
	{
		return this->m_columns;
	}

	// Reads a CSV matrix file: "rows,columns;" header followed by the entries.
	// NOTE(review): the fopen() result is not checked; a missing file will crash.
	void readMatrixFromFile(const std::string& filename)
	{
		setlocale(LC_ALL,"C");

		FILE* pFile = fopen(filename.c_str(),"r");

		std::string s;

		//read header and init data structures
		// from now on we assume a correct format of the input file !!
		std::string rows="";
		std::string columns="";

		readCSVValue(&pFile,&rows);
		readCSVValue(&pFile,&columns);

		this->initMatrix( Toolbox::stringToInt( rows ),Toolbox::stringToInt( columns ) );

		//load data into matrix
		for(unsigned int i=0;i<this->m_rows;i++)
		{
			for(unsigned int j=0;j<this->m_columns;j++)
			{
				readCSVValue(&pFile,&s); //read token
#ifdef DEBUG
				printf("setting number %s \n",s.c_str());
#endif
				this->setData(i,j, Toolbox::stringToDouble( s ) );
			}
		}

		fclose(pFile);
	}

	// Reads a matrix from a file with a textual "rows,columns;" header followed by
	// a raw binary payload of rows*columns elements of T.
	void readMatrixFromBinaryFile(const std::string& filename)
	{
		FILE* pFile = fopen(filename.c_str(),"rb");

		std::string s;

		//read header and init data structures
		// from now on we assume a correct format of the input file !!
		std::string rows="";
		std::string columns="";

		readCSVValue(&pFile,&rows);
		readCSVValue(&pFile,&columns);

		this->initMatrix( Toolbox::stringToInt( rows ),Toolbox::stringToInt( columns ) );

		//load data into matrix
		//NOTE(review): size and count arguments are swapped w.r.t. the usual fread
		//convention; the total byte count is nevertheless correct.
		fread(this->data, this->m_rows*this->m_columns,sizeof(T), pFile);

		fclose(pFile);
	}

	// Writes the matrix as CSV: "rows,columns;" header, then one ';'-terminated
	// line per row. Abstract matrices can not be serialized this way.
	void writeMatrixToFile(const std::string& filename) const
	{
		if(data_type == SH_DATA_TYPE_ABSTRACT)
		{
			printf("ERROR: can't save abstract matrix in CSV format, use binary export methods\n");
		}

		setlocale(LC_ALL,"C");

		FILE* pFile = fopen(filename.c_str(),"w");

		std::string s;

		//header
		s = Toolbox::intToString( this->m_rows );
		fwrite(s.c_str(), s.length(),sizeof(char), pFile);
		fputc(',',pFile);
		s = Toolbox::intToString( this->m_columns );
		fwrite(s.c_str(), s.length(),sizeof(char), pFile);
		fputc(';',pFile);
		fputc('\n',pFile);

		//data
		for(unsigned int i=0;i<this->m_rows;i++)
		{
			for(unsigned int j=0;j<this->m_columns;j++)
			{
				if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR)
				{
					s = Toolbox::doubleToString( this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM2(i,j,this->m_rows)] );
				}

				if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR)
				{
					s = Toolbox::doubleToString( this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM2(i,j,this->m_columns)] );
				}

				fwrite(s.c_str(), s.length(),sizeof(char), pFile);
if(j+1<this->m_columns) fputc(',',pFile); } fputc(';',pFile); fputc('\n',pFile); } fclose(pFile); } void writeMatrixToBinaryFile(const std::string& filename) const { FILE* pFile = fopen(filename.c_str(),"wb"); std::string s; //header s = Toolbox::intToString( this->m_rows ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); fputc(',',pFile); s = Toolbox::intToString( this->m_columns ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); fputc(';',pFile); fputc('\n',pFile); //data fwrite(this->data, this->m_columns*this->m_rows,sizeof(T), pFile); fclose(pFile); } void exportMatrix(const std::string& filename) { if(data_type == SH_DATA_TYPE_ABSTRACT) { printf("ERROR: can't save abstract matrix in CSV format, use binary export methods\n"); } FILE* pFile = fopen(filename.c_str(),"w"); std::string s; for(unsigned int i=0;i<this->m_rows;i++) { for(unsigned int j=0;j<this->m_columns;j++) { s = Toolbox::doubleToString( (double)getData(i,j) ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); if(j+1<this->m_columns) fputc(',',pFile); } fputc(';',pFile); fputc('\n',pFile); } fclose(pFile); } void exportMatrixBinary(const std::string& filename) { FILE* pFile = fopen(filename.c_str(),"wb"); //data fwrite(this->data, this->m_columns*this->m_rows,sizeof(T), pFile); fclose(pFile); } enum MATRIX_DATA_ALIGNMENT getDataAlignment() const { return this->data_alignment; } enum SH_DATA_TYPE getDataType() const { return this->data_type; } protected: unsigned int m_rows; unsigned int m_columns; T* data; bool pinned; // reads a csv-token, returns false if terminated by , and true if terminated by ; or EOF bool readCSVValue(FILE** pFile, std::string* s) { char c[2]; c[1] = '\0'; *s = ""; //reset string //read chars until , ; or EOF found while(1){ c[0] = fgetc(*pFile); if(c[0]==',') return false; if(c[0]==';' || c[0]==EOF) { fgetc(*pFile); //read ahead one char, i.e. skip line break. 
Uncomment this if linebreaks are not being used after ; return true; } #ifdef DEBUG printf("appending %s to %s \n",(char*)&c,(*s).c_str()); #endif s->append(c); } } /** * Helper method for determinant calculation. Only applicable for non-abstract template parameters. * */ template<typename U> U determinant_(Matrix2<T>* mat) { int sum=0; int s; if(mat->getColumnCount()==1 && mat->getRowCount()==1) { //bottom case of recursion. size 1 matrix determinant is itself. return(mat->getData(0, 0)); } for(unsigned int i=0;i<mat->getRowCount();i++) { //finds determinant using row-by-row expansion Matrix2<T>* submat = mat->removeRowColumn(i,0); //submat->printData(); if(i%2==0) { s=1; } else { s=-1; } sum += s * mat->getData(i, 0)*(determinant_<U>(submat)); //printf("%u: %d \n",i,sum); delete submat; } //printf("final %d \n",sum); return(sum); //returns determinant value. once stack is finished, returns final determinant. } }; //******************* 3 dims ******************************* template<class T> class Matrix3 : public Matrix{ public: /** * This class is merely a state container in order to use the [][][] * operator on a matrix, i.e. mat[][] returns a row-column pointer object X * which has the desired row and column >indices< temporarily saved. * X[] will deliver the row pointer. 
* */ class Matrix3RowColumnPointer { friend class Matrix3<T>; public: T& operator[](unsigned int level) { if(m_row>=mp_matrix->m_rows) { printf("ERROR: getData row %d not available\n",m_row); return mp_matrix->data[0]; } if(m_col>=mp_matrix->m_columns) { printf("ERROR: getData column %d not available\n",m_col); return mp_matrix->data[0]; } if(level>=mp_matrix->m_levels) { printf("ERROR: getData level %d not available\n",level); return mp_matrix->data[0]; } if(mp_matrix->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,this->rows,this->columns)]; return mp_matrix->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(m_row,m_col,level,mp_matrix->m_rows,mp_matrix->m_columns)]; } if(mp_matrix->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,this->rows,this->columns)]; return mp_matrix->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(m_row,m_col,level,mp_matrix->m_rows,mp_matrix->m_columns)]; } return mp_matrix->data[0]; } const T& operator[](unsigned int level) const { if(m_row>=mp_matrix->m_rows) { printf("ERROR: getData row %d not available\n",m_row); return mp_matrix->data[0]; } if(m_col>=mp_matrix->m_columns) { printf("ERROR: getData column %d not available\n",m_col); return mp_matrix->data[0]; } if(level>=mp_matrix->m_levels) { printf("ERROR: getData level %d not available\n",level); return mp_matrix->data[0]; } if(mp_matrix->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,this->rows,this->columns)]; return mp_matrix->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(m_row,m_col,level,mp_matrix->m_rows,mp_matrix->m_columns)]; } if(mp_matrix->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,this->rows,this->columns)]; return 
mp_matrix->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(m_row,m_col,level,mp_matrix->m_rows,mp_matrix->m_columns)]; } return mp_matrix->data[0]; } private: Matrix3RowColumnPointer(Matrix3<T>* mat,int row, int column) { mp_matrix = mat; m_col = column; m_row = row; } Matrix3<T>* mp_matrix;//external ref unsigned int m_col; unsigned int m_row; }; /** * This class is merely a state container in order to use the [][][] * operator on a matrix, i.e. mat[][] returns a row-column pointer object X * which has the desired row and column >indices< temporarily saved. * X[] will deliver the row pointer. * */ class Matrix3RowPointer { friend class Matrix3<T>; friend class Matrix3RowColumnPointer; public: Matrix3RowColumnPointer operator[](unsigned int column) { return Matrix3LevelPointer(mp_matrix,m_row,column); } const Matrix3RowColumnPointer operator[](unsigned int column) const { return Matrix3LevelPointer(mp_matrix,m_row,column); } private: Matrix3RowPointer(Matrix3<T>* mat, unsigned int row) { mp_matrix = mat; m_row = row; } Matrix3<T>* mp_matrix;//external ref unsigned int m_row; }; Matrix3(enum MATRIX_DATA_ALIGNMENT data_alignment = MATRIX_DATA_ALIGNMENT_ROW_MAJOR, bool pinned = false) { this->m_columns = 0; this->m_rows = 0; this->m_levels = 0; this->data_alignment = data_alignment; this->data = NULL; this->pinned = pinned; //determine the type if(std::is_same<T,int>::value == true) { this->data_type = SH_DATA_TYPE_INT; } else if(std::is_same<T,unsigned int>::value == true) { this->data_type = SH_DATA_TYPE_UINT; } else if(std::is_same<T,char>::value == true) { this->data_type = SH_DATA_TYPE_CHAR; } else if(std::is_same<T,unsigned char>::value == true) { this->data_type = SH_DATA_TYPE_UCHAR; } else if(std::is_same<T,long int>::value == true) { this->data_type = SH_DATA_TYPE_LONG; } else if(std::is_same<T,long long int>::value == true) { this->data_type = SH_DATA_TYPE_LONGLONG; } else if(std::is_same<T,unsigned long>::value == true) { this->data_type = SH_DATA_TYPE_ULONG; } 
else if(std::is_same<T,unsigned long long>::value == true)
		{
			this->data_type = SH_DATA_TYPE_ULONGLONG;
		}
		else if(std::is_same<T,float>::value == true)
		{
			this->data_type = SH_DATA_TYPE_FLOAT;
		}
		else if(std::is_same<T,double>::value == true)
		{
			this->data_type = SH_DATA_TYPE_DOUBLE;
		}
		else if(std::is_same<T,long double>::value == true)
		{
			this->data_type = SH_DATA_TYPE_LONG_DOUBLE;
		}
		else if(std::is_same<T,unsigned short>::value == true)
		{
			this->data_type = SH_DATA_TYPE_USHORT;
		}
		else if(std::is_same<T,short>::value == true)
		{
			this->data_type = SH_DATA_TYPE_SHORT;
		}
		else //any other T is treated as an opaque (abstract) payload
		{
			this->data_type = SH_DATA_TYPE_ABSTRACT;
		}
	}

	// Copy construction from a pointer with an explicitly chosen alignment; the
	// data is copied elementwise, so the alignment may differ from the source's.
	Matrix3(Matrix3<T>* matrix, enum MATRIX_DATA_ALIGNMENT data_alignment, bool pinned = false)
	{
		this->m_columns = 0;
		this->m_rows = 0;
		this->m_levels = 0;
		this->data = NULL;
		this->data_type = matrix->getDataType();
		this->data_alignment = data_alignment;
		this->pinned = pinned;

		this->initMatrix(matrix->getRowCount(), matrix->getColumnCount(), matrix->getLevelCount());

		//copy data
		for(unsigned int k=0;k<this->getLevelCount();k++)
		{
			for(unsigned int i=0;i<this->getRowCount();i++)
			{
				for(unsigned int j=0;j<this->getColumnCount();j++)
				{
					this->setData(i,j,k, matrix->getData(i,j,k) );
				}
			}
		}
	}

	// Copy construction from a pointer, keeping alignment, type and pinning.
	Matrix3(Matrix3<T>* matrix)
	{
		this->m_columns = 0;
		this->m_rows = 0;
		this->m_levels = 0;
		this->data = NULL;
		this->data_type = matrix->getDataType();
		this->data_alignment = matrix->getDataAlignment();
		this->pinned = matrix->getPinned();

		this->initMatrix(matrix->getRowCount(), matrix->getColumnCount(), matrix->getLevelCount());

		//copy data
		//memcpy(this->data, matrix->getDataPtr(), matrix->getRowCount()* matrix->getColumnCount() * matrix->getLevelCount() * sizeof(T));

		//copy data
		for(unsigned int k=0;k<this->getLevelCount();k++)
		{
			for(unsigned int i=0;i<this->getRowCount();i++)
			{
				for(unsigned int j=0;j<this->getColumnCount();j++)
				{
					this->setData(i,j,k, matrix->getData(i,j,k) );
				}
			}
		}
	}

	// Copy constructor.
	Matrix3(const Matrix3<T>& matrix)
	{
		this->m_columns = 0;
		this->m_rows = 0;
		this->m_levels = 0;
this->data = NULL; this->data_type = matrix.getDataType(); this->data_alignment = matrix.getDataAlignment(); this->pinned = matrix.getPinned(); this->initMatrix(matrix.getRowCount(), matrix.getColumnCount(), matrix.getLevelCount()); //copy data //memcpy(this->data, matrix.getDataPtr(), matrix.getRowCount()* matrix.getColumnCount() * matrix.getLevelCount() * sizeof(T)); //copy data for(unsigned int k=0;k<this->getLevelCount();k++) { for(unsigned int i=0;i<this->getRowCount();i++) { for(unsigned int j=0;j<this->getColumnCount();j++) { this->setData(i,j,k, matrix.getData(i,j,k) ); } } } } virtual ~Matrix3() { if(this->pinned == false) { DELETE_ARRAY_NULL_CHECKING(this->data); } else { if(this->data != NULL) { munlock(this->data,sizeof(T)*this->m_columns*this->m_rows*this->m_levels); delete this->data; } } } /** * Keep in mind that the [][] approach induces an overhead since for each * request a new object will be instantiated. **/ inline const Matrix3RowPointer operator[](int row) const { return Matrix3RowPointer(this,row); } Matrix3<T>& operator =(const Matrix3<T>& matrix) { this->m_columns = 0; this->m_rows = 0; this->m_levels = 0; DELETE_ARRAY_NULL_CHECKING(this->data); this->data_type = matrix.getDataType(); this->data_alignment = matrix.getDataAlignment(); this->pinned = matrix.getPinned(); this->initMatrix(matrix.getRowCount(), matrix.getColumnCount(), matrix.getLevelCount()); //copy data //memcpy(this->data, matrix.getDataPtr(), matrix.getRowCount()* matrix.getColumnCount() * matrix.getLevelCount() * sizeof(T)); for(unsigned int k=0;k<this->getLevelCount();k++) { for(unsigned int i=0;i<this->getRowCount();i++) { for(unsigned int j=0;j<this->getColumnCount();j++) { this->setData(i,j,k, const_cast<const T&>(matrix.getData(i,j,k)) ); } } } return *this; } Matrix3<T>& operator =(const Matrix3<T>* matrix) { this->m_columns = 0; this->m_rows = 0; this->m_levels = 0; DELETE_ARRAY_NULL_CHECKING(this->data); this->data_type = matrix->getDataType(); this->data_alignment 
= matrix->getDataAlignment(); this->pinned = matrix->getPinned(); this->initMatrix(matrix->getRowCount(), matrix->getColumnCount(), matrix->getLevelCount()); //copy data //memcpy(this->data, matrix->getDataPtr(), matrix->getRowCount()* matrix->getColumnCount() * matrix->getLevelCount() * sizeof(T)); for(unsigned int k=0;k<this->getLevelCount();k++) { for(unsigned int i=0;i<this->getRowCount();i++) { for(unsigned int j=0;j<this->getColumnCount();j++) { this->setData(i,j,k, const_cast<const T&>(matrix->getData(i,j,k)) ); } } } return *this; } bool getPinned() const { return this->pinned; } CLASS_RTTI(Lazarus::Matrix3) void serialize() { registerElement<unsigned int>(3); registerElement<enum MATRIX_DATA_ALIGNMENT>(1); registerElement<enum SH_DATA_TYPE>(1); registerUCharA(m_rows*m_columns*m_levels*sizeof(T)); allocateBuffer(); addElement<enum MATRIX_DATA_ALIGNMENT>(data_alignment); addElement<enum SH_DATA_TYPE>(data_type); addUCharA((unsigned char*)data,m_rows*m_columns*m_levels*sizeof(T)); addElement<unsigned int>(m_rows); addElement<unsigned int>(m_columns); addElement<unsigned int>(m_levels); } void deserialize() { //free any data DELETE_NULL_CHECKING(data); m_levels = getElement<unsigned int>(); m_columns = getElement<unsigned int>(); m_rows = getElement<unsigned int>(); unsigned long long size; data = (T*)getUCharA(size); data_type = getElement<enum SH_DATA_TYPE>(); data_alignment = getElement<enum MATRIX_DATA_ALIGNMENT>(); resetBuffer(); } /** * On a call to this method the matrix will be resized to the specified dimensions. Any previously * allocated internal memory will be deleted, keep in mind that in case of pointers the corresponding * objects won't be deleted. Another important fact is that this method merely resizes the matrix, * the "slots" won't be initialized to any default value. 
*/ void initMatrix(int rows, int columns, int levels) { if(rows == 0 || columns == 0 || levels == 0) { return; } if(this->pinned == false) { // release memory if already allocated DELETE_ARRAY_NULL_CHECKING(this->data); // reserve memory this->data = new T[rows*columns*levels]; this->m_rows = rows; this->m_columns = columns; this->m_levels = levels; } else { // release memory if already allocated if(this->data != NULL) { munlock(this->data,sizeof(T)*this->m_columns*this->m_rows*this->m_levels); delete this->data; } // reserve memory and lock it this->data = new T[rows*columns*levels]; mlock(this->data,sizeof(T)*rows*columns*levels); this->m_rows = rows; this->m_columns = columns; this->m_levels = levels; } resetMatrix(); } void resetMatrix() { //memset(this->data,0,sizeof(T)*this->m_columns*this->m_rows*this->m_levels); globalSetMatrixVal(T()); } /** * This is a fast method for setting the initial value */ void globalSetMatrix(unsigned char val) { memset(this->data,val,sizeof(T)*this->m_columns*this->m_rows*this->m_levels); } /** * This is a relatively slow method for setting the initial value */ void globalSetMatrixVal(T val) { for(unsigned int i=0; i < this->m_columns * this->m_rows * this->m_levels; ++i) { this->data[i] = val; } } /** * Returns the specified submatrix * */ Matrix3<T>* getSubMatrix(unsigned int start_row, unsigned int start_column, unsigned int start_level, unsigned int rows, unsigned int columns, unsigned int levels) { Matrix3<T>* output = new Matrix3<T>(this->data_alignment); output->initMatrix(rows,columns,levels); //copy the data if(this->data_alignment == Matrix::MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { for(unsigned int k=0;k<levels;++k) { for(unsigned int i=0;i<rows;++i) { for(unsigned int j=0;j<columns;++j) { output->setData(i,j,k, this->getData(start_row+i,start_column+j,start_level+k)); } } } } else { for(unsigned int k=0;k<levels;++k) { for(unsigned int j=0;j<columns;++j) { for(unsigned int i=0;i<rows;++i) { output->setData(i,j,k, 
this->getData(start_row+i,start_column+j,start_level+k)); } } } } return output; } /** * Returns a copy of the matrix with added levels, rows and columns, each matrix element is set to val. * */ Matrix3<T>* getPaddedMatrix(unsigned int levels, unsigned int rows, unsigned int columns, const T& val) { Matrix3<T>* output = new Matrix3<T>(this->data_alignment); output->initMatrix(this->m_rows + rows,this->m_columns + columns, this->m_levels + levels); output->globalSetMatrixVal(val); //copy the data if(this->data_alignment == Matrix::MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { for(unsigned int k=0;k< this->m_levels;++k) { for(unsigned int i=0;i<this->m_rows;++i) { for(unsigned int j=0;j<this->m_columns;++j) { output->setData(i, j, k, this->getData(i,j,k)); } } } } else { for(unsigned int k=0;k< this->m_levels;++k) { for(unsigned int j=0;j<this->m_columns;++j) { for(unsigned int i=0;i<this->m_rows;++i) { output->setData(i, j, k, this->getData(i,j,k)); } } } } return output; } void setData(unsigned int i, unsigned int j, unsigned int k, const T& value) { if(i>=m_rows) { printf("ERROR: setData row %d not available\n",i); return ; } if(j>=m_columns) { printf("ERROR: setData column %d not available\n",j); return ; } if(k>=m_columns) { printf("ERROR: setData level %d not available\n",k); return ; } if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)] = value; } if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)] = value; } } inline T& getData(unsigned int i, unsigned int j, unsigned int k) const { //T output=0; if(i>=m_rows) { printf("ERROR: getData row %d not available\n",i); return this->data[0]; } if(j>=m_columns) { printf("ERROR: getData column %d not available\n",j); return this->data[0]; } if(k>=m_levels) { printf("ERROR: getData level %d not available\n",k); return this->data[0]; } 
if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,this->rows,this->columns)]; return this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)]; } if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,this->rows,this->columns)]; return this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)]; } return this->data[0]; //return output; } T& operator () (unsigned int& i, unsigned int& j, unsigned int& k) { //T output=0; if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,this->rows,this->columns)]; return this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)]; } if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { //output = this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,this->rows,this->columns)]; return this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)]; } return T(); //return output; } inline T* getDataRef(unsigned int i, unsigned int j, unsigned int k) { T* output=0; if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { output = &(this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)]); } if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { output = &(this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)]); } return output; } /* * This method returns a pointer to row i of the underlying data array. * I.e. one can return a pointer to the i-th row of a row-major matrix, * k determines the slice. 
*/
	inline T* getRawDataRow(unsigned int i, unsigned int k)
	{
		T* output=0;

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR)
		{
			printf("ERROR: can not return row reference as the matrix is in column major format\n");
		}

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR)
		{
			//slice offset plus row offset within the slice
			output = &(this->data[ k*this->m_columns*this->m_rows + i*this->m_columns ] );
		}

		return output;
	}

	/*
	 * This method returns a pointer to column i of the underlying data array.
	 * I.e. one can return a pointer to the i-th column of a column-major matrix,
	 * k determines the slice.
	 */
	inline T* getRawDataColumn(unsigned int i, unsigned int k)
	{
		T* output=0;

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR)
		{
			printf("ERROR: can not return column reference as the matrix is in row major format\n");
		}

		if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR)
		{
			//slice offset plus column offset within the slice
			output = &(this->data[ k*this->m_columns*this->m_rows + i*this->m_rows ] );
		}

		return output;
	}

	//raw pointer to the underlying storage
	T* getDataPtr() const
	{
		return this->data;
	}

	int getRowCount() const
	{
		return this->m_rows;
	}

	int getColumnCount() const
	{
		return this->m_columns;
	}

	int getLevelCount() const
	{
		return this->m_levels;
	}

	//alias for getRowCount()
	int rows() const
	{
		return this->m_rows;
	}

	//alias for getColumnCount()
	int columns() const
	{
		return this->m_columns;
	}

	//alias for getLevelCount()
	int levels() const
	{
		return this->m_levels;
	}

	/*
	 * this should only be used for debug purposes and especially for small matrices, thus we don't care
	 * about the switch within each iteration of the inner most for loop.
*/
	void printData() const
	{
		for(unsigned int k=0;k<m_levels;++k)
		{
			for(unsigned int i=0;i<m_rows;++i)
			{
				for(unsigned int j=0;j<m_columns;++j)
				{
					//convert the stored slot according to the runtime data type tag
					switch(this->data_type)
					{
						case SH_DATA_TYPE_DOUBLE:
							printf("%F ", (double)getData(i,j,k));
							break;

						case SH_DATA_TYPE_FLOAT:
							printf("%f ", (float)getData(i,j,k));
							break;

						case SH_DATA_TYPE_LONG_DOUBLE:
							printf("%Lf ", (long double)getData(i,j,k));
							break;

						case SH_DATA_TYPE_INT:
							printf("%d ", (int)getData(i,j,k));
							break;

						case SH_DATA_TYPE_LONGLONG:
							printf("%lld ", (long long int)getData(i,j,k));
							break;

						case SH_DATA_TYPE_ULONGLONG:
							printf("%llu ", (unsigned long long)getData(i,j,k));
							break;

						case SH_DATA_TYPE_LONG:
							printf("%ld ", (long int)getData(i,j,k));
							break;

						case SH_DATA_TYPE_ULONG:
							printf("%lu ", (unsigned long)getData(i,j,k));
							break;

						case SH_DATA_TYPE_UINT:
							printf("%u ", (unsigned int)getData(i,j,k));
							break;

						//NOTE(review): unsigned chars are printed as characters here,
						//while the 2D variant prints them numerically with %u
						case SH_DATA_TYPE_UCHAR:
							printf("%c ", (unsigned char)getData(i,j,k));
							break;

						case SH_DATA_TYPE_CHAR:
							printf("%d ", (char)getData(i,j,k));
							break;

						case SH_DATA_TYPE_SHORT:
							printf("%hd ", (short)getData(i,j,k));
							break;

						case SH_DATA_TYPE_USHORT:
							printf("%hu ", (unsigned short)getData(i,j,k));
							break;

						case SH_DATA_TYPE_ABSTRACT:
							printf("ABSTRACT ");
							break;

						case SH_DATA_TYPE_UNSPECIFIED:
							printf(" UNKNOWN ");
							break;
					}
				}
				printf("\n");
			}
			//separator between levels (slices)
			printf("\n****************\n");
		}
	}

	// Reads a CSV matrix file: "rows,columns,levels;" header followed by the entries.
	// NOTE(review): the fopen() result is not checked; a missing file will crash.
	void readMatrixFromFile(const std::string& filename)
	{
		setlocale(LC_ALL,"C");

		FILE* pFile = fopen(filename.c_str(),"r");

		std::string s;

		//read header and init data structures
		// from now on we assume a correct format of the input file !!
std::string rows="";
		std::string columns="";
		std::string levels="";

		readCSVValue(&pFile,&rows);
		readCSVValue(&pFile,&columns);
		readCSVValue(&pFile,&levels);

		this->initMatrix( Toolbox::stringToInt( rows ),Toolbox::stringToInt( columns ),Toolbox::stringToInt( levels ) );

		//load data into matrix
		for(unsigned int k=0;k<this->m_levels;k++)
		{
			for(unsigned int i=0;i<this->m_rows;i++)
			{
				for(unsigned int j=0;j<this->m_columns;j++)
				{
					readCSVValue(&pFile,&s); //read token
#ifdef DEBUG
					printf("setting number %s \n",s.c_str());
#endif
					this->setData(i,j,k, Toolbox::stringToDouble( s ) );
				}
			}
		}

		fclose(pFile);
	}

	// Reads a matrix from a file with a textual "rows,columns,levels;" header
	// followed by a raw binary payload of rows*columns*levels elements of T.
	void readMatrixFromBinaryFile(const std::string& filename)
	{
		FILE* pFile = fopen(filename.c_str(),"rb");

		std::string s;

		//read header and init data structures
		// from now on we assume a correct format of the input file !!
		std::string rows="";
		std::string columns="";
		std::string levels="";

		readCSVValue(&pFile,&rows);
		readCSVValue(&pFile,&columns);
		readCSVValue(&pFile,&levels);

		this->initMatrix( Toolbox::stringToInt( rows ),Toolbox::stringToInt( columns ),Toolbox::stringToInt( levels ) );

		//load data into matrix
		fread(this->data, this->m_rows*this->m_columns*this->m_levels,sizeof(T), pFile);

		fclose(pFile);
	}

	// Writes the matrix as CSV: "rows,columns,levels;" header, then one
	// ';'-terminated line per row. Abstract matrices can not be serialized this way.
	void writeMatrixToFile(const std::string& filename) const
	{
		if(data_type == SH_DATA_TYPE_ABSTRACT)
		{
			printf("ERROR: can't save abstract matrix in CSV format, use binary export methods\n");
		}

		setlocale(LC_ALL,"C");

		FILE* pFile = fopen(filename.c_str(),"w");

		std::string s;

		//header
		s = Toolbox::intToString( this->m_rows );
		fwrite(s.c_str(), s.length(),sizeof(char), pFile);
		fputc(',',pFile);
		s = Toolbox::intToString( this->m_columns );
		fwrite(s.c_str(), s.length(),sizeof(char), pFile);
		fputc(',',pFile);
		s = Toolbox::intToString( this->m_levels );
		fwrite(s.c_str(), s.length(),sizeof(char), pFile);
		fputc(';',pFile);
		fputc('\n',pFile);

		//data
		for(unsigned int k=0;k<this->m_levels;k++)
		{
			for(unsigned int i=0;i<this->m_rows;i++)
			{
				for(unsigned int j=0;j<this->m_columns;j++)
				{
if(this->data_alignment == MATRIX_DATA_ALIGNMENT_COLUMN_MAJOR) { s = Toolbox::doubleToString( this->data[IDX2ARRAYCOORDINATE_CM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)] ); } if(this->data_alignment == MATRIX_DATA_ALIGNMENT_ROW_MAJOR) { s = Toolbox::doubleToString( this->data[IDX2ARRAYCOORDINATE_RM_NO_OFFSET_DIM3(i,j,k,this->m_rows,this->m_columns)] ); } fwrite(s.c_str(), s.length(),sizeof(char), pFile); if(j+1<this->columns) fputc(',',pFile); } fputc(';',pFile); fputc('\n',pFile); } } fclose(pFile); } void writeMatrixToBinaryFile(const std::string& filename) const { FILE* pFile = fopen(filename.c_str(),"wb"); std::string s; //header s = Toolbox::intToString( this->m_rows ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); fputc(',',pFile); s = Toolbox::intToString( this->m_columns ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); fputc(',',pFile); s = Toolbox::intToString( this->m_levels ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); fputc(';',pFile); fputc('\n',pFile); //data fwrite(this->data, this->m_columns*this->m_rows*this->m_levels,sizeof(T), pFile); fclose(pFile); } void exportMatrix(const std::string& filename) { if(data_type == SH_DATA_TYPE_ABSTRACT) { printf("ERROR: can't save abstract matrix in CSV format, use binary export methods\n"); } FILE* pFile = fopen(filename.c_str(),"w"); std::string s; for(unsigned int k=0;k<this->m_levels;k++) { for(unsigned int i=0;i<this->m_rows;i++) { for(unsigned int j=0;j<this->m_columns;j++) { s = Toolbox::doubleToString( (double)getData(i,j,k) ); fwrite(s.c_str(), s.length(),sizeof(char), pFile); if(j+1<this->columns) fputc(',',pFile); } fputc(';',pFile); fputc('\n',pFile); } } fclose(pFile); } void exportMatrixBinary(const std::string& filename) { FILE* pFile = fopen(filename.c_str(),"wb"); //data fwrite(this->data, this->m_columns*this->m_rows*this->m_levels,sizeof(T), pFile); fclose(pFile); } enum MATRIX_DATA_ALIGNMENT getDataAlignment() const { return this->data_alignment; } enum 
SH_DATA_TYPE getDataType() const { return this->data_type; } protected: int m_rows; int m_columns; int m_levels; T* data; bool pinned; // reads a csv-token, returns false if terminated by , and true if terminated by ; or EOF bool readCSVValue(FILE** pFile, std::string* s) { char c[2]; c[1] = '\0'; *s = ""; //reset string //read chars until , ; or EOF found while(1){ c[0] = fgetc(*pFile); if(c[0]==',') return false; if(c[0]==';' || c[0]==EOF) { fgetc(*pFile); //read ahead one char, i.e. skip line break. Uncomment this if linebreaks are not being used after ; return true; } #ifdef DEBUG printf("appending %s to %s \n",(char*)&c,(*s).c_str()); #endif s->append(c); } } }; } #endif /* MATRIX_H_ */