source
stringlengths
3
92
c
stringlengths
26
2.25M
draw.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD RRRR AAA W W % % D D R R A A W W % % D D RRRR AAAAA W W W % % D D R RN A A WW WW % % DDDD R R A A W W % % % % % % MagickCore Image Drawing Methods % % % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Bill Radcliffe of Corbis (www.corbis.com) contributed the polygon % rendering code based on Paul Heckbert's "Concave Polygon Scan Conversion", % Graphics Gems, 1990. Leonard Rosenthal and David Harr of Appligent % (www.appligent.com) contributed the dash pattern, linecap stroking % algorithm, and minor rendering improvements. % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/annotate.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform-private.h" #include "MagickCore/utility.h" /* Define declarations. */ #define BezierQuantum 200 #define DrawEpsilon (1.0e-10) /* Typedef declarations. 
*/ typedef struct _EdgeInfo { SegmentInfo bounds; double scanline; PointInfo *points; size_t number_points; ssize_t direction; MagickBooleanType ghostline; size_t highwater; } EdgeInfo; typedef struct _ElementInfo { double cx, cy, major, minor, angle; } ElementInfo; typedef struct _PolygonInfo { EdgeInfo *edges; size_t number_edges; } PolygonInfo; typedef enum { MoveToCode, OpenCode, GhostlineCode, LineToCode, EndCode } PathInfoCode; typedef struct _PathInfo { PointInfo point; PathInfoCode code; } PathInfo; /* Forward declarations. */ static MagickBooleanType DrawStrokePolygon(Image *,const DrawInfo *,const PrimitiveInfo *, ExceptionInfo *); static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *,const PrimitiveInfo *); static size_t TracePath(PrimitiveInfo *,const char *); static void TraceArc(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo), TraceArcPath(PrimitiveInfo *,const PointInfo,const PointInfo,const PointInfo, const double,const MagickBooleanType,const MagickBooleanType), TraceBezier(PrimitiveInfo *,const size_t), TraceCircle(PrimitiveInfo *,const PointInfo,const PointInfo), TraceEllipse(PrimitiveInfo *,const PointInfo,const PointInfo, const PointInfo), TraceLine(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRectangle(PrimitiveInfo *,const PointInfo,const PointInfo), TraceRoundRectangle(PrimitiveInfo *,const PointInfo,const PointInfo, PointInfo), TraceSquareLinecap(PrimitiveInfo *,const size_t,const double); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireDrawInfo() returns a DrawInfo structure properly initialized. 
% % The format of the AcquireDrawInfo method is: % % DrawInfo *AcquireDrawInfo(void) % */ MagickExport DrawInfo *AcquireDrawInfo(void) { DrawInfo *draw_info; draw_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*draw_info)); GetDrawInfo((ImageInfo *) NULL,draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneDrawInfo() makes a copy of the given draw_info structure. If NULL % is specified, a new DrawInfo structure is created initialized to default % values. % % The format of the CloneDrawInfo method is: % % DrawInfo *CloneDrawInfo(const ImageInfo *image_info, % const DrawInfo *draw_info) % % A description of each parameter follows: % % o image_info: the image info. % % o draw_info: the draw info. % */ MagickExport DrawInfo *CloneDrawInfo(const ImageInfo *image_info, const DrawInfo *draw_info) { DrawInfo *clone_info; ExceptionInfo *exception; clone_info=(DrawInfo *) AcquireCriticalMemory(sizeof(*clone_info)); GetDrawInfo(image_info,clone_info); if (draw_info == (DrawInfo *) NULL) return(clone_info); exception=AcquireExceptionInfo(); if (clone_info->primitive != (char *) NULL) (void) CloneString(&clone_info->primitive,draw_info->primitive); if (draw_info->geometry != (char *) NULL) (void) CloneString(&clone_info->geometry,draw_info->geometry); clone_info->viewbox=draw_info->viewbox; clone_info->affine=draw_info->affine; clone_info->gravity=draw_info->gravity; clone_info->fill=draw_info->fill; clone_info->stroke=draw_info->stroke; clone_info->stroke_width=draw_info->stroke_width; if (draw_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=CloneImage(draw_info->fill_pattern,0,0,MagickTrue, exception); if (draw_info->stroke_pattern != (Image *) NULL) clone_info->stroke_pattern=CloneImage(draw_info->stroke_pattern,0,0, MagickTrue,exception); 
clone_info->stroke_antialias=draw_info->stroke_antialias; clone_info->text_antialias=draw_info->text_antialias; clone_info->fill_rule=draw_info->fill_rule; clone_info->linecap=draw_info->linecap; clone_info->linejoin=draw_info->linejoin; clone_info->miterlimit=draw_info->miterlimit; clone_info->dash_offset=draw_info->dash_offset; clone_info->decorate=draw_info->decorate; clone_info->compose=draw_info->compose; if (draw_info->text != (char *) NULL) (void) CloneString(&clone_info->text,draw_info->text); if (draw_info->font != (char *) NULL) (void) CloneString(&clone_info->font,draw_info->font); if (draw_info->metrics != (char *) NULL) (void) CloneString(&clone_info->metrics,draw_info->metrics); if (draw_info->family != (char *) NULL) (void) CloneString(&clone_info->family,draw_info->family); clone_info->style=draw_info->style; clone_info->stretch=draw_info->stretch; clone_info->weight=draw_info->weight; if (draw_info->encoding != (char *) NULL) (void) CloneString(&clone_info->encoding,draw_info->encoding); clone_info->pointsize=draw_info->pointsize; clone_info->kerning=draw_info->kerning; clone_info->interline_spacing=draw_info->interline_spacing; clone_info->interword_spacing=draw_info->interword_spacing; clone_info->direction=draw_info->direction; if (draw_info->density != (char *) NULL) (void) CloneString(&clone_info->density,draw_info->density); clone_info->align=draw_info->align; clone_info->undercolor=draw_info->undercolor; clone_info->border_color=draw_info->border_color; if (draw_info->server_name != (char *) NULL) (void) CloneString(&clone_info->server_name,draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) { register ssize_t x; for (x=0; fabs(draw_info->dash_pattern[x]) >= DrawEpsilon; x++) ; clone_info->dash_pattern=(double *) AcquireQuantumMemory((size_t) x+1UL, sizeof(*clone_info->dash_pattern)); if (clone_info->dash_pattern == (double *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) 
CopyMagickMemory(clone_info->dash_pattern,draw_info->dash_pattern, (size_t) (x+1)*sizeof(*clone_info->dash_pattern)); } clone_info->gradient=draw_info->gradient; if (draw_info->gradient.stops != (StopInfo *) NULL) { size_t number_stops; number_stops=clone_info->gradient.number_stops; clone_info->gradient.stops=(StopInfo *) AcquireQuantumMemory((size_t) number_stops,sizeof(*clone_info->gradient.stops)); if (clone_info->gradient.stops == (StopInfo *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDashPattern"); (void) CopyMagickMemory(clone_info->gradient.stops, draw_info->gradient.stops,(size_t) number_stops* sizeof(*clone_info->gradient.stops)); } if (draw_info->clip_mask != (char *) NULL) (void) CloneString(&clone_info->clip_mask,draw_info->clip_mask); clone_info->bounds=draw_info->bounds; clone_info->clip_units=draw_info->clip_units; clone_info->render=draw_info->render; clone_info->fill_alpha=draw_info->fill_alpha; clone_info->stroke_alpha=draw_info->stroke_alpha; clone_info->element_reference=draw_info->element_reference; clone_info->debug=IsEventLogging(); exception=DestroyExceptionInfo(exception); return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n v e r t P a t h T o P o l y g o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvertPathToPolygon() converts a path to the more efficient sorted % rendering form. % % The format of the ConvertPathToPolygon method is: % % PolygonInfo *ConvertPathToPolygon(const DrawInfo *draw_info, % const PathInfo *path_info) % % A description of each parameter follows: % % o Method ConvertPathToPolygon returns the path in a more efficient sorted % rendering form of type PolygonInfo. % % o draw_info: Specifies a pointer to an DrawInfo structure. % % o path_info: Specifies a pointer to an PathInfo structure. 
% % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int CompareEdges(const void *x,const void *y) { register const EdgeInfo *p, *q; /* Compare two edges. */ p=(const EdgeInfo *) x; q=(const EdgeInfo *) y; if ((p->points[0].y-DrawEpsilon) > q->points[0].y) return(1); if ((p->points[0].y+DrawEpsilon) < q->points[0].y) return(-1); if ((p->points[0].x-DrawEpsilon) > q->points[0].x) return(1); if ((p->points[0].x+DrawEpsilon) < q->points[0].x) return(-1); if (((p->points[1].x-p->points[0].x)*(q->points[1].y-q->points[0].y)- (p->points[1].y-p->points[0].y)*(q->points[1].x-q->points[0].x)) > 0.0) return(1); return(-1); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static void LogPolygonInfo(const PolygonInfo *polygon_info) { register EdgeInfo *p; register ssize_t i, j; (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin active-edge"); p=polygon_info->edges; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) { (void) LogMagickEvent(DrawEvent,GetMagickModule()," edge %.20g:", (double) i); (void) LogMagickEvent(DrawEvent,GetMagickModule()," direction: %s", p->direction != MagickFalse ? "down" : "up"); (void) LogMagickEvent(DrawEvent,GetMagickModule()," ghostline: %s", p->ghostline != MagickFalse ? 
"transparent" : "opaque"); (void) LogMagickEvent(DrawEvent,GetMagickModule(), " bounds: %g,%g - %g,%g",p->bounds.x1,p->bounds.y1, p->bounds.x2,p->bounds.y2); for (j=0; j < (ssize_t) p->number_points; j++) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %g,%g", p->points[j].x,p->points[j].y); p++; } (void) LogMagickEvent(DrawEvent,GetMagickModule()," end active-edge"); } static void ReversePoints(PointInfo *points,const size_t number_points) { PointInfo point; register ssize_t i; for (i=0; i < (ssize_t) (number_points >> 1); i++) { point=points[i]; points[i]=points[number_points-(i+1)]; points[number_points-(i+1)]=point; } } static PolygonInfo *ConvertPathToPolygon(const PathInfo *path_info) { long direction, next_direction; PointInfo point, *points; PolygonInfo *polygon_info; SegmentInfo bounds; register ssize_t i, n; MagickBooleanType ghostline; size_t edge, number_edges, number_points; /* Convert a path to the more efficient sorted rendering form. */ polygon_info=(PolygonInfo *) AcquireMagickMemory(sizeof(*polygon_info)); if (polygon_info == (PolygonInfo *) NULL) return((PolygonInfo *) NULL); number_edges=16; polygon_info->edges=(EdgeInfo *) AcquireQuantumMemory(number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); (void) ResetMagickMemory(polygon_info->edges,0,number_edges* sizeof(*polygon_info->edges)); direction=0; edge=0; ghostline=MagickFalse; n=0; number_points=0; points=(PointInfo *) NULL; (void) ResetMagickMemory(&point,0,sizeof(point)); (void) ResetMagickMemory(&bounds,0,sizeof(bounds)); for (i=0; path_info[i].code != EndCode; i++) { if ((path_info[i].code == MoveToCode) || (path_info[i].code == OpenCode) || (path_info[i].code == GhostlineCode)) { /* Move to. 
*/ if ((points != (PointInfo *) NULL) && (n >= 2)) { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; points=(PointInfo *) NULL; ghostline=MagickFalse; edge++; } if (points == (PointInfo *) NULL) { number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); } ghostline=path_info[i].code == GhostlineCode ? MagickTrue : MagickFalse; point=path_info[i].point; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; direction=0; n=1; continue; } /* Line to. */ next_direction=((path_info[i].point.y > point.y) || ((fabs(path_info[i].point.y-point.y) < DrawEpsilon) && (path_info[i].point.x > point.x))) ? 1 : -1; if ((points != (PointInfo *) NULL) && (direction != 0) && (direction != next_direction)) { /* New edge. 
*/ point=points[n-1]; if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; number_points=16; points=(PointInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); n=1; ghostline=MagickFalse; points[0]=point; bounds.x1=point.x; bounds.x2=point.x; edge++; } direction=next_direction; if (points == (PointInfo *) NULL) continue; if (n == (ssize_t) number_points) { number_points<<=1; points=(PointInfo *) ResizeQuantumMemory(points,(size_t) number_points, sizeof(*points)); if (points == (PointInfo *) NULL) return((PolygonInfo *) NULL); } point=path_info[i].point; points[n]=point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.x > bounds.x2) bounds.x2=point.x; n++; } if (points != (PointInfo *) NULL) { if (n < 2) points=(PointInfo *) RelinquishMagickMemory(points); else { if (edge == number_edges) { number_edges<<=1; polygon_info->edges=(EdgeInfo *) ResizeQuantumMemory( polygon_info->edges,(size_t) number_edges, sizeof(*polygon_info->edges)); if (polygon_info->edges == (EdgeInfo *) NULL) return((PolygonInfo *) NULL); } polygon_info->edges[edge].number_points=(size_t) n; polygon_info->edges[edge].scanline=(-1.0); polygon_info->edges[edge].highwater=0; polygon_info->edges[edge].ghostline=ghostline; 
polygon_info->edges[edge].direction=(ssize_t) (direction > 0); if (direction < 0) ReversePoints(points,(size_t) n); polygon_info->edges[edge].points=points; polygon_info->edges[edge].bounds=bounds; polygon_info->edges[edge].bounds.y1=points[0].y; polygon_info->edges[edge].bounds.y2=points[n-1].y; ghostline=MagickFalse; edge++; } } polygon_info->number_edges=edge; qsort(polygon_info->edges,(size_t) polygon_info->number_edges, sizeof(*polygon_info->edges),CompareEdges); if (IsEventLogging() != MagickFalse) LogPolygonInfo(polygon_info); return(polygon_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n v e r t P r i m i t i v e T o P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConvertPrimitiveToPath() converts a PrimitiveInfo structure into a vector % path structure. % % The format of the ConvertPrimitiveToPath method is: % % PathInfo *ConvertPrimitiveToPath(const DrawInfo *draw_info, % const PrimitiveInfo *primitive_info) % % A description of each parameter follows: % % o Method ConvertPrimitiveToPath returns a vector path structure of type % PathInfo. % % o draw_info: a structure of type DrawInfo. % % o primitive_info: Specifies a pointer to an PrimitiveInfo structure. % % */ static void LogPathInfo(const PathInfo *path_info) { register const PathInfo *p; (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin vector-path"); for (p=path_info; p->code != EndCode; p++) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " %g,%g %s",p->point.x,p->point.y,p->code == GhostlineCode ? "moveto ghostline" : p->code == OpenCode ? "moveto open" : p->code == MoveToCode ? "moveto" : p->code == LineToCode ? 
"lineto" : "?"); (void) LogMagickEvent(DrawEvent,GetMagickModule()," end vector-path"); } static PathInfo *ConvertPrimitiveToPath(const PrimitiveInfo *primitive_info) { PathInfo *path_info; PathInfoCode code; PointInfo p, q; register ssize_t i, n; ssize_t coordinates, start; /* Converts a PrimitiveInfo structure into a vector path structure. */ switch (primitive_info->primitive) { case AlphaPrimitive: case ColorPrimitive: case ImagePrimitive: case PointPrimitive: case TextPrimitive: return((PathInfo *) NULL); default: break; } for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ; path_info=(PathInfo *) AcquireQuantumMemory((size_t) (2UL*i+3UL), sizeof(*path_info)); if (path_info == (PathInfo *) NULL) return((PathInfo *) NULL); coordinates=0; n=0; p.x=(-1.0); p.y=(-1.0); q.x=(-1.0); q.y=(-1.0); start=0; for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { code=LineToCode; if (coordinates <= 0) { coordinates=(ssize_t) primitive_info[i].coordinates; p=primitive_info[i].point; start=n; code=MoveToCode; } coordinates--; /* Eliminate duplicate points. */ if ((i == 0) || (fabs(q.x-primitive_info[i].point.x) >= DrawEpsilon) || (fabs(q.y-primitive_info[i].point.y) >= DrawEpsilon)) { path_info[n].code=code; path_info[n].point=primitive_info[i].point; q=primitive_info[i].point; n++; } if (coordinates > 0) continue; if ((fabs(p.x-primitive_info[i].point.x) < DrawEpsilon) && (fabs(p.y-primitive_info[i].point.y) < DrawEpsilon)) continue; /* Mark the p point as open if it does not match the q. 
*/ path_info[start].code=OpenCode; path_info[n].code=GhostlineCode; path_info[n].point=primitive_info[i].point; n++; path_info[n].code=LineToCode; path_info[n].point=p; n++; } path_info[n].code=EndCode; path_info[n].point.x=0.0; path_info[n].point.y=0.0; if (IsEventLogging() != MagickFalse) LogPathInfo(path_info); return(path_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyDrawInfo() deallocates memory associated with an DrawInfo % structure. % % The format of the DestroyDrawInfo method is: % % DrawInfo *DestroyDrawInfo(DrawInfo *draw_info) % % A description of each parameter follows: % % o draw_info: the draw info. % */ MagickExport DrawInfo *DestroyDrawInfo(DrawInfo *draw_info) { if (draw_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(draw_info != (DrawInfo *) NULL); assert(draw_info->signature == MagickCoreSignature); if (draw_info->primitive != (char *) NULL) draw_info->primitive=DestroyString(draw_info->primitive); if (draw_info->text != (char *) NULL) draw_info->text=DestroyString(draw_info->text); if (draw_info->geometry != (char *) NULL) draw_info->geometry=DestroyString(draw_info->geometry); if (draw_info->fill_pattern != (Image *) NULL) draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern); if (draw_info->stroke_pattern != (Image *) NULL) draw_info->stroke_pattern=DestroyImage(draw_info->stroke_pattern); if (draw_info->font != (char *) NULL) draw_info->font=DestroyString(draw_info->font); if (draw_info->metrics != (char *) NULL) draw_info->metrics=DestroyString(draw_info->metrics); if (draw_info->family != (char *) NULL) draw_info->family=DestroyString(draw_info->family); if (draw_info->encoding != (char *) NULL) draw_info->encoding=DestroyString(draw_info->encoding); if (draw_info->density != (char *) NULL) 
draw_info->density=DestroyString(draw_info->density); if (draw_info->server_name != (char *) NULL) draw_info->server_name=(char *) RelinquishMagickMemory(draw_info->server_name); if (draw_info->dash_pattern != (double *) NULL) draw_info->dash_pattern=(double *) RelinquishMagickMemory( draw_info->dash_pattern); if (draw_info->gradient.stops != (StopInfo *) NULL) draw_info->gradient.stops=(StopInfo *) RelinquishMagickMemory( draw_info->gradient.stops); if (draw_info->clip_mask != (char *) NULL) draw_info->clip_mask=DestroyString(draw_info->clip_mask); draw_info->signature=(~MagickCoreSignature); draw_info=(DrawInfo *) RelinquishMagickMemory(draw_info); return(draw_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y E d g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyEdge() destroys the specified polygon edge. % % The format of the DestroyEdge method is: % % ssize_t DestroyEdge(PolygonInfo *polygon_info,const int edge) % % A description of each parameter follows: % % o polygon_info: Specifies a pointer to an PolygonInfo structure. % % o edge: the polygon edge number to destroy. % */ static size_t DestroyEdge(PolygonInfo *polygon_info, const size_t edge) { assert(edge < polygon_info->number_edges); polygon_info->edges[edge].points=(PointInfo *) RelinquishMagickMemory( polygon_info->edges[edge].points); polygon_info->number_edges--; if (edge < polygon_info->number_edges) (void) CopyMagickMemory(polygon_info->edges+edge,polygon_info->edges+edge+1, (size_t) (polygon_info->number_edges-edge)*sizeof(*polygon_info->edges)); return(polygon_info->number_edges); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P o l y g o n I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPolygonInfo() destroys the PolygonInfo data structure. 
% % The format of the DestroyPolygonInfo method is: % % PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info) % % A description of each parameter follows: % % o polygon_info: Specifies a pointer to an PolygonInfo structure. % */ static PolygonInfo *DestroyPolygonInfo(PolygonInfo *polygon_info) { register ssize_t i; for (i=0; i < (ssize_t) polygon_info->number_edges; i++) polygon_info->edges[i].points=(PointInfo *) RelinquishMagickMemory(polygon_info->edges[i].points); polygon_info->edges=(EdgeInfo *) RelinquishMagickMemory(polygon_info->edges); return((PolygonInfo *) RelinquishMagickMemory(polygon_info)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w A f f i n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawAffineImage() composites the source over the destination image as % dictated by the affine transform. % % The format of the DrawAffineImage method is: % % MagickBooleanType DrawAffineImage(Image *image,const Image *source, % const AffineMatrix *affine,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o source: the source image. % % o affine: the affine transform. % % o exception: return any errors or warnings in this structure. % */ static SegmentInfo AffineEdge(const Image *image,const AffineMatrix *affine, const double y,const SegmentInfo *edge) { double intercept, z; register double x; SegmentInfo inverse_edge; /* Determine left and right edges. 
*/ inverse_edge.x1=edge->x1; inverse_edge.y1=edge->y1; inverse_edge.x2=edge->x2; inverse_edge.y2=edge->y2; z=affine->ry*y+affine->tx; if (affine->sx >= DrawEpsilon) { intercept=(-z/affine->sx); x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z+(double) image->columns)/affine->sx; x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if (affine->sx < -DrawEpsilon) { intercept=(-z+(double) image->columns)/affine->sx; x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z/affine->sx); x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->columns)) { inverse_edge.x2=edge->x1; return(inverse_edge); } /* Determine top and bottom edges. */ z=affine->sy*y+affine->ty; if (affine->rx >= DrawEpsilon) { intercept=(-z/affine->rx); x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z+(double) image->rows)/affine->rx; x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if (affine->rx < -DrawEpsilon) { intercept=(-z+(double) image->rows)/affine->rx; x=intercept; if (x > inverse_edge.x1) inverse_edge.x1=x; intercept=(-z/affine->rx); x=intercept; if (x < inverse_edge.x2) inverse_edge.x2=x; } else if ((z < 0.0) || ((size_t) floor(z+0.5) >= image->rows)) { inverse_edge.x2=edge->x2; return(inverse_edge); } return(inverse_edge); } static AffineMatrix InverseAffineMatrix(const AffineMatrix *affine) { AffineMatrix inverse_affine; double determinant; determinant=PerceptibleReciprocal(affine->sx*affine->sy-affine->rx* affine->ry); inverse_affine.sx=determinant*affine->sy; inverse_affine.rx=determinant*(-affine->rx); inverse_affine.ry=determinant*(-affine->ry); inverse_affine.sy=determinant*affine->sx; inverse_affine.tx=(-affine->tx)*inverse_affine.sx-affine->ty* inverse_affine.ry; inverse_affine.ty=(-affine->tx)*inverse_affine.rx-affine->ty* inverse_affine.sy; return(inverse_affine); } MagickExport MagickBooleanType DrawAffineImage(Image *image, const 
Image *source,const AffineMatrix *affine,ExceptionInfo *exception) { AffineMatrix inverse_affine; CacheView *image_view, *source_view; MagickBooleanType status; PixelInfo zero; PointInfo extent[4], min, max; register ssize_t i; SegmentInfo edge; ssize_t start, stop, y; /* Determine bounding box. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(source != (const Image *) NULL); assert(source->signature == MagickCoreSignature); assert(affine != (AffineMatrix *) NULL); extent[0].x=0.0; extent[0].y=0.0; extent[1].x=(double) source->columns-1.0; extent[1].y=0.0; extent[2].x=(double) source->columns-1.0; extent[2].y=(double) source->rows-1.0; extent[3].x=0.0; extent[3].y=(double) source->rows-1.0; for (i=0; i < 4; i++) { PointInfo point; point=extent[i]; extent[i].x=point.x*affine->sx+point.y*affine->ry+affine->tx; extent[i].y=point.x*affine->rx+point.y*affine->sy+affine->ty; } min=extent[0]; max=extent[0]; for (i=1; i < 4; i++) { if (min.x > extent[i].x) min.x=extent[i].x; if (min.y > extent[i].y) min.y=extent[i].y; if (max.x < extent[i].x) max.x=extent[i].x; if (max.y < extent[i].y) max.y=extent[i].y; } /* Affine transform image. 
*/ if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; edge.x1=MagickMax(min.x,0.0); edge.y1=MagickMax(min.y,0.0); edge.x2=MagickMin(max.x,(double) image->columns-1.0); edge.y2=MagickMin(max.y,(double) image->rows-1.0); inverse_affine=InverseAffineMatrix(affine); GetPixelInfo(image,&zero); start=(ssize_t) ceil(edge.y1-0.5); stop=(ssize_t) floor(edge.y2+0.5); source_view=AcquireVirtualCacheView(source,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(source,image,stop-start,1) #endif for (y=start; y <= stop; y++) { PixelInfo composite, pixel; PointInfo point; register ssize_t x; register Quantum *magick_restrict q; SegmentInfo inverse_edge; ssize_t x_offset; inverse_edge=AffineEdge(source,&inverse_affine,(double) y,&edge); if (inverse_edge.x2 < inverse_edge.x1) continue; q=GetCacheViewAuthenticPixels(image_view,(ssize_t) ceil(inverse_edge.x1- 0.5),y,(size_t) (floor(inverse_edge.x2+0.5)-ceil(inverse_edge.x1-0.5)+1), 1,exception); if (q == (Quantum *) NULL) continue; pixel=zero; composite=zero; x_offset=0; for (x=(ssize_t) ceil(inverse_edge.x1-0.5); x <= (ssize_t) floor(inverse_edge.x2+0.5); x++) { point.x=(double) x*inverse_affine.sx+y*inverse_affine.ry+ inverse_affine.tx; point.y=(double) x*inverse_affine.rx+y*inverse_affine.sy+ inverse_affine.ty; status=InterpolatePixelInfo(source,source_view,UndefinedInterpolatePixel, point.x,point.y,&pixel,exception); if (status == MagickFalse) break; GetPixelInfoPixel(image,q,&composite); CompositePixelInfoOver(&pixel,pixel.alpha,&composite,composite.alpha, &composite); SetPixelViaPixelInfo(image,&composite,q); x_offset++; q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); 
return(status); }

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w   B o u n d i n g   R e c t a n g l e s                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawBoundingRectangles() draws the bounding rectangles on the image.  This
%  is only useful for developers debugging the rendering algorithm.
%
%  The format of the DrawBoundingRectangles method is:
%
%      void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
%        PolygonInfo *polygon_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o polygon_info: Specifies a pointer to a PolygonInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static void DrawBoundingRectangles(Image *image,const DrawInfo *draw_info,
  const PolygonInfo *polygon_info,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  double
    mid;

  PointInfo
    end,
    resolution,
    start;

  PrimitiveInfo
    primitive_info[6];

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    coordinates;

  /*
    Debug overlay: edge boxes stroked red (directed) or green, the overall
    bounds stroked blue, all with an opaque black fill.
  */
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) QueryColorCompliance("#000F",AllCompliance,&clone_info->fill,
    exception);
  resolution.x=96.0;
  resolution.y=96.0;
  if (clone_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      MagickStatusType
        flags;

      /* density is "RHOxSIGMA"; without an explicit sigma, use rho for y */
      flags=ParseGeometry(clone_info->density,&geometry_info);
      resolution.x=geometry_info.rho;
      resolution.y=geometry_info.sigma;
      if ((flags & SigmaValue) == MagickFalse)
        resolution.y=resolution.x;
    }
  /* half the stroke width, scaled by resolution and the current affine */
  mid=(resolution.x/96.0)*ExpandAffine(&clone_info->affine)*
    clone_info->stroke_width/2.0;
  bounds.x1=0.0;
  bounds.y1=0.0;
  bounds.x2=0.0;
  bounds.y2=0.0;
  if (polygon_info != (PolygonInfo *) NULL)
    {
      /* overall bounds = union of every edge's bounding box */
      bounds=polygon_info->edges[0].bounds;
      for (i=1; i < (ssize_t) polygon_info->number_edges; i++)
      {
        if (polygon_info->edges[i].bounds.x1 < (double) bounds.x1)
          bounds.x1=polygon_info->edges[i].bounds.x1;
        if (polygon_info->edges[i].bounds.y1 < (double) bounds.y1)
          bounds.y1=polygon_info->edges[i].bounds.y1;
        if (polygon_info->edges[i].bounds.x2 > (double) bounds.x2)
          bounds.x2=polygon_info->edges[i].bounds.x2;
        if (polygon_info->edges[i].bounds.y2 > (double) bounds.y2)
          bounds.y2=polygon_info->edges[i].bounds.y2;
      }
      /* pad by the stroke mid-width, then clamp to the image extent */
      bounds.x1-=mid;
      bounds.x1=bounds.x1 < 0.0 ? 0.0 : bounds.x1 >= (double) image->columns ?
        (double) image->columns-1 : bounds.x1;
      bounds.y1-=mid;
      bounds.y1=bounds.y1 < 0.0 ? 0.0 : bounds.y1 >= (double) image->rows ?
        (double) image->rows-1 : bounds.y1;
      bounds.x2+=mid;
      bounds.x2=bounds.x2 < 0.0 ? 0.0 : bounds.x2 >= (double) image->columns ?
        (double) image->columns-1 : bounds.x2;
      bounds.y2+=mid;
      bounds.y2=bounds.y2 < 0.0 ? 0.0 : bounds.y2 >= (double) image->rows ?
        (double) image->rows-1 : bounds.y2;
      for (i=0; i < (ssize_t) polygon_info->number_edges; i++)
      {
        /* red stroke marks a non-zero-direction edge, green the rest */
        if (polygon_info->edges[i].direction != 0)
          (void) QueryColorCompliance("red",AllCompliance,&clone_info->stroke,
            exception);
        else
          (void) QueryColorCompliance("green",AllCompliance,
            &clone_info->stroke,exception);
        start.x=(double) (polygon_info->edges[i].bounds.x1-mid);
        start.y=(double) (polygon_info->edges[i].bounds.y1-mid);
        end.x=(double) (polygon_info->edges[i].bounds.x2+mid);
        end.y=(double) (polygon_info->edges[i].bounds.y2+mid);
        primitive_info[0].primitive=RectanglePrimitive;
        TraceRectangle(primitive_info,start,end);
        primitive_info[0].method=ReplaceMethod;
        coordinates=(ssize_t) primitive_info[0].coordinates;
        primitive_info[coordinates].primitive=UndefinedPrimitive;
        (void) DrawPrimitive(image,clone_info,primitive_info,exception);
      }
    }
  /* finally, the overall bounding rectangle in blue */
  (void) QueryColorCompliance("blue",AllCompliance,&clone_info->stroke,
    exception);
  start.x=(double) (bounds.x1-mid);
  start.y=(double) (bounds.y1-mid);
  end.x=(double) (bounds.x2+mid);
  end.y=(double) (bounds.y2+mid);
  primitive_info[0].primitive=RectanglePrimitive;
  TraceRectangle(primitive_info,start,end);
  primitive_info[0].method=ReplaceMethod;
  coordinates=(ssize_t) primitive_info[0].coordinates;
  primitive_info[coordinates].primitive=UndefinedPrimitive;
  (void) DrawPrimitive(image,clone_info,primitive_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   C l i p   P a t h                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawClipPath() draws the clip path on the image mask.
%
%  The format of the DrawClipPath method is:
%
%      MagickBooleanType DrawClipPath(Image *image,const DrawInfo *draw_info,
%        const char *name,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o name: the name of the clip path.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType DrawClipPath(Image *image,
  const DrawInfo *draw_info,const char *name,ExceptionInfo *exception)
{
  char
    filename[MagickPathExtent];

  Image
    *clip_mask;

  const char
    *value;

  DrawInfo
    *clone_info;

  MagickStatusType
    status;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  /* the clip path MVG string is stored as an image artifact under `name' */
  (void) FormatLocaleString(filename,MagickPathExtent,"%s",name);
  value=GetImageArtifact(image,filename);
  if (value == (const char *) NULL)
    return(MagickFalse);
  /* render the path onto a transparent clone the size of the image */
  clip_mask=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (clip_mask == (Image *) NULL)
    return(MagickFalse);
  (void) QueryColorCompliance("#0000",AllCompliance,
    &clip_mask->background_color,exception);
  clip_mask->background_color.alpha=(MagickRealType) TransparentAlpha;
  (void) SetImageBackgroundColor(clip_mask,exception);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"\nbegin clip-path %s",
      draw_info->clip_mask);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  (void) CloneString(&clone_info->primitive,value);
  /* path interior is drawn in white; prevent recursive clipping */
  (void) QueryColorCompliance("#ffffff",AllCompliance,&clone_info->fill,
    exception);
  clone_info->clip_mask=(char *) NULL;
  /*
    NOTE(review): the mask is negated and installed as the read mask BEFORE
    DrawImage() renders the path onto `image' — order is load-bearing here.
  */
  status=NegateImage(clip_mask,MagickFalse,exception);
  (void) SetImageMask(image,ReadPixelMask,clip_mask,exception);
  clip_mask=DestroyImage(clip_mask);
  status&=DrawImage(image,clone_info,exception);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end clip-path");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w   D a s h   P o l y g o n                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawDashPolygon() draws a dashed polygon (line, rectangle, ellipse) on the
%  image while respecting the dash offset and dash pattern attributes.
%
%  The format of the DrawDashPolygon method is:
%
%      MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
%        const PrimitiveInfo *primitive_info,Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */
static MagickBooleanType DrawDashPolygon(const DrawInfo *draw_info,
  const PrimitiveInfo *primitive_info,Image *image,ExceptionInfo *exception)
{
  DrawInfo
    *clone_info;

  double
    length,
    maximum_length,
    offset,
    scale,
    total_length;

  MagickStatusType
    status;

  PrimitiveInfo
    *dash_polygon;

  register ssize_t
    i;

  register double
    dx,
    dy;

  size_t
    number_vertices;

  ssize_t
    j,
    n;

  assert(draw_info != (const DrawInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," begin draw-dash");
  /*
    Count vertices (list is terminated by UndefinedPrimitive) and allocate
    the worst-case dash segment buffer: 2*vertices+1 entries.
  */
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
  number_vertices=(size_t) i;
  dash_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t)
    (2UL*number_vertices+1UL),sizeof(*dash_polygon));
  if (dash_polygon == (PrimitiveInfo *) NULL)
    return(MagickFalse);
  clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
  clone_info->miterlimit=0;
  dash_polygon[0]=primitive_info[0];
  scale=ExpandAffine(&draw_info->affine);
  length=scale*(draw_info->dash_pattern[0]-0.5);
  offset=fabs(draw_info->dash_offset) >= DrawEpsilon ?
    scale*draw_info->dash_offset : 0.0;
  j=1;
  /*
    Consume the dash offset: advance through the pattern entries until the
    offset is exhausted; n is left on the active pattern entry, with the
    remaining `length' of that entry still to draw.
  */
  for (n=0; offset > 0.0; j=0)
  {
    if (draw_info->dash_pattern[n] <= 0.0)
      break;
    length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    if (offset > length)
      {
        offset-=length;
        n++;
        length=scale*(draw_info->dash_pattern[n]+0.5);
        continue;
      }
    if (offset < length)
      {
        length-=offset;
        offset=0.0;
        break;
      }
    offset=0.0;
    n++;
  }
  status=MagickTrue;
  maximum_length=0.0;
  total_length=0.0;
  /*
    Walk each segment of the polyline.  Even pattern indices (n & 1 == 0) are
    pen-down dashes, odd indices are gaps; unconsumed `length' carries over
    from one segment to the next.
  */
  for (i=1; (i < (ssize_t) number_vertices) && (length >= 0.0); i++)
  {
    dx=primitive_info[i].point.x-primitive_info[i-1].point.x;
    dy=primitive_info[i].point.y-primitive_info[i-1].point.y;
    maximum_length=hypot((double) dx,dy);
    if (fabs(length) < DrawEpsilon)
      {
        /* previous entry exactly exhausted: step to the next pattern entry */
        n++;
        if (fabs(draw_info->dash_pattern[n]) < DrawEpsilon)
          n=0;
        length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
      }
    for (total_length=0.0; (length >= 0.0) && (maximum_length >=
      (total_length+length)); )
    {
      total_length+=length;
      if ((n & 0x01) != 0)
        {
          /* gap: restart the pending dash at the interpolated point */
          dash_polygon[0]=primitive_info[0];
          dash_polygon[0].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length/maximum_length);
          dash_polygon[0].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length/maximum_length);
          j=1;
        }
      else
        {
          /* dash: close the current run and stroke it (bounds-checked) */
          if ((j+1) > (ssize_t) (2*number_vertices))
            break;
          dash_polygon[j]=primitive_info[i-1];
          dash_polygon[j].point.x=(double) (primitive_info[i-1].point.x+dx*
            total_length/maximum_length);
          dash_polygon[j].point.y=(double) (primitive_info[i-1].point.y+dy*
            total_length/maximum_length);
          dash_polygon[j].coordinates=1;
          j++;
          dash_polygon[0].coordinates=(size_t) j;
          dash_polygon[j].primitive=UndefinedPrimitive;
          status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
        }
      n++;
      if (fabs(draw_info->dash_pattern[n]) < DrawEpsilon)
        n=0;
      length=scale*(draw_info->dash_pattern[n]+(n == 0 ? -0.5 : 0.5));
    }
    /* carry the unconsumed remainder of this segment into the next one */
    length-=(maximum_length-total_length);
    if ((n & 0x01) != 0)
      continue;
    dash_polygon[j]=primitive_info[i];
    dash_polygon[j].coordinates=1;
    j++;
  }
  /* flush any dash still pending at the end of the path */
  if ((total_length <= maximum_length) && ((n & 0x01) == 0) && (j > 1))
    {
      dash_polygon[j]=primitive_info[i-1];
      dash_polygon[j].point.x+=DrawEpsilon;
      dash_polygon[j].point.y+=DrawEpsilon;
      dash_polygon[j].coordinates=1;
      j++;
      dash_polygon[0].coordinates=(size_t) j;
      dash_polygon[j].primitive=UndefinedPrimitive;
      status&=DrawStrokePolygon(image,clone_info,dash_polygon,exception);
    }
  dash_polygon=(PrimitiveInfo *) RelinquishMagickMemory(dash_polygon);
  clone_info=DestroyDrawInfo(clone_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-dash");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w   I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawImage() draws a graphic primitive on your image.  The primitive
%  may be represented as a string or filename.  Precede the filename with an
%  "at" sign (@) and the contents of the file are drawn on the image.  You
%  can affect how text is drawn by setting one or more members of the draw
%  info structure.
%
%  The format of the DrawImage method is:
%
%      MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  IsPoint() returns MagickTrue when `point' begins with parsable numeric
  text: it is *not* a point only when StringToDouble() consumed nothing
  (p == point) and the value is (near) zero.
*/
static inline MagickBooleanType IsPoint(const char *point)
{
  char
    *p;

  double
    value;

  value=StringToDouble(point,&p);
  return((fabs(value) < DrawEpsilon) && (p == point) ? MagickFalse :
    MagickTrue);
}

/* Record a single-coordinate primitive at the given point. */
static inline void TracePoint(PrimitiveInfo *primitive_info,
  const PointInfo point)
{
  primitive_info->coordinates=1;
  primitive_info->point=point;
}

MagickExport MagickBooleanType DrawImage(Image *image,const DrawInfo *draw_info,
  ExceptionInfo *exception)
{
#define RenderImageTag "Render/Image"

  AffineMatrix
    affine,
    current;

  char
    keyword[MagickPathExtent],
    geometry[MagickPathExtent],
    *next_token,
    pattern[MagickPathExtent],
    *primitive,
    *token;

  const char
    *q;

  double
    angle,
    factor,
    points_extent,
    primitive_extent;

  DrawInfo
    **graphic_context;

  MagickBooleanType
    proceed;

  MagickSizeType
    number_points;

  MagickStatusType
    status;

  PointInfo
    point;

  PrimitiveInfo
    *primitive_info;

  PrimitiveType
    primitive_type;

  register const char
    *p;

  register ssize_t
    i,
    x;

  SegmentInfo
    bounds;

  size_t
    extent,
    number_stops;

  ssize_t
    j,
    k,
    n;

  StopInfo
    *stops;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  if ((draw_info->primitive == (char *) NULL) ||
      (*draw_info->primitive == '\0'))
    return(MagickFalse);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"begin draw-image");
  /* a leading '@' means: read the MVG primitive string from a file */
  if (*draw_info->primitive != '@')
    primitive=AcquireString(draw_info->primitive);
  else
    primitive=FileToString(draw_info->primitive+1,~0UL,exception);
  if (primitive == (char *) NULL)
    return(MagickFalse);
  primitive_extent=(double) strlen(primitive);
  (void) SetImageArtifact(image,"MVG",primitive);
  n=0;
  number_stops=0;
  stops=(StopInfo *) NULL;
  /*
    Allocate primitive info memory.
*/ graphic_context=(DrawInfo **) AcquireMagickMemory(sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { primitive=DestroyString(primitive); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } number_points=6553; primitive_info=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_points, sizeof(*primitive_info)); if (primitive_info == (PrimitiveInfo *) NULL) { primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL,draw_info); graphic_context[n]->viewbox=image->page; if ((image->page.width == 0) || (image->page.height == 0)) { graphic_context[n]->viewbox.width=image->columns; graphic_context[n]->viewbox.height=image->rows; } token=AcquireString(primitive); extent=strlen(token)+MagickPathExtent; if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; for (q=primitive; *q != '\0'; ) { /* Interpret graphic primitive. */ GetNextToken(q,&q,MagickPathExtent,keyword); if (*keyword == '\0') break; if (*keyword == '#') { /* Comment. 
*/ while ((*q != '\n') && (*q != '\0')) q++; continue; } p=q-strlen(keyword)-1; primitive_type=UndefinedPrimitive; current=graphic_context[n]->affine; GetAffineMatrix(&affine); switch (*keyword) { case ';': break; case 'a': case 'A': { if (LocaleCompare("affine",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.rx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ry=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("alpha",keyword) == 0) { primitive_type=AlphaPrimitive; break; } if (LocaleCompare("arc",keyword) == 0) { primitive_type=ArcPrimitive; break; } status=MagickFalse; break; } case 'b': case 'B': { if (LocaleCompare("bezier",keyword) == 0) { primitive_type=BezierPrimitive; break; } if (LocaleCompare("border-color",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) QueryColorCompliance(token,AllCompliance, &graphic_context[n]->border_color,exception); break; } status=MagickFalse; break; } case 'c': case 'C': { if (LocaleCompare("clip-path",keyword) == 0) { /* Create clip mask. 
*/ GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->clip_mask,token); (void) DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); break; } if (LocaleCompare("clip-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) status=MagickFalse; else graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("clip-units",keyword) == 0) { ssize_t clip_units; GetNextToken(q,&q,extent,token); clip_units=ParseCommandOption(MagickClipPathOptions,MagickFalse, token); if (clip_units == -1) { status=MagickFalse; break; } graphic_context[n]->clip_units=(ClipPathUnits) clip_units; if (clip_units == ObjectBoundingBox) { GetAffineMatrix(&current); affine.sx=draw_info->bounds.x2; affine.sy=draw_info->bounds.y2; affine.tx=draw_info->bounds.x1; affine.ty=draw_info->bounds.y1; break; } break; } if (LocaleCompare("circle",keyword) == 0) { primitive_type=CirclePrimitive; break; } if (LocaleCompare("color",keyword) == 0) { primitive_type=ColorPrimitive; break; } status=MagickFalse; break; } case 'd': case 'D': { if (LocaleCompare("decorate",keyword) == 0) { ssize_t decorate; GetNextToken(q,&q,extent,token); decorate=ParseCommandOption(MagickDecorateOptions,MagickFalse, token); if (decorate == -1) status=MagickFalse; else graphic_context[n]->decorate=(DecorationType) decorate; break; } if (LocaleCompare("density",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->density,token); break; } if (LocaleCompare("direction",keyword) == 0) { ssize_t direction; GetNextToken(q,&q,extent,token); direction=ParseCommandOption(MagickDirectionOptions,MagickFalse, token); if (direction == -1) status=MagickFalse; else graphic_context[n]->direction=(DirectionType) direction; break; } status=MagickFalse; break; } case 'e': case 'E': { if (LocaleCompare("ellipse",keyword) == 0) { 
primitive_type=EllipsePrimitive; break; } if (LocaleCompare("encoding",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->encoding,token); break; } status=MagickFalse; break; } case 'f': case 'F': { if (LocaleCompare("fill",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->fill_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->fill,exception); if (graphic_context[n]->fill_alpha != OpaqueAlpha) graphic_context[n]->fill.alpha=graphic_context[n]->fill_alpha; if (status == MagickFalse) { ImageInfo *pattern_info; pattern_info=AcquireImageInfo(); (void) CopyMagickString(pattern_info->filename,token, MagickPathExtent); graphic_context[n]->fill_pattern=ReadImage(pattern_info, exception); CatchException(exception); pattern_info=DestroyImageInfo(pattern_info); } } break; } if (LocaleCompare("fill-opacity",keyword) == 0) { GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; graphic_context[n]->fill.alpha=(MagickRealType) (QuantumRange- ClampToQuantum((MagickRealType) QuantumRange*(1.0-factor* StringToDouble(token,&next_token)))); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("fill-rule",keyword) == 0) { ssize_t fill_rule; GetNextToken(q,&q,extent,token); fill_rule=ParseCommandOption(MagickFillRuleOptions,MagickFalse, token); if (fill_rule == -1) status=MagickFalse; else graphic_context[n]->fill_rule=(FillRule) fill_rule; break; } if (LocaleCompare("font",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->font,token); if (LocaleCompare("none",token) == 0) graphic_context[n]->font=(char *) RelinquishMagickMemory( graphic_context[n]->font); break; } if (LocaleCompare("font-family",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) CloneString(&graphic_context[n]->family,token); break; } if (LocaleCompare("font-size",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->pointsize=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("font-stretch",keyword) == 0) { ssize_t stretch; GetNextToken(q,&q,extent,token); stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,token); if (stretch == -1) status=MagickFalse; else graphic_context[n]->stretch=(StretchType) stretch; break; } if (LocaleCompare("font-style",keyword) == 0) { ssize_t style; GetNextToken(q,&q,extent,token); style=ParseCommandOption(MagickStyleOptions,MagickFalse,token); if (style == -1) status=MagickFalse; else graphic_context[n]->style=(StyleType) style; break; } if (LocaleCompare("font-weight",keyword) == 0) { ssize_t weight; GetNextToken(q,&q,extent,token); weight=ParseCommandOption(MagickWeightOptions,MagickFalse,token); if (weight == -1) weight=(ssize_t) StringToUnsignedLong(token); graphic_context[n]->weight=(size_t) weight; break; } status=MagickFalse; break; } case 'g': case 'G': { if 
(LocaleCompare("gradient-units",keyword) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gravity",keyword) == 0) { ssize_t gravity; GetNextToken(q,&q,extent,token); gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,token); if (gravity == -1) status=MagickFalse; else graphic_context[n]->gravity=(GravityType) gravity; break; } status=MagickFalse; break; } case 'i': case 'I': { if (LocaleCompare("image",keyword) == 0) { ssize_t compose; primitive_type=ImagePrimitive; GetNextToken(q,&q,extent,token); compose=ParseCommandOption(MagickComposeOptions,MagickFalse,token); if (compose == -1) status=MagickFalse; else graphic_context[n]->compose=(CompositeOperator) compose; break; } if (LocaleCompare("interline-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interline_spacing=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("interword-spacing",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->interword_spacing=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'k': case 'K': { if (LocaleCompare("kerning",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->kerning=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'l': case 'L': { if (LocaleCompare("line",keyword) == 0) primitive_type=LinePrimitive; else status=MagickFalse; break; } case 'o': case 'O': { if (LocaleCompare("offset",keyword) == 0) { GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("opacity",keyword) == 0) { GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 
0.01 : 1.0; graphic_context[n]->alpha=(Quantum) (QuantumRange*(1.0- (QuantumScale*graphic_context[n]->alpha*(1.0-factor* StringToDouble(token,&next_token))))); graphic_context[n]->fill_alpha=QuantumRange*(1.0-(QuantumScale* graphic_context[n]->fill_alpha*(1.0-factor*StringToDouble(token, &next_token)))); graphic_context[n]->stroke_alpha=QuantumRange*(1.0-(QuantumScale* graphic_context[n]->stroke_alpha*(1.0-factor*StringToDouble(token, &next_token)))); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'p': case 'P': { if (LocaleCompare("path",keyword) == 0) { primitive_type=PathPrimitive; break; } if (LocaleCompare("point",keyword) == 0) { primitive_type=PointPrimitive; break; } if (LocaleCompare("polyline",keyword) == 0) { primitive_type=PolylinePrimitive; break; } if (LocaleCompare("polygon",keyword) == 0) { primitive_type=PolygonPrimitive; break; } if (LocaleCompare("pop",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("clip-path",token) == 0) break; if (LocaleCompare("defs",token) == 0) break; if (LocaleCompare("gradient",token) == 0) break; if (LocaleCompare("graphic-context",token) == 0) { if (n <= 0) { (void) ThrowMagickException(exception,GetMagickModule(), DrawError,"UnbalancedGraphicContextPushPop","`%s'",token); status=MagickFalse; n=0; break; } if (graphic_context[n]->clip_mask != (char *) NULL) if (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0) (void) SetImageMask(image,ReadPixelMask,(Image *) NULL, exception); graphic_context[n]=DestroyDrawInfo(graphic_context[n]); n--; break; } if (LocaleCompare("pattern",token) == 0) break; status=MagickFalse; break; } if (LocaleCompare("push",keyword) == 0) { GetNextToken(q,&q,extent,token); if (LocaleCompare("clip-path",token) == 0) { char name[MagickPathExtent]; GetNextToken(q,&q,extent,token); (void) FormatLocaleString(name,MagickPathExtent,"%s",token); for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if 
(LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"clip-path") != 0) continue; break; } if ((size_t) (q-p-4+1) > 0) { (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) SetImageArtifact(image,name,token); } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("gradient",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent], type[MagickPathExtent]; SegmentInfo segment; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); GetNextToken(q,&q,extent,token); (void) CopyMagickString(type,token,MagickPathExtent); GetNextToken(q,&q,extent,token); segment.x1=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y1=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.x2=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); segment.y2=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; if (LocaleCompare(type,"radial") == 0) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); } for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"gradient") != 0) continue; break; } if ((size_t) (q-p-4+1) > 0) { (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); bounds.x1=graphic_context[n]->affine.sx*segment.x1+ graphic_context[n]->affine.ry*segment.y1+ graphic_context[n]->affine.tx; bounds.y1=graphic_context[n]->affine.rx*segment.x1+ graphic_context[n]->affine.sy*segment.y1+ graphic_context[n]->affine.ty; 
bounds.x2=graphic_context[n]->affine.sx*segment.x2+ graphic_context[n]->affine.ry*segment.y2+ graphic_context[n]->affine.tx; bounds.y2=graphic_context[n]->affine.rx*segment.x2+ graphic_context[n]->affine.sy*segment.y2+ graphic_context[n]->affine.ty; (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent,"%s-type", name); (void) SetImageArtifact(image,key,type); (void) FormatLocaleString(key,MagickPathExtent, "%s-geometry",name); (void) FormatLocaleString(geometry,MagickPathExtent, "%gx%g%+.15g%+.15g", MagickMax(fabs(bounds.x2-bounds.x1+1.0),1.0), MagickMax(fabs(bounds.y2-bounds.y1+1.0),1.0), bounds.x1,bounds.y1); (void) SetImageArtifact(image,key,geometry); } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("pattern",token) == 0) { char key[2*MagickPathExtent], name[MagickPathExtent]; RectangleInfo pattern_bounds; GetNextToken(q,&q,extent,token); (void) CopyMagickString(name,token,MagickPathExtent); GetNextToken(q,&q,extent,token); pattern_bounds.x=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); pattern_bounds.y=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); pattern_bounds.width=(size_t) floor(StringToDouble(token, &next_token)+0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); pattern_bounds.height=(size_t) floor(StringToDouble(token, &next_token)+0.5); if (token == next_token) status=MagickFalse; for (p=q; *q != '\0'; ) { GetNextToken(q,&q,extent,token); if (LocaleCompare(token,"pop") != 0) continue; GetNextToken(q,(const char **) NULL,extent,token); if (LocaleCompare(token,"pattern") != 0) continue; 
break; } if ((size_t) (q-p-4+1) > 0) { (void) CopyMagickString(token,p,(size_t) (q-p-4+1)); (void) FormatLocaleString(key,MagickPathExtent,"%s",name); (void) SetImageArtifact(image,key,token); (void) FormatLocaleString(key,MagickPathExtent, "%s-geometry",name); (void) FormatLocaleString(geometry,MagickPathExtent, "%.20gx%.20g%+.20g%+.20g",(double)pattern_bounds.width, (double)pattern_bounds.height,(double)pattern_bounds.x, (double)pattern_bounds.y); (void) SetImageArtifact(image,key,geometry); } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("graphic-context",token) == 0) { n++; graphic_context=(DrawInfo **) ResizeQuantumMemory( graphic_context,(size_t) (n+1),sizeof(*graphic_context)); if (graphic_context == (DrawInfo **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } graphic_context[n]=CloneDrawInfo((ImageInfo *) NULL, graphic_context[n-1]); break; } if (LocaleCompare("defs",token) == 0) break; status=MagickFalse; break; } status=MagickFalse; break; } case 'r': case 'R': { if (LocaleCompare("rectangle",keyword) == 0) { primitive_type=RectanglePrimitive; break; } if (LocaleCompare("rotate",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; affine.sx=cos(DegreesToRadians(fmod((double) angle,360.0))); affine.rx=sin(DegreesToRadians(fmod((double) angle,360.0))); affine.ry=(-sin(DegreesToRadians(fmod((double) angle,360.0)))); affine.sy=cos(DegreesToRadians(fmod((double) angle,360.0))); break; } if (LocaleCompare("roundRectangle",keyword) == 0) { primitive_type=RoundRectanglePrimitive; break; } status=MagickFalse; break; } case 's': case 'S': { if (LocaleCompare("scale",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.sx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') 
GetNextToken(q,&q,extent,token); affine.sy=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("skewX",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; affine.ry=sin(DegreesToRadians(angle)); break; } if (LocaleCompare("skewY",keyword) == 0) { GetNextToken(q,&q,extent,token); angle=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; affine.rx=(-tan(DegreesToRadians(angle)/2.0)); break; } if (LocaleCompare("stop-color",keyword) == 0) { PixelInfo stop_color; number_stops++; if (number_stops == 1) stops=(StopInfo *) AcquireQuantumMemory(2,sizeof(*stops)); else if (number_stops > 2) stops=(StopInfo *) ResizeQuantumMemory(stops,number_stops, sizeof(*stops)); if (stops == (StopInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } GetNextToken(q,&q,extent,token); (void) QueryColorCompliance(token,AllCompliance,&stop_color, exception); stops[number_stops-1].color=stop_color; GetNextToken(q,&q,extent,token); stops[number_stops-1].offset=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("stroke",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) FormatLocaleString(pattern,MagickPathExtent,"%s",token); if (GetImageArtifact(image,pattern) != (const char *) NULL) (void) DrawPatternPath(image,draw_info,token, &graphic_context[n]->stroke_pattern,exception); else { status&=QueryColorCompliance(token,AllCompliance, &graphic_context[n]->stroke,exception); if (graphic_context[n]->stroke_alpha != OpaqueAlpha) graphic_context[n]->stroke.alpha= graphic_context[n]->stroke_alpha; if (status == MagickFalse) { ImageInfo *pattern_info; pattern_info=AcquireImageInfo(); (void) CopyMagickString(pattern_info->filename,token, MagickPathExtent); 
graphic_context[n]->stroke_pattern=ReadImage(pattern_info, exception); CatchException(exception); pattern_info=DestroyImageInfo(pattern_info); } } break; } if (LocaleCompare("stroke-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_antialias= StringToLong(token) != 0 ? MagickTrue : MagickFalse; break; } if (LocaleCompare("stroke-dasharray",keyword) == 0) { if (graphic_context[n]->dash_pattern != (double *) NULL) graphic_context[n]->dash_pattern=(double *) RelinquishMagickMemory(graphic_context[n]->dash_pattern); if (IsPoint(q) != MagickFalse) { const char *r; r=q; GetNextToken(r,&r,extent,token); if (*token == ',') GetNextToken(r,&r,extent,token); for (x=0; IsPoint(token) != MagickFalse; x++) { GetNextToken(r,&r,extent,token); if (*token == ',') GetNextToken(r,&r,extent,token); } graphic_context[n]->dash_pattern=(double *) AcquireQuantumMemory((size_t) (2UL*x+2UL), sizeof(*graphic_context[n]->dash_pattern)); if (graphic_context[n]->dash_pattern == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); status=MagickFalse; break; } for (j=0; j < x; j++) { GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->dash_pattern[j]=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; if (graphic_context[n]->dash_pattern[j] < 0.0) status=MagickFalse; } if ((x & 0x01) != 0) for ( ; j < (2*x); j++) graphic_context[n]->dash_pattern[j]= graphic_context[n]->dash_pattern[j-x]; graphic_context[n]->dash_pattern[j]=0.0; break; } GetNextToken(q,&q,extent,token); break; } if (LocaleCompare("stroke-dashoffset",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->dash_offset=StringToDouble(token, &next_token); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("stroke-linecap",keyword) == 0) { ssize_t linecap; GetNextToken(q,&q,extent,token); 
linecap=ParseCommandOption(MagickLineCapOptions,MagickFalse,token); if (linecap == -1) status=MagickFalse; else graphic_context[n]->linecap=(LineCap) linecap; break; } if (LocaleCompare("stroke-linejoin",keyword) == 0) { ssize_t linejoin; GetNextToken(q,&q,extent,token); linejoin=ParseCommandOption(MagickLineJoinOptions,MagickFalse, token); if (linejoin == -1) status=MagickFalse; else graphic_context[n]->linejoin=(LineJoin) linejoin; break; } if (LocaleCompare("stroke-miterlimit",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->miterlimit=StringToUnsignedLong(token); break; } if (LocaleCompare("stroke-opacity",keyword) == 0) { GetNextToken(q,&q,extent,token); factor=strchr(token,'%') != (char *) NULL ? 0.01 : 1.0; graphic_context[n]->stroke.alpha=(MagickRealType) (QuantumRange- ClampToQuantum((MagickRealType) QuantumRange*(1.0-factor* StringToDouble(token,&next_token)))); if (token == next_token) status=MagickFalse; break; } if (LocaleCompare("stroke-width",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->stroke_width=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 't': case 'T': { if (LocaleCompare("text",keyword) == 0) { primitive_type=TextPrimitive; break; } if (LocaleCompare("text-align",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) status=MagickFalse; else graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-anchor",keyword) == 0) { ssize_t align; GetNextToken(q,&q,extent,token); align=ParseCommandOption(MagickAlignOptions,MagickFalse,token); if (align == -1) status=MagickFalse; else graphic_context[n]->align=(AlignType) align; break; } if (LocaleCompare("text-antialias",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->text_antialias=StringToLong(token) != 0 ? 
MagickTrue : MagickFalse; break; } if (LocaleCompare("text-undercolor",keyword) == 0) { GetNextToken(q,&q,extent,token); (void) QueryColorCompliance(token,AllCompliance, &graphic_context[n]->undercolor,exception); break; } if (LocaleCompare("translate",keyword) == 0) { GetNextToken(q,&q,extent,token); affine.tx=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); affine.ty=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } case 'v': case 'V': { if (LocaleCompare("viewbox",keyword) == 0) { GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.x=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.y=(ssize_t) ceil(StringToDouble(token, &next_token)-0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.width=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); graphic_context[n]->viewbox.height=(size_t) floor(StringToDouble( token,&next_token)+0.5); if (token == next_token) status=MagickFalse; break; } status=MagickFalse; break; } default: { status=MagickFalse; break; } } if (status == MagickFalse) break; if ((fabs(affine.sx-1.0) >= DrawEpsilon) || (fabs(affine.rx) >= DrawEpsilon) || (fabs(affine.ry) >= DrawEpsilon) || (fabs(affine.sy-1.0) >= DrawEpsilon) || (fabs(affine.tx) >= DrawEpsilon) || (fabs(affine.ty) >= DrawEpsilon)) { graphic_context[n]->affine.sx=current.sx*affine.sx+current.ry*affine.rx; graphic_context[n]->affine.rx=current.rx*affine.sx+current.sy*affine.rx; 
graphic_context[n]->affine.ry=current.sx*affine.ry+current.ry*affine.sy; graphic_context[n]->affine.sy=current.rx*affine.ry+current.sy*affine.sy; graphic_context[n]->affine.tx=current.sx*affine.tx+current.ry*affine.ty+ current.tx; graphic_context[n]->affine.ty=current.rx*affine.tx+current.sy*affine.ty+ current.ty; } if (primitive_type == UndefinedPrimitive) { if (*q == '\0') { if (number_stops > 1) { GradientType type; type=LinearGradient; if (draw_info->gradient.type == RadialGradient) type=RadialGradient; (void) GradientImage(image,type,PadSpread,stops,number_stops, exception); } if (number_stops > 0) stops=(StopInfo *) RelinquishMagickMemory(stops); } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p),p); continue; } /* Parse the primitive attributes. */ i=0; j=0; primitive_info[0].point.x=0.0; primitive_info[0].point.y=0.0; for (x=0; *q != '\0'; x++) { /* Define points. */ if (IsPoint(q) == MagickFalse) break; GetNextToken(q,&q,extent,token); point.x=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,&q,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); point.y=StringToDouble(token,&next_token); if (token == next_token) status=MagickFalse; GetNextToken(q,(const char **) NULL,extent,token); if (*token == ',') GetNextToken(q,&q,extent,token); primitive_info[i].primitive=primitive_type; primitive_info[i].point=point; primitive_info[i].coordinates=0; primitive_info[i].method=FloodfillMethod; i++; if (i < (ssize_t) number_points) continue; number_points<<=1; primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info, (size_t) number_points,sizeof(*primitive_info)); if ((primitive_info == (PrimitiveInfo *) NULL) || (number_points != (MagickSizeType) ((size_t) number_points))) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } primitive_info[j].primitive=primitive_type; primitive_info[j].coordinates=(size_t) x; 
primitive_info[j].method=FloodfillMethod; primitive_info[j].text=(char *) NULL; /* Circumscribe primitive within a circle. */ bounds.x1=primitive_info[j].point.x; bounds.y1=primitive_info[j].point.y; bounds.x2=primitive_info[j].point.x; bounds.y2=primitive_info[j].point.y; for (k=1; k < (ssize_t) primitive_info[j].coordinates; k++) { point=primitive_info[j+k].point; if (point.x < bounds.x1) bounds.x1=point.x; if (point.y < bounds.y1) bounds.y1=point.y; if (point.x > bounds.x2) bounds.x2=point.x; if (point.y > bounds.y2) bounds.y2=point.y; } /* Speculate how many points our primitive might consume. */ points_extent=(double) primitive_info[j].coordinates; switch (primitive_type) { case RectanglePrimitive: { points_extent*=5; break; } case RoundRectanglePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); points_extent*=5; points_extent+=2*ceil((double) MagickPI*radius)+6*BezierQuantum+360; break; } case BezierPrimitive: { if (primitive_info[j].coordinates > 107) (void) ThrowMagickException(exception,GetMagickModule(),DrawError, "TooManyBezierCoordinates","`%s'",token); points_extent=(double) (BezierQuantum*primitive_info[j].coordinates); break; } case PathPrimitive: { char *s, *t; GetNextToken(q,&q,extent,token); points_extent=1; t=token; for (s=token; *s != '\0'; s=t) { double value; value=StringToDouble(s,&t); (void) value; if (s == t) { t++; continue; } points_extent++; } points_extent=points_extent*BezierQuantum; break; } case CirclePrimitive: case ArcPrimitive: case EllipsePrimitive: { double alpha, beta, radius; alpha=bounds.x2-bounds.x1; beta=bounds.y2-bounds.y1; radius=hypot((double) alpha,(double) beta); points_extent=2*ceil((double) MagickPI*radius)+6*BezierQuantum+360; break; } default: break; } if (((double) ((size_t) points_extent)) < points_extent) { (void) ThrowMagickException(exception,GetMagickModule(), 
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); break; } if (((MagickSizeType) (i+points_extent)) >= number_points) { /* Resize based on speculative points required by primitive. */ number_points+=points_extent+1; primitive_info=(PrimitiveInfo *) ResizeQuantumMemory(primitive_info, (size_t) number_points,sizeof(*primitive_info)); if ((primitive_info == (PrimitiveInfo *) NULL) || (number_points != (MagickSizeType) ((size_t) number_points))) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); break; } } switch (primitive_type) { case PointPrimitive: default: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } TracePoint(primitive_info+j,primitive_info[j].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case LinePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } TraceLine(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RectanglePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } TraceRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case RoundRectanglePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } TraceRoundRectangle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case ArcPrimitive: { if (primitive_info[j].coordinates != 3) { primitive_type=UndefinedPrimitive; break; } TraceArc(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case EllipsePrimitive: { if (primitive_info[j].coordinates != 3) { status=MagickFalse; break; } 
TraceEllipse(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point,primitive_info[j+2].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case CirclePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } TraceCircle(primitive_info+j,primitive_info[j].point, primitive_info[j+1].point); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PolylinePrimitive: break; case PolygonPrimitive: { primitive_info[i]=primitive_info[j]; primitive_info[i].coordinates=0; primitive_info[j].coordinates++; i++; break; } case BezierPrimitive: { if (primitive_info[j].coordinates < 3) { status=MagickFalse; break; } TraceBezier(primitive_info+j,primitive_info[j].coordinates); i=(ssize_t) (j+primitive_info[j].coordinates); break; } case PathPrimitive: { i=(ssize_t) (j+TracePath(primitive_info+j,token)); break; } case AlphaPrimitive: case ColorPrimitive: { ssize_t method; if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); method=ParseCommandOption(MagickMethodOptions,MagickFalse,token); if (method == -1) status=MagickFalse; else primitive_info[j].method=(PaintMethod) method; break; } case TextPrimitive: { if (primitive_info[j].coordinates != 1) { status=MagickFalse; break; } if (*token != ',') GetNextToken(q,&q,extent,token); primitive_info[j].text=AcquireString(token); break; } case ImagePrimitive: { if (primitive_info[j].coordinates != 2) { status=MagickFalse; break; } GetNextToken(q,&q,extent,token); primitive_info[j].text=AcquireString(token); break; } } if (primitive_info == (PrimitiveInfo *) NULL) break; if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule()," %.*s",(int) (q-p),p); if (status == MagickFalse) break; primitive_info[i].primitive=UndefinedPrimitive; if (i == 0) continue; /* Transform points. 
*/ for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) { point=primitive_info[i].point; primitive_info[i].point.x=graphic_context[n]->affine.sx*point.x+ graphic_context[n]->affine.ry*point.y+graphic_context[n]->affine.tx; primitive_info[i].point.y=graphic_context[n]->affine.rx*point.x+ graphic_context[n]->affine.sy*point.y+graphic_context[n]->affine.ty; point=primitive_info[i].point; if (point.x < graphic_context[n]->bounds.x1) graphic_context[n]->bounds.x1=point.x; if (point.y < graphic_context[n]->bounds.y1) graphic_context[n]->bounds.y1=point.y; if (point.x > graphic_context[n]->bounds.x2) graphic_context[n]->bounds.x2=point.x; if (point.y > graphic_context[n]->bounds.y2) graphic_context[n]->bounds.y2=point.y; if (primitive_info[i].primitive == ImagePrimitive) break; if (i >= (ssize_t) number_points) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); } if (graphic_context[n]->render != MagickFalse) { if ((n != 0) && (graphic_context[n]->clip_mask != (char *) NULL) && (LocaleCompare(graphic_context[n]->clip_mask, graphic_context[n-1]->clip_mask) != 0)) status&=DrawClipPath(image,graphic_context[n], graphic_context[n]->clip_mask,exception); status&=DrawPrimitive(image,graphic_context[n],primitive_info, exception); } if (primitive_info->text != (char *) NULL) primitive_info->text=(char *) RelinquishMagickMemory( primitive_info->text); proceed=SetImageProgress(image,RenderImageTag,q-primitive,(MagickSizeType) primitive_extent); if (proceed == MagickFalse) break; if (status == 0) break; } if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(),"end draw-image"); /* Relinquish resources. 
*/ token=DestroyString(token); if (primitive_info != (PrimitiveInfo *) NULL) primitive_info=(PrimitiveInfo *) RelinquishMagickMemory(primitive_info); primitive=DestroyString(primitive); for ( ; n >= 0; n--) graphic_context[n]=DestroyDrawInfo(graphic_context[n]); graphic_context=(DrawInfo **) RelinquishMagickMemory(graphic_context); if (status == MagickFalse) ThrowBinaryException(DrawError,"NonconformingDrawingPrimitiveDefinition", keyword); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D r a w G r a d i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawGradientImage() draws a linear gradient on the image. % % The format of the DrawGradientImage method is: % % MagickBooleanType DrawGradientImage(Image *image, % const DrawInfo *draw_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  GetStopColorOffset() returns the stop offset of point (x,y) along the
  gradient.  For linear gradients this is the (unnormalized) scalar
  projection of the point onto the gradient vector; the caller divides by
  the vector length to normalize.  For radial gradients it is the distance
  from the gradient center, scaled by the elliptical radii (except for
  repeat spread, which uses the raw Euclidean distance).
*/
static inline double GetStopColorOffset(const GradientInfo *gradient,
  const ssize_t x,const ssize_t y)
{
  switch (gradient->type)
  {
    case UndefinedGradient:
    case LinearGradient:
    {
      double
        gamma,
        length,
        offset,
        scale;

      PointInfo
        p,
        q;

      const SegmentInfo
        *gradient_vector;

      /*
        Project (x,y) onto the gradient vector.  PerceptibleReciprocal()
        guards the division when the vector or the point distance
        degenerates to zero.
      */
      gradient_vector=(&gradient->gradient_vector);
      p.x=gradient_vector->x2-gradient_vector->x1;
      p.y=gradient_vector->y2-gradient_vector->y1;
      q.x=(double) x-gradient_vector->x1;
      q.y=(double) y-gradient_vector->y1;
      length=sqrt(q.x*q.x+q.y*q.y);
      gamma=sqrt(p.x*p.x+p.y*p.y)*length;
      gamma=PerceptibleReciprocal(gamma);
      scale=p.x*q.x+p.y*q.y;
      offset=gamma*scale*length;
      return(offset);
    }
    case RadialGradient:
    {
      PointInfo
        v;

      if (gradient->spread == RepeatSpread)
        {
          /*
            Repeat spread: raw distance from the center; the caller wraps
            it with fmod() against the gradient radius.
          */
          v.x=(double) x-gradient->center.x;
          v.y=(double) y-gradient->center.y;
          return(sqrt(v.x*v.x+v.y*v.y));
        }
      /*
        Rotate (x,y) into the ellipse frame by the gradient angle and
        normalize each axis by its radius.
      */
      v.x=(double) (((x-gradient->center.x)*cos(DegreesToRadians(
        gradient->angle)))+((y-gradient->center.y)*sin(DegreesToRadians(
        gradient->angle))))/gradient->radii.x;
      v.y=(double) (((x-gradient->center.x)*sin(DegreesToRadians(
        gradient->angle)))-((y-gradient->center.y)*cos(DegreesToRadians(
        gradient->angle))))/gradient->radii.y;
      return(sqrt(v.x*v.x+v.y*v.y));
    }
  }
  return(0.0);
}

/*
  qsort() comparator: orders gradient stops by ascending offset; offsets
  within DrawEpsilon of each other compare as equal.
*/
static int StopInfoCompare(const void *x,const void *y)
{
  StopInfo
    *stop_1,
    *stop_2;

  stop_1=(StopInfo *) x;
  stop_2=(StopInfo *) y;
  if (stop_1->offset > stop_2->offset)
    return(1);
  if (fabs(stop_1->offset-stop_2->offset) <= DrawEpsilon)
    return(0);
  return(-1);
}

MagickExport MagickBooleanType DrawGradientImage(Image *image,
  const DrawInfo *draw_info,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const GradientInfo
    *gradient;

  const SegmentInfo
    *gradient_vector;

  double
    length;

  MagickBooleanType
    status;

  PixelInfo
    zero;

  PointInfo
    point;

  RectangleInfo
    bounding_box;

  ssize_t
    y;

  /*
    Draw linear or radial gradient on image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (const DrawInfo *) NULL);
  gradient=(&draw_info->gradient);
  /*
    Sort the stops by offset so the per-pixel search below can scan them
    linearly and blend between adjacent stops.
  */
  qsort(gradient->stops,gradient->number_stops,sizeof(StopInfo),
    StopInfoCompare);
  gradient_vector=(&gradient->gradient_vector);
  point.x=gradient_vector->x2-gradient_vector->x1;
  point.y=gradient_vector->y2-gradient_vector->y1;
  length=sqrt(point.x*point.x+point.y*point.y);
  bounding_box=gradient->bounding_box;
  status=MagickTrue;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,bounding_box.height-bounding_box.y,1)
#endif
  for (y=bounding_box.y; y < (ssize_t) bounding_box.height; y++)
  {
    PixelInfo
      composite,
      pixel;

    double
      alpha,
      offset;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    composite=zero;
    /*
      Seed the offset at x == 0; linear offsets are normalized by the
      gradient vector length, radial offsets are already normalized.
    */
    offset=GetStopColorOffset(gradient,0,y);
    if (gradient->type != RadialGradient)
      offset/=length;
    for (x=bounding_box.x; x < (ssize_t) bounding_box.width; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      switch (gradient->spread)
      {
        case UndefinedSpread:
        case PadSpread:
        {
          /*
            Pad spread: clamp offsets outside [0,1] to the first/last stop.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset/=length;
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if ((offset < 0.0) || (i == 0))
            composite=gradient->stops[0].color;
          else
            if ((offset > 1.0) || (i == (ssize_t) gradient->number_stops))
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                /*
                  Blend linearly between the bracketing stops i-1 and i.
                */
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case ReflectSpread:
        {
          /*
            Reflect spread: mirror the offset into [0,1] on alternate
            periods.
          */
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type != RadialGradient)
                offset/=length;
            }
          if (offset < 0.0)
            offset=(-offset);
          if ((ssize_t) fmod(offset,2.0) == 0)
            offset=fmod(offset,1.0);
          else
            offset=1.0-fmod(offset,1.0);
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
        case RepeatSpread:
        {
          MagickBooleanType
            antialias;

          double
            repeat;

          /*
            Repeat spread: wrap the raw offset into one gradient period;
            `antialias' marks pixels within one unit of the wrap seam so
            the seam can be blended between the outermost stops.
          */
          antialias=MagickFalse;
          repeat=0.0;
          if ((x != (ssize_t) ceil(gradient_vector->x1-0.5)) ||
              (y != (ssize_t) ceil(gradient_vector->y1-0.5)))
            {
              offset=GetStopColorOffset(gradient,x,y);
              if (gradient->type == LinearGradient)
                {
                  repeat=fmod(offset,length);
                  if (repeat < 0.0)
                    repeat=length-fmod(-repeat,length);
                  else
                    repeat=fmod(offset,length);
                  antialias=(repeat < length) && ((repeat+1.0) > length) ?
                    MagickTrue : MagickFalse;
                  offset=repeat/length;
                }
              else
                {
                  repeat=fmod(offset,gradient->radius);
                  if (repeat < 0.0)
                    repeat=gradient->radius-fmod(-repeat,gradient->radius);
                  else
                    repeat=fmod(offset,gradient->radius);
                  antialias=repeat+1.0 > gradient->radius ? MagickTrue :
                    MagickFalse;
                  offset=repeat/gradient->radius;
                }
            }
          for (i=0; i < (ssize_t) gradient->number_stops; i++)
            if (offset < gradient->stops[i].offset)
              break;
          if (i == 0)
            composite=gradient->stops[0].color;
          else
            if (i == (ssize_t) gradient->number_stops)
              composite=gradient->stops[gradient->number_stops-1].color;
            else
              {
                j=i;
                i--;
                alpha=(offset-gradient->stops[i].offset)/
                  (gradient->stops[j].offset-gradient->stops[i].offset);
                if (antialias != MagickFalse)
                  {
                    /*
                      At the seam, blend the last stop back into the first.
                    */
                    if (gradient->type == LinearGradient)
                      alpha=length-repeat;
                    else
                      alpha=gradient->radius-repeat;
                    i=0;
                    j=(ssize_t) gradient->number_stops-1L;
                  }
                CompositePixelInfoBlend(&gradient->stops[i].color,1.0-alpha,
                  &gradient->stops[j].color,alpha,&composite);
              }
          break;
        }
      }
      CompositePixelInfoOver(&composite,composite.alpha,&pixel,pixel.alpha,
        &pixel);
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P a t t e r n P a t h                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPatternPath() draws a pattern.
%
%  The format of the DrawPatternPath method is:
%
%      MagickBooleanType DrawPatternPath(Image *image,const DrawInfo *draw_info,
%        const char *name,Image **pattern,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o name: the pattern name.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType DrawPatternPath(Image *image, const DrawInfo *draw_info,const char *name,Image **pattern, ExceptionInfo *exception) { char property[MagickPathExtent]; const char *geometry, *path, *type; DrawInfo *clone_info; ImageInfo *image_info; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(draw_info != (const DrawInfo *) NULL); assert(name != (const char *) NULL); (void) FormatLocaleString(property,MagickPathExtent,"%s",name); path=GetImageArtifact(image,property); if (path == (const char *) NULL) return(MagickFalse); (void) FormatLocaleString(property,MagickPathExtent,"%s-geometry",name); geometry=GetImageArtifact(image,property); if (geometry == (const char *) NULL) return(MagickFalse); if ((*pattern) != (Image *) NULL) *pattern=DestroyImage(*pattern); image_info=AcquireImageInfo(); image_info->size=AcquireString(geometry); *pattern=AcquireImage(image_info,exception); image_info=DestroyImageInfo(image_info); (void) QueryColorCompliance("#000000ff",AllCompliance, &(*pattern)->background_color,exception); (void) SetImageBackgroundColor(*pattern,exception); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), "begin pattern-path %s %s",name,geometry); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->fill_pattern=NewImageList(); clone_info->stroke_pattern=NewImageList(); (void) FormatLocaleString(property,MagickPathExtent,"%s-type",name); type=GetImageArtifact(image,property); if (type != (const char *) NULL) clone_info->gradient.type=(GradientType) ParseCommandOption( MagickGradientOptions,MagickFalse,type); (void) CloneString(&clone_info->primitive,path); status=DrawImage(*pattern,clone_info,exception); clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) 
LogMagickEvent(DrawEvent,GetMagickModule(),"end pattern-path"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D r a w P o l y g o n P r i m i t i v e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DrawPolygonPrimitive() draws a polygon on the image. % % The format of the DrawPolygonPrimitive method is: % % MagickBooleanType DrawPolygonPrimitive(Image *image, % const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o draw_info: the draw info. % % o primitive_info: Specifies a pointer to a PrimitiveInfo structure. % % o exception: return any errors or warnings in this structure. % */ static PolygonInfo **DestroyPolygonThreadSet(PolygonInfo **polygon_info) { register ssize_t i; assert(polygon_info != (PolygonInfo **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (polygon_info[i] != (PolygonInfo *) NULL) polygon_info[i]=DestroyPolygonInfo(polygon_info[i]); polygon_info=(PolygonInfo **) RelinquishMagickMemory(polygon_info); return(polygon_info); } static PolygonInfo **AcquirePolygonThreadSet( const PrimitiveInfo *primitive_info) { PathInfo *magick_restrict path_info; PolygonInfo **polygon_info; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); polygon_info=(PolygonInfo **) AcquireQuantumMemory(number_threads, sizeof(*polygon_info)); if (polygon_info == (PolygonInfo **) NULL) return((PolygonInfo **) NULL); (void) ResetMagickMemory(polygon_info,0,number_threads*sizeof(*polygon_info)); path_info=ConvertPrimitiveToPath(primitive_info); if (path_info == (PathInfo *) NULL) return(DestroyPolygonThreadSet(polygon_info)); for (i=0; i < (ssize_t) number_threads; i++) { polygon_info[i]=ConvertPathToPolygon(path_info); if (polygon_info[i] == (PolygonInfo *) NULL) 
return(DestroyPolygonThreadSet(polygon_info)); } path_info=(PathInfo *) RelinquishMagickMemory(path_info); return(polygon_info); } static double GetFillAlpha(PolygonInfo *polygon_info,const double mid, const MagickBooleanType fill,const FillRule fill_rule,const ssize_t x, const ssize_t y,double *stroke_alpha) { double alpha, beta, distance, subpath_alpha; PointInfo delta; register const PointInfo *q; register EdgeInfo *p; register ssize_t i; ssize_t j, winding_number; /* Compute fill & stroke opacity for this (x,y) point. */ *stroke_alpha=0.0; subpath_alpha=0.0; p=polygon_info->edges; for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++) { if ((double) y <= (p->bounds.y1-mid-0.5)) break; if ((double) y > (p->bounds.y2+mid+0.5)) { (void) DestroyEdge(polygon_info,(size_t) j); continue; } if (((double) x <= (p->bounds.x1-mid-0.5)) || ((double) x > (p->bounds.x2+mid+0.5))) continue; i=(ssize_t) MagickMax((double) p->highwater,1.0); for ( ; i < (ssize_t) p->number_points; i++) { if ((double) y <= (p->points[i-1].y-mid-0.5)) break; if ((double) y > (p->points[i].y+mid+0.5)) continue; if (p->scanline != (double) y) { p->scanline=(double) y; p->highwater=(size_t) i; } /* Compute distance between a point and an edge. */ q=p->points+i-1; delta.x=(q+1)->x-q->x; delta.y=(q+1)->y-q->y; beta=delta.x*(x-q->x)+delta.y*(y-q->y); if (beta < 0.0) { delta.x=(double) x-q->x; delta.y=(double) y-q->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=delta.x*delta.x+delta.y*delta.y; if (beta > alpha) { delta.x=(double) x-(q+1)->x; delta.y=(double) y-(q+1)->y; distance=delta.x*delta.x+delta.y*delta.y; } else { alpha=1.0/alpha; beta=delta.x*(y-q->y)-delta.y*(x-q->x); distance=alpha*beta*beta; } } /* Compute stroke & subpath opacity. 
      */
      beta=0.0;
      if (p->ghostline == MagickFalse)
        {
          /*
            Stroke opacity: full coverage within (mid-0.5) of the edge,
            feathered (the 0.25 terms) out to (mid+0.5) for antialiasing.
            Ghostlines (internal joins) are never stroked.
          */
          alpha=mid+0.5;
          if ((*stroke_alpha < 1.0) &&
              (distance <= ((alpha+0.25)*(alpha+0.25))))
            {
              alpha=mid-0.5;
              if (distance <= ((alpha+0.25)*(alpha+0.25)))
                *stroke_alpha=1.0;
              else
                {
                  beta=1.0;
                  if (fabs(distance-1.0) >= DrawEpsilon)
                    beta=sqrt((double) distance);
                  alpha=beta-mid-0.5;
                  if (*stroke_alpha < ((alpha-0.25)*(alpha-0.25)))
                    *stroke_alpha=(alpha-0.25)*(alpha-0.25);
                }
            }
        }
      if ((fill == MagickFalse) || (distance > 1.0) ||
          (subpath_alpha >= 1.0))
        continue;
      if (distance <= 0.0)
        {
          subpath_alpha=1.0;
          continue;
        }
      if (distance > 1.0)
        continue;
      /*
        Partial edge coverage: fade the fill in over one pixel of distance.
      */
      if (fabs(beta) < DrawEpsilon)
        {
          beta=1.0;
          if (fabs(distance-1.0) >= DrawEpsilon)
            beta=sqrt(distance);
        }
      alpha=beta-1.0;
      if (subpath_alpha < (alpha*alpha))
        subpath_alpha=alpha*alpha;
    }
  }
  /*
    Compute fill opacity.
  */
  if (fill == MagickFalse)
    return(0.0);
  if (subpath_alpha >= 1.0)
    return(1.0);
  /*
    Determine winding number: count signed crossings of edges to the left
    of (x,y); edge direction decides the sign.
  */
  winding_number=0;
  p=polygon_info->edges;
  for (j=0; j < (ssize_t) polygon_info->number_edges; j++, p++)
  {
    if ((double) y <= p->bounds.y1)
      break;
    if (((double) y > p->bounds.y2) || ((double) x <= p->bounds.x1))
      continue;
    if ((double) x > p->bounds.x2)
      {
        winding_number+=p->direction ? 1 : -1;
        continue;
      }
    i=(ssize_t) MagickMax((double) p->highwater,1.0);
    for ( ; i < (ssize_t) p->number_points; i++)
      if ((double) y <= p->points[i].y)
        break;
    q=p->points+i-1;
    /*
      Cross product decides which side of segment [q,q+1] the point is on.
    */
    if ((((q+1)->x-q->x)*(y-q->y)) <= (((q+1)->y-q->y)*(x-q->x)))
      winding_number+=p->direction ? 1 : -1;
  }
  /*
    Even-odd rule: inside when the winding number is odd; non-zero rule:
    inside when it is non-zero.  Otherwise fall back to the antialiased
    edge coverage accumulated above.
  */
  if (fill_rule != NonZeroRule)
    {
      if ((MagickAbsoluteValue(winding_number) & 0x01) != 0)
        return(1.0);
    }
  else
    if (MagickAbsoluteValue(winding_number) != 0)
      return(1.0);
  return(subpath_alpha);
}

static MagickBooleanType DrawPolygonPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    fill,
    status;

  double
    mid;

  PolygonInfo
    **magick_restrict polygon_info;

  register EdgeInfo
    *p;

  register ssize_t
    i;

  SegmentInfo
    bounds;

  ssize_t
    start_y,
    stop_y,
    y;

  /*
    Compute bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  assert(primitive_info != (PrimitiveInfo *) NULL);
  if (primitive_info->coordinates == 0)
    return(MagickTrue);
  /*
    One polygon edge table per thread so the parallel loops below can
    mutate highwater/scanline state without locking.
  */
  polygon_info=AcquirePolygonThreadSet(primitive_info);
  if (polygon_info == (PolygonInfo **) NULL)
    return(MagickFalse);
DisableMSCWarning(4127)
  /* Debug hook: flip `if (0)' to visualize the polygon bounding boxes. */
  if (0)
    DrawBoundingRectangles(image,draw_info,polygon_info[0],exception);
RestoreMSCWarning
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    begin draw-polygon");
  fill=(primitive_info->method == FillToBorderMethod) ||
    (primitive_info->method == FloodfillMethod) ? MagickTrue : MagickFalse;
  /* mid is half the stroke width in device space. */
  mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
  bounds=polygon_info[0]->edges[0].bounds;
  for (i=1; i < (ssize_t) polygon_info[0]->number_edges; i++)
  {
    p=polygon_info[0]->edges+i;
    if (p->bounds.x1 < bounds.x1)
      bounds.x1=p->bounds.x1;
    if (p->bounds.y1 < bounds.y1)
      bounds.y1=p->bounds.y1;
    if (p->bounds.x2 > bounds.x2)
      bounds.x2=p->bounds.x2;
    if (p->bounds.y2 > bounds.y2)
      bounds.y2=p->bounds.y2;
  }
  /*
    Grow the bounds by the stroke radius, then clamp to the image frame.
  */
  bounds.x1-=(mid+1.0);
  bounds.x1=bounds.x1 < 0.0 ? 0.0 :
    (size_t) ceil(bounds.x1-0.5) >= image->columns ?
    (double) image->columns-1 : bounds.x1;
  bounds.y1-=(mid+1.0);
  bounds.y1=bounds.y1 < 0.0 ? 0.0 :
    (size_t) ceil(bounds.y1-0.5) >= image->rows ?
    (double) image->rows-1 : bounds.y1;
  bounds.x2+=(mid+1.0);
  bounds.x2=bounds.x2 < 0.0 ? 0.0 :
    (size_t) floor(bounds.x2+0.5) >= image->columns ?
    (double) image->columns-1 : bounds.x2;
  bounds.y2+=(mid+1.0);
  bounds.y2=bounds.y2 < 0.0 ? 0.0 :
    (size_t) floor(bounds.y2+0.5) >= image->rows ?
    (double) image->rows-1 : bounds.y2;
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
  if ((primitive_info->coordinates == 1) ||
      (polygon_info[0]->number_edges == 0))
    {
      /*
        Draw point.
      */
      start_y=(ssize_t) ceil(bounds.y1-0.5);
      stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static,4) shared(status) \
        magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
      for (y=start_y; y <= stop_y; y++)
      {
        MagickBooleanType
          sync;

        PixelInfo
          pixel;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        ssize_t
          start_x,
          stop_x;

        if (status == MagickFalse)
          continue;
        start_x=(ssize_t) ceil(bounds.x1-0.5);
        stop_x=(ssize_t) floor(bounds.x2+0.5);
        x=start_x;
        q=GetCacheViewAuthenticPixels(image_view,x,y,(size_t) (stop_x-x+1),1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        GetPixelInfo(image,&pixel);
        for ( ; x <= stop_x; x++)
        {
          /* Only the single pixel at the primitive point is painted. */
          if ((x == (ssize_t) ceil(primitive_info->point.x-0.5)) &&
              (y == (ssize_t) ceil(primitive_info->point.y-0.5)))
            {
              GetFillColor(draw_info,x-start_x,y-start_y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
            }
          q+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      polygon_info=DestroyPolygonThreadSet(polygon_info);
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          "    end draw-polygon");
      return(status);
    }
  /*
    Draw polygon or line.
  */
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  start_y=(ssize_t) ceil(bounds.y1-0.5);
  stop_y=(ssize_t) floor(bounds.y2+0.5);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_number_threads(image,image,stop_y-start_y+1,1)
#endif
  for (y=start_y; y <= stop_y; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      fill_alpha,
      stroke_alpha;

    PixelInfo
      fill_color,
      stroke_color;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      start_x,
      stop_x;

    if (status == MagickFalse)
      continue;
    start_x=(ssize_t) ceil(bounds.x1-0.5);
    stop_x=(ssize_t) floor(bounds.x2+0.5);
    q=GetCacheViewAuthenticPixels(image_view,start_x,y,(size_t) (stop_x-start_x+
      1),1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=start_x; x <= stop_x; x++)
    {
      /*
        Fill and/or stroke.
      */
      fill_alpha=GetFillAlpha(polygon_info[id],mid,fill,draw_info->fill_rule,
        x,y,&stroke_alpha);
      if (draw_info->stroke_antialias == MagickFalse)
        {
          /* Without antialiasing, threshold coverage to all-or-nothing. */
          fill_alpha=fill_alpha > 0.25 ? 1.0 : 0.0;
          stroke_alpha=stroke_alpha > 0.25 ? 1.0 : 0.0;
        }
      GetFillColor(draw_info,x-start_x,y-start_y,&fill_color,exception);
      fill_alpha=fill_alpha*fill_color.alpha;
      CompositePixelOver(image,&fill_color,fill_alpha,q,(double)
        GetPixelAlpha(image,q),q);
      GetStrokeColor(draw_info,x-start_x,y-start_y,&stroke_color,exception);
      stroke_alpha=stroke_alpha*stroke_color.alpha;
      CompositePixelOver(image,&stroke_color,stroke_alpha,q,(double)
        GetPixelAlpha(image,q),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  polygon_info=DestroyPolygonThreadSet(polygon_info);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule(),"    end draw-polygon");
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D r a w P r i m i t i v e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawPrimitive() draws a primitive (line, rectangle, ellipse) on the image.
%
%  The format of the DrawPrimitive method is:
%
%      MagickBooleanType DrawPrimitive(Image *image,const DrawInfo *draw_info,
%        PrimitiveInfo *primitive_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static void LogPrimitiveInfo(const PrimitiveInfo *primitive_info)
{
  const char
    *methods[] =
    {
      "point",
      "replace",
      "floodfill",
      "filltoborder",
      "reset",
      "?"
  };

  PointInfo
    p,
    q,
    point;

  register ssize_t
    i,
    x;

  ssize_t
    coordinates,
    y;

  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  /*
    Simple primitives log one line and return; path primitives fall through
    to the per-coordinate trace below.
  */
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "AlphaPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ColorPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ColorPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case ImagePrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "ImagePrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    case PointPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "PointPrimitive %.20g,%.20g %s",(double) x,(double) y,
        methods[primitive_info->method]);
      return;
    }
    case TextPrimitive:
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        "TextPrimitive %.20g,%.20g",(double) x,(double) y);
      return;
    }
    default:
      break;
  }
  /*
    Walk the coordinate list; p holds the first point of the current subpath,
    q the previously logged point (used to flag duplicates).
  */
  coordinates=0;
  p=primitive_info[0].point;
  q.x=(-1.0);
  q.y=(-1.0);
  for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++)
  {
    point=primitive_info[i].point;
    if (coordinates <= 0)
      {
        /* Start of a new subpath. */
        coordinates=(ssize_t) primitive_info[i].coordinates;
        (void) LogMagickEvent(DrawEvent,GetMagickModule(),
          " begin open (%.20g)",(double) coordinates);
        p=point;
      }
    point=primitive_info[i].point;
    if ((fabs(q.x-point.x) >= DrawEpsilon) ||
        (fabs(q.y-point.y) >= DrawEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %.18g,%.18g",(double) coordinates,point.x,point.y);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " %.20g: %g %g (duplicate)",(double) coordinates,point.x,point.y);
    q=point;
    coordinates--;
    if (coordinates > 0)
      continue;
    /* Subpath exhausted: closed if the last point matches the first. */
    if ((fabs(p.x-point.x) >= DrawEpsilon) ||
        (fabs(p.y-point.y) >= DrawEpsilon))
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end last (%.20g)",
        (double) coordinates);
    else
      (void) LogMagickEvent(DrawEvent,GetMagickModule()," end open (%.20g)",
        (double) coordinates);
  }
}

/*
  DrawPrimitive() renders a single primitive.  Alpha/Color primitives paint
  via point/replace/floodfill/reset methods; Image composites another image;
  Point paints one pixel; Text annotates; everything else is rasterized as a
  (possibly dashed/stroked) polygon.
*/
MagickExport MagickBooleanType DrawPrimitive(Image *image,
  const DrawInfo *draw_info,const PrimitiveInfo *primitive_info,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickStatusType
    status;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  if (image->debug != MagickFalse)
    {
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " begin draw-primitive");
      (void) LogMagickEvent(DrawEvent,GetMagickModule(),
        " affine: %g,%g,%g,%g,%g,%g",draw_info->affine.sx,
        draw_info->affine.rx,draw_info->affine.ry,draw_info->affine.sy,
        draw_info->affine.tx,draw_info->affine.ty);
    }
  /*
    A non-gray fill or stroke on a grayscale image forces sRGB first.
  */
  if ((IsGrayColorspace(image->colorspace) != MagickFalse) &&
      ((IsPixelInfoGray(&draw_info->fill) == MagickFalse) ||
       (IsPixelInfoGray(&draw_info->stroke) == MagickFalse)))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  status=MagickTrue;
  x=(ssize_t) ceil(primitive_info->point.x-0.5);
  y=(ssize_t) ceil(primitive_info->point.y-0.5);
  image_view=AcquireAuthenticCacheView(image,exception);
  switch (primitive_info->primitive)
  {
    case AlphaPrimitive:
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          /* Replace the alpha of every pixel fuzzily matching the target. */
          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if
(IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          ChannelType
            channel_mask;

          PixelInfo
            target;

          /*
            Flood-fill alpha only: restrict the channel mask to alpha for the
            duration of the fill, then restore it.
          */
          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              /* Fill-to-border matches until the border color is reached. */
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          channel_mask=SetImageChannelMask(image,AlphaChannel);
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          (void) SetImageChannelMask(image,channel_mask);
          break;
        }
        case ResetMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          /* Reset the alpha of every pixel in the image. */
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ColorPrimitive:
    {
      switch (primitive_info->method)
      {
        case PointMethod:
        default:
        {
          PixelInfo
            pixel;

          register Quantum
            *q;

          q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
          if (q == (Quantum *) NULL)
            break;
          GetPixelInfo(image,&pixel);
          GetFillColor(draw_info,x,y,&pixel,exception);
          SetPixelViaPixelInfo(image,&pixel,q);
          (void) SyncCacheViewAuthenticPixels(image_view,exception);
          break;
        }
        case ReplaceMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel,
            target;

          /* Recolor every pixel fuzzily matching the target color. */
          (void) GetOneCacheViewVirtualPixelInfo(image_view,x,y,&target,
            exception);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetPixelInfoPixel(image,q,&pixel);
              if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse)
                {
                  q+=GetPixelChannels(image);
                  continue;
                }
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
        case FloodfillMethod:
        case FillToBorderMethod:
        {
          PixelInfo
            target;

          (void) GetOneVirtualPixelInfo(image,TileVirtualPixelMethod,x,y,
            &target,exception);
          if (primitive_info->method == FillToBorderMethod)
            {
              target.red=(double) draw_info->border_color.red;
              target.green=(double) draw_info->border_color.green;
              target.blue=(double) draw_info->border_color.blue;
            }
          status&=FloodfillPaintImage(image,draw_info,&target,x,y,
            primitive_info->method == FloodfillMethod ? MagickFalse :
            MagickTrue,exception);
          break;
        }
        case ResetMethod:
        {
          MagickBooleanType
            sync;

          PixelInfo
            pixel;

          GetPixelInfo(image,&pixel);
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register Quantum
              *magick_restrict q;

            q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
              exception);
            if (q == (Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              GetFillColor(draw_info,x,y,&pixel,exception);
              SetPixelViaPixelInfo(image,&pixel,q);
              q+=GetPixelChannels(image);
            }
            sync=SyncCacheViewAuthenticPixels(image_view,exception);
            if (sync == MagickFalse)
              break;
          }
          break;
        }
      }
      break;
    }
    case ImagePrimitive:
    {
      AffineMatrix
        affine;

      char
        composite_geometry[MagickPathExtent];

      Image
        *composite_image;

      ImageInfo
        *clone_info;

      RectangleInfo
        geometry;

      ssize_t
        x1,
        y1;

      if (primitive_info->text == (char *) NULL)
        break;
      clone_info=AcquireImageInfo();
      /* "data:" URIs are decoded inline; anything else is read from a file. */
      if (LocaleNCompare(primitive_info->text,"data:",5) == 0)
        composite_image=ReadInlineImage(clone_info,primitive_info->text,
          exception);
      else
        {
          (void) CopyMagickString(clone_info->filename,primitive_info->text,
            MagickPathExtent);
          composite_image=ReadImage(clone_info,exception);
        }
      clone_info=DestroyImageInfo(clone_info);
      if (composite_image == (Image *) NULL)
        break;
      (void) SetImageProgressMonitor(composite_image,(MagickProgressMonitor)
        NULL,(void *) NULL);
      x1=(ssize_t) ceil(primitive_info[1].point.x-0.5);
      y1=(ssize_t) ceil(primitive_info[1].point.y-0.5);
      if (((x1 != 0L) && (x1 != (ssize_t) composite_image->columns)) ||
          ((y1 != 0L) && (y1 != (ssize_t) composite_image->rows)))
        {
          /*
            Resize image.
          */
          (void) FormatLocaleString(composite_geometry,MagickPathExtent,
            "%gx%g!",primitive_info[1].point.x,primitive_info[1].point.y);
          composite_image->filter=image->filter;
          (void) TransformImage(&composite_image,(char *) NULL,
            composite_geometry,exception);
        }
      if (composite_image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(composite_image,OpaqueAlphaChannel,
          exception);
      if (draw_info->alpha != OpaqueAlpha)
        (void) SetImageAlpha(composite_image,draw_info->alpha,exception);
      SetGeometry(image,&geometry);
      image->gravity=draw_info->gravity;
      geometry.x=x;
      geometry.y=y;
      (void) FormatLocaleString(composite_geometry,MagickPathExtent,
        "%.20gx%.20g%+.20g%+.20g",(double) composite_image->columns,(double)
        composite_image->rows,(double) geometry.x,(double) geometry.y);
      (void) ParseGravityGeometry(image,composite_geometry,&geometry,exception);
      affine=draw_info->affine;
      affine.tx=(double) geometry.x;
      affine.ty=(double) geometry.y;
      composite_image->interpolate=image->interpolate;
      status&=DrawAffineImage(image,composite_image,&affine,exception);
      composite_image=DestroyImage(composite_image);
      break;
    }
    case PointPrimitive:
    {
      PixelInfo
        fill_color;

      register Quantum
        *q;

      /* Clamp to the image; a point outside it is a no-op. */
      if ((y < 0) || (y >= (ssize_t) image->rows))
        break;
      if ((x < 0) || (x >= (ssize_t) image->columns))
        break;
      q=GetCacheViewAuthenticPixels(image_view,x,y,1,1,exception);
      if (q == (Quantum *) NULL)
        break;
      GetFillColor(draw_info,x,y,&fill_color,exception);
      CompositePixelOver(image,&fill_color,(double) fill_color.alpha,q,
        (double) GetPixelAlpha(image,q),q);
      (void) SyncCacheViewAuthenticPixels(image_view,exception);
      break;
    }
    case TextPrimitive:
    {
      char
        geometry[MagickPathExtent];

      DrawInfo
        *clone_info;

      if (primitive_info->text == (char *) NULL)
        break;
      /* Delegate to the annotate module with the primitive's anchor point. */
      clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
      (void) CloneString(&clone_info->text,primitive_info->text);
      (void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
        primitive_info->point.x,primitive_info->point.y);
      (void) CloneString(&clone_info->geometry,geometry);
      status&=AnnotateImage(image,clone_info,exception);
      clone_info=DestroyDrawInfo(clone_info);
      break;
    }
    default:
    {
      double
        mid,
        scale;

      DrawInfo
        *clone_info;

      if (IsEventLogging() != MagickFalse)
        LogPrimitiveInfo(primitive_info);
      scale=ExpandAffine(&draw_info->affine);
      if ((draw_info->dash_pattern != (double *) NULL) &&
          (fabs(draw_info->dash_pattern[0]) >= DrawEpsilon) &&
          (fabs(scale*draw_info->stroke_width) >= DrawEpsilon) &&
          (draw_info->stroke.alpha != (Quantum) TransparentAlpha))
        {
          /*
            Draw dash polygon: fill first with the stroke disabled, then
            overlay the dashed stroke.
          */
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          (void) DrawDashPolygon(draw_info,primitive_info,image,exception);
          break;
        }
      mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0;
      if ((mid > 1.0) &&
          ((draw_info->stroke.alpha != (Quantum) TransparentAlpha) ||
           (draw_info->stroke_pattern != (Image *) NULL)))
        {
          MagickBooleanType
            closed_path;

          /*
            Draw strokes while respecting line cap/join attributes.
          */
          for (i=0; primitive_info[i].primitive != UndefinedPrimitive; i++) ;
          closed_path=
            (fabs(primitive_info[i-1].point.x-primitive_info[0].point.x) <
             DrawEpsilon) &&
            (fabs(primitive_info[i-1].point.y-primitive_info[0].point.y) <
             DrawEpsilon) ? MagickTrue : MagickFalse;
          i=(ssize_t) primitive_info[0].coordinates;
          /* Round-capped/joined closed shapes render fine as plain polygons. */
          if ((((draw_info->linecap == RoundCap) ||
                (closed_path != MagickFalse)) &&
               (draw_info->linejoin == RoundJoin)) ||
              (primitive_info[i].primitive != UndefinedPrimitive))
            {
              (void) DrawPolygonPrimitive(image,draw_info,primitive_info,
                exception);
              break;
            }
          clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info);
          clone_info->stroke_width=0.0;
          clone_info->stroke.alpha=(MagickRealType) TransparentAlpha;
          status&=DrawPolygonPrimitive(image,clone_info,primitive_info,
            exception);
          clone_info=DestroyDrawInfo(clone_info);
          status&=DrawStrokePolygon(image,draw_info,primitive_info,exception);
          break;
        }
      status&=DrawPolygonPrimitive(image,draw_info,primitive_info,exception);
      break;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DrawEvent,GetMagickModule()," end draw-primitive");
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D r a w S t r o k e P o l y g o n                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DrawStrokePolygon() draws a stroked polygon (line, rectangle, ellipse) on
%  the image while respecting the line cap and join attributes.
%
%  The format of the DrawStrokePolygon method is:
%
%      MagickBooleanType DrawStrokePolygon(Image *image,
%        const DrawInfo *draw_info,const PrimitiveInfo *primitive_info)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o primitive_info: Specifies a pointer to a PrimitiveInfo structure.
% % */ static void DrawRoundLinecap(Image *image,const DrawInfo *draw_info, const PrimitiveInfo *primitive_info,ExceptionInfo *exception) { PrimitiveInfo linecap[5]; register ssize_t i; for (i=0; i < 4; i++) linecap[i]=(*primitive_info); linecap[0].coordinates=4; linecap[1].point.x+=2.0*DrawEpsilon; linecap[2].point.x+=2.0*DrawEpsilon; linecap[2].point.y+=2.0*DrawEpsilon; linecap[3].point.y+=2.0*DrawEpsilon; linecap[4].primitive=UndefinedPrimitive; (void) DrawPolygonPrimitive(image,draw_info,linecap,exception); } static MagickBooleanType DrawStrokePolygon(Image *image, const DrawInfo *draw_info,const PrimitiveInfo *primitive_info, ExceptionInfo *exception) { DrawInfo *clone_info; MagickBooleanType closed_path; MagickStatusType status; PrimitiveInfo *stroke_polygon; register const PrimitiveInfo *p, *q; /* Draw stroked polygon. */ if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " begin draw-stroke-polygon"); clone_info=CloneDrawInfo((ImageInfo *) NULL,draw_info); clone_info->fill=draw_info->stroke; if (clone_info->fill_pattern != (Image *) NULL) clone_info->fill_pattern=DestroyImage(clone_info->fill_pattern); if (clone_info->stroke_pattern != (Image *) NULL) clone_info->fill_pattern=CloneImage(clone_info->stroke_pattern,0,0, MagickTrue,exception); clone_info->stroke.alpha=(MagickRealType) TransparentAlpha; clone_info->stroke_width=0.0; clone_info->fill_rule=NonZeroRule; status=MagickTrue; for (p=primitive_info; p->primitive != UndefinedPrimitive; p+=p->coordinates) { stroke_polygon=TraceStrokePolygon(draw_info,p); if (stroke_polygon == (PrimitiveInfo *) NULL) { status=0; break; } status&=DrawPolygonPrimitive(image,clone_info,stroke_polygon,exception); if (status == 0) break; stroke_polygon=(PrimitiveInfo *) RelinquishMagickMemory(stroke_polygon); q=p+p->coordinates-1; closed_path=(fabs(q->point.x-p->point.x) < DrawEpsilon) && (fabs(q->point.y-p->point.y) < DrawEpsilon) ? 
MagickTrue : MagickFalse; if ((draw_info->linecap == RoundCap) && (closed_path == MagickFalse)) { DrawRoundLinecap(image,draw_info,p,exception); DrawRoundLinecap(image,draw_info,q,exception); } } clone_info=DestroyDrawInfo(clone_info); if (image->debug != MagickFalse) (void) LogMagickEvent(DrawEvent,GetMagickModule(), " end draw-stroke-polygon"); return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A f f i n e M a t r i x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAffineMatrix() returns an AffineMatrix initialized to the identity % matrix. % % The format of the GetAffineMatrix method is: % % void GetAffineMatrix(AffineMatrix *affine_matrix) % % A description of each parameter follows: % % o affine_matrix: the affine matrix. % */ MagickExport void GetAffineMatrix(AffineMatrix *affine_matrix) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(affine_matrix != (AffineMatrix *) NULL); (void) ResetMagickMemory(affine_matrix,0,sizeof(*affine_matrix)); affine_matrix->sx=1.0; affine_matrix->sy=1.0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t D r a w I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetDrawInfo() initializes draw_info to default values from image_info. % % The format of the GetDrawInfo method is: % % void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info) % % A description of each parameter follows: % % o image_info: the image info.. % % o draw_info: the draw info. % */ MagickExport void GetDrawInfo(const ImageInfo *image_info,DrawInfo *draw_info) { char *next_token; const char *option; ExceptionInfo *exception; ImageInfo *clone_info; /* Initialize draw attributes. 
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(draw_info != (DrawInfo *) NULL);
  (void) ResetMagickMemory(draw_info,0,sizeof(*draw_info));
  clone_info=CloneImageInfo(image_info);
  GetAffineMatrix(&draw_info->affine);
  exception=AcquireExceptionInfo();
  /* Defaults: opaque black fill, fully transparent stroke. */
  (void) QueryColorCompliance("#000F",AllCompliance,&draw_info->fill,
    exception);
  (void) QueryColorCompliance("#0000",AllCompliance,&draw_info->stroke,
    exception);
  draw_info->stroke_width=1.0;
  draw_info->fill_rule=EvenOddRule;
  draw_info->alpha=OpaqueAlpha;
  draw_info->fill_alpha=OpaqueAlpha;
  draw_info->stroke_alpha=OpaqueAlpha;
  draw_info->linecap=ButtCap;
  draw_info->linejoin=MiterJoin;
  draw_info->miterlimit=10;
  draw_info->decorate=NoDecoration;
  draw_info->pointsize=12.0;
  draw_info->undercolor.alpha=(MagickRealType) TransparentAlpha;
  draw_info->compose=OverCompositeOp;
  draw_info->render=MagickTrue;
  draw_info->debug=IsEventLogging();
  /* Inherit attributes from the cloned image info. */
  draw_info->stroke_antialias=clone_info->antialias;
  if (clone_info->font != (char *) NULL)
    draw_info->font=AcquireString(clone_info->font);
  if (clone_info->density != (char *) NULL)
    draw_info->density=AcquireString(clone_info->density);
  draw_info->text_antialias=clone_info->antialias;
  if (fabs(clone_info->pointsize) >= DrawEpsilon)
    draw_info->pointsize=clone_info->pointsize;
  draw_info->border_color=clone_info->border_color;
  if (clone_info->server_name != (char *) NULL)
    draw_info->server_name=AcquireString(clone_info->server_name);
  /* Image options, when present, override the defaults above. */
  option=GetImageOption(clone_info,"direction");
  if (option != (const char *) NULL)
    draw_info->direction=(DirectionType) ParseCommandOption(
      MagickDirectionOptions,MagickFalse,option);
  else
    draw_info->direction=UndefinedDirection;
  option=GetImageOption(clone_info,"encoding");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->encoding,option);
  option=GetImageOption(clone_info,"family");
  if (option != (const char *) NULL)
    (void) CloneString(&draw_info->family,option);
  option=GetImageOption(clone_info,"fill");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->fill,
      exception);
  option=GetImageOption(clone_info,"gravity");
  if (option != (const char *) NULL)
    draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"interline-spacing");
  if (option != (const char *) NULL)
    draw_info->interline_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"interword-spacing");
  if (option != (const char *) NULL)
    draw_info->interword_spacing=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"kerning");
  if (option != (const char *) NULL)
    draw_info->kerning=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"stroke");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->stroke,
      exception);
  option=GetImageOption(clone_info,"strokewidth");
  if (option != (const char *) NULL)
    draw_info->stroke_width=StringToDouble(option,&next_token);
  option=GetImageOption(clone_info,"style");
  if (option != (const char *) NULL)
    draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
      MagickFalse,option);
  option=GetImageOption(clone_info,"undercolor");
  if (option != (const char *) NULL)
    (void) QueryColorCompliance(option,AllCompliance,&draw_info->undercolor,
      exception);
  option=GetImageOption(clone_info,"weight");
  if (option != (const char *) NULL)
    {
      ssize_t
        weight;

      /* Accept a symbolic weight (e.g. "bold") or a raw numeric value. */
      weight=ParseCommandOption(MagickWeightOptions,MagickFalse,option);
      if (weight == -1)
        weight=(ssize_t) StringToUnsignedLong(option);
      draw_info->weight=(size_t) weight;
    }
  exception=DestroyExceptionInfo(exception);
  draw_info->signature=MagickCoreSignature;
  clone_info=DestroyImageInfo(clone_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r m u t a t e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Permutate() returns the permutation of the (n,k).
%
%  The format of the Permutate method is:
%
%      void Permutate(ssize_t n,ssize_t k)
%
%  A description of each parameter follows:
%
%    o n:
%
%    o k:
%
%
*/
static inline double Permutate(const ssize_t n,const ssize_t k)
{
  double
    r;

  register ssize_t
    i;

  /* Binomial coefficient C(n,k) computed as n!/(k!*(n-k)!) incrementally. */
  r=1.0;
  for (i=k+1; i <= n; i++)
    r*=i;
  for (i=1; i <= (n-k); i++)
    r/=i;
  return(r);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   T r a c e P r i m i t i v e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TracePrimitive is a collection of methods for generating graphic
%  primitives such as arcs, ellipses, paths, etc.
%
*/

/*
  TraceArc() converts an arc defined by two corner points and sweep degrees
  into an ellipse centered between the points.
*/
static void TraceArc(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end,const PointInfo degrees)
{
  PointInfo
    center,
    radii;

  center.x=0.5*(end.x+start.x);
  center.y=0.5*(end.y+start.y);
  radii.x=fabs(center.x-start.x);
  radii.y=fabs(center.y-start.y);
  TraceEllipse(primitive_info,center,radii,degrees);
}

/*
  TraceArcPath() traces an SVG-style elliptical arc (endpoint
  parameterization: radii, x-axis rotation, large-arc and sweep flags) as a
  sequence of cubic Bezier segments, one per quarter turn.
*/
static void TraceArcPath(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end,const PointInfo arc,const double angle,
  const MagickBooleanType large_arc,const MagickBooleanType sweep)
{
  double
    alpha,
    beta,
    delta,
    factor,
    gamma,
    theta;

  PointInfo
    center,
    points[3],
    radii;

  register double
    cosine,
    sine;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  size_t
    arc_segments;

  /* Degenerate cases: coincident endpoints become a point, zero radii a line. */
  if ((fabs(start.x-end.x) < DrawEpsilon) &&
      (fabs(start.y-end.y) < DrawEpsilon))
    {
      TracePoint(primitive_info,end);
      return;
    }
  radii.x=fabs(arc.x);
  radii.y=fabs(arc.y);
  if ((fabs(radii.x) < DrawEpsilon) || (fabs(radii.y) < DrawEpsilon))
    {
      TraceLine(primitive_info,start,end);
      return;
    }
  cosine=cos(DegreesToRadians(fmod((double) angle,360.0)));
  sine=sin(DegreesToRadians(fmod((double) angle,360.0)));
  /* Transform the midpoint into the arc's rotated coordinate frame. */
  center.x=(double) (cosine*(end.x-start.x)/2+sine*(end.y-start.y)/2);
  center.y=(double) (cosine*(end.y-start.y)/2-sine*(end.x-start.x)/2);
  delta=(center.x*center.x)/(radii.x*radii.x)+(center.y*center.y)/
    (radii.y*radii.y);
  if (delta < DrawEpsilon)
    {
      TraceLine(primitive_info,start,end);
      return;
    }
  if (delta > 1.0)
    {
      /* Radii too small to span the endpoints: scale them up (SVG rule). */
      radii.x*=sqrt((double) delta);
      radii.y*=sqrt((double) delta);
    }
  points[0].x=(double) (cosine*start.x/radii.x+sine*start.y/radii.x);
  points[0].y=(double) (cosine*start.y/radii.y-sine*start.x/radii.y);
  points[1].x=(double) (cosine*end.x/radii.x+sine*end.y/radii.x);
  points[1].y=(double) (cosine*end.y/radii.y-sine*end.x/radii.y);
  alpha=points[1].x-points[0].x;
  beta=points[1].y-points[0].y;
  factor=PerceptibleReciprocal(alpha*alpha+beta*beta)-0.25;
  if (factor <= 0.0)
    factor=0.0;
  else
    {
      factor=sqrt((double) factor);
      if (sweep == large_arc)
        factor=(-factor);
    }
  center.x=(double) ((points[0].x+points[1].x)/2-factor*beta);
  center.y=(double) ((points[0].y+points[1].y)/2+factor*alpha);
  alpha=atan2(points[0].y-center.y,points[0].x-center.x);
  theta=atan2(points[1].y-center.y,points[1].x-center.x)-alpha;
  if ((theta < 0.0) && (sweep != MagickFalse))
    theta+=2.0*MagickPI;
  else
    if ((theta > 0.0) && (sweep == MagickFalse))
      theta-=2.0*MagickPI;
  /* One Bezier segment per (at most) quarter-circle of sweep. */
  arc_segments=(size_t) ceil(fabs((double) (theta/(0.5*MagickPI+
    DrawEpsilon))));
  p=primitive_info;
  for (i=0; i < (ssize_t) arc_segments; i++)
  {
    beta=0.5*((alpha+(i+1)*theta/arc_segments)-(alpha+i*theta/arc_segments));
    gamma=(8.0/3.0)*sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))*
      sin(fmod((double) (0.5*beta),DegreesToRadians(360.0)))/
      sin(fmod((double) beta,DegreesToRadians(360.0)));
    points[0].x=(double) (center.x+cos(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))-gamma*sin(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[0].y=(double) (center.y+sin(fmod((double) (alpha+(double) i*theta/
      arc_segments),DegreesToRadians(360.0)))+gamma*cos(fmod((double) (alpha+
      (double) i*theta/arc_segments),DegreesToRadians(360.0))));
    points[2].x=(double) (center.x+cos(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[2].y=(double) (center.y+sin(fmod((double) (alpha+(double) (i+1)*
      theta/arc_segments),DegreesToRadians(360.0))));
    points[1].x=(double) (points[2].x+gamma*sin(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    points[1].y=(double) (points[2].y-gamma*cos(fmod((double) (alpha+(double)
      (i+1)*theta/arc_segments),DegreesToRadians(360.0))));
    /* Map the control points back to image space and emit a Bezier. */
    p->point.x=(p == primitive_info) ? start.x : (p-1)->point.x;
    p->point.y=(p == primitive_info) ? start.y : (p-1)->point.y;
    (p+1)->point.x=(double) (cosine*radii.x*points[0].x-sine*radii.y*
      points[0].y);
    (p+1)->point.y=(double) (sine*radii.x*points[0].x+cosine*radii.y*
      points[0].y);
    (p+2)->point.x=(double) (cosine*radii.x*points[1].x-sine*radii.y*
      points[1].y);
    (p+2)->point.y=(double) (sine*radii.x*points[1].x+cosine*radii.y*
      points[1].y);
    (p+3)->point.x=(double) (cosine*radii.x*points[2].x-sine*radii.y*
      points[2].y);
    (p+3)->point.y=(double) (sine*radii.x*points[2].x+cosine*radii.y*
      points[2].y);
    if (i == (ssize_t) (arc_segments-1))
      (p+3)->point=end;
    TraceBezier(p,4);
    p+=p->coordinates;
  }
  primitive_info->coordinates=(size_t) (p-primitive_info);
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
}

/*
  TraceBezier() flattens a Bezier curve of arbitrary degree (given by
  number_coordinates control points already stored in primitive_info) into a
  polyline of short segments, written back over primitive_info.
*/
static void TraceBezier(PrimitiveInfo *primitive_info,
  const size_t number_coordinates)
{
  double
    alpha,
    *coefficients,
    weight;

  PointInfo
    end,
    point,
    *points;

  register PrimitiveInfo
    *p;

  register ssize_t
    i,
    j;

  size_t
    control_points,
    quantum;

  /*
    Allocate coefficients.
  */
  /* Scale the sample count to the curve's extent, capped at BezierQuantum. */
  quantum=number_coordinates;
  for (i=0; i < (ssize_t) number_coordinates; i++)
  {
    for (j=i+1; j < (ssize_t) number_coordinates; j++)
    {
      alpha=fabs(primitive_info[j].point.x-primitive_info[i].point.x);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
      alpha=fabs(primitive_info[j].point.y-primitive_info[i].point.y);
      if (alpha > (double) quantum)
        quantum=(size_t) alpha;
    }
  }
  quantum=(size_t) MagickMin((double) quantum/number_coordinates,
    (double) BezierQuantum);
  control_points=quantum*number_coordinates;
  coefficients=(double *) AcquireQuantumMemory((size_t)
    number_coordinates,sizeof(*coefficients));
  points=(PointInfo *) AcquireQuantumMemory((size_t) control_points,
    sizeof(*points));
  if ((coefficients == (double *) NULL) || (points == (PointInfo *) NULL))
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    Compute bezier points: evaluate the Bernstein form at control_points
    evenly spaced parameter values.
  */
  end=primitive_info[number_coordinates-1].point;
  for (i=0; i < (ssize_t) number_coordinates; i++)
    coefficients[i]=Permutate((ssize_t) number_coordinates-1,i);
  weight=0.0;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    p=primitive_info;
    point.x=0.0;
    point.y=0.0;
    alpha=pow((double) (1.0-weight),(double) number_coordinates-1.0);
    for (j=0; j < (ssize_t) number_coordinates; j++)
    {
      point.x+=alpha*coefficients[j]*p->point.x;
      point.y+=alpha*coefficients[j]*p->point.y;
      alpha*=weight/(1.0-weight);
      p++;
    }
    points[i]=point;
    weight+=1.0/control_points;
  }
  /*
    Bezier curves are just short segmented polys.
  */
  p=primitive_info;
  for (i=0; i < (ssize_t) control_points; i++)
  {
    TracePoint(p,points[i]);
    p+=p->coordinates;
  }
  TracePoint(p,end);
  p+=p->coordinates;
  primitive_info->coordinates=(size_t) (p-primitive_info);
  /* Back-fill the primitive type over every emitted coordinate. */
  for (i=0; i < (ssize_t) primitive_info->coordinates; i++)
  {
    p->primitive=primitive_info->primitive;
    p--;
  }
  points=(PointInfo *) RelinquishMagickMemory(points);
  coefficients=(double *) RelinquishMagickMemory(coefficients);
}

/*
  TraceCircle() traces a full circle centered at start whose radius is the
  distance from start to end, as a 0..360 degree ellipse.
*/
static void TraceCircle(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo end)
{
  double
    alpha,
    beta,
    radius;

  PointInfo
    offset,
    degrees;

  alpha=end.x-start.x;
  beta=end.y-start.y;
  radius=hypot((double) alpha,(double) beta);
  offset.x=(double) radius;
  offset.y=(double) radius;
  degrees.x=0.0;
  degrees.y=360.0;
  TraceEllipse(primitive_info,start,offset,degrees);
}

/*
  TraceEllipse() traces an ellipse centered at start with radii stop, sweeping
  from degrees.x to degrees.y, as a polyline of short segments.
*/
static void TraceEllipse(PrimitiveInfo *primitive_info,const PointInfo start,
  const PointInfo stop,const PointInfo degrees)
{
  double
    delta,
    step,
    y;

  PointInfo
    angle,
    point;

  register PrimitiveInfo
    *p;

  register ssize_t
    i;

  /*
    Ellipses are just short segmented polys.
*/ if ((fabs(stop.x) < DrawEpsilon) && (fabs(stop.y) < DrawEpsilon)) { TracePoint(primitive_info,start); return; } delta=2.0/MagickMax(stop.x,stop.y); step=MagickPI/8.0; if ((delta >= 0.0) && (delta < (MagickPI/8.0))) step=MagickPI/(4*(MagickPI/delta/2+0.5)); angle.x=DegreesToRadians(degrees.x); y=degrees.y; while (y < degrees.x) y+=360.0; angle.y=DegreesToRadians(y); for (p=primitive_info; angle.x < angle.y; angle.x+=step) { point.x=cos(fmod(angle.x,DegreesToRadians(360.0)))*stop.x+start.x; point.y=sin(fmod(angle.x,DegreesToRadians(360.0)))*stop.y+start.y; TracePoint(p,point); p+=p->coordinates; } point.x=cos(fmod(angle.y,DegreesToRadians(360.0)))*stop.x+start.x; point.y=sin(fmod(angle.y,DegreesToRadians(360.0)))*stop.y+start.y; TracePoint(p,point); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } } static void TraceLine(PrimitiveInfo *primitive_info,const PointInfo start, const PointInfo end) { TracePoint(primitive_info,start); if ((fabs(start.x-end.x) < DrawEpsilon) && (fabs(start.y-end.y) < DrawEpsilon)) { primitive_info->primitive=PointPrimitive; primitive_info->coordinates=1; return; } TracePoint(primitive_info+1,end); (primitive_info+1)->primitive=primitive_info->primitive; primitive_info->coordinates=2; } static size_t TracePath(PrimitiveInfo *primitive_info,const char *path) { char *next_token, token[MagickPathExtent]; const char *p; double x, y; int attribute, last_attribute; PointInfo end = {0.0, 0.0}, points[4] = { {0.0,0.0}, {0.0,0.0}, {0.0,0.0}, {0.0,0.0} }, point = {0.0, 0.0}, start = {0.0, 0.0}; PrimitiveType primitive_type; register PrimitiveInfo *q; register ssize_t i; size_t number_coordinates, z_count; attribute=0; number_coordinates=0; z_count=0; primitive_type=primitive_info->primitive; q=primitive_info; for (p=path; *p != '\0'; ) { while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == '\0') break; 
last_attribute=attribute; attribute=(int) (*p++); switch (attribute) { case 'a': case 'A': { double angle; MagickBooleanType large_arc, sweep; PointInfo arc; /* Compute arc points. */ do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); arc.x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); arc.y=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); angle=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); large_arc=StringToLong(token) != 0 ? MagickTrue : MagickFalse; GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); sweep=StringToLong(token) != 0 ? MagickTrue : MagickFalse; GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); end.x=(double) (attribute == (int) 'A' ? x : point.x+x); end.y=(double) (attribute == (int) 'A' ? y : point.y+y); TraceArcPath(q,point,end,arc,angle,large_arc,sweep); q+=q->coordinates; point=end; while (isspace((int) ((unsigned char) *p)) != 0) p++; if (*p == ',') p++; } while (IsPoint(p) != MagickFalse); break; } case 'c': case 'C': { /* Compute bezier points. */ do { points[0]=point; for (i=1; i < 4; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); end.x=(double) (attribute == (int) 'C' ? 
x : point.x+x); end.y=(double) (attribute == (int) 'C' ? y : point.y+y); points[i]=end; } for (i=0; i < 4; i++) (q+i)->point=points[i]; TraceBezier(q,4); q+=q->coordinates; point=end; } while (IsPoint(p) != MagickFalse); break; } case 'H': case 'h': { do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); point.x=(double) (attribute == (int) 'H' ? x: point.x+x); TracePoint(q,point); q+=q->coordinates; } while (IsPoint(p) != MagickFalse); break; } case 'l': case 'L': { do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); point.x=(double) (attribute == (int) 'L' ? x : point.x+x); point.y=(double) (attribute == (int) 'L' ? y : point.y+y); TracePoint(q,point); q+=q->coordinates; } while (IsPoint(p) != MagickFalse); break; } case 'M': case 'm': { if (q != primitive_info) { primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; primitive_info=q; } i=0; do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); point.x=(double) (attribute == (int) 'M' ? x : point.x+x); point.y=(double) (attribute == (int) 'M' ? y : point.y+y); if (i == 0) start=point; i++; TracePoint(q,point); q+=q->coordinates; if ((i != 0) && (attribute == (int) 'M')) { TracePoint(q,point); q+=q->coordinates; } } while (IsPoint(p) != MagickFalse); break; } case 'q': case 'Q': { /* Compute bezier points. 
*/ do { points[0]=point; for (i=1; i < 3; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (*p == ',') p++; end.x=(double) (attribute == (int) 'Q' ? x : point.x+x); end.y=(double) (attribute == (int) 'Q' ? y : point.y+y); points[i]=end; } for (i=0; i < 3; i++) (q+i)->point=points[i]; TraceBezier(q,3); q+=q->coordinates; point=end; } while (IsPoint(p) != MagickFalse); break; } case 's': case 'S': { /* Compute bezier points. */ do { points[0]=points[3]; points[1].x=2.0*points[3].x-points[2].x; points[1].y=2.0*points[3].y-points[2].y; for (i=2; i < 4; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); if (*p == ',') p++; end.x=(double) (attribute == (int) 'S' ? x : point.x+x); end.y=(double) (attribute == (int) 'S' ? y : point.y+y); points[i]=end; } if (strchr("CcSs",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 4; i++) (q+i)->point=points[i]; TraceBezier(q,4); q+=q->coordinates; point=end; } while (IsPoint(p) != MagickFalse); break; } case 't': case 'T': { /* Compute bezier points. */ do { points[0]=points[2]; points[1].x=2.0*points[2].x-points[1].x; points[1].y=2.0*points[2].y-points[1].y; for (i=2; i < 3; i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); x=StringToDouble(token,&next_token); GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); end.x=(double) (attribute == (int) 'T' ? 
x : point.x+x); end.y=(double) (attribute == (int) 'T' ? y : point.y+y); points[i]=end; } if (strchr("QqTt",last_attribute) == (char *) NULL) { points[0]=point; points[1]=point; } for (i=0; i < 3; i++) (q+i)->point=points[i]; TraceBezier(q,3); q+=q->coordinates; point=end; } while (IsPoint(p) != MagickFalse); break; } case 'v': case 'V': { do { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); y=StringToDouble(token,&next_token); point.y=(double) (attribute == (int) 'V' ? y : point.y+y); TracePoint(q,point); q+=q->coordinates; } while (IsPoint(p) != MagickFalse); break; } case 'z': case 'Z': { point=start; TracePoint(q,point); q+=q->coordinates; primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; primitive_info=q; z_count++; break; } default: { if (isalpha((int) ((unsigned char) attribute)) != 0) (void) FormatLocaleFile(stderr,"attribute not recognized: %c\n", attribute); break; } } } primitive_info->coordinates=(size_t) (q-primitive_info); number_coordinates+=primitive_info->coordinates; for (i=0; i < (ssize_t) number_coordinates; i++) { q--; q->primitive=primitive_type; if (z_count > 1) q->method=FillToBorderMethod; } q=primitive_info; return(number_coordinates); } static void TraceRectangle(PrimitiveInfo *primitive_info,const PointInfo start, const PointInfo end) { PointInfo point; register PrimitiveInfo *p; register ssize_t i; p=primitive_info; TracePoint(p,start); p+=p->coordinates; point.x=start.x; point.y=end.y; TracePoint(p,point); p+=p->coordinates; TracePoint(p,end); p+=p->coordinates; point.x=end.x; point.y=start.y; TracePoint(p,point); p+=p->coordinates; TracePoint(p,start); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } } static void TraceRoundRectangle(PrimitiveInfo *primitive_info, const PointInfo start,const 
PointInfo end,PointInfo arc) { PointInfo degrees, offset, point; register PrimitiveInfo *p; register ssize_t i; p=primitive_info; offset.x=fabs(end.x-start.x); offset.y=fabs(end.y-start.y); if (arc.x > (0.5*offset.x)) arc.x=0.5*offset.x; if (arc.y > (0.5*offset.y)) arc.y=0.5*offset.y; point.x=start.x+offset.x-arc.x; point.y=start.y+arc.y; degrees.x=270.0; degrees.y=360.0; TraceEllipse(p,point,arc,degrees); p+=p->coordinates; point.x=start.x+offset.x-arc.x; point.y=start.y+offset.y-arc.y; degrees.x=0.0; degrees.y=90.0; TraceEllipse(p,point,arc,degrees); p+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+offset.y-arc.y; degrees.x=90.0; degrees.y=180.0; TraceEllipse(p,point,arc,degrees); p+=p->coordinates; point.x=start.x+arc.x; point.y=start.y+arc.y; degrees.x=180.0; degrees.y=270.0; TraceEllipse(p,point,arc,degrees); p+=p->coordinates; TracePoint(p,primitive_info->point); p+=p->coordinates; primitive_info->coordinates=(size_t) (p-primitive_info); for (i=0; i < (ssize_t) primitive_info->coordinates; i++) { p->primitive=primitive_info->primitive; p--; } } static void TraceSquareLinecap(PrimitiveInfo *primitive_info, const size_t number_vertices,const double offset) { double distance; register double dx, dy; register ssize_t i; ssize_t j; dx=0.0; dy=0.0; for (i=1; i < (ssize_t) number_vertices; i++) { dx=primitive_info[0].point.x-primitive_info[i].point.x; dy=primitive_info[0].point.y-primitive_info[i].point.y; if ((fabs((double) dx) >= DrawEpsilon) || (fabs((double) dy) >= DrawEpsilon)) break; } if (i == (ssize_t) number_vertices) i=(ssize_t) number_vertices-1L; distance=hypot((double) dx,(double) dy); primitive_info[0].point.x=(double) (primitive_info[i].point.x+ dx*(distance+offset)/distance); primitive_info[0].point.y=(double) (primitive_info[i].point.y+ dy*(distance+offset)/distance); for (j=(ssize_t) number_vertices-2; j >= 0; j--) { dx=primitive_info[number_vertices-1].point.x-primitive_info[j].point.x; 
dy=primitive_info[number_vertices-1].point.y-primitive_info[j].point.y; if ((fabs((double) dx) >= DrawEpsilon) || (fabs((double) dy) >= DrawEpsilon)) break; } distance=hypot((double) dx,(double) dy); primitive_info[number_vertices-1].point.x=(double) (primitive_info[j].point.x+ dx*(distance+offset)/distance); primitive_info[number_vertices-1].point.y=(double) (primitive_info[j].point.y+ dy*(distance+offset)/distance); } static PrimitiveInfo *TraceStrokePolygon(const DrawInfo *draw_info, const PrimitiveInfo *primitive_info) { typedef struct _LineSegment { double p, q; } LineSegment; double delta_theta, dot_product, mid, miterlimit; LineSegment dx, dy, inverse_slope, slope, theta; MagickBooleanType closed_path; PointInfo box_p[5], box_q[5], center, offset, *path_p, *path_q; PrimitiveInfo *polygon_primitive, *stroke_polygon; register ssize_t i; size_t arc_segments, max_strokes, number_vertices; ssize_t j, n, p, q; /* Allocate paths. */ number_vertices=primitive_info->coordinates; max_strokes=2*number_vertices+6*BezierQuantum+360; path_p=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_p)); path_q=(PointInfo *) AcquireQuantumMemory((size_t) max_strokes, sizeof(*path_q)); polygon_primitive=(PrimitiveInfo *) AcquireQuantumMemory((size_t) number_vertices+2UL,sizeof(*polygon_primitive)); if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL) || (polygon_primitive == (PrimitiveInfo *) NULL)) { if (path_p != (PointInfo *) NULL) path_p=(PointInfo *) RelinquishMagickMemory(path_p); if (path_q != (PointInfo *) NULL) path_q=(PointInfo *) RelinquishMagickMemory(path_q); if (polygon_primitive != (PrimitiveInfo *) NULL) polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory( polygon_primitive); return((PrimitiveInfo *) NULL); } (void) CopyMagickMemory(polygon_primitive,primitive_info,(size_t) number_vertices*sizeof(*polygon_primitive)); closed_path= (fabs(primitive_info[number_vertices-1].point.x-primitive_info[0].point.x) < DrawEpsilon) && 
(fabs(primitive_info[number_vertices-1].point.y-primitive_info[0].point.y) < DrawEpsilon) ? MagickTrue : MagickFalse; if (((draw_info->linejoin == RoundJoin) || (draw_info->linejoin == MiterJoin)) && (closed_path != MagickFalse)) { polygon_primitive[number_vertices]=primitive_info[1]; number_vertices++; } polygon_primitive[number_vertices].primitive=UndefinedPrimitive; /* Compute the slope for the first line segment, p. */ dx.p=0.0; dy.p=0.0; for (n=1; n < (ssize_t) number_vertices; n++) { dx.p=polygon_primitive[n].point.x-polygon_primitive[0].point.x; dy.p=polygon_primitive[n].point.y-polygon_primitive[0].point.y; if ((fabs(dx.p) >= DrawEpsilon) || (fabs(dy.p) >= DrawEpsilon)) break; } if (n == (ssize_t) number_vertices) n=(ssize_t) number_vertices-1L; slope.p=0.0; inverse_slope.p=0.0; if (fabs(dx.p) < DrawEpsilon) { if (dx.p >= 0.0) slope.p=dy.p < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon; else slope.p=dy.p < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon; } else if (fabs(dy.p) < DrawEpsilon) { if (dy.p >= 0.0) inverse_slope.p=dx.p < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon; else inverse_slope.p=dx.p < 0.0 ? 
1.0/DrawEpsilon : -1.0/DrawEpsilon; } else { slope.p=dy.p/dx.p; inverse_slope.p=(-1.0/slope.p); } mid=ExpandAffine(&draw_info->affine)*draw_info->stroke_width/2.0; miterlimit=(double) (draw_info->miterlimit*draw_info->miterlimit*mid*mid); if ((draw_info->linecap == SquareCap) && (closed_path == MagickFalse)) TraceSquareLinecap(polygon_primitive,number_vertices,mid); offset.x=sqrt((double) (mid*mid/(inverse_slope.p*inverse_slope.p+1.0))); offset.y=(double) (offset.x*inverse_slope.p); if ((dy.p*offset.x-dx.p*offset.y) > 0.0) { box_p[0].x=polygon_primitive[0].point.x-offset.x; box_p[0].y=polygon_primitive[0].point.y-offset.x*inverse_slope.p; box_p[1].x=polygon_primitive[n].point.x-offset.x; box_p[1].y=polygon_primitive[n].point.y-offset.x*inverse_slope.p; box_q[0].x=polygon_primitive[0].point.x+offset.x; box_q[0].y=polygon_primitive[0].point.y+offset.x*inverse_slope.p; box_q[1].x=polygon_primitive[n].point.x+offset.x; box_q[1].y=polygon_primitive[n].point.y+offset.x*inverse_slope.p; } else { box_p[0].x=polygon_primitive[0].point.x+offset.x; box_p[0].y=polygon_primitive[0].point.y+offset.y; box_p[1].x=polygon_primitive[n].point.x+offset.x; box_p[1].y=polygon_primitive[n].point.y+offset.y; box_q[0].x=polygon_primitive[0].point.x-offset.x; box_q[0].y=polygon_primitive[0].point.y-offset.y; box_q[1].x=polygon_primitive[n].point.x-offset.x; box_q[1].y=polygon_primitive[n].point.y-offset.y; } /* Create strokes for the line join attribute: bevel, miter, round. */ p=0; q=0; path_q[p++]=box_q[0]; path_p[q++]=box_p[0]; for (i=(ssize_t) n+1; i < (ssize_t) number_vertices; i++) { /* Compute the slope for this line segment, q. */ dx.q=polygon_primitive[i].point.x-polygon_primitive[n].point.x; dy.q=polygon_primitive[i].point.y-polygon_primitive[n].point.y; dot_product=dx.q*dx.q+dy.q*dy.q; if (dot_product < 0.25) continue; slope.q=0.0; inverse_slope.q=0.0; if (fabs(dx.q) < DrawEpsilon) { if (dx.q >= 0.0) slope.q=dy.q < 0.0 ? 
-1.0/DrawEpsilon : 1.0/DrawEpsilon; else slope.q=dy.q < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon; } else if (fabs(dy.q) < DrawEpsilon) { if (dy.q >= 0.0) inverse_slope.q=dx.q < 0.0 ? -1.0/DrawEpsilon : 1.0/DrawEpsilon; else inverse_slope.q=dx.q < 0.0 ? 1.0/DrawEpsilon : -1.0/DrawEpsilon; } else { slope.q=dy.q/dx.q; inverse_slope.q=(-1.0/slope.q); } offset.x=sqrt((double) (mid*mid/(inverse_slope.q*inverse_slope.q+1.0))); offset.y=(double) (offset.x*inverse_slope.q); dot_product=dy.q*offset.x-dx.q*offset.y; if (dot_product > 0.0) { box_p[2].x=polygon_primitive[n].point.x-offset.x; box_p[2].y=polygon_primitive[n].point.y-offset.y; box_p[3].x=polygon_primitive[i].point.x-offset.x; box_p[3].y=polygon_primitive[i].point.y-offset.y; box_q[2].x=polygon_primitive[n].point.x+offset.x; box_q[2].y=polygon_primitive[n].point.y+offset.y; box_q[3].x=polygon_primitive[i].point.x+offset.x; box_q[3].y=polygon_primitive[i].point.y+offset.y; } else { box_p[2].x=polygon_primitive[n].point.x+offset.x; box_p[2].y=polygon_primitive[n].point.y+offset.y; box_p[3].x=polygon_primitive[i].point.x+offset.x; box_p[3].y=polygon_primitive[i].point.y+offset.y; box_q[2].x=polygon_primitive[n].point.x-offset.x; box_q[2].y=polygon_primitive[n].point.y-offset.y; box_q[3].x=polygon_primitive[i].point.x-offset.x; box_q[3].y=polygon_primitive[i].point.y-offset.y; } if (fabs((double) (slope.p-slope.q)) < DrawEpsilon) { box_p[4]=box_p[1]; box_q[4]=box_q[1]; } else { box_p[4].x=(double) ((slope.p*box_p[0].x-box_p[0].y-slope.q*box_p[3].x+ box_p[3].y)/(slope.p-slope.q)); box_p[4].y=(double) (slope.p*(box_p[4].x-box_p[0].x)+box_p[0].y); box_q[4].x=(double) ((slope.p*box_q[0].x-box_q[0].y-slope.q*box_q[3].x+ box_q[3].y)/(slope.p-slope.q)); box_q[4].y=(double) (slope.p*(box_q[4].x-box_q[0].x)+box_q[0].y); } if (q >= (ssize_t) (max_strokes-6*BezierQuantum-360)) { if (~max_strokes < (6*BezierQuantum+360)) { path_p=(PointInfo *) RelinquishMagickMemory(path_p); path_q=(PointInfo *) RelinquishMagickMemory(path_q); } 
else { max_strokes+=6*BezierQuantum+360; path_p=(PointInfo *) ResizeQuantumMemory(path_p,max_strokes, sizeof(*path_p)); path_q=(PointInfo *) ResizeQuantumMemory(path_q,max_strokes, sizeof(*path_q)); } if ((path_p == (PointInfo *) NULL) || (path_q == (PointInfo *) NULL)) { if (path_p != (PointInfo *) NULL) path_p=(PointInfo *) RelinquishMagickMemory(path_p); if (path_q != (PointInfo *) NULL) path_q=(PointInfo *) RelinquishMagickMemory(path_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return((PrimitiveInfo *) NULL); } } dot_product=dx.q*dy.p-dx.p*dy.q; if (dot_product <= 0.0) switch (draw_info->linejoin) { case BevelJoin: { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_p[p++]=box_p[4]; else { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { path_q[q++]=box_q[4]; path_p[p++]=box_p[4]; } else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_p[p++]=box_p[4]; else { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_q[1].y-center.y,box_q[1].x-center.x); theta.q=atan2(box_q[2].y-center.y,box_q[2].x-center.x); if (theta.q < theta.p) theta.q+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.q-theta.p)/ (2.0*sqrt((double) (1.0/mid))))); path_q[q].x=box_q[1].x; path_q[q].y=box_q[1].y; q++; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); path_q[q].x=(double) (center.x+mid*cos(fmod((double) 
(theta.p+delta_theta),DegreesToRadians(360.0)))); path_q[q].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); q++; } path_q[q++]=box_q[2]; break; } default: break; } else switch (draw_info->linejoin) { case BevelJoin: { path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_q[q++]=box_q[4]; else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; } break; } case MiterJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) { path_q[q++]=box_q[4]; path_p[p++]=box_p[4]; } else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; path_p[p++]=box_p[1]; path_p[p++]=box_p[2]; } break; } case RoundJoin: { dot_product=(box_q[4].x-box_p[4].x)*(box_q[4].x-box_p[4].x)+ (box_q[4].y-box_p[4].y)*(box_q[4].y-box_p[4].y); if (dot_product <= miterlimit) path_q[q++]=box_q[4]; else { path_q[q++]=box_q[1]; path_q[q++]=box_q[2]; } center=polygon_primitive[n].point; theta.p=atan2(box_p[1].y-center.y,box_p[1].x-center.x); theta.q=atan2(box_p[2].y-center.y,box_p[2].x-center.x); if (theta.p < theta.q) theta.p+=2.0*MagickPI; arc_segments=(size_t) ceil((double) ((theta.p-theta.q)/ (2.0*sqrt((double) (1.0/mid))))); path_p[p++]=box_p[1]; for (j=1; j < (ssize_t) arc_segments; j++) { delta_theta=(double) (j*(theta.q-theta.p)/arc_segments); path_p[p].x=(double) (center.x+mid*cos(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); path_p[p].y=(double) (center.y+mid*sin(fmod((double) (theta.p+delta_theta),DegreesToRadians(360.0)))); p++; } path_p[p++]=box_p[2]; break; } default: break; } slope.p=slope.q; inverse_slope.p=inverse_slope.q; box_p[0]=box_p[2]; box_p[1]=box_p[3]; box_q[0]=box_q[2]; box_q[1]=box_q[3]; dx.p=dx.q; dy.p=dy.q; n=i; } path_p[p++]=box_p[1]; path_q[q++]=box_q[1]; /* Trace stroked polygon. 
*/ stroke_polygon=(PrimitiveInfo *) AcquireQuantumMemory((size_t) (p+q+2UL*closed_path+2UL),sizeof(*stroke_polygon)); if (stroke_polygon != (PrimitiveInfo *) NULL) { for (i=0; i < (ssize_t) p; i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=path_p[i]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; } for ( ; i < (ssize_t) (p+q+closed_path); i++) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=path_q[p+q+closed_path-(i+1)]; } if (closed_path != MagickFalse) { stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[p+closed_path].point; i++; } stroke_polygon[i]=polygon_primitive[0]; stroke_polygon[i].point=stroke_polygon[0].point; i++; stroke_polygon[i].primitive=UndefinedPrimitive; stroke_polygon[0].coordinates=(size_t) (p+q+2*closed_path+1); } path_p=(PointInfo *) RelinquishMagickMemory(path_p); path_q=(PointInfo *) RelinquishMagickMemory(path_q); polygon_primitive=(PrimitiveInfo *) RelinquishMagickMemory(polygon_primitive); return(stroke_polygon); }
/* ===== ab-totient-omp-13.c ===== */
// Distributed and parallel technologies, Andrew Beveridge, 03/03/2014
// To Compile: gcc -Wall -O -o ab-totient-omp -fopenmp ab-totient-omp.c
// To Run / Time: /usr/bin/time -v ./ab-totient-omp range_start range_end
#include <stdio.h>
#include <omp.h>

/*
 When input is a prime number, the totient is simply the prime number - 1.
 Totient is always even (except for 1).
 If n is a positive integer, then phi(n) is the number of integers k in the
 range 1 <= k <= n for which gcd(n, k) = 1.
*/

/*
 Compute Euler's totient of `number` by trial division: for every distinct
 prime factor p of `number`, multiply the running result by (1 - 1/p),
 implemented as `result -= result / p`. Returns phi(number).
*/
long getTotient (long number) {
    long result = number;

    // Strip the factor 2 first so the main loop only has to test odd
    // candidates.
    if (number % 2 == 0) {
        result -= result / 2;
        do
            number /= 2;
        while (number % 2 == 0);
    }

    // Trial-divide by every odd candidate up to sqrt(number). Composite
    // candidates can never divide `number` here because their prime factors
    // have already been divided out, so only primes take this branch.
    long prime;
    for (prime = 3; prime * prime <= number; prime += 2) {
        if (number % prime == 0) {
            result -= result / prime;
            do
                number /= prime;
            while (number % prime == 0);
        }
    }

    // Whatever remains (> 1) is a single prime factor larger than the
    // square root of the original value.
    if (number > 1)
        result -= result / number;

    return result;
}

/*
 Sum the totients of every integer in [range_start, range_end], splitting
 the loop across 13 OpenMP threads with a reduction on the running sum.
*/
int main(int argc, char ** argv) {
    long lower, upper;

    // Validate the command line before touching argv[1]/argv[2]; the
    // original read them unconditionally, which is undefined behavior when
    // an argument is missing, and ignored the sscanf results.
    if (argc < 3 ||
        sscanf(argv[1], "%ld", &lower) != 1 ||
        sscanf(argv[2], "%ld", &upper) != 1) {
        fprintf(stderr, "Usage: %s range_start range_end\n", argv[0]);
        return 1;
    }

    // Remember the bounds the user asked for: `lower` itself may be bumped
    // to 2 below, and the report should not reflect that internal tweak
    // (the original printed the mutated value).
    long first = lower;
    long last = upper;

    // The loop index must be `long`, matching the bounds; the original
    // `int i` truncated/overflowed for ranges beyond INT_MAX.
    long i;
    long result = 0;   // integer accumulator -- no double literals

    // phi(1) == 1 is known; fold it in and start the parallel loop at 2.
    if (lower == 1) {
        result = 1;
        lower = 2;
    }

    #pragma omp parallel for default(shared) private(i) schedule(auto) reduction(+:result) num_threads(13)
    // Sum all totients in the specified range
    for (i = lower; i <= upper; i++) {
        result = result + getTotient(i);
    }

    // Print the result against the originally requested bounds.
    printf("Sum of Totients between [%ld..%ld] is %ld \n", first, last, result);

    // A-OK!
    return 0;
}
/* ===== GB_binop__first_fp32.c ===== */
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__first_fp32 // A.*B function (eWiseMult): GB_AemultB__first_fp32 // A*D function (colscale): GB_AxD__first_fp32 // D*A function (rowscale): GB_DxB__first_fp32 // C+=B function (dense accum): GB_Cdense_accumB__first_fp32 // C+=b function (dense accum): GB_Cdense_accumb__first_fp32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__first_fp32 // C=scalar+B GB_bind1st__first_fp32 // C=scalar+B' GB_bind1st_tran__first_fp32 // C=A+scalar (none) // C=A'+scalar (none) // C type: float // A type: float // B,b type: float // BinaryOp: cij = aij #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define 
GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = x ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FIRST || GxB_NO_FP32 || GxB_NO_FIRST_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__first_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__first_fp32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int 
ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__first_fp32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__first_fp32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__first_fp32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *GB_RESTRICT Cx = (float *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B 
//------------------------------------------------------------------------------

// Free the ek_slice workspaces on every exit path of the templates below.
#undef  GB_FREE_ALL
#define GB_FREE_ALL \
{ \
    GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
    GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
    GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}

// eWiseAdd kernel: result pattern is the union of A and B; numerical work is
// done by GB_add_template.c using the GB_* operator macros.
GrB_Info GB_AaddB__first_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // slice workspaces, freed by GB_FREE_ALL
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_add_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// eWiseMult kernel: result pattern is the intersection of A and B.
GrB_Info GB_AemultB__first_fp32
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
    int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
    int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
    #include "GB_emult_template.c"
    GB_FREE_ALL ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// For FIRST, op(x, Bx[p]) == x, so every entry of Cx is set to the scalar x.
// The stray empty statements below are generator artifacts (the GETB macro
// expands to nothing for this operator).
GrB_Info GB_bind1st__first_fp32
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *GB_RESTRICT Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        ;
        ;
        Cx [p] = x ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

#if 0

// (disabled for this operator; kept by the generator for reference)
GrB_Info (none)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *GB_RESTRICT Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = Ax [p] ;
        Cx [p] = aij ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
// For FIRST the result is just the bound scalar x.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    ; ; \
    Cx [pC] = x ; \
}

GrB_Info GB_bind1st_tran__first_fp32
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows
    #undef  GB_ATYPE
    #define GB_ATYPE \
    float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

#if 0

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    float aij = Ax [pA] ; \
    Cx [pC] = aij ; \
}

GrB_Info (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

#endif
/* ==================== actionAngleStaeckel.c ==================== */
/* C code for Binney (2012)'s Staeckel approximation code */
#ifdef _WIN32
#include <Python.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
#include <gsl/gsl_math.h>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_roots.h>
#include <gsl/gsl_min.h>
#include <gsl/gsl_integration.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// OpenMP chunk size used for the schedule(static,chunk) loops below
#define CHUNKSIZE 10
//Potentials
#include <galpy_potentials.h>
#include <integrateFullOrbit.h>
#include <actionAngle.h>
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
//Macros to export functions in DLL on different OS
#if defined(_WIN32)
#define EXPORT __declspec(dllexport)
#elif defined(__GNUC__)
#define EXPORT __attribute__((visibility("default")))
#else
// Just do nothing?
#define EXPORT
#endif
#ifdef _WIN32
// On Windows, *need* to define this function to allow the package to be imported
#if PY_MAJOR_VERSION >= 3
PyMODINIT_FUNC PyInit_galpy_actionAngle_c(void) { // Python 3
  return NULL;
}
#else
PyMODINIT_FUNC initgalpy_actionAngle_c(void) {} // Python 2
#endif
#endif
/*
  Structure Declarations
*/
// Parameters for the J_R integrand (radial action, u coordinate).
// Lz22delta = Lz^2 / (2 delta^2); I3U is the third integral; u0/v0 are the
// reference point used in the Staeckel approximation; potu0v0 caches the
// potential evaluated there.
struct JRStaeckelArg{
  double E;
  double Lz22delta;
  double I3U;
  double delta;
  double u0;
  double sinh2u0;
  double v0;
  double sin2v0;
  double potu0v0;
  int nargs;
  struct potentialArg * actionAngleArgs;
};
// Parameters for the J_z integrand (vertical action, v coordinate).
struct JzStaeckelArg{
  double E;
  double Lz22delta;
  double I3V;
  double delta;
  double u0;
  double cosh2u0;
  double sinh2u0;
  double potupi2;
  int nargs;
  struct potentialArg * actionAngleArgs;
};
// Same as JRStaeckelArg, plus the integration limits [umin,umax], for the
// dJR/d(integral) integrands.
struct dJRStaeckelArg{
  double E;
  double Lz22delta;
  double I3U;
  double delta;
  double u0;
  double sinh2u0;
  double v0;
  double sin2v0;
  double potu0v0;
  double umin;
  double umax;
  int nargs;
  struct potentialArg * actionAngleArgs;
};
// Same as JzStaeckelArg, plus the lower limit vmin, for dJz derivatives.
struct dJzStaeckelArg{
  double E;
  double Lz22delta;
  double I3V;
  double delta;
  double u0;
  double cosh2u0;
  double sinh2u0;
  double potupi2;
  double vmin;
  int nargs;
  struct potentialArg * actionAngleArgs;
};
// Parameters for the u0 minimization (calcu0 / u0Equation).
struct u0EqArg{
  double E;
  double Lz22delta;
  double delta;
  int nargs;
  struct potentialArg * actionAngleArgs;
};
/*
  Function Declarations
*/
EXPORT void calcu0(int,double *,double *,int,int *,double *,int,double*,
                   double *,int *);
EXPORT void actionAngleStaeckel_uminUmaxVmin(int,double *,double *,double *,double *,
                                             double *,double *,int,int *,double *,
                                             int,double *,double *,
                                             double *,double *,int *);
EXPORT void actionAngleStaeckel_actions(int,double *,double *,double *,double *,
                                        double *,double *,int,int *,double *,int,
                                        double *,int,double *,double *,int *);
EXPORT void actionAngleStaeckel_actionsFreqsAngles(int,double *,double *,double *,
                                                   double *,double *,double *,
                                                   int,int *,double *,
                                                   int,double *,int,double *,double *,
                                                   double *,double *,double *,
                                                   double *,double *,double *,int *);
EXPORT void actionAngleStaeckel_actionsFreqs(int,double *,double *,double *,double *,
                                             double *,double *,int,int *,double *,
                                             int,double *,int,double *,double *,
                                             double *,double *,double *,int *);
void calcAnglesStaeckel(int,double *,double *,double *,double *,double *,
                        double *,double *,double *,double *,double *,double *,
                        double *,double *,double *,double *,double *,double *,
                        double *,double *,double *,double *,double *,double *,
                        double *,int,double *,double *,double *,double *,
                        double *,double *,double *,double *,double *,double *,
                        int,struct potentialArg *,int);
void calcFreqsFromDerivsStaeckel(int,double *,double *,double *,
                                 double *,double *,double *,
                                 double *,double *,double *,double *);
void calcdI3dJFromDerivsStaeckel(int,double *,double *,double *,double *,
                                 double *,double *,double *,double *);
void calcJRStaeckel(int,double *,double *,double *,double *,double *,double *,
                    int,double *,double *,double *,double *,double *,double *,
                    int,struct potentialArg *,int);
void calcJzStaeckel(int,double *,double *,double *,double *,double *,int,
                    double *,double *,double *,double *,double *,int,
                    struct potentialArg *,int);
void calcdJRStaeckel(int,double *,double *,double *,double *,double *,
                     double *,double *,double *,int,
                     double *,double *,double *,double *,double *,double *,int,
                     struct
potentialArg *,int);
void calcdJzStaeckel(int,double *,double *,double *,double *,double *,
                     double *,double *,int,double *,double *,double *,double *,
                     double *,int,
                     struct potentialArg *,int);
void calcUminUmax(int,double *,double *,double *,double *,double *,double *,
                  double *,int,double *,double *,double *,double *,double *,
                  double *,int,struct potentialArg *);
void calcVmin(int,double *,double *,double *,double *,double *,double *,int,
              double *,double *,double *,double *,double *,int,
              struct potentialArg *);
double JRStaeckelIntegrandSquared(double,void *);
double JRStaeckelIntegrand(double,void *);
double JzStaeckelIntegrandSquared(double,void *);
double JzStaeckelIntegrand(double,void *);
double dJRdEStaeckelIntegrand(double,void *);
double dJRdELowStaeckelIntegrand(double,void *);
double dJRdEHighStaeckelIntegrand(double,void *);
double dJRdLzStaeckelIntegrand(double,void *);
double dJRdLzLowStaeckelIntegrand(double,void *);
double dJRdLzHighStaeckelIntegrand(double,void *);
double dJRdI3StaeckelIntegrand(double,void *);
double dJRdI3LowStaeckelIntegrand(double,void *);
double dJRdI3HighStaeckelIntegrand(double,void *);
double dJzdEStaeckelIntegrand(double,void *);
double dJzdELowStaeckelIntegrand(double,void *);
double dJzdEHighStaeckelIntegrand(double,void *);
double dJzdLzStaeckelIntegrand(double,void *);
double dJzdLzLowStaeckelIntegrand(double,void *);
double dJzdLzHighStaeckelIntegrand(double,void *);
double dJzdI3StaeckelIntegrand(double,void *);
double dJzdI3LowStaeckelIntegrand(double,void *);
double dJzdI3HighStaeckelIntegrand(double,void *);
double u0Equation(double,void *);
double evaluatePotentials(double,double,int, struct potentialArg *);
double evaluatePotentialsUV(double,double,double,int,struct potentialArg *);
/*
  Actual functions, inlines first
*/
// Map confocal (u,v) coordinates with focal length delta to cylindrical (R,z):
// R = delta sinh(u) sin(v), z = delta cosh(u) cos(v).
static inline void uv_to_Rz(double u, double v, double * R, double *z,
                            double delta){
  *R= delta * sinh(u) * sin(v);
  *z= delta * cosh(u) * cos(v);
}
// Vectorized inverse map (R,z) -> (u,v) using the distances to the two foci
// at z = +/- delta; delta may be per-point (ndelta == ndata) or a single
// broadcast value (ndelta == 1).
static inline void Rz_to_uv_vec(int ndata,
double *R, double *z, double *u, double *v, int ndelta, double * delta){ int ii; double d12, d22, coshu, cosv,tdelta; int delta_stride= ndelta == 1 ? 0 : 1; for (ii=0; ii < ndata; ii++) { tdelta= *(delta+ii*delta_stride); d12= (*(z+ii)+tdelta)*(*(z+ii)+tdelta)+(*(R+ii))*(*(R+ii)); d22= (*(z+ii)-tdelta)*(*(z+ii)-tdelta)+(*(R+ii))*(*(R+ii)); coshu= 0.5/tdelta*(sqrt(d12)+sqrt(d22)); cosv= 0.5/tdelta*(sqrt(d12)-sqrt(d22)); *u++= acosh(coshu); *v++= acos(cosv); } u-= ndata; v-= ndata; } static inline void calcEL(int ndata, double *R, double *vR, double *vT, double *z, double *vz, double *E, double *Lz, int nargs, struct potentialArg * actionAngleArgs){ int ii; for (ii=0; ii < ndata; ii++){ *(E+ii)= evaluatePotentials(*(R+ii),*(z+ii), nargs,actionAngleArgs) + 0.5 * *(vR+ii) * *(vR+ii) + 0.5 * *(vT+ii) * *(vT+ii) + 0.5 * *(vz+ii) * *(vz+ii); *(Lz+ii)= *(R+ii) * *(vT+ii); } } /* MAIN FUNCTIONS */ void calcu0(int ndata, double *E, double *Lz, int npot, int * pot_type, double * pot_args, int ndelta, double * delta, double *u0, int * err){ int ii; //Set up the potentials struct potentialArg * actionAngleArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) ); parse_leapFuncArgs_Full(npot,actionAngleArgs,&pot_type,&pot_args); //setup the function to be minimized gsl_function u0Eq; struct u0EqArg * params= (struct u0EqArg *) malloc ( sizeof (struct u0EqArg) ); params->nargs= npot; params->actionAngleArgs= actionAngleArgs; //Setup solver int status; int iter, max_iter = 100; const gsl_min_fminimizer_type *T; gsl_min_fminimizer *s; double u_guess, u_lo, u_hi; T = gsl_min_fminimizer_brent; s = gsl_min_fminimizer_alloc (T); u0Eq.function = &u0Equation; int delta_stride= ndelta == 1 ? 
0 : 1; for (ii=0; ii < ndata; ii++){ //Setup function params->delta= *(delta+ii*delta_stride); params->E= *(E+ii); params->Lz22delta= 0.5 * *(Lz+ii) * *(Lz+ii) / *(delta+ii*delta_stride) / *(delta+ii*delta_stride); u0Eq.params = params; //Find starting points for minimum u_guess= 1.; u_lo= 0.001; u_hi= 100.; gsl_set_error_handler_off(); status = gsl_min_fminimizer_set (s, &u0Eq, u_guess, u_lo, u_hi); if (status == GSL_EINVAL) { *(u0+ii)= u_hi; gsl_set_error_handler (NULL); continue; } gsl_set_error_handler (NULL); iter= 0; do { iter++; status = gsl_min_fminimizer_iterate (s); u_guess = gsl_min_fminimizer_x_minimum (s); u_lo = gsl_min_fminimizer_x_lower (s); u_hi = gsl_min_fminimizer_x_upper (s); status = gsl_min_test_interval (u_lo, u_hi, 9.9999999999999998e-13, 4.4408920985006262e-16); } while (status == GSL_CONTINUE && iter < max_iter); *(u0+ii)= gsl_min_fminimizer_x_minimum (s); } gsl_min_fminimizer_free (s); free(params); free_potentialArgs(npot,actionAngleArgs); free(actionAngleArgs); *err= status; } void actionAngleStaeckel_uminUmaxVmin(int ndata, double *R, double *vR, double *vT, double *z, double *vz, double *u0, int npot, int * pot_type, double * pot_args, int ndelta, double * delta, double *umin, double *umax, double *vmin, int * err){ // Just copied this over from actionAngleStaeckel_actions below, not elegant // but does the job... 
  int ii;
  double tdelta;
  //Set up the potentials
  struct potentialArg * actionAngleArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) );
  parse_leapFuncArgs_Full(npot,actionAngleArgs,&pot_type,&pot_args);
  //E,Lz
  double *E= (double *) malloc ( ndata * sizeof(double) );
  double *Lz= (double *) malloc ( ndata * sizeof(double) );
  calcEL(ndata,R,vR,vT,z,vz,E,Lz,npot,actionAngleArgs);
  //Calculate all necessary parameters
  double *ux= (double *) malloc ( ndata * sizeof(double) );
  double *vx= (double *) malloc ( ndata * sizeof(double) );
  Rz_to_uv_vec(ndata,R,z,ux,vx,ndelta,delta);
  double *coshux= (double *) malloc ( ndata * sizeof(double) );
  double *sinhux= (double *) malloc ( ndata * sizeof(double) );
  double *sinvx= (double *) malloc ( ndata * sizeof(double) );
  double *cosvx= (double *) malloc ( ndata * sizeof(double) );
  double *pux= (double *) malloc ( ndata * sizeof(double) );
  double *pvx= (double *) malloc ( ndata * sizeof(double) );
  double *sinh2u0= (double *) malloc ( ndata * sizeof(double) );
  double *cosh2u0= (double *) malloc ( ndata * sizeof(double) );
  double *v0= (double *) malloc ( ndata * sizeof(double) );
  double *sin2v0= (double *) malloc ( ndata * sizeof(double) );
  double *potu0v0= (double *) malloc ( ndata * sizeof(double) );
  double *potupi2= (double *) malloc ( ndata * sizeof(double) );
  double *I3U= (double *) malloc ( ndata * sizeof(double) );
  double *I3V= (double *) malloc ( ndata * sizeof(double) );
  // stride 0 broadcasts a single delta over all points
  int delta_stride= ndelta == 1 ? 0 : 1;
  UNUSED int chunk= CHUNKSIZE;
  // per-point setup: (u,v) momenta pux/pvx and the approximate third
  // integrals I3U (for the u motion) and I3V (for the v motion)
#pragma omp parallel for schedule(static,chunk) private(ii,tdelta)
  for (ii=0; ii < ndata; ii++){
    tdelta= *(delta+ii*delta_stride);
    *(coshux+ii)= cosh(*(ux+ii));
    *(sinhux+ii)= sinh(*(ux+ii));
    *(cosvx+ii)= cos(*(vx+ii));
    *(sinvx+ii)= sin(*(vx+ii));
    // momenta conjugate to u and v
    *(pux+ii)= tdelta * (*(vR+ii) * *(coshux+ii) * *(sinvx+ii) + *(vz+ii) * *(sinhux+ii) * *(cosvx+ii));
    *(pvx+ii)= tdelta * (*(vR+ii) * *(sinhux+ii) * *(cosvx+ii) - *(vz+ii) * *(coshux+ii) * *(sinvx+ii));
    *(sinh2u0+ii)= sinh(*(u0+ii)) * sinh(*(u0+ii));
    *(cosh2u0+ii)= cosh(*(u0+ii)) * cosh(*(u0+ii));
    // reference v0 is fixed to the equatorial plane
    *(v0+ii)= 0.5 * M_PI; //*(vx+ii);
    *(sin2v0+ii)= sin(*(v0+ii)) * sin(*(v0+ii));
    *(potu0v0+ii)= evaluatePotentialsUV(*(u0+ii),*(v0+ii),tdelta,
                                        npot,actionAngleArgs);
    *(I3U+ii)= *(E+ii) * *(sinhux+ii) * *(sinhux+ii)
      - 0.5 * *(pux+ii) * *(pux+ii) / tdelta / tdelta
      - 0.5 * *(Lz+ii) * *(Lz+ii) / tdelta / tdelta / *(sinhux+ii) / *(sinhux+ii)
      - ( *(sinhux+ii) * *(sinhux+ii) + *(sin2v0+ii))
      *evaluatePotentialsUV(*(ux+ii),*(v0+ii),tdelta,
                            npot,actionAngleArgs)
      + ( *(sinh2u0+ii) + *(sin2v0+ii) )* *(potu0v0+ii);
    *(potupi2+ii)= evaluatePotentialsUV(*(u0+ii),0.5 * M_PI,tdelta,
                                        npot,actionAngleArgs);
    *(I3V+ii)= - *(E+ii) * *(sinvx+ii) * *(sinvx+ii)
      + 0.5 * *(pvx+ii) * *(pvx+ii) / tdelta / tdelta
      + 0.5 * *(Lz+ii) * *(Lz+ii) / tdelta / tdelta / *(sinvx+ii) / *(sinvx+ii)
      - *(cosh2u0+ii) * *(potupi2+ii)
      + ( *(sinh2u0+ii) + *(sinvx+ii) * *(sinvx+ii))
      * evaluatePotentialsUV(*(u0+ii),*(vx+ii),tdelta,
                             npot,actionAngleArgs);
  }
  //Calculate 'peri' and 'apo'centers
  calcUminUmax(ndata,umin,umax,ux,pux,E,Lz,I3U,ndelta,delta,u0,sinh2u0,v0,
               sin2v0,potu0v0,npot,actionAngleArgs);
  calcVmin(ndata,vmin,vx,pvx,E,Lz,I3V,ndelta,delta,u0,cosh2u0,sinh2u0,potupi2,
           npot,actionAngleArgs);
  //Free
  free_potentialArgs(npot,actionAngleArgs);
  free(actionAngleArgs);
  free(E); free(Lz);
  free(ux); free(vx);
  free(coshux); free(sinhux);
  free(sinvx); free(cosvx);
  free(pux); free(pvx);
  free(sinh2u0); free(cosh2u0);
  free(v0); free(sin2v0);
  free(potu0v0); free(potupi2);
  free(I3U); free(I3V);
}
// Compute the Staeckel-approximation actions (jr,jz) for ndata phase-space
// points; 'order' is the Gauss-Legendre order used for the action integrals.
void actionAngleStaeckel_actions(int ndata,
                                 double *R, double *vR, double *vT,
                                 double *z, double *vz,
                                 double *u0,
                                 int npot, int * pot_type, double * pot_args,
                                 int ndelta, double * delta,
                                 int order,
                                 double *jr, double *jz, int * err){
  int ii;
  double tdelta;
  //Set up the potentials
  struct potentialArg * actionAngleArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) );
  parse_leapFuncArgs_Full(npot,actionAngleArgs,&pot_type,&pot_args);
  //E,Lz
  double *E= (double *) malloc ( ndata * sizeof(double) );
  double *Lz= (double *) malloc ( ndata * sizeof(double) );
  calcEL(ndata,R,vR,vT,z,vz,E,Lz,npot,actionAngleArgs);
  //Calculate all necessary parameters
  double *ux= (double *) malloc ( ndata * sizeof(double) );
  double *vx= (double *) malloc ( ndata * sizeof(double) );
  Rz_to_uv_vec(ndata,R,z,ux,vx,ndelta,delta);
  double *coshux= (double *) malloc ( ndata * sizeof(double) );
  double *sinhux= (double *) malloc ( ndata * sizeof(double) );
  double *sinvx= (double *) malloc ( ndata * sizeof(double) );
  double *cosvx= (double *) malloc ( ndata * sizeof(double) );
  double *pux= (double *) malloc ( ndata * sizeof(double) );
  double *pvx= (double *) malloc ( ndata * sizeof(double) );
  double *sinh2u0= (double *) malloc ( ndata * sizeof(double) );
  double *cosh2u0= (double *) malloc ( ndata * sizeof(double) );
  double *v0= (double *) malloc ( ndata * sizeof(double) );
  double *sin2v0= (double *) malloc ( ndata * sizeof(double) );
  double *potu0v0= (double *) malloc ( ndata * sizeof(double) );
  double *potupi2= (double *) malloc ( ndata * sizeof(double) );
  double *I3U= (double *) malloc ( ndata * sizeof(double) );
  double *I3V= (double *) malloc ( ndata * sizeof(double) );
  // stride 0 broadcasts a single delta over all points
  int delta_stride= ndelta == 1 ? 0 : 1;
  UNUSED int chunk= CHUNKSIZE;
  // same per-point setup as in actionAngleStaeckel_uminUmaxVmin above
#pragma omp parallel for schedule(static,chunk) private(ii,tdelta)
  for (ii=0; ii < ndata; ii++){
    tdelta= *(delta+ii*delta_stride);
    *(coshux+ii)= cosh(*(ux+ii));
    *(sinhux+ii)= sinh(*(ux+ii));
    *(cosvx+ii)= cos(*(vx+ii));
    *(sinvx+ii)= sin(*(vx+ii));
    *(pux+ii)= tdelta * (*(vR+ii) * *(coshux+ii) * *(sinvx+ii) + *(vz+ii) * *(sinhux+ii) * *(cosvx+ii));
    *(pvx+ii)= tdelta * (*(vR+ii) * *(sinhux+ii) * *(cosvx+ii) - *(vz+ii) * *(coshux+ii) * *(sinvx+ii));
    *(sinh2u0+ii)= sinh(*(u0+ii)) * sinh(*(u0+ii));
    *(cosh2u0+ii)= cosh(*(u0+ii)) * cosh(*(u0+ii));
    *(v0+ii)= 0.5 * M_PI; //*(vx+ii);
    *(sin2v0+ii)= sin(*(v0+ii)) * sin(*(v0+ii));
    *(potu0v0+ii)= evaluatePotentialsUV(*(u0+ii),*(v0+ii),tdelta,
                                        npot,actionAngleArgs);
    *(I3U+ii)= *(E+ii) * *(sinhux+ii) * *(sinhux+ii)
      - 0.5 * *(pux+ii) * *(pux+ii) / tdelta / tdelta
      - 0.5 * *(Lz+ii) * *(Lz+ii) / tdelta / tdelta / *(sinhux+ii) / *(sinhux+ii)
      - ( *(sinhux+ii) * *(sinhux+ii) + *(sin2v0+ii))
      *evaluatePotentialsUV(*(ux+ii),*(v0+ii),tdelta,
                            npot,actionAngleArgs)
      + ( *(sinh2u0+ii) + *(sin2v0+ii) )* *(potu0v0+ii);
    *(potupi2+ii)= evaluatePotentialsUV(*(u0+ii),0.5 * M_PI,tdelta,
                                        npot,actionAngleArgs);
    *(I3V+ii)= - *(E+ii) * *(sinvx+ii) * *(sinvx+ii)
      + 0.5 * *(pvx+ii) * *(pvx+ii) / tdelta / tdelta
      + 0.5 * *(Lz+ii) * *(Lz+ii) / tdelta / tdelta / *(sinvx+ii) / *(sinvx+ii)
      - *(cosh2u0+ii) * *(potupi2+ii)
      + ( *(sinh2u0+ii) + *(sinvx+ii) * *(sinvx+ii))
      * evaluatePotentialsUV(*(u0+ii),*(vx+ii),tdelta,
                             npot,actionAngleArgs);
  }
  //Calculate 'peri' and 'apo'centers
  double *umin= (double *) malloc ( ndata * sizeof(double) );
  double *umax= (double *) malloc ( ndata * sizeof(double) );
  double *vmin= (double *) malloc ( ndata * sizeof(double) );
  calcUminUmax(ndata,umin,umax,ux,pux,E,Lz,I3U,ndelta,delta,u0,sinh2u0,v0,
               sin2v0,potu0v0,npot,actionAngleArgs);
  calcVmin(ndata,vmin,vx,pvx,E,Lz,I3V,ndelta,delta,u0,cosh2u0,sinh2u0,potupi2,
           npot,actionAngleArgs);
  //Calculate the actions
  calcJRStaeckel(ndata,jr,umin,umax,E,Lz,I3U,ndelta,delta,u0,sinh2u0,v0,sin2v0,
                 potu0v0,npot,actionAngleArgs,order);
  calcJzStaeckel(ndata,jz,vmin,E,Lz,I3V,ndelta,delta,u0,cosh2u0,sinh2u0,
                 potupi2,npot,actionAngleArgs,order);
  //Free
  free_potentialArgs(npot,actionAngleArgs);
  free(actionAngleArgs);
  free(E); free(Lz);
  free(ux); free(vx);
  free(coshux); free(sinhux);
  free(sinvx); free(cosvx);
  free(pux); free(pvx);
  free(sinh2u0); free(cosh2u0);
  free(v0); free(sin2v0);
  free(potu0v0); free(potupi2);
  free(I3U); free(I3V);
  free(umin); free(umax);
  free(vmin);
}
// Radial action: J_R = sqrt(2) delta / pi * integral of the JR integrand
// over [umin, umax], evaluated with fixed-order Gauss-Legendre quadrature.
// Uses one gsl_function + parameter struct per OpenMP thread.
void calcJRStaeckel(int ndata,
                    double * jr,
                    double * umin, double * umax,
                    double * E, double * Lz, double * I3U,
                    int ndelta, double * delta,
                    double * u0, double * sinh2u0,
                    double * v0, double * sin2v0,
                    double * potu0v0,
                    int nargs, struct potentialArg * actionAngleArgs,
                    int order){
  int ii, tid, nthreads;
#ifdef _OPENMP
  nthreads = omp_get_max_threads();
#else
  nthreads = 1;
#endif
  // per-thread integrand closures
  gsl_function * JRInt= (gsl_function *) malloc ( nthreads * sizeof(gsl_function) );
  struct JRStaeckelArg * params= (struct JRStaeckelArg *) malloc ( nthreads * sizeof (struct JRStaeckelArg) );
  for (tid=0; tid < nthreads; tid++){
    (params+tid)->nargs= nargs;
    (params+tid)->actionAngleArgs= actionAngleArgs;
  }
  //Setup integrator
  gsl_integration_glfixed_table * T= gsl_integration_glfixed_table_alloc (order);
  // stride 0 broadcasts a single delta over all points
  int delta_stride= ndelta == 1 ? 0 : 1;
  UNUSED int chunk= CHUNKSIZE;
#pragma omp parallel for schedule(static,chunk) \
  private(tid,ii) \
  shared(jr,umin,umax,JRInt,params,T,delta,E,Lz,I3U,u0,sinh2u0,v0,sin2v0,potu0v0)
  for (ii=0; ii < ndata; ii++){
#ifdef _OPENMP
    tid= omp_get_thread_num();
#else
    tid = 0;
#endif
    // -9999.99 is the calcUminUmax failure sentinel; propagate as 9999.99
    if ( *(umin+ii) == -9999.99 || *(umax+ii) == -9999.99 ){
      *(jr+ii)= 9999.99;
      continue;
    }
    if ( (*(umax+ii) - *(umin+ii)) / *(umax+ii) < 0.000001 ){//circular
      *(jr+ii) = 0.;
      continue;
    }
    //Setup function
    (params+tid)->delta= *(delta+ii*delta_stride);
    (params+tid)->E= *(E+ii);
    (params+tid)->Lz22delta= 0.5 * *(Lz+ii) * *(Lz+ii) / *(delta+ii*delta_stride) / *(delta+ii*delta_stride);
    (params+tid)->I3U= *(I3U+ii);
    (params+tid)->u0= *(u0+ii);
    (params+tid)->sinh2u0= *(sinh2u0+ii);
    (params+tid)->v0= *(v0+ii);
    (params+tid)->sin2v0= *(sin2v0+ii);
    (params+tid)->potu0v0= *(potu0v0+ii);
    (JRInt+tid)->function = &JRStaeckelIntegrand;
    (JRInt+tid)->params = params+tid;
    //Integrate
    *(jr+ii)= gsl_integration_glfixed (JRInt+tid,*(umin+ii),*(umax+ii),T)
      * sqrt(2.) * *(delta+ii*delta_stride) / M_PI;
  }
  free(JRInt);
  free(params);
  gsl_integration_glfixed_table_free ( T );
}
// Vertical action: J_z = 2 sqrt(2) delta / pi * integral of the Jz integrand
// over [vmin, pi/2]; same per-thread setup as calcJRStaeckel.
void calcJzStaeckel(int ndata,
                    double * jz,
                    double * vmin,
                    double * E, double * Lz, double * I3V,
                    int ndelta, double * delta,
                    double * u0, double * cosh2u0, double * sinh2u0,
                    double * potupi2,
                    int nargs, struct potentialArg * actionAngleArgs,
                    int order){
  int ii, tid, nthreads;
#ifdef _OPENMP
  nthreads = omp_get_max_threads();
#else
  nthreads = 1;
#endif
  gsl_function * JzInt= (gsl_function *) malloc ( nthreads * sizeof(gsl_function) );
  struct JzStaeckelArg * params= (struct JzStaeckelArg *) malloc ( nthreads * sizeof (struct JzStaeckelArg) );
  for (tid=0; tid < nthreads; tid++){
    (params+tid)->nargs= nargs;
    (params+tid)->actionAngleArgs= actionAngleArgs;
  }
  //Setup integrator
  gsl_integration_glfixed_table * T= gsl_integration_glfixed_table_alloc (order);
  // stride 0 broadcasts a single delta over all points
  int delta_stride= ndelta == 1 ?
0 : 1;
  UNUSED int chunk= CHUNKSIZE;
#pragma omp parallel for schedule(static,chunk) \
  private(tid,ii) \
  shared(jz,vmin,JzInt,params,T,delta,E,Lz,I3V,u0,cosh2u0,sinh2u0,potupi2)
  for (ii=0; ii < ndata; ii++){
#ifdef _OPENMP
    tid= omp_get_thread_num();
#else
    tid = 0;
#endif
    // -9999.99 is the calcVmin failure sentinel; propagate as 9999.99
    if ( *(vmin+ii) == -9999.99 ){
      *(jz+ii)= 9999.99;
      continue;
    }
    if ( (0.5 * M_PI - *(vmin+ii)) / M_PI * 2. < 0.000001 ){//circular
      *(jz+ii) = 0.;
      continue;
    }
    //Setup function
    (params+tid)->delta= *(delta+ii*delta_stride);
    (params+tid)->E= *(E+ii);
    (params+tid)->Lz22delta= 0.5 * *(Lz+ii) * *(Lz+ii) / *(delta+ii*delta_stride) / *(delta+ii*delta_stride);
    (params+tid)->I3V= *(I3V+ii);
    (params+tid)->u0= *(u0+ii);
    (params+tid)->cosh2u0= *(cosh2u0+ii);
    (params+tid)->sinh2u0= *(sinh2u0+ii);
    (params+tid)->potupi2= *(potupi2+ii);
    (JzInt+tid)->function = &JzStaeckelIntegrand;
    (JzInt+tid)->params = params+tid;
    //Integrate
    *(jz+ii)= gsl_integration_glfixed (JzInt+tid,*(vmin+ii),M_PI/2.,T)
      * 2 * sqrt(2.) * *(delta+ii*delta_stride) / M_PI;
  }
  free(JzInt);
  free(params);
  gsl_integration_glfixed_table_free ( T );
}
// Actions plus frequencies: identical setup to actionAngleStaeckel_actions,
// then frequencies Omega_{r,phi,z} from the derivatives of the actions with
// respect to the integrals of motion (E, Lz, I3).
void actionAngleStaeckel_actionsFreqs(int ndata,
                                      double *R, double *vR, double *vT,
                                      double *z, double *vz,
                                      double *u0,
                                      int npot, int * pot_type,
                                      double * pot_args,
                                      int ndelta, double * delta,
                                      int order,
                                      double *jr, double *jz,
                                      double *Omegar, double *Omegaphi,
                                      double *Omegaz, int * err){
  int ii;
  double tdelta;
  //Set up the potentials
  struct potentialArg * actionAngleArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) );
  parse_leapFuncArgs_Full(npot,actionAngleArgs,&pot_type,&pot_args);
  //E,Lz
  double *E= (double *) malloc ( ndata * sizeof(double) );
  double *Lz= (double *) malloc ( ndata * sizeof(double) );
  calcEL(ndata,R,vR,vT,z,vz,E,Lz,npot,actionAngleArgs);
  //Calculate all necessary parameters
  double *ux= (double *) malloc ( ndata * sizeof(double) );
  double *vx= (double *) malloc ( ndata * sizeof(double) );
  Rz_to_uv_vec(ndata,R,z,ux,vx,ndelta,delta);
  double *coshux= (double *) malloc ( ndata * sizeof(double) );
  double *sinhux= (double *) malloc ( ndata * sizeof(double) );
  double *sinvx= (double *) malloc ( ndata * sizeof(double) );
  double *cosvx= (double *) malloc ( ndata * sizeof(double) );
  double *pux= (double *) malloc ( ndata * sizeof(double) );
  double *pvx= (double *) malloc ( ndata * sizeof(double) );
  double *sinh2u0= (double *) malloc ( ndata * sizeof(double) );
  double *cosh2u0= (double *) malloc ( ndata * sizeof(double) );
  double *v0= (double *) malloc ( ndata * sizeof(double) );
  double *sin2v0= (double *) malloc ( ndata * sizeof(double) );
  double *potu0v0= (double *) malloc ( ndata * sizeof(double) );
  double *potupi2= (double *) malloc ( ndata * sizeof(double) );
  double *I3U= (double *) malloc ( ndata * sizeof(double) );
  double *I3V= (double *) malloc ( ndata * sizeof(double) );
  // stride 0 broadcasts a single delta over all points
  int delta_stride= ndelta == 1 ? 0 : 1;
  UNUSED int chunk= CHUNKSIZE;
  // same per-point setup as in actionAngleStaeckel_uminUmaxVmin above
#pragma omp parallel for schedule(static,chunk) private(ii,tdelta)
  for (ii=0; ii < ndata; ii++){
    tdelta= *(delta+ii*delta_stride);
    *(coshux+ii)= cosh(*(ux+ii));
    *(sinhux+ii)= sinh(*(ux+ii));
    *(cosvx+ii)= cos(*(vx+ii));
    *(sinvx+ii)= sin(*(vx+ii));
    *(pux+ii)= tdelta * (*(vR+ii) * *(coshux+ii) * *(sinvx+ii) + *(vz+ii) * *(sinhux+ii) * *(cosvx+ii));
    *(pvx+ii)= tdelta * (*(vR+ii) * *(sinhux+ii) * *(cosvx+ii) - *(vz+ii) * *(coshux+ii) * *(sinvx+ii));
    *(sinh2u0+ii)= sinh(*(u0+ii)) * sinh(*(u0+ii));
    *(cosh2u0+ii)= cosh(*(u0+ii)) * cosh(*(u0+ii));
    *(v0+ii)= 0.5 * M_PI; //*(vx+ii);
    *(sin2v0+ii)= sin(*(v0+ii)) * sin(*(v0+ii));
    *(potu0v0+ii)= evaluatePotentialsUV(*(u0+ii),*(v0+ii),tdelta,
                                        npot,actionAngleArgs);
    *(I3U+ii)= *(E+ii) * *(sinhux+ii) * *(sinhux+ii)
      - 0.5 * *(pux+ii) * *(pux+ii) / tdelta / tdelta
      - 0.5 * *(Lz+ii) * *(Lz+ii) / tdelta / tdelta / *(sinhux+ii) / *(sinhux+ii)
      - ( *(sinhux+ii) * *(sinhux+ii) + *(sin2v0+ii))
      *evaluatePotentialsUV(*(ux+ii),*(v0+ii),tdelta,
                            npot,actionAngleArgs)
      + ( *(sinh2u0+ii) + *(sin2v0+ii) )* *(potu0v0+ii);
    *(potupi2+ii)= evaluatePotentialsUV(*(u0+ii),0.5 * M_PI,tdelta,
                                        npot,actionAngleArgs);
    *(I3V+ii)= - *(E+ii) * *(sinvx+ii) * *(sinvx+ii)
      + 0.5 * *(pvx+ii) * *(pvx+ii) / tdelta / tdelta
      + 0.5 * *(Lz+ii) * *(Lz+ii) / tdelta / tdelta / *(sinvx+ii) / *(sinvx+ii)
      - *(cosh2u0+ii) * *(potupi2+ii)
      + ( *(sinh2u0+ii) + *(sinvx+ii) * *(sinvx+ii))
      * evaluatePotentialsUV(*(u0+ii),*(vx+ii),tdelta,
                             npot,actionAngleArgs);
  }
  //Calculate 'peri' and 'apo'centers
  double *umin= (double *) malloc ( ndata * sizeof(double) );
  double *umax= (double *) malloc ( ndata * sizeof(double) );
  double *vmin= (double *) malloc ( ndata * sizeof(double) );
  calcUminUmax(ndata,umin,umax,ux,pux,E,Lz,I3U,ndelta,delta,u0,sinh2u0,v0,
               sin2v0,potu0v0,npot,actionAngleArgs);
  calcVmin(ndata,vmin,vx,pvx,E,Lz,I3V,ndelta,delta,u0,cosh2u0,sinh2u0,potupi2,
           npot,actionAngleArgs);
  //Calculate the actions
  calcJRStaeckel(ndata,jr,umin,umax,E,Lz,I3U,ndelta,delta,u0,sinh2u0,v0,sin2v0,
                 potu0v0,npot,actionAngleArgs,order);
  calcJzStaeckel(ndata,jz,vmin,E,Lz,I3V,ndelta,delta,u0,cosh2u0,sinh2u0,
                 potupi2,npot,actionAngleArgs,order);
  //Calculate the derivatives of the actions wrt the integrals of motion
  double *dJRdE= (double *) malloc ( ndata * sizeof(double) );
  double *dJRdLz= (double *) malloc ( ndata * sizeof(double) );
  double *dJRdI3= (double *) malloc ( ndata * sizeof(double) );
  double *dJzdE= (double *) malloc ( ndata * sizeof(double) );
  double *dJzdLz= (double *) malloc ( ndata * sizeof(double) );
  double *dJzdI3= (double *) malloc ( ndata * sizeof(double) );
  double *detA= (double *) malloc ( ndata * sizeof(double) );
  calcdJRStaeckel(ndata,dJRdE,dJRdLz,dJRdI3,
                  umin,umax,E,Lz,I3U,ndelta,delta,u0,sinh2u0,v0,sin2v0,
                  potu0v0,npot,actionAngleArgs,order);
  calcdJzStaeckel(ndata,dJzdE,dJzdLz,dJzdI3,
                  vmin,E,Lz,I3V,ndelta,delta,u0,cosh2u0,sinh2u0,
                  potupi2,npot,actionAngleArgs,order);
  // invert the (dJ/dintegral) matrix to get the frequencies
  calcFreqsFromDerivsStaeckel(ndata,Omegar,Omegaphi,Omegaz,detA,
                              dJRdE,dJRdLz,dJRdI3,
                              dJzdE,dJzdLz,dJzdI3);
  //Free
  free_potentialArgs(npot,actionAngleArgs);
  free(actionAngleArgs);
  free(E); free(Lz);
  free(ux); free(vx);
  free(coshux); free(sinhux);
  free(sinvx); free(cosvx);
  free(pux); free(pvx);
  free(sinh2u0); free(cosh2u0);
  free(v0); free(sin2v0);
  free(potu0v0); free(potupi2);
  free(I3U); free(I3V);
  free(umin); free(umax);
  free(vmin);
  free(dJRdE); free(dJRdLz); free(dJRdI3);
  free(dJzdE);
  free(detA);
  free(dJzdLz); free(dJzdI3);
}
// Actions, frequencies, AND angles.  Mirrors actionAngleStaeckel_actionsFreqs
// and additionally computes dI3/dJ terms and the angle variables via
// calcAnglesStaeckel.
// NOTE(review): this function continues past the end of this chunk; only the
// visible part is documented here.
void actionAngleStaeckel_actionsFreqsAngles(int ndata,
                                            double *R, double *vR, double *vT,
                                            double *z, double *vz,
                                            double *u0,
                                            int npot, int * pot_type,
                                            double * pot_args,
                                            int ndelta, double * delta,
                                            int order,
                                            double *jr, double *jz,
                                            double *Omegar, double *Omegaphi,
                                            double *Omegaz,
                                            double *Angler, double *Anglephi,
                                            double *Anglez, int * err){
  int ii;
  double tdelta;
  //Set up the potentials
  struct potentialArg * actionAngleArgs= (struct potentialArg *) malloc ( npot * sizeof (struct potentialArg) );
  parse_leapFuncArgs_Full(npot,actionAngleArgs,&pot_type,&pot_args);
  //E,Lz
  double *E= (double *) malloc ( ndata * sizeof(double) );
  double *Lz= (double *) malloc ( ndata * sizeof(double) );
  calcEL(ndata,R,vR,vT,z,vz,E,Lz,npot,actionAngleArgs);
  //Calculate all necessary parameters
  double *ux= (double *) malloc ( ndata * sizeof(double) );
  double *vx= (double *) malloc ( ndata * sizeof(double) );
  Rz_to_uv_vec(ndata,R,z,ux,vx,ndelta,delta);
  double *coshux= (double *) malloc ( ndata * sizeof(double) );
  double *sinhux= (double *) malloc ( ndata * sizeof(double) );
  double *sinvx= (double *) malloc ( ndata * sizeof(double) );
  double *cosvx= (double *) malloc ( ndata * sizeof(double) );
  double *pux= (double *) malloc ( ndata * sizeof(double) );
  double *pvx= (double *) malloc ( ndata * sizeof(double) );
  double *sinh2u0= (double *) malloc ( ndata * sizeof(double) );
  double *cosh2u0= (double *) malloc ( ndata * sizeof(double) );
  double *v0= (double *) malloc ( ndata * sizeof(double) );
  double *sin2v0= (double *) malloc ( ndata * sizeof(double) );
  double *potu0v0= (double *) malloc ( ndata * sizeof(double) );
  double *potupi2= (double *) malloc ( ndata * sizeof(double) );
  double *I3U= (double *) malloc ( ndata * sizeof(double) );
  double *I3V= (double *) malloc ( ndata * sizeof(double) );
  // stride 0 broadcasts a single delta over all points
  int delta_stride= ndelta == 1 ? 0 : 1;
  UNUSED int chunk= CHUNKSIZE;
  // same per-point setup as in actionAngleStaeckel_uminUmaxVmin above
#pragma omp parallel for schedule(static,chunk) private(ii,tdelta)
  for (ii=0; ii < ndata; ii++){
    tdelta= *(delta+ii*delta_stride);
    *(coshux+ii)= cosh(*(ux+ii));
    *(sinhux+ii)= sinh(*(ux+ii));
    *(cosvx+ii)= cos(*(vx+ii));
    *(sinvx+ii)= sin(*(vx+ii));
    *(pux+ii)= tdelta * (*(vR+ii) * *(coshux+ii) * *(sinvx+ii) + *(vz+ii) * *(sinhux+ii) * *(cosvx+ii));
    *(pvx+ii)= tdelta * (*(vR+ii) * *(sinhux+ii) * *(cosvx+ii) - *(vz+ii) * *(coshux+ii) * *(sinvx+ii));
    *(sinh2u0+ii)= sinh(*(u0+ii)) * sinh(*(u0+ii));
    *(cosh2u0+ii)= cosh(*(u0+ii)) * cosh(*(u0+ii));
    *(v0+ii)= 0.5 * M_PI; //*(vx+ii);
    *(sin2v0+ii)= sin(*(v0+ii)) * sin(*(v0+ii));
    *(potu0v0+ii)= evaluatePotentialsUV(*(u0+ii),*(v0+ii),tdelta,
                                        npot,actionAngleArgs);
    *(I3U+ii)= *(E+ii) * *(sinhux+ii) * *(sinhux+ii)
      - 0.5 * *(pux+ii) * *(pux+ii) / tdelta / tdelta
      - 0.5 * *(Lz+ii) * *(Lz+ii) / tdelta / tdelta / *(sinhux+ii) / *(sinhux+ii)
      - ( *(sinhux+ii) * *(sinhux+ii) + *(sin2v0+ii))
      *evaluatePotentialsUV(*(ux+ii),*(v0+ii),tdelta,
                            npot,actionAngleArgs)
      + ( *(sinh2u0+ii) + *(sin2v0+ii) )* *(potu0v0+ii);
    *(potupi2+ii)= evaluatePotentialsUV(*(u0+ii),0.5 * M_PI,tdelta,
                                        npot,actionAngleArgs);
    *(I3V+ii)= - *(E+ii) * *(sinvx+ii) * *(sinvx+ii)
      + 0.5 * *(pvx+ii) * *(pvx+ii) / tdelta / tdelta
      + 0.5 * *(Lz+ii) * *(Lz+ii) / tdelta / tdelta / *(sinvx+ii) / *(sinvx+ii)
      - *(cosh2u0+ii) * *(potupi2+ii)
      + ( *(sinh2u0+ii) + *(sinvx+ii) * *(sinvx+ii))
      * evaluatePotentialsUV(*(u0+ii),*(vx+ii),tdelta,
                             npot,actionAngleArgs);
  }
  //Calculate 'peri' and 'apo'centers
  double *umin= (double *) malloc ( ndata * sizeof(double) );
  double *umax= (double *) malloc ( ndata * sizeof(double) );
  double *vmin= (double *) malloc ( ndata * sizeof(double) );
  calcUminUmax(ndata,umin,umax,ux,pux,E,Lz,I3U,ndelta,delta,u0,sinh2u0,v0,
               sin2v0,potu0v0,npot,actionAngleArgs);
  calcVmin(ndata,vmin,vx,pvx,E,Lz,I3V,ndelta,delta,u0,cosh2u0,sinh2u0,potupi2,
           npot,actionAngleArgs);
  //Calculate the actions
  calcJRStaeckel(ndata,jr,umin,umax,E,Lz,I3U,ndelta,delta,u0,sinh2u0,v0,sin2v0,
                 potu0v0,npot,actionAngleArgs,order);
  calcJzStaeckel(ndata,jz,vmin,E,Lz,I3V,ndelta,delta,u0,cosh2u0,sinh2u0,
                 potupi2,npot,actionAngleArgs,order);
  //Calculate the derivatives of the actions wrt the integrals of motion
  double *dJRdE= (double *) malloc ( ndata * sizeof(double) );
  double *dJRdLz= (double *) malloc ( ndata * sizeof(double) );
  double *dJRdI3= (double *) malloc ( ndata * sizeof(double) );
  double *dJzdE= (double *) malloc ( ndata * sizeof(double) );
  double *dJzdLz= (double *) malloc ( ndata * sizeof(double) );
  double *dJzdI3= (double *) malloc ( ndata * sizeof(double) );
  double *detA= (double *) malloc ( ndata * sizeof(double) );
  calcdJRStaeckel(ndata,dJRdE,dJRdLz,dJRdI3,
                  umin,umax,E,Lz,I3U,ndelta,delta,u0,sinh2u0,v0,sin2v0,
                  potu0v0,npot,actionAngleArgs,order);
  calcdJzStaeckel(ndata,dJzdE,dJzdLz,dJzdI3,
                  vmin,E,Lz,I3V,ndelta,delta,u0,cosh2u0,sinh2u0,
                  potupi2,npot,actionAngleArgs,order);
  calcFreqsFromDerivsStaeckel(ndata,Omegar,Omegaphi,Omegaz,detA,
                              dJRdE,dJRdLz,dJRdI3,
                              dJzdE,dJzdLz,dJzdI3);
  // dI3/dJ terms needed for the angle computation
  double *dI3dJR= (double *) malloc ( ndata * sizeof(double) );
  double *dI3dJz= (double *) malloc ( ndata * sizeof(double) );
  double *dI3dLz= (double *) malloc ( ndata * sizeof(double) );
  calcdI3dJFromDerivsStaeckel(ndata,dI3dJR,dI3dJz,dI3dLz,detA,
                              dJRdE,dJzdE,dJRdLz,dJzdLz);
  calcAnglesStaeckel(ndata,Angler,Anglephi,Anglez,
                     Omegar,Omegaphi,Omegaz,dI3dJR,dI3dJz,dI3dLz,
                     dJRdE,dJRdLz,dJRdI3,
                     dJzdE,dJzdLz,dJzdI3,
                     ux,vx,pux,pvx,
                     umin,umax,E,Lz,I3U,ndelta,delta,u0,sinh2u0,v0,sin2v0,
                     potu0v0,
                     vmin,I3V,cosh2u0,potupi2,
                     npot,actionAngleArgs,order);
  //Free
  free_potentialArgs(npot,actionAngleArgs);
  free(actionAngleArgs);
  free(E); free(Lz);
  free(ux); free(vx);
  free(coshux); free(sinhux);
  free(sinvx); free(cosvx);
  free(pux); free(pvx);
  free(sinh2u0); free(cosh2u0);
  free(v0); free(sin2v0);
free(potu0v0); free(potupi2); free(I3U); free(I3V); free(umin); free(umax); free(vmin); free(dJRdE); free(dJRdLz); free(dJRdI3); free(dJzdE); free(dJzdLz); free(dJzdI3); free(detA); free(dI3dJR); free(dI3dJz); free(dI3dLz); } void calcFreqsFromDerivsStaeckel(int ndata, double * Omegar, double * Omegaphi, double * Omegaz, double * detA, double * djrdE, double * djrdLz, double * djrdI3, double * djzdE, double * djzdLz, double * djzdI3){ int ii; UNUSED int chunk= CHUNKSIZE; #pragma omp parallel for schedule(static,chunk) \ private(ii) \ shared(Omegar,Omegaphi,Omegaz,djrdE,djrdLz,djrdI3,djzdE,djzdLz,djzdI3,detA) for (ii=0; ii < ndata; ii++){ if ( *(djrdE+ii) == 9999.99 || *(djzdE+ii) == 9999.99 ) { *(Omegar+ii)= 9999.99; *(Omegaz+ii)= 9999.99; *(Omegaphi+ii)= 9999.99; } else { //First calculate the determinant of the relevant matrix *(detA+ii)= *(djrdE+ii) * *(djzdI3+ii) - *(djzdE+ii) * *(djrdI3+ii); //Then calculate the frequencies *(Omegar+ii)= *(djzdI3+ii) / *(detA+ii); *(Omegaz+ii)= - *(djrdI3+ii) / *(detA+ii); *(Omegaphi+ii)= ( *(djrdI3+ii) * *(djzdLz+ii) - *(djzdI3+ii) * *(djrdLz+ii)) / *(detA+ii); } } } void calcdI3dJFromDerivsStaeckel(int ndata, double * dI3dJR, double * dI3dJz, double * dI3dLz, double * detA, double * djrdE, double * djzdE, double * djrdLz, double * djzdLz){ int ii; UNUSED int chunk= CHUNKSIZE; #pragma omp parallel for schedule(static,chunk) \ private(ii) \ shared(djrdE,djzdE,djrdLz,djzdLz,dI3dJR,dI3dJz,dI3dLz,detA) for (ii=0; ii < ndata; ii++){ *(dI3dJR+ii)= - *(djzdE+ii) / *(detA+ii); *(dI3dJz+ii)= *(djrdE+ii) / *(detA+ii); *(dI3dLz+ii)= -( *(djrdE+ii) * *(djzdLz+ii) - *(djzdE+ii) * *(djrdLz+ii) ) / *(detA+ii); } } void calcdJRStaeckel(int ndata, double * djrdE, double * djrdLz, double * djrdI3, double * umin, double * umax, double * E, double * Lz, double * I3U, int ndelta, double * delta, double * u0, double * sinh2u0, double * v0, double * sin2v0, double * potu0v0, int nargs, struct potentialArg * actionAngleArgs, int order){ int ii, tid, 
    nthreads;
  double mid;
#ifdef _OPENMP
  nthreads = omp_get_max_threads();
#else
  nthreads = 1;
#endif
  // one gsl_function + parameter struct per thread so the OpenMP loop can
  // mutate them concurrently
  gsl_function * dJRInt= (gsl_function *) malloc ( nthreads * sizeof(gsl_function) );
  struct dJRStaeckelArg * params= (struct dJRStaeckelArg *) malloc ( nthreads * sizeof (struct dJRStaeckelArg) );
  for (tid=0; tid < nthreads; tid++){
    (params+tid)->nargs= nargs;
    (params+tid)->actionAngleArgs= actionAngleArgs;
  }
  //Setup integrator
  gsl_integration_glfixed_table * T= gsl_integration_glfixed_table_alloc (order);
  int delta_stride= ndelta == 1 ? 0 : 1;
  UNUSED int chunk= CHUNKSIZE;
#pragma omp parallel for schedule(static,chunk) \
  private(tid,ii,mid)                           \
  shared(djrdE,djrdLz,djrdI3,umin,umax,dJRInt,params,T,delta,E,Lz,I3U,u0,sinh2u0,v0,sin2v0,potu0v0)
  for (ii=0; ii < ndata; ii++){
#ifdef _OPENMP
    tid= omp_get_thread_num();
#else
    tid = 0;
#endif
    // sentinel from calcUminUmax: turning points were not found
    if ( *(umin+ii) == -9999.99 || *(umax+ii) == -9999.99 ){
      *(djrdE+ii)= 9999.99;
      *(djrdLz+ii)= 9999.99;
      *(djrdI3+ii)= 9999.99;
      continue;
    }
    if ( (*(umax+ii) - *(umin+ii)) / *(umax+ii) < 0.000001 ){//circular
      *(djrdE+ii) = 0.;
      *(djrdLz+ii) = 0.;
      *(djrdI3+ii) = 0.;
      continue;
    }
    //Setup function
    (params+tid)->delta= *(delta+ii*delta_stride);
    (params+tid)->E= *(E+ii);
    (params+tid)->Lz22delta= 0.5 * *(Lz+ii) * *(Lz+ii) / *(delta+ii*delta_stride) / *(delta+ii*delta_stride);
    (params+tid)->I3U= *(I3U+ii);
    (params+tid)->u0= *(u0+ii);
    (params+tid)->sinh2u0= *(sinh2u0+ii);
    (params+tid)->v0= *(v0+ii);
    (params+tid)->sin2v0= *(sin2v0+ii);
    (params+tid)->potu0v0= *(potu0v0+ii);
    (params+tid)->umin= *(umin+ii);
    (params+tid)->umax= *(umax+ii);
    (dJRInt+tid)->function = &dJRdELowStaeckelIntegrand;
    (dJRInt+tid)->params = params+tid;
    // substitution u = umin + t^2 (Low) / u = umax - t^2 (High) meets at the
    // orbit midpoint, hence this upper limit for t
    mid= sqrt( 0.5 * ( *(umax+ii) - *(umin+ii) ) );
    //Integrate to get djrdE
    *(djrdE+ii)= gsl_integration_glfixed (dJRInt+tid,0.,mid,T);
    (dJRInt+tid)->function = &dJRdEHighStaeckelIntegrand;
    *(djrdE+ii)+= gsl_integration_glfixed (dJRInt+tid,0.,mid,T);
    *(djrdE+ii)*= *(delta+ii*delta_stride) / M_PI / sqrt(2.);
    //then calculate djrdLz
    (dJRInt+tid)->function = &dJRdLzLowStaeckelIntegrand;
    *(djrdLz+ii)= gsl_integration_glfixed (dJRInt+tid,0.,mid,T);
    (dJRInt+tid)->function = &dJRdLzHighStaeckelIntegrand;
    *(djrdLz+ii)+= gsl_integration_glfixed (dJRInt+tid,0.,mid,T);
    *(djrdLz+ii)*= - *(Lz+ii) / M_PI / sqrt(2.) / *(delta+ii*delta_stride);
    //then calculate djrdI3
    (dJRInt+tid)->function = &dJRdI3LowStaeckelIntegrand;
    *(djrdI3+ii)= gsl_integration_glfixed (dJRInt+tid,0.,mid,T);
    (dJRInt+tid)->function = &dJRdI3HighStaeckelIntegrand;
    *(djrdI3+ii)+= gsl_integration_glfixed (dJRInt+tid,0.,mid,T);
    *(djrdI3+ii)*= - *(delta+ii*delta_stride) / M_PI / sqrt(2.);
  }
  free(dJRInt);
  free(params);
  gsl_integration_glfixed_table_free ( T );
}
// Derivatives of Jz wrt (E,Lz,I3V) by Gauss-Legendre quadrature between the
// turning point vmin and pi/2, with the same t^2 regularization as the JR
// derivatives above.  Sentinel handling mirrors calcdJRStaeckel.
void calcdJzStaeckel(int ndata,
                     double * djzdE,
                     double * djzdLz,
                     double * djzdI3,
                     double * vmin,
                     double * E,
                     double * Lz,
                     double * I3V,
                     int ndelta,
                     double * delta,
                     double * u0,
                     double * cosh2u0,
                     double * sinh2u0,
                     double * potupi2,
                     int nargs,
                     struct potentialArg * actionAngleArgs,
                     int order){
  int ii, tid,
    nthreads;
  double mid;
#ifdef _OPENMP
  nthreads = omp_get_max_threads();
#else
  nthreads = 1;
#endif
  gsl_function * dJzInt= (gsl_function *) malloc ( nthreads * sizeof(gsl_function) );
  struct dJzStaeckelArg * params= (struct dJzStaeckelArg *) malloc ( nthreads * sizeof (struct dJzStaeckelArg) );
  for (tid=0; tid < nthreads; tid++){
    (params+tid)->nargs= nargs;
    (params+tid)->actionAngleArgs= actionAngleArgs;
  }
  //Setup integrator
  gsl_integration_glfixed_table * T= gsl_integration_glfixed_table_alloc (order);
  int delta_stride= ndelta == 1 ? 0 : 1;
  UNUSED int chunk= CHUNKSIZE;
#pragma omp parallel for schedule(static,chunk) \
  private(tid,ii,mid)                           \
  shared(djzdE,djzdLz,djzdI3,vmin,dJzInt,params,T,delta,E,Lz,I3V,u0,cosh2u0,sinh2u0,potupi2)
  for (ii=0; ii < ndata; ii++){
#ifdef _OPENMP
    tid= omp_get_thread_num();
#else
    tid = 0;
#endif
    // sentinel from calcVmin: turning point was not found
    if ( *(vmin+ii) == -9999.99 ){
      *(djzdE+ii)= 9999.99;
      *(djzdLz+ii)= 9999.99;
      *(djzdI3+ii)= 9999.99;
      continue;
    }
    if ( (0.5 * M_PI - *(vmin+ii)) / M_PI * 2.
         < 0.000001 ){//circular
      *(djzdE+ii) = 0.;
      *(djzdLz+ii) = 0.;
      *(djzdI3+ii) = 0.;
      continue;
    }
    //Setup function
    (params+tid)->delta= *(delta+ii*delta_stride);
    (params+tid)->E= *(E+ii);
    (params+tid)->Lz22delta= 0.5 * *(Lz+ii) * *(Lz+ii) / *(delta+ii*delta_stride) / *(delta+ii*delta_stride);
    (params+tid)->I3V= *(I3V+ii);
    (params+tid)->u0= *(u0+ii);
    (params+tid)->cosh2u0= *(cosh2u0+ii);
    (params+tid)->sinh2u0= *(sinh2u0+ii);
    (params+tid)->potupi2= *(potupi2+ii);
    (params+tid)->vmin= *(vmin+ii);
    //First calculate dJzdE
    (dJzInt+tid)->function = &dJzdELowStaeckelIntegrand;
    (dJzInt+tid)->params = params+tid;
    mid= sqrt( 0.5 * (M_PI/2. - *(vmin+ii) ) );
    //BOVY: pv does not vanish at pi/2, so no need to break up the integral
    //Integrate
    *(djzdE+ii)= gsl_integration_glfixed (dJzInt+tid,0.,mid,T);
    (dJzInt+tid)->function = &dJzdEHighStaeckelIntegrand;
    *(djzdE+ii)+= gsl_integration_glfixed (dJzInt+tid,0.,mid,T);
    *(djzdE+ii)*= sqrt(2.) * *(delta+ii*delta_stride) / M_PI;
    //Then calculate dJzdLz
    (dJzInt+tid)->function = &dJzdLzLowStaeckelIntegrand;
    //Integrate
    *(djzdLz+ii)= gsl_integration_glfixed (dJzInt+tid,0.,mid,T);
    (dJzInt+tid)->function = &dJzdLzHighStaeckelIntegrand;
    *(djzdLz+ii)+= gsl_integration_glfixed (dJzInt+tid,0.,mid,T);
    *(djzdLz+ii)*= - *(Lz+ii) * sqrt(2.) / M_PI / *(delta+ii*delta_stride);
    //Then calculate dJzdI3
    (dJzInt+tid)->function = &dJzdI3LowStaeckelIntegrand;
    //Integrate
    *(djzdI3+ii)= gsl_integration_glfixed (dJzInt+tid,0.,mid,T);
    (dJzInt+tid)->function = &dJzdI3HighStaeckelIntegrand;
    *(djzdI3+ii)+= gsl_integration_glfixed (dJzInt+tid,0.,mid,T);
    *(djzdI3+ii)*= sqrt(2.)
      * *(delta+ii*delta_stride) / M_PI;
  }
  free(dJzInt);
  free(params);
  gsl_integration_glfixed_table_free ( T );
}
// Compute the angle variables (Angler,Anglephi,Anglez) from the frequencies,
// the dI3/dJ partials, and incomplete action integrals from the current
// (u,v) position back to the turning points.  The branch structure below
// picks the integration endpoint (umin vs umax side, vmin vs pi/2 side) and
// the additive constant according to the signs of pu and pv, so that the
// angles increase continuously along the orbit.
void calcAnglesStaeckel(int ndata,
                        double * Angler,
                        double * Anglephi,
                        double * Anglez,
                        double * Omegar,
                        double * Omegaphi,
                        double * Omegaz,
                        double * dI3dJR,
                        double * dI3dJz,
                        double * dI3dLz,
                        double * dJRdE,
                        double * dJRdLz,
                        double * dJRdI3,
                        double * dJzdE,
                        double * dJzdLz,
                        double * dJzdI3,
                        double * ux,
                        double * vx,
                        double * pux,
                        double * pvx,
                        double * umin,
                        double * umax,
                        double * E,
                        double * Lz,
                        double * I3U,
                        int ndelta,
                        double * delta,
                        double * u0,
                        double * sinh2u0,
                        double * v0,
                        double * sin2v0,
                        double * potu0v0,
                        double * vmin,
                        double * I3V,
                        double * cosh2u0,
                        double * potupi2,
                        int nargs,
                        struct potentialArg * actionAngleArgs,
                        int order){
  int ii, tid,
    nthreads;
  // Or1/Or2: u- and v-contributions to the 'time' integral;
  // I3r1/I3r2: u- and v-contributions to the I3-conjugate integral;
  // phitmp: v-contribution to the azimuthal angle
  double Or1, Or2, I3r1, I3r2,phitmp;
  double mid, midpoint;
#ifdef _OPENMP
  nthreads = omp_get_max_threads();
#else
  nthreads = 1;
#endif
  gsl_function * AngleuInt= (gsl_function *) malloc ( nthreads * sizeof(gsl_function) );
  gsl_function * AnglevInt= (gsl_function *) malloc ( nthreads * sizeof(gsl_function) );
  struct dJRStaeckelArg * paramsu= (struct dJRStaeckelArg *) malloc ( nthreads * sizeof (struct dJRStaeckelArg) );
  struct dJzStaeckelArg * paramsv= (struct dJzStaeckelArg *) malloc ( nthreads * sizeof (struct dJzStaeckelArg) );
  for (tid=0; tid < nthreads; tid++){
    (paramsu+tid)->nargs= nargs;
    (paramsu+tid)->actionAngleArgs= actionAngleArgs;
    (paramsv+tid)->nargs= nargs;
    (paramsv+tid)->actionAngleArgs= actionAngleArgs;
  }
  //Setup integrator
  gsl_integration_glfixed_table * T= gsl_integration_glfixed_table_alloc (order);
  int delta_stride= ndelta == 1 ?
    0 : 1;
  UNUSED int chunk= CHUNKSIZE;
#pragma omp parallel for schedule(static,chunk)                 \
  private(tid,ii,mid,midpoint,Or1,Or2,I3r1,I3r2,phitmp)         \
  shared(Angler,Anglephi,Anglez,Omegar,Omegaz,dI3dJR,dI3dJz,umin,umax,AngleuInt,AnglevInt,paramsu,paramsv,T,delta,E,Lz,I3U,u0,sinh2u0,v0,sin2v0,potu0v0,vmin,I3V,cosh2u0,potupi2)
  for (ii=0; ii < ndata; ii++){
#ifdef _OPENMP
    tid= omp_get_thread_num();
#else
    tid = 0;
#endif
    // sentinel from calcUminUmax: propagate failure to all three angles
    if ( *(umin+ii) == -9999.99 || *(umax+ii) == -9999.99 ){
      *(Angler+ii)= 9999.99;
      *(Anglephi+ii)= 9999.99;
      *(Anglez+ii)= 9999.99;
      continue;
    }
    if ( (*(umax+ii) - *(umin+ii)) / *(umax+ii) < 0.000001 ){//circular
      *(Angler+ii) = 0.;
      *(Anglephi+ii) = 0.;
      *(Anglez+ii) = 0.;
      continue;
    }
    //Setup u function
    (paramsu+tid)->delta= *(delta+ii*delta_stride);
    (paramsu+tid)->E= *(E+ii);
    (paramsu+tid)->Lz22delta= 0.5 * *(Lz+ii) * *(Lz+ii) / *(delta+ii*delta_stride) / *(delta+ii*delta_stride);
    (paramsu+tid)->I3U= *(I3U+ii);
    (paramsu+tid)->u0= *(u0+ii);
    (paramsu+tid)->sinh2u0= *(sinh2u0+ii);
    (paramsu+tid)->v0= *(v0+ii);
    (paramsu+tid)->sin2v0= *(sin2v0+ii);
    (paramsu+tid)->potu0v0= *(potu0v0+ii);
    (paramsu+tid)->umin= *(umin+ii);
    (paramsu+tid)->umax= *(umax+ii);
    (AngleuInt+tid)->params = paramsu+tid;
    // integrate from whichever turning point is closer to the current u
    midpoint= *(umin+ii)+ 0.5 * ( *(umax+ii) - *(umin+ii) );
    if ( *(pux+ii) > 0.
         ) {
      if ( *(ux+ii) > midpoint ) {
        mid= sqrt( ( *(umax+ii) - *(ux+ii) ) );
        (AngleuInt+tid)->function = &dJRdEHighStaeckelIntegrand;
        Or1= gsl_integration_glfixed (AngleuInt+tid,0.,mid,T);
        (AngleuInt+tid)->function = &dJRdI3HighStaeckelIntegrand;
        I3r1= -gsl_integration_glfixed (AngleuInt+tid,0.,mid,T);
        (AngleuInt+tid)->function = &dJRdLzHighStaeckelIntegrand;
        *(Anglephi+ii)= M_PI * *(dJRdLz+ii)
          + *(Lz+ii) * gsl_integration_glfixed (AngleuInt+tid,0.,mid,T)
          / *(delta+ii*delta_stride) / sqrt(2.);
        Or1*= *(delta+ii*delta_stride) / sqrt(2.);
        I3r1*= *(delta+ii*delta_stride) / sqrt(2.);
        Or1= M_PI * *(dJRdE+ii) - Or1;
        I3r1= M_PI * *(dJRdI3+ii) - I3r1;
      }
      else {
        mid= sqrt( ( *(ux+ii) - *(umin+ii) ) );
        (AngleuInt+tid)->function = &dJRdELowStaeckelIntegrand;
        Or1= gsl_integration_glfixed (AngleuInt+tid,0.,mid,T);
        (AngleuInt+tid)->function = &dJRdI3LowStaeckelIntegrand;
        I3r1= -gsl_integration_glfixed (AngleuInt+tid,0.,mid,T);
        (AngleuInt+tid)->function = &dJRdLzLowStaeckelIntegrand;
        *(Anglephi+ii)= - *(Lz+ii) * gsl_integration_glfixed (AngleuInt+tid,0.,mid,T)
          / *(delta+ii*delta_stride) / sqrt(2.);
        Or1*= *(delta+ii*delta_stride) / sqrt(2.);
        I3r1*= *(delta+ii*delta_stride) / sqrt(2.);
      }
    }
    else {
      if ( *(ux+ii) > midpoint ) {
        mid= sqrt( ( *(umax+ii) - *(ux+ii) ) );
        (AngleuInt+tid)->function = &dJRdEHighStaeckelIntegrand;
        Or1= gsl_integration_glfixed (AngleuInt+tid,0.,mid,T);
        Or1*= *(delta+ii*delta_stride) / sqrt(2.);
        Or1= M_PI * *(dJRdE+ii) + Or1;
        (AngleuInt+tid)->function = &dJRdI3HighStaeckelIntegrand;
        I3r1= -gsl_integration_glfixed (AngleuInt+tid,0.,mid,T);
        I3r1*= *(delta+ii*delta_stride) / sqrt(2.);
        I3r1= M_PI * *(dJRdI3+ii) + I3r1;
        (AngleuInt+tid)->function = &dJRdLzHighStaeckelIntegrand;
        *(Anglephi+ii)= M_PI * *(dJRdLz+ii)
          - *(Lz+ii) * gsl_integration_glfixed (AngleuInt+tid,0.,mid,T)
          / *(delta+ii*delta_stride) / sqrt(2.);
      }
      else {
        mid= sqrt( ( *(ux+ii) - *(umin+ii) ) );
        (AngleuInt+tid)->function = &dJRdELowStaeckelIntegrand;
        Or1= gsl_integration_glfixed (AngleuInt+tid,0.,mid,T);
        Or1*= *(delta+ii*delta_stride) / sqrt(2.);
        Or1= 2. * M_PI * *(dJRdE+ii) - Or1;
        (AngleuInt+tid)->function = &dJRdI3LowStaeckelIntegrand;
        I3r1= -gsl_integration_glfixed (AngleuInt+tid,0.,mid,T);
        I3r1*= *(delta+ii*delta_stride) / sqrt(2.);
        I3r1= 2. * M_PI * *(dJRdI3+ii) - I3r1;
        (AngleuInt+tid)->function = &dJRdLzLowStaeckelIntegrand;
        *(Anglephi+ii)= 2. * M_PI * *(dJRdLz+ii)
          + *(Lz+ii) * gsl_integration_glfixed (AngleuInt+tid,0.,mid,T)
          / *(delta+ii*delta_stride) / sqrt(2.);
      }
    }
    //Setup v function
    (paramsv+tid)->delta= *(delta+ii*delta_stride);
    (paramsv+tid)->E= *(E+ii);
    (paramsv+tid)->Lz22delta= 0.5 * *(Lz+ii) * *(Lz+ii) / *(delta+ii*delta_stride) / *(delta+ii*delta_stride);
    (paramsv+tid)->I3V= *(I3V+ii);
    (paramsv+tid)->u0= *(u0+ii);
    (paramsv+tid)->cosh2u0= *(cosh2u0+ii);
    (paramsv+tid)->sinh2u0= *(sinh2u0+ii);
    (paramsv+tid)->potupi2= *(potupi2+ii);
    (paramsv+tid)->vmin= *(vmin+ii);
    (AnglevInt+tid)->params = paramsv+tid;
    // midpoint between vmin and pi/2 decides which regularized integrand
    // (Low: from vmin / its mirror pi-vmin; High: from pi/2) to use
    midpoint= *(vmin+ii)+ 0.5 * ( 0.5 * M_PI - *(vmin+ii) );
    if ( *(pvx+ii) > 0. ) {
      if ( *(vx+ii) < midpoint || *(vx+ii) > (M_PI - midpoint) ) {
        mid = ( *(vx+ii) > 0.5 * M_PI ) ?
          sqrt( (M_PI - *(vx+ii) - *(vmin+ii))):
          sqrt( *(vx+ii) - *(vmin+ii));
        (AnglevInt+tid)->function = &dJzdELowStaeckelIntegrand;
        Or2= gsl_integration_glfixed (AnglevInt+tid,0.,mid,T);
        Or2*= *(delta+ii*delta_stride) / sqrt(2.);
        (AnglevInt+tid)->function = &dJzdI3LowStaeckelIntegrand;
        I3r2= gsl_integration_glfixed (AnglevInt+tid,0.,mid,T);
        I3r2*= *(delta+ii*delta_stride) / sqrt(2.);
        (AnglevInt+tid)->function = &dJzdLzLowStaeckelIntegrand;
        phitmp= gsl_integration_glfixed (AnglevInt+tid,0.,mid,T);
        phitmp*= - *(Lz+ii) / *(delta+ii*delta_stride) / sqrt(2.);
        if ( *(vx+ii) > 0.5 * M_PI ) {
          Or2= M_PI * *(dJzdE+ii) - Or2;
          I3r2= M_PI * *(dJzdI3+ii) - I3r2;
          phitmp= M_PI * *(dJzdLz+ii) - phitmp;
        }
      }
      else {
        mid= sqrt( fabs ( 0.5 * M_PI - *(vx+ii) ) );
        (AnglevInt+tid)->function = &dJzdEHighStaeckelIntegrand;
        Or2= gsl_integration_glfixed (AnglevInt+tid,0.,mid,T);
        Or2*= *(delta+ii*delta_stride) / sqrt(2.);
        (AnglevInt+tid)->function = &dJzdI3HighStaeckelIntegrand;
        I3r2= gsl_integration_glfixed (AnglevInt+tid,0.,mid,T);
        I3r2*= *(delta+ii*delta_stride) / sqrt(2.);
        (AnglevInt+tid)->function = &dJzdLzHighStaeckelIntegrand;
        phitmp= gsl_integration_glfixed (AnglevInt+tid,0.,mid,T);
        phitmp*= - *(Lz+ii) / *(delta+ii*delta_stride) / sqrt(2.);
        if ( *(vx+ii) > 0.5 * M_PI ) {
          Or2= 0.5 * M_PI * *(dJzdE+ii) + Or2;
          I3r2= 0.5 * M_PI * *(dJzdI3+ii) + I3r2;
          phitmp= 0.5 * M_PI * *(dJzdLz+ii) + phitmp;
        }
        else {
          Or2= 0.5 * M_PI * *(dJzdE+ii) - Or2;
          I3r2= 0.5 * M_PI * *(dJzdI3+ii) - I3r2;
          phitmp= 0.5 * M_PI * *(dJzdLz+ii) - phitmp;
        }
      }
    }
    else {
      if ( *(vx+ii) < midpoint || *(vx+ii) > (M_PI - midpoint)) {
        mid = ( *(vx+ii) > 0.5 * M_PI ) ?
          sqrt( (M_PI - *(vx+ii) - *(vmin+ii))):
          sqrt( *(vx+ii) - *(vmin+ii));
        (AnglevInt+tid)->function = &dJzdELowStaeckelIntegrand;
        Or2= gsl_integration_glfixed (AnglevInt+tid,0.,mid,T);
        Or2*= *(delta+ii*delta_stride) / sqrt(2.);
        (AnglevInt+tid)->function = &dJzdI3LowStaeckelIntegrand;
        I3r2= gsl_integration_glfixed (AnglevInt+tid,0.,mid,T);
        I3r2*= *(delta+ii*delta_stride) / sqrt(2.);
        (AnglevInt+tid)->function = &dJzdLzLowStaeckelIntegrand;
        phitmp= gsl_integration_glfixed (AnglevInt+tid,0.,mid,T);
        phitmp*= - *(Lz+ii) / *(delta+ii*delta_stride) / sqrt(2.);
        if ( *(vx+ii) < 0.5 * M_PI ) {
          Or2= 2. * M_PI * *(dJzdE+ii) - Or2;
          I3r2= 2. * M_PI * *(dJzdI3+ii) - I3r2;
          phitmp= 2. * M_PI * *(dJzdLz+ii) - phitmp;
        }
        else {
          Or2= M_PI * *(dJzdE+ii) + Or2;
          I3r2= M_PI * *(dJzdI3+ii) + I3r2;
          phitmp= M_PI * *(dJzdLz+ii) + phitmp;
        }
      }
      else {
        mid= sqrt( fabs ( 0.5 * M_PI - *(vx+ii) ) );
        (AnglevInt+tid)->function = &dJzdEHighStaeckelIntegrand;
        Or2= gsl_integration_glfixed (AnglevInt+tid,0.,mid,T);
        Or2*= *(delta+ii*delta_stride) / sqrt(2.);
        (AnglevInt+tid)->function = &dJzdI3HighStaeckelIntegrand;
        I3r2= gsl_integration_glfixed (AnglevInt+tid,0.,mid,T);
        I3r2*= *(delta+ii*delta_stride) / sqrt(2.);
        (AnglevInt+tid)->function = &dJzdLzHighStaeckelIntegrand;
        phitmp= gsl_integration_glfixed (AnglevInt+tid,0.,mid,T);
        phitmp*= - *(Lz+ii) / *(delta+ii*delta_stride) / sqrt(2.);
        if ( *(vx+ii) < 0.5 * M_PI ) {
          Or2= 1.5 * M_PI * *(dJzdE+ii) + Or2;
          I3r2= 1.5 * M_PI * *(dJzdI3+ii) + I3r2;
          phitmp= 1.5 * M_PI * *(dJzdLz+ii) + phitmp;
        }
        else {
          Or2= 1.5 * M_PI * *(dJzdE+ii) - Or2;
          I3r2= 1.5 * M_PI * *(dJzdI3+ii) - I3r2;
          phitmp= 1.5 * M_PI * *(dJzdLz+ii) - phitmp;
        }
      }
    }
    // combine the u- and v-pieces into the final angles
    *(Angler+ii)= *(Omegar+ii) * ( Or1 + Or2 ) + *(dI3dJR+ii) * ( I3r1 + I3r2 );
    // In Binney (2012) Anglez starts at zmax/vmin and v_z < 0 / v_v > 0;
    // Put this on the same system as Isochrone and Spherical angles +pi/2
    *(Anglez+ii)= *(Omegaz+ii) * ( Or1 + Or2 ) + *(dI3dJz+ii) * ( I3r1 + I3r2 )
      + 0.5 * M_PI;
    *(Anglephi+ii)+= phitmp;
    *(Anglephi+ii)+=
      *(Omegaphi+ii) * ( Or1 + Or2 ) + *(dI3dLz+ii) * ( I3r1 + I3r2 );
    // wrap Angler and Anglez into [0, 2 pi)
    *(Angler+ii)= fmod(*(Angler+ii),2. * M_PI);
    *(Anglez+ii)= fmod(*(Anglez+ii),2. * M_PI);
    while ( *(Angler+ii) < 0. )
      *(Angler+ii)+= 2. * M_PI;
    while ( *(Anglez+ii) < 0. )
      *(Anglez+ii)+= 2. * M_PI;
    while ( *(Angler+ii) > 2. * M_PI )
      *(Angler+ii)-= 2. * M_PI;
    while ( *(Anglez+ii) > 2. * M_PI )
      *(Anglez+ii)-= 2. * M_PI;
  }
  free(AngleuInt);
  free(AnglevInt);
  free(paramsu);
  free(paramsv);
  gsl_integration_glfixed_table_free ( T );
}
// Find the 'peri'/'apo'center turning points umin/umax of the u motion for
// each data point by bracketing the zeros of pu^2(u)
// (JRStaeckelIntegrandSquared) and refining them with GSL's Brent solver.
// Near-circular points get umin == umax == ux; irrecoverable bracketing
// failures store the sentinel -9999.99 in both outputs, which downstream
// routines translate into 9999.99 results.
void calcUminUmax(int ndata,
                  double * umin,
                  double * umax,
                  double * ux,
                  double * pux,
                  double * E,
                  double * Lz,
                  double * I3U,
                  int ndelta,
                  double * delta,
                  double * u0,
                  double * sinh2u0,
                  double * v0,
                  double * sin2v0,
                  double * potu0v0,
                  int nargs,
                  struct potentialArg * actionAngleArgs){
  int ii, tid,
    nthreads;
#ifdef _OPENMP
  nthreads = omp_get_max_threads();
#else
  nthreads = 1;
#endif
  double peps, meps;
  gsl_function * JRRoot= (gsl_function *) malloc ( nthreads * sizeof(gsl_function) );
  struct JRStaeckelArg * params= (struct JRStaeckelArg *) malloc ( nthreads * sizeof (struct JRStaeckelArg) );
  //Setup solver
  int status;
  int iter, max_iter = 100;
  const gsl_root_fsolver_type *T;
  // one solver per thread so the OpenMP loop can iterate them concurrently
  struct pragmasolver *s= (struct pragmasolver *) malloc ( nthreads * sizeof (struct pragmasolver) );;
  double u_lo, u_hi;
  T = gsl_root_fsolver_brent;
  for (tid=0; tid < nthreads; tid++){
    (params+tid)->nargs= nargs;
    (params+tid)->actionAngleArgs= actionAngleArgs;
    (s+tid)->s= gsl_root_fsolver_alloc (T);
  }
  int delta_stride= ndelta == 1 ?
    0 : 1;
  UNUSED int chunk= CHUNKSIZE;
  // handle GSL errors (e.g. bad brackets) via return codes, not aborts
  gsl_set_error_handler_off();
#pragma omp parallel for schedule(static,chunk)         \
  private(tid,ii,iter,status,u_lo,u_hi,meps,peps)       \
  shared(umin,umax,JRRoot,params,s,ux,delta,E,Lz,I3U,u0,sinh2u0,v0,sin2v0,potu0v0,max_iter)
  for (ii=0; ii < ndata; ii++){
#ifdef _OPENMP
    tid= omp_get_thread_num();
#else
    tid = 0;
#endif
    //Setup function
    (params+tid)->delta= *(delta+ii*delta_stride);
    (params+tid)->E= *(E+ii);
    (params+tid)->Lz22delta= 0.5 * *(Lz+ii) * *(Lz+ii) / *(delta+ii*delta_stride) / *(delta+ii*delta_stride);
    (params+tid)->I3U= *(I3U+ii);
    (params+tid)->u0= *(u0+ii);
    (params+tid)->sinh2u0= *(sinh2u0+ii);
    (params+tid)->v0= *(v0+ii);
    (params+tid)->sin2v0= *(sin2v0+ii);
    (params+tid)->potu0v0= *(potu0v0+ii);
    (JRRoot+tid)->function = &JRStaeckelIntegrandSquared;
    (JRRoot+tid)->params = params+tid;
    //Find starting points for minimum
    if ( fabs(GSL_FN_EVAL(JRRoot+tid,*(ux+ii))) < 0.0000001){ //we are at umin or umax
      // probe on either side to decide which turning point we sit at
      peps= GSL_FN_EVAL(JRRoot+tid,*(ux+ii)+0.000001);
      meps= GSL_FN_EVAL(JRRoot+tid,*(ux+ii)-0.000001);
      if ( fabs(peps) < 0.00000001 && fabs(meps) < 0.00000001 ) {//circular
        *(umin+ii) = *(ux+ii);
        *(umax+ii) = *(ux+ii);
      }
      else if ( peps < 0. && meps > 0. ) {//umax
        *(umax+ii)= *(ux+ii);
        u_lo= 0.9 * (*(ux+ii) - 0.000001);
        u_hi= *(ux+ii) - 0.0000001;
        while ( GSL_FN_EVAL(JRRoot+tid,u_lo) >= 0.
                && u_lo > 0.000000001){
          u_hi= u_lo; //this makes sure that brent evaluates using previous
          u_lo*= 0.9;
        }
        //Find root
        status = gsl_root_fsolver_set ((s+tid)->s, JRRoot+tid, u_lo, u_hi);
        if (status == GSL_EINVAL) {
          *(umin+ii) = 0.;//Assume zero if below 0.000000001
        } else {
          iter= 0;
          do
            {
              iter++;
              status = gsl_root_fsolver_iterate ((s+tid)->s);
              u_lo = gsl_root_fsolver_x_lower ((s+tid)->s);
              u_hi = gsl_root_fsolver_x_upper ((s+tid)->s);
              status = gsl_root_test_interval (u_lo, u_hi,
                                               9.9999999999999998e-13,
                                               4.4408920985006262e-16);
            }
          while (status == GSL_CONTINUE && iter < max_iter);
          // LCOV_EXCL_START
          if (status == GSL_EINVAL) {//Shouldn't ever get here
            *(umin+ii) = -9999.99;
            *(umax+ii) = -9999.99;
            continue;
          }
          // LCOV_EXCL_STOP
          *(umin+ii) = gsl_root_fsolver_root ((s+tid)->s);
        }
      }
      else if ( peps > 0. && meps < 0. ){//umin
        *(umin+ii)= *(ux+ii);
        u_lo= *(ux+ii) + 0.000001;
        u_hi= 1.1 * (*(ux+ii) + 0.000001);
        // asinh(37.5/delta) caps the outward search for the bracket
        while ( GSL_FN_EVAL(JRRoot+tid,u_hi) >= 0.
                && u_hi < asinh(37.5/ *(delta+ii*delta_stride))) {
          u_lo= u_hi; //this makes sure that brent evaluates using previous
          u_hi*= 1.1;
        }
        //Find root
        status = gsl_root_fsolver_set ((s+tid)->s, JRRoot+tid, u_lo, u_hi);
        if (status == GSL_EINVAL) {
          *(umin+ii) = -9999.99;
          *(umax+ii) = -9999.99;
          continue;
        }
        iter= 0;
        do
          {
            iter++;
            status = gsl_root_fsolver_iterate ((s+tid)->s);
            u_lo = gsl_root_fsolver_x_lower ((s+tid)->s);
            u_hi = gsl_root_fsolver_x_upper ((s+tid)->s);
            status = gsl_root_test_interval (u_lo, u_hi,
                                             9.9999999999999998e-13,
                                             4.4408920985006262e-16);
          }
        while (status == GSL_CONTINUE && iter < max_iter);
        // LCOV_EXCL_START
        if (status == GSL_EINVAL) {//Shouldn't ever get here
          *(umin+ii) = -9999.99;
          *(umax+ii) = -9999.99;
          continue;
        }
        // LCOV_EXCL_STOP
        *(umax+ii) = gsl_root_fsolver_root ((s+tid)->s);
      }
    }
    else {
      // generic case: bracket umin below ux, then umax above ux
      u_lo= 0.9 * *(ux+ii);
      u_hi= *(ux+ii);
      while ( GSL_FN_EVAL(JRRoot+tid,u_lo) >= 0. && u_lo > 0.000000001){
        u_hi= u_lo; //this makes sure that brent evaluates using previous
        u_lo*= 0.9;
      }
      u_hi= (u_lo < 0.9 * *(ux+ii)) ?
        u_lo / 0.9 / 0.9: *(ux+ii);
      //Find root
      status = gsl_root_fsolver_set ((s+tid)->s, JRRoot+tid, u_lo, u_hi);
      if (status == GSL_EINVAL) {
        *(umin+ii) = 0.;//Assume zero if below 0.000000001
      } else {
        iter= 0;
        do
          {
            iter++;
            status = gsl_root_fsolver_iterate ((s+tid)->s);
            u_lo = gsl_root_fsolver_x_lower ((s+tid)->s);
            u_hi = gsl_root_fsolver_x_upper ((s+tid)->s);
            status = gsl_root_test_interval (u_lo, u_hi,
                                             9.9999999999999998e-13,
                                             4.4408920985006262e-16);
          }
        while (status == GSL_CONTINUE && iter < max_iter);
        // LCOV_EXCL_START
        if (status == GSL_EINVAL) {//Shouldn't ever get here
          *(umin+ii) = -9999.99;
          *(umax+ii) = -9999.99;
          continue;
        }
        // LCOV_EXCL_STOP
        *(umin+ii) = gsl_root_fsolver_root ((s+tid)->s);
      }
      //Find starting points for maximum
      u_lo= *(ux+ii);
      u_hi= 1.1 * *(ux+ii);
      while ( GSL_FN_EVAL(JRRoot+tid,u_hi) > 0.
              && u_hi < asinh(37.5/ *(delta+ii*delta_stride))) {
        u_lo= u_hi; //this makes sure that brent evaluates using previous
        u_hi*= 1.1;
      }
      u_lo= (u_hi > 1.1 * *(ux+ii)) ? u_hi / 1.1 / 1.1: *(ux+ii);
      //Find root
      status = gsl_root_fsolver_set ((s+tid)->s, JRRoot+tid, u_lo, u_hi);
      if (status == GSL_EINVAL) {
        *(umin+ii) = -9999.99;
        *(umax+ii) = -9999.99;
        continue;
      }
      iter= 0;
      do
        {
          iter++;
          status = gsl_root_fsolver_iterate ((s+tid)->s);
          u_lo = gsl_root_fsolver_x_lower ((s+tid)->s);
          u_hi = gsl_root_fsolver_x_upper ((s+tid)->s);
          status = gsl_root_test_interval (u_lo, u_hi,
                                           9.9999999999999998e-13,
                                           4.4408920985006262e-16);
        }
      while (status == GSL_CONTINUE && iter < max_iter);
      // LCOV_EXCL_START
      if (status == GSL_EINVAL) {//Shouldn't ever get here
        *(umin+ii) = -9999.99;
        *(umax+ii) = -9999.99;
        continue;
      }
      // LCOV_EXCL_STOP
      *(umax+ii) = gsl_root_fsolver_root ((s+tid)->s);
    }
  }
  gsl_set_error_handler (NULL);
  for (tid=0; tid < nthreads; tid++)
    gsl_root_fsolver_free( (s+tid)->s);
  free(s);
  free(JRRoot);
  free(params);
}
// Find the turning point vmin of the v motion (zeros of pv^2, via
// JzStaeckelIntegrandSquared) with the same per-thread Brent-solver scheme
// as calcUminUmax.  Sentinel on failure: vmin = -9999.99.
void calcVmin(int ndata,
              double * vmin,
              double * vx,
              double * pvx,
              double * E,
              double * Lz,
              double * I3V,
              int ndelta,
              double * delta,
              double * u0,
              double * cosh2u0,
              double *
sinh2u0, double * potupi2, int nargs, struct potentialArg * actionAngleArgs){ int ii, tid, nthreads; #ifdef _OPENMP nthreads = omp_get_max_threads(); #else nthreads = 1; #endif gsl_function * JzRoot= (gsl_function *) malloc ( nthreads * sizeof(gsl_function) ); struct JzStaeckelArg * params= (struct JzStaeckelArg *) malloc ( nthreads * sizeof (struct JzStaeckelArg) ); //Setup solver int status; int iter, max_iter = 100; const gsl_root_fsolver_type *T; struct pragmasolver *s= (struct pragmasolver *) malloc ( nthreads * sizeof (struct pragmasolver) );; double v_lo, v_hi; T = gsl_root_fsolver_brent; for (tid=0; tid < nthreads; tid++){ (params+tid)->nargs= nargs; (params+tid)->actionAngleArgs= actionAngleArgs; (s+tid)->s= gsl_root_fsolver_alloc (T); } int delta_stride= ndelta == 1 ? 0 : 1; UNUSED int chunk= CHUNKSIZE; gsl_set_error_handler_off(); #pragma omp parallel for schedule(static,chunk) \ private(tid,ii,iter,status,v_lo,v_hi) \ shared(vmin,JzRoot,params,s,vx,delta,E,Lz,I3V,u0,cosh2u0,sinh2u0,potupi2,max_iter) for (ii=0; ii < ndata; ii++){ #ifdef _OPENMP tid= omp_get_thread_num(); #else tid = 0; #endif //Setup function (params+tid)->delta= *(delta+ii*delta_stride); (params+tid)->E= *(E+ii); (params+tid)->Lz22delta= 0.5 * *(Lz+ii) * *(Lz+ii) / *(delta+ii*delta_stride) / *(delta+ii*delta_stride); (params+tid)->I3V= *(I3V+ii); (params+tid)->u0= *(u0+ii); (params+tid)->cosh2u0= *(cosh2u0+ii); (params+tid)->sinh2u0= *(sinh2u0+ii); (params+tid)->potupi2= *(potupi2+ii); (JzRoot+tid)->function = &JzStaeckelIntegrandSquared; (JzRoot+tid)->params = params+tid; //Find starting points for minimum if ( fabs(GSL_FN_EVAL(JzRoot+tid,*(vx+ii))) < 0.0000001) //we are at vmin *(vmin+ii)= ( *(vx+ii) > 0.5 * M_PI ) ? M_PI - *(vx+ii): *(vx+ii); else { if ( *(vx+ii) > 0.5 * M_PI ){ v_lo= 0.9 * ( M_PI - *(vx+ii) ); v_hi= M_PI - *(vx+ii); } else { v_lo= 0.9 * *(vx+ii); v_hi= *(vx+ii); } while ( GSL_FN_EVAL(JzRoot+tid,v_lo) >= 0. 
/* NOTE(review): this chunk opens mid-function — below is the tail of a loop
   that brackets and solves for the vertical turning point vmin with a GSL
   Brent root solver; the function's opening and the bracketing setup lie
   before the visible region. */
	 && v_lo > 0.000000001){
      v_hi= v_lo; //this makes sure that brent evaluates using previous
      v_lo*= 0.9;
    }
    //Find root
    status = gsl_root_fsolver_set ((s+tid)->s, JzRoot+tid, v_lo, v_hi);
    if (status == GSL_EINVAL) {
      *(vmin+ii) = -9999.99;  /* sentinel: no valid bracket -> flag failure */
      continue;
    }
    iter= 0;
    do {
      iter++;
      status = gsl_root_fsolver_iterate ((s+tid)->s);
      v_lo = gsl_root_fsolver_x_lower ((s+tid)->s);
      v_hi = gsl_root_fsolver_x_upper ((s+tid)->s);
      /* absolute tolerance ~1e-12, relative tolerance ~2x double epsilon */
      status = gsl_root_test_interval (v_lo, v_hi,
				       9.9999999999999998e-13,
				       4.4408920985006262e-16);
    } while (status == GSL_CONTINUE && iter < max_iter);
    // LCOV_EXCL_START
    if (status == GSL_EINVAL) {//Shouldn't ever get here
      *(vmin+ii) = -9999.99;
      continue;
    }
    // LCOV_EXCL_STOP
    *(vmin+ii) = gsl_root_fsolver_root ((s+tid)->s);
    fflush(stdout);
  }
  }
  gsl_set_error_handler (NULL);           /* restore default GSL handler */
  for (tid=0; tid < nthreads; tid++)
    gsl_root_fsolver_free( (s+tid)->s);   /* one solver per thread */
  free(s);
  free(JzRoot);
  free(params);
}
/* p_u(u): momentum along the prolate-spheroidal u coordinate; square root of
   the squared integrand below, clamped to 0 outside the allowed region. */
double JRStaeckelIntegrand(double u, void * p){
  double out= JRStaeckelIntegrandSquared(u,p);
  if ( out <= 0.) return 0.;
  else return sqrt(out);
}
/* p_u^2(u) = E sinh^2 u - I3U - dU - Lz^2/(2 delta^2 sinh^2 u), with dU the
   Staeckel(-fudge) potential term evaluated along v = v0. */
double JRStaeckelIntegrandSquared(double u, void * p){
  struct JRStaeckelArg * params= (struct JRStaeckelArg *) p;
  double sinh2u= sinh(u) * sinh(u);
  double dU= (sinh2u+params->sin2v0)
    *evaluatePotentialsUV(u,params->v0,params->delta,
			  params->nargs,params->actionAngleArgs)
    - (params->sinh2u0+params->sin2v0)*params->potu0v0;
  return params->E * sinh2u - params->I3U - dU - params->Lz22delta / sinh2u;
}
/* Same squared integrand as above, but reading a dJRStaeckelArg; used by the
   action-derivative integrands below. */
double JRStaeckelIntegrandSquared4dJR(double u, void * p){
  struct dJRStaeckelArg * params= (struct dJRStaeckelArg *) p;
  double sinh2u= sinh(u) * sinh(u);
  double dU= (sinh2u+params->sin2v0)
    *evaluatePotentialsUV(u,params->v0,params->delta,
			  params->nargs,params->actionAngleArgs)
    - (params->sinh2u0+params->sin2v0)*params->potu0v0;
  return params->E * sinh2u - params->I3U - dU - params->Lz22delta / sinh2u;
}
/* p_v(v): momentum along the v coordinate; sqrt of squared form, clamped. */
double JzStaeckelIntegrand(double v, void * p){
  double out= JzStaeckelIntegrandSquared(v,p);
  if ( out <= 0. ) return 0.;
  else return sqrt(out);
}
/* p_v^2(v) = E sin^2 v + I3V + dV - Lz^2/(2 delta^2 sin^2 v), with dV the
   potential term evaluated along u = u0. */
double JzStaeckelIntegrandSquared(double v, void * p){
  struct JzStaeckelArg * params= (struct JzStaeckelArg *) p;
  double sin2v= sin(v) * sin(v);
  double dV= params->cosh2u0 * params->potupi2
    - (params->sinh2u0+sin2v)
    *evaluatePotentialsUV(params->u0,v,params->delta,
			  params->nargs,params->actionAngleArgs);
  return params->E * sin2v + params->I3V + dV - params->Lz22delta / sin2v;
}
/* As above, reading a dJzStaeckelArg; used by the dJz integrands below. */
double JzStaeckelIntegrandSquared4dJz(double v, void * p){
  struct dJzStaeckelArg * params= (struct dJzStaeckelArg *) p;
  double sin2v= sin(v) * sin(v);
  double dV= params->cosh2u0 * params->potupi2
    - (params->sinh2u0+sin2v)
    *evaluatePotentialsUV(params->u0,v,params->delta,
			  params->nargs,params->actionAngleArgs);
  return params->E * sin2v + params->I3V + dV - params->Lz22delta / sin2v;
}
/* The action-derivative integrands come in Low/High pairs: the substitution
   u = umin + t^2 (resp. u = umax - t^2) removes the 1/sqrt singularity at
   the turning points; the factor 2t is the Jacobian of that substitution. */
double dJRdELowStaeckelIntegrand(double t, void * p){
  struct dJRStaeckelArg * params= (struct dJRStaeckelArg *) p;
  double u= params->umin + t * t;
  return 2. * t * dJRdEStaeckelIntegrand(u,p);
}
double dJRdEHighStaeckelIntegrand(double t, void * p){
  struct dJRStaeckelArg * params= (struct dJRStaeckelArg *) p;
  double u= params->umax - t * t;
  return 2. * t * dJRdEStaeckelIntegrand(u,p);
}
/* dJR/dE integrand: sinh^2 u / p_u(u); 0 outside the allowed region. */
double dJRdEStaeckelIntegrand(double u, void * p){
  double out= JRStaeckelIntegrandSquared4dJR(u,p);
  if ( out <= 0. ) return 0.;
  else return sinh(u)*sinh(u)/sqrt(out);
}
double dJRdLzLowStaeckelIntegrand(double t, void * p){
  struct dJRStaeckelArg * params= (struct dJRStaeckelArg *) p;
  double u= params->umin + t * t;
  return 2. * t * dJRdLzStaeckelIntegrand(u,p);
}
double dJRdLzHighStaeckelIntegrand(double t, void * p){
  struct dJRStaeckelArg * params= (struct dJRStaeckelArg *) p;
  double u= params->umax - t * t;
  return 2. * t * dJRdLzStaeckelIntegrand(u,p);
}
/* dJR/dLz integrand: 1 / (sinh^2 u * p_u(u)). */
double dJRdLzStaeckelIntegrand(double u, void * p){
  double out= JRStaeckelIntegrandSquared4dJR(u,p);
  if ( out <= 0. ) return 0.;
  else return 1./sinh(u)/sinh(u)/sqrt(out);
}
double dJRdI3LowStaeckelIntegrand(double t, void * p){
  struct dJRStaeckelArg * params= (struct dJRStaeckelArg *) p;
  double u= params->umin + t * t;
  return 2. * t * dJRdI3StaeckelIntegrand(u,p);
}
double dJRdI3HighStaeckelIntegrand(double t, void * p){
  struct dJRStaeckelArg * params= (struct dJRStaeckelArg *) p;
  double u= params->umax - t * t;
  return 2. * t * dJRdI3StaeckelIntegrand(u,p);
}
/* dJR/dI3 integrand: 1 / p_u(u). */
double dJRdI3StaeckelIntegrand(double u, void * p){
  double out= JRStaeckelIntegrandSquared4dJR(u,p);
  if ( out <= 0. ) return 0.;
  else return 1./sqrt(out);
}
/* Jz derivatives: Low substitutes v = vmin + t^2; High expands around the
   equatorial plane, v = pi/2 - t^2 (so no per-orbit upper bound needed). */
double dJzdELowStaeckelIntegrand(double t, void * p){
  struct dJzStaeckelArg * params= (struct dJzStaeckelArg *) p;
  double v= params->vmin + t * t;
  return 2. * t * dJzdEStaeckelIntegrand(v,p);
}
double dJzdEHighStaeckelIntegrand(double t, void * p){
  double v= M_PI/2. - t * t;
  return 2. * t * dJzdEStaeckelIntegrand(v,p);
}
/* dJz/dE integrand: sin^2 v / p_v(v). */
double dJzdEStaeckelIntegrand(double v, void * p){
  double out= JzStaeckelIntegrandSquared4dJz(v,p);
  if ( out <= 0. ) return 0.;
  else return sin(v)*sin(v)/sqrt(out);
}
double dJzdLzLowStaeckelIntegrand(double t, void * p){
  struct dJzStaeckelArg * params= (struct dJzStaeckelArg *) p;
  double v= params->vmin + t * t;
  return 2. * t * dJzdLzStaeckelIntegrand(v,p);
}
double dJzdLzHighStaeckelIntegrand(double t, void * p){
  double v= M_PI/2. - t * t;
  return 2. * t * dJzdLzStaeckelIntegrand(v,p);
}
/* dJz/dLz integrand: 1 / (sin^2 v * p_v(v)). */
double dJzdLzStaeckelIntegrand(double v, void * p){
  double out= JzStaeckelIntegrandSquared4dJz(v,p);
  if ( out <= 0. ) return 0.;
  else return 1./sin(v)/sin(v)/sqrt(out);
}
double dJzdI3LowStaeckelIntegrand(double t, void * p){
  struct dJzStaeckelArg * params= (struct dJzStaeckelArg *) p;
  double v= params->vmin + t * t;
  return 2. * t * dJzdI3StaeckelIntegrand(v,p);
}
double dJzdI3HighStaeckelIntegrand(double t, void * p){
  double v= M_PI/2. - t * t;
  return 2. * t * dJzdI3StaeckelIntegrand(v,p);
}
/* dJz/dI3 integrand: 1 / p_v(v). */
double dJzdI3StaeckelIntegrand(double v, void * p){
  double out= JzStaeckelIntegrandSquared4dJz(v,p);
  if ( out <= 0. ) return 0.;
  else return 1./sqrt(out);
}
/* Root equation whose zero picks u0: minus the squared u momentum evaluated
   in the equatorial plane (v = pi/2). */
double u0Equation(double u, void * p){
  struct u0EqArg * params= (struct u0EqArg *) p;
  double sinh2u= sinh(u) * sinh(u);
  double cosh2u= cosh(u) * cosh(u);
  double dU= cosh2u * evaluatePotentialsUV(u,0.5*M_PI,params->delta,
					   params->nargs,params->actionAngleArgs);
  return -(params->E*sinh2u-dU-params->Lz22delta/sinh2u);
}
/* Evaluate the (R,z) potential at prolate spheroidal coordinates (u,v)
   with focal distance delta. */
double evaluatePotentialsUV(double u, double v, double delta,
			    int nargs,
			    struct potentialArg * actionAngleArgs){
  double R,z;
  uv_to_Rz(u,v,&R,&z,delta);
  return evaluatePotentials(R,z,nargs,actionAngleArgs);
}
/* ==================== IO.h (Ligra graph I/O) ==================== */
// This code is part of the project "Ligra: A Lightweight Graph Processing // Framework for Shared Memory", presented at Principles and Practice of // Parallel Programming, 2013. // Copyright (c) 2013 Julian Shun and Guy Blelloch // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the // "Software"), to deal in the Software without restriction, including // without limitation the rights (to use, copy, modify, merge, publish, // distribute, sublicense, and/or sell copies of the Software, and to // permit persons to whom the Software is furnished to do so, subject to // the following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF // MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE // LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION // OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION // WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
// Graph I/O for Ligra: readers for adjacency-list text files, binary
// (.config/.adj/.idx) files and byte-compressed graphs, plus degree-sorting
// preprocessing helpers used for locality-improving vertex reordering.
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <cmath>
#include <sys/mman.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <parallel/algorithm>
#include <omp.h>
#include <cassert>
#include "parallel.h"
#include "blockRadixSort.h"
#include "quickSort.h"
#include "utils.h"
#include "graph.h"
#include "pvector.h"
#include "timer.h"
#include "sliding_queue.h"
using namespace std;

typedef pair<uintE,uintE> intPair;
typedef pair<uintE, pair<uintE,intE> > intTriple;

// Packing-factor threshold above which reordering is predicted to pay off
// (see computePackingFactor at the bottom of this file).
const double THRESHOLD {4.0f};

// Compare pairs by first component only.
template <class E>
struct pairFirstCmp {
  bool operator() (pair<uintE,E> a, pair<uintE,E> b) {
    return a.first < b.first; }
};

// Project the first component of a pair (key extractor for iSort).
template <class E>
struct getFirst {uintE operator() (pair<uintE,E> a) {return a.first;} };

// Compare pairs lexicographically (first, then second).
template <class IntType>
struct pairBothCmp {
  bool operator() (pair<uintE,IntType> a, pair<uintE,IntType> b) {
    if (a.first != b.first) return a.first < b.first;
    return a.second < b.second;
  }
};

// A structure that keeps a sequence of strings all allocated from
// the same block of memory
struct words {
  long n;          // total number of characters
  char* Chars;     // array storing all strings
  long m;          // number of substrings
  char** Strings;  // pointers to strings (all should be null terminated)
  words() {}
  words(char* C, long nn, char** S, long mm)
    : Chars(C), n(nn), Strings(S), m(mm) {}
  void del() {free(Chars); free(Strings);}
};

// Whitespace/terminator test used when tokenizing the input file.
inline bool isSpace(char c) {
  switch (c)  {
  case '\r':
  case '\t':
  case '\n':
  case 0:
  case ' ' : return true;
  default : return false;
  }
}

// Memory-map a whole file read-only and return (pointer, length).
// The returned sequence aliases the read-only mapping (copy before
// mutating). Exits the process on any failure.
_seq<char> mmapStringFromFile(const char *filename) {
  struct stat sb;
  int fd = open(filename, O_RDONLY);
  if (fd == -1) {
    perror("open");
    exit(-1);
  }
  if (fstat(fd, &sb) == -1) {
    perror("fstat");
    exit(-1);
  }
  if (!S_ISREG (sb.st_mode)) {
    perror("not a file\n");
    exit(-1);
  }
  char *p = static_cast<char*>(mmap(0, sb.st_size, PROT_READ, MAP_PRIVATE, fd, 0));
  if (p == MAP_FAILED) {
    perror("mmap");
    exit(-1);
  }
  if (close(fd) == -1) {
    perror("close");
    exit(-1);
  }
  size_t n = sb.st_size;
  // char *bytes = newA(char, n);
  // parallel_for(size_t i=0; i<n; i++) {
  //   bytes[i] = p[i];
  // }
  // if (munmap(p, sb.st_size) == -1) {
  //   perror("munmap");
  //   exit(-1);
  // }
  // cout << "mmapped" << endl;
  // free(bytes);
  // exit(0);
  return _seq<char>(p, n);
}

// Read a whole file into a freshly allocated buffer of n+1 bytes.
// NOTE(review): the extra byte is allocated but never NUL-terminated here;
// downstream tokenization relies on explicit length n — confirm no caller
// treats the buffer as a C string.
_seq<char> readStringFromFile(char *fileName) {
  ifstream file (fileName, ios::in | ios::binary | ios::ate);
  if (!file.is_open()) {
    std::cout << "Unable to open file: " << fileName << std::endl;
    abort();
  }
  long end = file.tellg();
  file.seekg (0, ios::beg);
  long n = end - file.tellg();
  char* bytes = newA(char,n+1);
  assert(bytes != NULL && "Malloc failure\n");
  file.read (bytes,n);
  file.close();
  return _seq<char>(bytes,n);
}

// parallel code for converting a string to words
// Destructively replaces whitespace in Str with NULs and returns pointers
// to the start of every token (the `words` takes ownership of Str).
words stringToWords(char *Str, long n) {
  {parallel_for (long i=0; i < n; i++)
      if (isSpace(Str[i])) Str[i] = 0; }
  // mark start of words
  bool *FL = newA(bool,n);
  assert(FL != NULL && "Malloc failure\n");
  FL[0] = Str[0];
  {parallel_for (long i=1; i < n; i++) FL[i] = Str[i] && !Str[i-1];}
  // offset for each start of word
  _seq<long> Off = sequence::packIndex<long>(FL, n);
  free(FL);
  long m = Off.n;
  long *offsets = Off.A;
  // pointer to each start of word
  char **SA = newA(char*, m);
  assert(SA != NULL && "Malloc failure\n");
  {parallel_for (long j=0; j < m; j++) SA[j] = Str+offsets[j];}
  free(offsets);
  return words(Str,n,SA,m);
}

// Read a graph in the Ligra text format ("AdjacencyGraph" /
// "WeightedAdjacencyGraph"). For asymmetric graphs the transpose
// (in-edges) is built by sorting (dst, src[, weight]) records.
// `mmap` selects mmap-based file input.
template <class vertex>
graph<vertex> readGraphFromFile(char* fname, bool isSymmetric, bool mmap) {
  Timer t;
  t.Start();
  words W;
  if (mmap) {
    _seq<char> S = mmapStringFromFile(fname);
    char *bytes = newA(char, S.n);
    assert(bytes != NULL && "Malloc failure\n");
    // Cannot mutate the graph unless we copy.
    parallel_for(size_t i=0; i<S.n; i++) {
      bytes[i] = S.A[i];
    }
    if (munmap(S.A, S.n) == -1) {
      perror("munmap");
      exit(-1);
    }
    S.A = bytes;
    W = stringToWords(S.A, S.n);
  } else {
    _seq<char> S = readStringFromFile(fname);
    W = stringToWords(S.A, S.n);
  }
#ifndef WEIGHTED
  if (W.Strings[0] != (string) "AdjacencyGraph") {
#else
  if (W.Strings[0] != (string) "WeightedAdjacencyGraph") {
#endif
    cout << "Bad input file" << endl;
    abort();
  }
  long len = W.m -1;
  long n = atol(W.Strings[1]);
  long m = atol(W.Strings[2]);
  // Token layout: header, n, m, n offsets, m edges (plus m weights).
#ifndef WEIGHTED
  if (len != n + m + 2) {
#else
  if (len != n + 2*m + 2) {
#endif
    cout << "Bad input file" << endl;
    abort();
  }
  uintT* offsets = newA(uintT,n);
  assert(offsets != NULL && "Malloc failure\n");
#ifndef WEIGHTED
  uintE* edges = newA(uintE,m);
#else
  intE* edges = newA(intE,2*m);  // weighted: targets and weights interleaved
#endif
  assert(edges != NULL && "Malloc failure\n");
  {parallel_for(long i=0; i < n; i++) offsets[i] = atol(W.Strings[i + 3]);}
  {parallel_for(long i=0; i<m; i++) {
#ifndef WEIGHTED
      edges[i] = atol(W.Strings[i+n+3]);
#else
      edges[2*i] = atol(W.Strings[i+n+3]);
      edges[2*i+1] = atol(W.Strings[i+n+m+3]);
#endif
    }}
  //W.del(); // to deal with performance bug in malloc
  W.del(); //The original code ^ commented this out
  vertex* v = newA(vertex,n);
  assert(v != NULL && "Malloc failure\n");
  {parallel_for (uintT i=0; i < n; i++) {
      uintT o = offsets[i];
      uintT l = ((i == n-1) ? m : offsets[i+1])-offsets[i];
      v[i].setOutDegree(l);
#ifndef WEIGHTED
      v[i].setOutNeighbors(edges+o);
#else
      v[i].setOutNeighbors(edges+2*o);
#endif
    }}
  if(!isSymmetric) {
    // Build the transpose: scatter one (dst, src[, w]) record per edge,
    // sort by destination, then derive in-offsets and in-edge arrays.
    uintT* tOffsets = newA(uintT,n);
    assert(tOffsets != NULL && "Malloc failure\n");
    {parallel_for(long i=0;i<n;i++) tOffsets[i] = INT_T_MAX;}
#ifndef WEIGHTED
    intPair* temp = newA(intPair,m);
#else
    intTriple* temp = newA(intTriple,m);
#endif
    assert(temp != NULL && "Malloc failure\n");
    {parallel_for(long i=0;i<n;i++){
	uintT o = offsets[i];
	for(uintT j=0;j<v[i].getOutDegree();j++){
#ifndef WEIGHTED
	  temp[o+j] = make_pair(v[i].getOutNeighbor(j),i);
#else
	  temp[o+j] = make_pair(v[i].getOutNeighbor(j),make_pair(i,v[i].getOutWeight(j)));
#endif
	}
      }}
    free(offsets);
#ifndef WEIGHTED
#ifndef LOWMEM
    intSort::iSort(temp,m,n+1,getFirst<uintE>());
#else
    quickSort(temp,m,pairFirstCmp<uintE>());
#endif
#else
#ifndef LOWMEM
    intSort::iSort(temp,m,n+1,getFirst<intPair>());
#else
    quickSort(temp,m,pairFirstCmp<intPair>());
#endif
#endif
    tOffsets[temp[0].first] = 0;
#ifndef WEIGHTED
    uintE* inEdges = newA(uintE,m);
    inEdges[0] = temp[0].second;
#else
    intE* inEdges = newA(intE,2*m);
    inEdges[0] = temp[0].second.first;
    inEdges[1] = temp[0].second.second;
#endif
    assert(inEdges != NULL && "Malloc failure\n");
    {parallel_for(long i=1;i<m;i++) {
#ifndef WEIGHTED
	inEdges[i] = temp[i].second;
#else
	inEdges[2*i] = temp[i].second.first;
	inEdges[2*i+1] = temp[i].second.second;
#endif
	if(temp[i].first != temp[i-1].first) {
	  tOffsets[temp[i].first] = i;
	}
      }}
    free(temp);
    //fill in offsets of degree 0 vertices by taking closest non-zero
    //offset to the right
    sequence::scanIBack(tOffsets,tOffsets,n,minF<uintT>(),(uintT)m);
    {parallel_for(long i=0;i<n;i++){
	uintT o = tOffsets[i];
	uintT l = ((i == n-1) ? m : tOffsets[i+1])-tOffsets[i];
	v[i].setInDegree(l);
#ifndef WEIGHTED
	v[i].setInNeighbors(inEdges+o);
#else
	v[i].setInNeighbors(inEdges+2*o);
#endif
      }}
    free(tOffsets);
    Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges,inEdges);
    t.Stop();
    t.PrintTime("Graph reading time(s)", t.Seconds());
    std::cout << "Read directed graph. Num Nodes = " << n << " and Num Edges = " << m << "\n";
    return graph<vertex>(v,n,m,mem);
  }
  else {
    free(offsets);
    Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges);
    t.Stop();
    t.PrintTime("Graph reading time(s)", t.Seconds());
    std::cout << "Read undirected graph. Num Nodes = " << n << " and Num Edges = " << m << "\n";
    return graph<vertex>(v,n,m,mem);
  }
}

// Read a graph in the binary format: <base>.config holds n (text),
// <base>.adj holds the edge targets (uints; weights appended when
// WEIGHTED), <base>.idx holds per-vertex offsets.
template <class vertex>
graph<vertex> readGraphFromBinary(char* iFile, bool isSymmetric) {
  char* config = (char*) ".config";
  char* adj = (char*) ".adj";
  char* idx = (char*) ".idx";
  char configFile[strlen(iFile)+strlen(config)+1];
  char adjFile[strlen(iFile)+strlen(adj)+1];
  char idxFile[strlen(iFile)+strlen(idx)+1];
  *configFile = *adjFile = *idxFile = '\0';
  strcat(configFile,iFile);
  strcat(adjFile,iFile);
  strcat(idxFile,iFile);
  strcat(configFile,config);
  strcat(adjFile,adj);
  strcat(idxFile,idx);
  ifstream in(configFile, ifstream::in);
  long n;
  in >> n;
  in.close();
  ifstream in2(adjFile,ifstream::in | ios::binary); //stored as uints
  in2.seekg(0, ios::end);
  long size = in2.tellg();
  in2.seekg(0);
#ifdef WEIGHTED
  long m = size/(2*sizeof(uint));
#else
  long m = size/sizeof(uint);
#endif
  char* s = (char *) malloc(size);
  in2.read(s,size);
  in2.close();
  uintE* edges = (uintE*) s;
  ifstream in3(idxFile,ifstream::in | ios::binary); //stored as longs
  in3.seekg(0, ios::end);
  size = in3.tellg();
  in3.seekg(0);
  if(n != size/sizeof(intT)) {
    cout << "File size wrong\n";
    abort();
  }
  char* t = (char *) malloc(size);
  in3.read(t,size);
  in3.close();
  uintT* offsets = (uintT*) t;
  vertex* v = newA(vertex,n);
#ifdef WEIGHTED
  // Interleave targets and weights: file stores all m targets then all
  // m weights; the in-memory layout is (target, weight) pairs.
  intE* edgesAndWeights = newA(intE,2*m);
  {parallel_for(long i=0;i<m;i++) {
      edgesAndWeights[2*i] = edges[i];
      edgesAndWeights[2*i+1] = edges[i+m];
    }}
  //free(edges);
#endif
  {parallel_for(long i=0;i<n;i++) {
      uintT o = offsets[i];
      uintT l = ((i==n-1) ? m : offsets[i+1])-offsets[i];
      v[i].setOutDegree(l);
#ifndef WEIGHTED
      v[i].setOutNeighbors((uintE*)edges+o);
#else
      v[i].setOutNeighbors(edgesAndWeights+2*o);
#endif
    }}
  if(!isSymmetric) {
    // Same transpose construction as in readGraphFromFile.
    uintT* tOffsets = newA(uintT,n);
    {parallel_for(long i=0;i<n;i++) tOffsets[i] = INT_T_MAX;}
#ifndef WEIGHTED
    intPair* temp = newA(intPair,m);
#else
    intTriple* temp = newA(intTriple,m);
#endif
    {parallel_for(intT i=0;i<n;i++){
	uintT o = offsets[i];
	for(uintT j=0;j<v[i].getOutDegree();j++){
#ifndef WEIGHTED
	  temp[o+j] = make_pair(v[i].getOutNeighbor(j),i);
#else
	  temp[o+j] = make_pair(v[i].getOutNeighbor(j),make_pair(i,v[i].getOutWeight(j)));
#endif
	}
      }}
    free(offsets);
#ifndef WEIGHTED
#ifndef LOWMEM
    intSort::iSort(temp,m,n+1,getFirst<uintE>());
#else
    quickSort(temp,m,pairFirstCmp<uintE>());
#endif
#else
#ifndef LOWMEM
    intSort::iSort(temp,m,n+1,getFirst<intPair>());
#else
    quickSort(temp,m,pairFirstCmp<intPair>());
#endif
#endif
    tOffsets[temp[0].first] = 0;
#ifndef WEIGHTED
    uintE* inEdges = newA(uintE,m);
    inEdges[0] = temp[0].second;
#else
    intE* inEdges = newA(intE,2*m);
    inEdges[0] = temp[0].second.first;
    inEdges[1] = temp[0].second.second;
#endif
    {parallel_for(long i=1;i<m;i++) {
#ifndef WEIGHTED
	inEdges[i] = temp[i].second;
#else
	inEdges[2*i] = temp[i].second.first;
	inEdges[2*i+1] = temp[i].second.second;
#endif
	if(temp[i].first != temp[i-1].first) {
	  tOffsets[temp[i].first] = i;
	}
      }}
    free(temp);
    //fill in offsets of degree 0 vertices by taking closest non-zero
    //offset to the right
    sequence::scanIBack(tOffsets,tOffsets,n,minF<uintT>(),(uintT)m);
    {parallel_for(long i=0;i<n;i++){
	uintT o = tOffsets[i];
	uintT l = ((i == n-1) ? m : tOffsets[i+1])-tOffsets[i];
	v[i].setInDegree(l);
#ifndef WEIGHTED
	v[i].setInNeighbors((uintE*)inEdges+o);
#else
	v[i].setInNeighbors((intE*)(inEdges+2*o));
#endif
      }}
    free(tOffsets);
#ifndef WEIGHTED
    Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges,inEdges);
    return graph<vertex>(v,n,m,mem);
#else
    Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edgesAndWeights,inEdges);
    return graph<vertex>(v,n,m,mem);
#endif
  }
  free(offsets);
#ifndef WEIGHTED
  Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edges);
  return graph<vertex>(v,n,m,mem);
#else
  Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(v,n,m,edgesAndWeights);
  return graph<vertex>(v,n,m,mem);
#endif
}

// Dispatch to the binary or text reader. NOTE(review): `compressed` is
// accepted but unused here; compressed graphs go through
// readCompressedGraph instead.
template <class vertex>
graph<vertex> readGraph(char* iFile, bool compressed, bool symmetric, bool binary, bool mmap) {
  if(binary) return readGraphFromBinary<vertex>(iFile,symmetric);
  else return readGraphFromFile<vertex>(iFile,symmetric,mmap);
}

// Read a byte-compressed graph: a single blob with header (n, m,
// totalSpace), then offsets, degrees and compressed out-edges, followed —
// for asymmetric graphs — by the in-edge counterparts.
template <class vertex>
graph<vertex> readCompressedGraph(char* fname, bool isSymmetric, bool mmap) {
  char* s;
  if (mmap) {
    _seq<char> S = mmapStringFromFile(fname);
    // Cannot mutate graph unless we copy.
    char *bytes = newA(char, S.n);
    parallel_for(size_t i=0; i<S.n; i++) {
      bytes[i] = S.A[i];
    }
    if (munmap(S.A, S.n) == -1) {
      perror("munmap");
      exit(-1);
    }
    s = bytes;
  }
  else {
    ifstream in(fname,ifstream::in |ios::binary);
    in.seekg(0,ios::end);
    long size = in.tellg();
    in.seekg(0);
    cout << "size = " << size << endl;
    s = (char*) malloc(size);
    in.read(s,size);
    in.close();
  }
  long* sizes = (long*) s;
  long n = sizes[0], m = sizes[1], totalSpace = sizes[2];
  cout << "n = "<<n<<" m = "<<m<<" totalSpace = "<<totalSpace<<endl;
  cout << "reading file..."<<endl;
  uintT* offsets = (uintT*) (s+3*sizeof(long));
  long skip = 3*sizeof(long) + (n+1)*sizeof(intT);
  uintE* Degrees = (uintE*) (s+skip);
  skip+= n*sizeof(intE);
  uchar* edges = (uchar*)(s+skip);
  uintT* inOffsets;
  uchar* inEdges;
  uintE* inDegrees;
  if(!isSymmetric){
    // In-edge section follows the out-edge data.
    skip += totalSpace;
    uchar* inData = (uchar*)(s + skip);
    sizes = (long*) inData;
    long inTotalSpace = sizes[0];
    cout << "inTotalSpace = "<<inTotalSpace<<endl;
    skip += sizeof(long);
    inOffsets = (uintT*) (s + skip);
    skip += (n+1)*sizeof(uintT);
    inDegrees = (uintE*)(s+skip);
    skip += n*sizeof(uintE);
    inEdges = (uchar*)(s + skip);
  } else {
    // Symmetric graph: in- and out-structures are the same arrays.
    inOffsets = offsets;
    inEdges = edges;
    inDegrees = Degrees;
  }
  vertex *V = newA(vertex,n);
  parallel_for(long i=0;i<n;i++) {
    long o = offsets[i];
    uintT d = Degrees[i];
    V[i].setOutDegree(d);
    V[i].setOutNeighbors(edges+o);
  }
  if(sizeof(vertex) == sizeof(compressedAsymmetricVertex)){
    parallel_for(long i=0;i<n;i++) {
      long o = inOffsets[i];
      uintT d = inDegrees[i];
      V[i].setInDegree(d);
      V[i].setInNeighbors(inEdges+o);
    }
  }
  cout << "creating graph..."<<endl;
  Compressed_Mem<vertex>* mem = new Compressed_Mem<vertex>(V, s);
  graph<vertex> G(V,n,m,mem);
  return G;
}

/* prefix sum used by the preprocess function defined below */
// Two-pass block-parallel exclusive prefix sum; the result has
// degrees.size()+1 entries, the last one being the grand total.
static pvector<uintT> ParallelPrefixSum (const pvector<uintT> &degrees) {
  const size_t block_size = 1<<20;
  const size_t num_blocks = (degrees.size() + block_size - 1) / block_size;
  pvector<uintT> local_sums(num_blocks);
#pragma omp parallel for
  for (size_t block=0; block < num_blocks; block++) {
    uintT lsum = 0;
    size_t block_end = std::min((block + 1) * block_size, degrees.size());
    for (size_t i=block * block_size; i < block_end; i++)
      lsum += degrees[i];
    local_sums[block] = lsum;
  }
  // Serial scan over the per-block sums (num_blocks is small).
  pvector<uintT> bulk_prefix(num_blocks+1);
  uintT total = 0;
  for (size_t block=0; block < num_blocks; block++) {
    bulk_prefix[block] = total;
    total += local_sums[block];
  }
  bulk_prefix[num_blocks] = total;
  pvector<uintT> prefix(degrees.size() + 1);
#pragma omp parallel for
  for (size_t block=0; block < num_blocks; block++) {
    uintT local_total = bulk_prefix[block];
    size_t block_end = std::min((block + 1) * block_size, degrees.size());
    for (size_t i=block * block_size; i < block_end; i++) {
      prefix[i] = local_total;
      local_total += degrees[i];
    }
  }
  prefix[degrees.size()] = bulk_prefix[num_blocks];
  return prefix;
}

/*
  Preprocess a graph based on outdegrees or indegrees

  PageRank Optimizations for directed graphs -
  1) We do not create a new outNeighbors list (because it pull-only)
  2) We only create new out-degrees because PR uses it during computation
*/
// Renumber vertices in decreasing degree order (out- or in-degree, chosen
// by useOutdeg) and rebuild the graph under the new IDs; the old->new
// mapping is returned through new_ids (caller-sized to numVertices).
template <class vertex>
graph<vertex> preprocessGraph(graph<vertex> GA, bool isSym, bool useOutdeg,
			      pvector<uintE>& new_ids, bool isPageRank = false) {
  Timer t;
  t.Start();
  auto numVertices = GA.n;
  auto numEdges = GA.m;
  vertex *origG = GA.V;
  typedef std::pair<uintT, uintE> degree_nodeid_t;
  pvector<degree_nodeid_t> degree_id_pairs(numVertices);
  if (!isSym) {
    /* directed graph */
    /* STEP I - collect degrees of all vertices */
#pragma omp parallel for
    for (uintE v = 0; v < numVertices; ++v) {
      vertex vtx = origG[v];
      if (useOutdeg) {
	degree_id_pairs[v] = std::make_pair(vtx.getOutDegree(), v);
      } else {
	degree_id_pairs[v] = std::make_pair(vtx.getInDegree(), v);
      }
    }
    /* Step II - sort the degrees in parallel */
    __gnu_parallel::sort(degree_id_pairs.begin(), degree_id_pairs.end(),
			 std::greater<degree_nodeid_t>());
    /* Step III - make a remap based on the sorted degree list */
    pvector<uintT> degrees(numVertices);
    pvector<uintT> inv_degrees(numVertices);  // degrees of the other direction
#pragma omp parallel for
    for (uintE v = 0; v < numVertices; ++v) {
      degrees[v] = degree_id_pairs[v].first;
      auto origID = degree_id_pairs[v].second;
      new_ids[origID] = v;
      vertex vtx = origG[origID];
      if (useOutdeg) {
	inv_degrees[v] = vtx.getInDegree();
      } else {
	inv_degrees[v] = vtx.getOutDegree();
      }
    }
    //clearing space from degree pairs
    pvector<degree_nodeid_t>().swap(degree_id_pairs);
    /* Step IV - make a new vertex list for the new graph */
    pvector<uintT> offsets = ParallelPrefixSum(degrees);
    pvector<uintT> inv_offsets = ParallelPrefixSum(inv_degrees);
    //clearing space from degree lists
    pvector<uintT>().swap(degrees);
    pvector<uintT>().swap(inv_degrees);
#ifndef WEIGHTED
    uintE* outEdges = newA(uintE, numEdges);
    uintE* inEdges = newA(uintE, numEdges);
#else
    intE* outEdges = newA(intE, 2 * numEdges);
    intE* inEdges = newA(intE, 2 * numEdges);
#endif
    vertex* newV = newA(vertex, numVertices);
#pragma omp parallel for schedule (dynamic, 1024)
    for (uintE v = 0; v < numVertices; ++v) {
      /* note that vertex IDs u and v belong to the space of original vertex IDs */
      if (!isPageRank) {
	//copy out-neighbors
	auto newID = new_ids[v];
	newV[newID].setOutDegree(origG[v].getOutDegree());
#ifndef WEIGHTED
	if (useOutdeg) newV[newID].setOutNeighbors(outEdges + offsets[newID]);
	else newV[newID].setOutNeighbors(outEdges + inv_offsets[newID]);
#else
	if (useOutdeg) newV[newID].setOutNeighbors(outEdges + 2 * offsets[newID]);
	else newV[newID].setOutNeighbors(outEdges + 2 * inv_offsets[newID]);
#endif
	for (uintE u = 0; u < origG[v].getOutDegree(); ++u) {
	  auto origNgh = origG[v].getOutNeighbor(u);
	  newV[newID].setOutNeighbor(u, new_ids[origNgh]);
#ifdef WEIGHTED
	  newV[newID].setOutWeight(u, origG[v].getOutWeight(u));
#endif
	}
	//copy in-neighbors
	newV[newID].setInDegree(origG[v].getInDegree());
#ifndef WEIGHTED
	if (useOutdeg) newV[newID].setInNeighbors(inEdges + inv_offsets[newID]);
	else newV[newID].setInNeighbors(inEdges + offsets[newID]);
#else
	if (useOutdeg) newV[newID].setInNeighbors(inEdges + 2 * inv_offsets[newID]);
	else newV[newID].setInNeighbors(inEdges + 2 * offsets[newID]);
#endif
	for (uintE u = 0; u < origG[v].getInDegree(); ++u) {
	  auto origNgh = origG[v].getInNeighbor(u);
	  newV[newID].setInNeighbor(u, new_ids[origNgh]);
#ifdef WEIGHTED
	  newV[newID].setInWeight(u, origG[v].getInWeight(u));
#endif
	}
      } else {
	/* PageRank - no need to apply weighted conditionals */
	//copy in-neighbors
	auto newID = new_ids[v];
	newV[newID].setInDegree(origG[v].getInDegree());
	if (useOutdeg) newV[newID].setInNeighbors(inEdges + inv_offsets[newID]);
	else newV[newID].setInNeighbors(inEdges + offsets[newID]);
	for (uintE u = 0; u < origG[v].getInDegree(); ++u) {
	  auto origNgh = origG[v].getInNeighbor(u);
	  newV[newID].setInNeighbor(u, new_ids[origNgh]);
	}
	//only set out-degrees
	newV[newID].setOutDegree(origG[v].getOutDegree());
      }
    }
    /* Step V - make the new graph */
    Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(newV,numVertices,numEdges,outEdges,inEdges);
    t.Stop();
    t.PrintTime("DegSort Time", t.Seconds());
    return graph<vertex>(newV,numVertices,numEdges,mem);
  } else {
    /* undirected graph */
    /* STEP I - collect degrees of all vertices */
#pragma omp parallel for
    for (uintE v = 0; v < numVertices; ++v) {
      vertex vtx = origG[v];
      degree_id_pairs[v] = std::make_pair(vtx.getOutDegree(), v);
    }
    /* Step II - sort the degrees in parallel */
    __gnu_parallel::sort(degree_id_pairs.begin(), degree_id_pairs.end(),
			 std::greater<degree_nodeid_t>());
    /* Step III - make a remap based on the sorted degree list */
    pvector<uintT> degrees(numVertices);
#pragma omp parallel for
    for (uintE v = 0; v < numVertices; ++v) {
      degrees[v] = degree_id_pairs[v].first;
      auto origID = degree_id_pairs[v].second;
      new_ids[origID] = v;
    }
    //clearing space from degree pairs
    pvector<degree_nodeid_t>().swap(degree_id_pairs);
    /* Step IV - make a new vertex list for the new graph */
    pvector<uintT> offsets = ParallelPrefixSum(degrees);
    //clearing space from degrees
    pvector<uintT>().swap(degrees);
#ifndef WEIGHTED
    uintE* outEdges = newA(uintE, numEdges);
#else
    intE* outEdges = newA(intE, 2 * numEdges);
#endif
    vertex* newV = newA(vertex, numVertices);
#pragma omp parallel for schedule (dynamic, 1024)
    for (uintE v = 0; v < numVertices; ++v) {
      /* note that vertex IDs u and v belong to the space of original vertex IDs */
      //copy neighbors
      auto newID = new_ids[v];
      newV[newID].setOutDegree(origG[v].getOutDegree());
#ifndef WEIGHTED
      newV[newID].setOutNeighbors(outEdges + offsets[newID]);
#else
      newV[newID].setOutNeighbors(outEdges + 2 * offsets[newID]);
#endif
      for (uintE u = 0; u < origG[v].getOutDegree(); ++u) {
	auto origNgh = origG[v].getOutNeighbor(u);
	newV[newID].setOutNeighbor(u, new_ids[origNgh]);
#ifdef WEIGHTED
	newV[newID].setOutWeight(u, origG[v].getOutWeight(u));
#endif
      }
    }
    /* Step V - make the new graph */
    Uncompressed_Mem<vertex>* mem = new Uncompressed_Mem<vertex>(newV,numVertices,numEdges,outEdges);
    t.Stop();
    t.PrintTime("DegSort Time", t.Seconds());
    return graph<vertex>(newV,numVertices,numEdges,mem);
  }
}

/*
  Determine if the structure of the graph is amenable to benefit
  from lightweight reordering techniques. The implementation is a
  simple scan of the entire vertex space to find the cache lines
  that contain atleast one hub

  NOTE: we found that reordering is most effective for pull-based apps.
  Hence, the following function assumes out-degree sorting by default
*/
// Returns true when the estimated packing factor (hot-set shrinkage from
// packing hubs densely into cache lines) exceeds THRESHOLD.
template <class vertex>
bool computePackingFactor(graph<vertex> GA, bool isSym, bool useOutdeg, size_t elemSz) {
  Timer t;
  t.Start();
  auto numVertices = GA.n;
  auto numEdges = GA.m;
  vertex *origG = GA.V;
  uintT avgDegree = numEdges / numVertices;  // "hub" = degree above average
  size_t cacheBlkSz {64};
  size_t vDataSz = numVertices * elemSz; //Total size of vData array in Bytes
  size_t numCacheBlocks = (vDataSz + (cacheBlkSz-1)) / cacheBlkSz; //number of cache blocks to completely store entire vData
  size_t vtxPerBlk {0};
  size_t hubCacheBlocks {0};
  size_t numHubs {0};
  double hotSetSize_before {0};
  double hotSetSize_after {0};
  double packingFactor {0};
  if (elemSz < cacheBlkSz) {
    vtxPerBlk = cacheBlkSz / elemSz;
    // NOTE(review): the last block's inner loop can index past
    // numVertices when numVertices is not a multiple of vtxPerBlk —
    // confirm origG is padded accordingly.
#pragma omp parallel for reduction (+ : hubCacheBlocks, numHubs)
    for (uintE b = 0; b < numCacheBlocks; ++b) {
      bool hasHubs {false};
      for (uintE v = b * vtxPerBlk; v < (b+1) * vtxPerBlk; ++v) {
	if (origG[v].getOutDegree() > avgDegree) {
	  hasHubs = true;
	  ++numHubs;
	}
      }
      if (hasHubs) {
	++hubCacheBlocks;
      }
    }
    hotSetSize_before = hubCacheBlocks * 64;
    hotSetSize_after = ((numHubs + (vtxPerBlk-1)) / (vtxPerBlk)) * 64;
    // NOTE(review): the assignment above is immediately overwritten below
    // (dead store); the second formula is the one in effect.
    hotSetSize_after = (((numHubs*elemSz) + (cacheBlkSz-1)) / (cacheBlkSz)) * 64;
    packingFactor = static_cast<double>(hotSetSize_before) / static_cast<double>(hotSetSize_after);
  }
  t.Stop();
  t.PrintTime("Packing Factor Time(in s)", t.Seconds());
  std::cout << "Number of hubs = " << numHubs << std::endl;
  std::cout << "HotSet size in MB (before reordering) = " << static_cast<double>(hotSetSize_before) / (1024 * 1024) << std::endl;
  std::cout << "HotSet size in MB (after reordering) = " << static_cast<double>(hotSetSize_after) / (1024 * 1024) << std::endl;
  std::cout << "Packing Factor = " << packingFactor << std::endl;
  bool result = packingFactor > THRESHOLD;
  return result;
}
/* ==================== app.c (UPMEM SpMV host application) ==================== */
/**
 * Christina Giannoula
 * cgiannoula: christina.giann@gmail.com
 */

/* Host-side driver for an SpMV kernel on UPMEM DPUs: reads a matrix, builds
   a block-compressed (BDBCSR) representation, 2D-partitions it across DPUs
   and prepares per-DPU/per-tasklet transfer metadata.
   NOTE(review): main() continues past the visible end of this chunk. */

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <dpu.h>
#include <dpu_log.h>
#include <unistd.h>
#include <getopt.h>
#include <assert.h>
#include <math.h>
#include <omp.h>
#include "../support/common.h"
#include "../support/matrix.h"
#include "../support/params.h"
#include "../support/partition.h"
#include "../support/timer.h"
#include "../support/utils.h"

// Define the DPU Binary path as DPU_BINARY here.
#ifndef DPU_BINARY
#define DPU_BINARY "./bin/spmv_dpu"
#endif

#define DPU_CAPACITY (64 << 20) // A DPU's capacity is 64 MB

/*
 * Main Structures:
 * 1. Matrices
 * 2. Input vector
 * 3. Output vector
 * 4. Help structures for data partitioning
 */
static struct BDBCSRMatrix* A;   // block-compressed matrix shipped to the DPUs
static struct BDCSRMatrix* B;    // intermediate partitioned CSR form
static struct COOMatrix* C;      // matrix as read from file (COO)
static val_dt* x;                // input vector
static val_dt* y;                // output vector computed on the host (see spmv_host)
static val_dt* z;                // output vector
static struct partition_info_t *part_info;

/**
 * @brief Specific information for each DPU
 */
struct dpu_info_t {
    uint32_t block_rows_per_dpu;   // #block-rows assigned to this DPU
    uint32_t prev_block_rows_dpu;  // #block-rows assigned to preceding DPUs of the tile
    uint32_t cols_per_dpu;         // width of this DPU's vertical tile
    uint32_t block_start;
    uint32_t blocks;               // #blocks assigned to this DPU
    uint32_t blocks_pad;           // blocks padded to an even count (8-byte transfers)
    uint32_t prev_blocks_dpu;      // #blocks assigned to all preceding DPUs
    uint32_t ptr_offset;           // offset of this DPU's slice in A->browptr
    uint32_t merge;
};
struct dpu_info_t *dpu_info;

/**
 * @brief find the dpus_per_row_partition
 * @param n total number of DPUs
 * @param horz_partitions output: DPUs per vertical partition (n / vert_partitions)
 * @param vert_partitions number of vertical (column) partitions
 */
void find_partitions(uint32_t n, uint32_t *horz_partitions, uint32_t vert_partitions) {
    uint32_t dpus_per_vert_partition = n / vert_partitions;
    *horz_partitions = dpus_per_vert_partition;
}

/**
 * @brief initialize input vector
 * @param vec pointer to input vector
 * @param size vector size
 */
void init_vector(val_dt* vec, uint32_t size) {
    for(unsigned int i = 0; i < size; ++i) {
        vec[i] = (val_dt) (i%4+1);  // small repeating pattern 1,2,3,4
    }
}

/**
 * @brief compute output in the host CPU
 * Reference SpMV over the BDBCSR matrix: y += A * x.
 */
static void spmv_host(val_dt* y, struct BDBCSRMatrix *A, val_dt* x) {
    uint64_t total_blocks = 0;
    // Each vertical partition keeps its own browptr slice (offset by
    // c * (num_block_rows+1)) and its own contiguous range of blocks.
    for (uint32_t c = 0; c < A->vert_partitions; c++) {
        uint32_t ptr_offset = c * (A->num_block_rows + 1);
        for(uint64_t n=0; n < A->num_block_rows; n++) {
            for(uint64_t i=A->browptr[ptr_offset + n]; i<A->browptr[ptr_offset + n+1]; i++){
                uint64_t j = A->bcolind[total_blocks + i];
                // Dense (row_block_size x col_block_size) block times x slice.
                for(uint64_t blr=0; blr < A->row_block_size; blr++){
                    val_dt acc = 0;
                    for(uint64_t blc=0; blc < A->col_block_size; blc++) {
                        acc += A->bval[(total_blocks + i) * A->col_block_size * A->row_block_size + blr * A->col_block_size + blc] * x[A->vert_tile_widths[c] + j * A->col_block_size + blc];
                    }
                    y[n * A->row_block_size + blr] += acc;
                }
            }
        }
        total_blocks += A->blocks_per_vert_partition[c];
    }
}

/**
 * @brief main of the host application
 */
int main(int argc, char **argv) {
    struct Params p = input_params(argc, argv);
    struct dpu_set_t dpu_set, dpu;
    uint32_t nr_of_dpus;
    uint32_t nr_of_ranks;

    // Allocate DPUs and load binary
    DPU_ASSERT(dpu_alloc(NR_DPUS, NULL, &dpu_set));
    DPU_ASSERT(dpu_load(dpu_set, DPU_BINARY, NULL));
    DPU_ASSERT(dpu_get_nr_dpus(dpu_set, &nr_of_dpus));
    DPU_ASSERT(dpu_get_nr_ranks(dpu_set, &nr_of_ranks));
    printf("[INFO] Allocated %d DPU(s)\n", nr_of_dpus);
    printf("[INFO] Allocated %d Rank(s)\n", nr_of_ranks);
    printf("[INFO] Allocated %d TASKLET(s) per DPU\n", NR_TASKLETS);

    unsigned int i;

    // Initialize input data
    C = readCOOMatrix(p.fileName);
    sortCOOMatrix(C);
    uint32_t horz_partitions = 0;
    uint32_t vert_partitions = p.vert_partitions;
    find_partitions(nr_of_dpus, &horz_partitions, p.vert_partitions);
    printf("[INFO] %dx%d Matrix Partitioning\n\n", horz_partitions, vert_partitions);
    // COO -> partitioned CSR -> block-compressed BDBCSR.
    B = coo2bdcsr(C, horz_partitions, vert_partitions);
    freeCOOMatrix(C);
    A = bdcsr2bdbcsr(B, p.row_blsize, p.col_blsize);
    countNNZperBlockBDBCSRMatrix(A);
    freeBDCSRMatrix(B);

    // Initialize partition data
    part_info = partition_init(A, nr_of_dpus, p.max_nranks, NR_TASKLETS);

#if FG_TRANS
    // Record DPUs per rank and their running sums, needed for fine-grained
    // (per-rank) parallel transfers.
    struct dpu_set_t rank;
    uint32_t each_rank;
    DPU_RANK_FOREACH(dpu_set, rank, each_rank){
        uint32_t nr_dpus_in_rank;
        DPU_ASSERT(dpu_get_nr_dpus(rank, &nr_dpus_in_rank));
        part_info->active_dpus_per_rank[each_rank+1] = nr_dpus_in_rank;
    }
    int sum = 0;
    for(int i=0; i < p.max_nranks+1; i++) {
        part_info->accum_dpus_ranks[i] = part_info->active_dpus_per_rank[i] + sum;
        sum += part_info->active_dpus_per_rank[i];
    }
#endif

    // Initialize help data - Padding needed
    // All sizes are rounded up so transfers stay 8-byte aligned.
    uint32_t ncols_pad = A->ncols + A->max_tile_width + A->col_block_size;
    uint32_t tile_width_pad = A->num_block_cols * A->col_block_size;
    uint32_t nrows_pad = A->nrows + A->row_block_size;
    if (ncols_pad % (8 / byte_dt) != 0)
        ncols_pad = ncols_pad + ((8 / byte_dt) - (ncols_pad % (8 / byte_dt)));
    if (tile_width_pad % (8 / byte_dt) != 0)
        tile_width_pad = tile_width_pad + ((8 / byte_dt) - (tile_width_pad % (8 / byte_dt)));
#if INT8
    if (tile_width_pad % 2 != 0)
        tile_width_pad++;
#endif
    if (nrows_pad % (8 / byte_dt) != 0)
        nrows_pad = nrows_pad + ((8 / byte_dt) - (nrows_pad % (8 / byte_dt)));

    // Allocate input vector
    x = (val_dt *) malloc(ncols_pad * sizeof(val_dt));
    // Allocate output vector
    z = (val_dt *) calloc(nrows_pad, sizeof(val_dt));
    // Initialize input vector with arbitrary data
    init_vector(x, ncols_pad);

    // Load-balance blocks (block-row granularity) across DPUs of the same vertical partition
    partition_by_block(A, part_info);

    // Initialize help data
    dpu_info = (struct dpu_info_t *) malloc(nr_of_dpus * sizeof(struct dpu_info_t));
    dpu_arguments_t *input_args = (dpu_arguments_t *) malloc(nr_of_dpus * sizeof(dpu_arguments_t));
    // Max limits for parallel transfers
    uint64_t max_block_rows_per_dpu = 0;
    uint64_t max_blocks_per_dpu = 0;

    // Timer for measurements
    Timer timer;

    i = 0;
    uint32_t acc_blocks = 0;
    uint32_t total_blocks = 0;
    DPU_FOREACH(dpu_set, dpu, i) {
        // Find padding for block rows and non-zero elements needed for CPU-DPU transfers
        // DPU i handles 2D tile (tile_vert_indx, tile_horz_indx).
        uint32_t tile_horz_indx = i % A->horz_partitions;
        uint32_t tile_vert_indx = i / A->horz_partitions;
        uint32_t block_rows_per_dpu = part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx + 1] - part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx];
        uint32_t block_rows_per_dpu_pad = part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx + 1] - part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx] + 1;
        uint32_t prev_block_rows_dpu = part_info->brow_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx];
        if (block_rows_per_dpu_pad > max_block_rows_per_dpu)
            max_block_rows_per_dpu = block_rows_per_dpu_pad;

        unsigned int blocks, blocks_pad;
        blocks = A->browptr[tile_vert_indx * (A->num_block_rows + 1) + prev_block_rows_dpu + block_rows_per_dpu] - A->browptr[tile_vert_indx * (A->num_block_rows + 1) + prev_block_rows_dpu];
        assert(blocks == part_info->blocks_dpu[i]);
        if (blocks % 2 != 0) // bcolind
            blocks_pad = blocks + 1;
        else
            blocks_pad = blocks;
        if (blocks_pad > max_blocks_per_dpu)
            max_blocks_per_dpu = blocks_pad;

        // Keep information per DPU
        dpu_info[i].block_rows_per_dpu = block_rows_per_dpu;
        dpu_info[i].prev_block_rows_dpu = prev_block_rows_dpu;
        dpu_info[i].cols_per_dpu = A->vert_tile_widths[tile_vert_indx+1] - A->vert_tile_widths[tile_vert_indx];
        dpu_info[i].blocks = blocks;
        dpu_info[i].blocks_pad = blocks_pad;
        dpu_info[i].prev_blocks_dpu = total_blocks;
        dpu_info[i].ptr_offset = tile_vert_indx * (A->num_block_rows + 1) + prev_block_rows_dpu;
        // Find input arguments per DPU
        input_args[i].block_rows = block_rows_per_dpu;
        input_args[i].tcols = tile_width_pad;
        input_args[i].row_block_size = A->row_block_size;
        input_args[i].col_block_size = A->col_block_size;
        //input_args[i].blocks = blocks;

#if BLNC_TSKLT_BLOCK
        // Load-balance blocks across tasklets
        partition_tsklt_by_block(A, part_info, i, NR_TASKLETS, nr_of_dpus, acc_blocks, prev_block_rows_dpu, block_rows_per_dpu, tile_vert_indx);
#else
        // Load-balance nnzs across tasklets
        partition_tsklt_by_nnz(A, part_info, i, NR_TASKLETS, nr_of_dpus, acc_blocks, prev_block_rows_dpu, block_rows_per_dpu, tile_vert_indx);
#endif

        uint32_t t;
        for (t = 0; t < NR_TASKLETS; t++) {
            // Find input arguments per tasklet
            input_args[i].start_block_row[t] = part_info->brow_split_tasklet[i * (NR_TASKLETS+2) + t];
            input_args[i].end_block_row[t] = part_info->brow_split_tasklet[i * (NR_TASKLETS+2) + (t+1)];
        }

        // Advance per-partition/global block counters.
        if (tile_horz_indx == (A->horz_partitions - 1))
            acc_blocks += A->blocks_per_vert_partition[tile_vert_indx];
        total_blocks += part_info->blocks_dpu[i];
    }

#if FG_TRANS
    // Find max number of block rows (subset of elements of the output vector) among DPUs of each rank
    DPU_RANK_FOREACH(dpu_set, rank, each_rank){
        uint32_t max_block_rows_cur_rank = 0;
        uint32_t max_cols_cur_rank = 0;
        uint32_t nr_dpus_in_rank;
        DPU_ASSERT(dpu_get_nr_dpus(rank, &nr_dpus_in_rank));
        uint32_t start_dpu = part_info->accum_dpus_ranks[each_rank];
        for (int k = 0; k < nr_dpus_in_rank; k++) {
            if (start_dpu + k >= nr_of_dpus)
                break;
            if (dpu_info[start_dpu + k].block_rows_per_dpu > max_block_rows_cur_rank)
                max_block_rows_cur_rank = dpu_info[start_dpu + k].block_rows_per_dpu;
            if (dpu_info[start_dpu + k].cols_per_dpu > max_cols_cur_rank)
                max_cols_cur_rank = dpu_info[start_dpu + k].cols_per_dpu;
        }
        // Padding
        max_cols_cur_rank = ((max_cols_cur_rank + A->col_block_size - 1) / A->col_block_size) * A->col_block_size;
        if (max_block_rows_cur_rank % 2 != 0)
            max_block_rows_cur_rank++;
        if (max_cols_cur_rank % (8 / byte_dt) != 0)
            max_cols_cur_rank = max_cols_cur_rank + ((8 / byte_dt) - (max_cols_cur_rank % (8 / byte_dt)));
        part_info->max_block_rows_per_rank[each_rank] = (uint32_t) max_block_rows_cur_rank;
        part_info->max_cols_per_rank[each_rank] = (uint32_t) max_cols_cur_rank;
    }
#endif

    // Initializations for parallel transfers with padding needed
    if (max_block_rows_per_dpu % 2 != 0)
        max_block_rows_per_dpu++;
    if (max_blocks_per_dpu % 2 != 0)
        max_blocks_per_dpu++;

    // Re-allocations for padding needed
    A->browptr = (uint32_t *) realloc(A->browptr, (max_block_rows_per_dpu * nr_of_dpus * sizeof(uint32_t)));
    A->bcolind = (uint32_t *)
realloc(A->bcolind, (max_blocks_per_dpu * nr_of_dpus * sizeof(uint32_t))); A->bval = (val_dt *) realloc(A->bval, (max_blocks_per_dpu * A->row_block_size * A->col_block_size * nr_of_dpus * sizeof(val_dt))); y = (val_dt *) calloc((uint64_t) ((uint64_t) nr_of_dpus * (uint64_t) max_block_rows_per_dpu * A->row_block_size), sizeof(val_dt)); // Count total number of bytes to be transfered in MRAM of DPU unsigned long int total_bytes; total_bytes = ((max_block_rows_per_dpu) * sizeof(uint32_t)) + (max_blocks_per_dpu * sizeof(uint32_t)) + (max_blocks_per_dpu * A->row_block_size * A->col_block_size * sizeof(val_dt)) + (tile_width_pad * sizeof(val_dt)) + (max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt)); assert(total_bytes <= DPU_CAPACITY && "Bytes needed exceeded MRAM size"); // Copy input arguments to DPUs i = 0; DPU_FOREACH(dpu_set, dpu, i) { input_args[i].max_block_rows = max_block_rows_per_dpu; input_args[i].max_blocks = max_blocks_per_dpu; DPU_ASSERT(dpu_prepare_xfer(dpu, input_args + i)); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, "DPU_INPUT_ARGUMENTS", 0, sizeof(dpu_arguments_t), DPU_XFER_DEFAULT)); // Copy input matrix to DPUs startTimer(&timer, 0); // Copy Browptr i = 0; DPU_FOREACH(dpu_set, dpu, i) { DPU_ASSERT(dpu_prepare_xfer(dpu, A->browptr + dpu_info[i].ptr_offset)); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, (max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt)), max_block_rows_per_dpu * sizeof(uint32_t), DPU_XFER_DEFAULT)); // Copy Bcolind i = 0; total_blocks = 0; DPU_FOREACH(dpu_set, dpu, i) { DPU_ASSERT(dpu_prepare_xfer(dpu, A->bcolind + total_blocks)); total_blocks += part_info->blocks_dpu[i]; } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_block_rows_per_dpu * sizeof(uint32_t), max_blocks_per_dpu * sizeof(uint32_t), 
DPU_XFER_DEFAULT)); // Copy Bvalues i = 0; total_blocks = 0; DPU_FOREACH(dpu_set, dpu, i) { DPU_ASSERT(dpu_prepare_xfer(dpu, A->bval + ((uint64_t) total_blocks * A->row_block_size * A->col_block_size))); total_blocks += part_info->blocks_dpu[i]; } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_block_rows_per_dpu * sizeof(uint32_t) + max_blocks_per_dpu * sizeof(uint32_t), max_blocks_per_dpu * A->row_block_size * A->col_block_size * sizeof(val_dt), DPU_XFER_DEFAULT)); stopTimer(&timer, 0); // Copy input vector to DPUs startTimer(&timer, 1); #if CG_TRANS // Coarse-grained data transfers in the input vector i = 0; DPU_FOREACH(dpu_set, dpu, i) { uint32_t tile_vert_indx = i / A->horz_partitions; DPU_ASSERT(dpu_prepare_xfer(dpu, x + A->vert_tile_widths[tile_vert_indx])); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), tile_width_pad * sizeof(val_dt), DPU_XFER_DEFAULT)); #endif #if FG_TRANS // Fine-grained data transfers in the input vector at rank granularity i = 0; DPU_FOREACH(dpu_set, dpu, i) { uint32_t tile_vert_indx = i / A->horz_partitions; DPU_ASSERT(dpu_prepare_xfer(dpu, x + A->vert_tile_widths[tile_vert_indx])); } i = 0; //struct dpu_set_t rank; DPU_RANK_FOREACH(dpu_set, rank) { DPU_ASSERT(dpu_push_xfer(rank, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), part_info->max_cols_per_rank[i] * sizeof(val_dt), DPU_XFER_ASYNC)); i++; } DPU_ASSERT(dpu_sync(dpu_set)); #endif stopTimer(&timer, 1); // Run kernel on DPUs startTimer(&timer, 2); DPU_ASSERT(dpu_launch(dpu_set, DPU_SYNCHRONOUS)); stopTimer(&timer, 2); #if LOG // Display DPU Log (default: disabled) DPU_FOREACH(dpu_set, dpu) { DPU_ASSERT(dpulog_read_for_dpu(dpu.dpu, stdout)); } #endif // Retrieve results for output vector from DPUs 
startTimer(&timer, 3); #if CG_TRANS // Coarse-grained data transfers in the output vector i = 0; DPU_FOREACH(dpu_set, dpu, i) { DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_block_rows_per_dpu * A->row_block_size))); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, max_block_rows_per_dpu * A->row_block_size * sizeof(val_dt), DPU_XFER_DEFAULT)); #endif #if FG_TRANS // Fine-grained data transfers in the output vector at rank granularity i = 0; DPU_FOREACH(dpu_set, dpu, i) { DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_block_rows_per_dpu * A->row_block_size))); } i = 0; DPU_RANK_FOREACH(dpu_set, rank) { DPU_ASSERT(dpu_push_xfer(rank, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, part_info->max_block_rows_per_rank[i] * A->row_block_size * sizeof(val_dt), DPU_XFER_ASYNC)); i++; } DPU_ASSERT(dpu_sync(dpu_set)); #endif stopTimer(&timer, 3); // Merge partial results to the host CPU startTimer(&timer, 4); uint32_t r, c, t, b; for (c = 0; c < A->vert_partitions; c++) { for (r = 0; r < A->horz_partitions; r++) { #pragma omp parallel for num_threads(p.nthreads) shared(A, z, y, max_block_rows_per_dpu, r, c) private(t, b) for (t = 0; t < part_info->brow_split[c * (A->horz_partitions + 1) + r+1] - part_info->brow_split[c * (A->horz_partitions + 1) + r]; t++) { for (b = 0; b < A->row_block_size; b++) { z[(part_info->brow_split[c * (A->horz_partitions + 1) + r] + t) * A->row_block_size + b] += y[(c * A->horz_partitions + r) * max_block_rows_per_dpu * A->row_block_size + t * A->row_block_size + b]; } } } } stopTimer(&timer, 4); // Print timing results printf("\n"); printf("Load Matrix "); printTimer(&timer, 0); printf("Load Input Vector "); printTimer(&timer, 1); printf("Kernel "); printTimer(&timer, 2); printf("Retrieve Output Vector "); printTimer(&timer, 3); printf("Merge Partial Results "); printTimer(&timer, 4); printf("\n\n"); #if CHECK_CORR // Check output startTimer(&timer, 4); val_dt *y_host = (val_dt *) calloc(nrows_pad, 
sizeof(val_dt)); spmv_host(y_host, A, x); bool status = true; i = 0; for (i = 0; i < A->nrows; i++) { if(y_host[i] != z[i]) { status = false; } } if (status) { printf("[" ANSI_COLOR_GREEN "OK" ANSI_COLOR_RESET "] Outputs are equal\n"); } else { printf("[" ANSI_COLOR_RED "ERROR" ANSI_COLOR_RESET "] Outputs differ!\n"); } free(y_host); #endif // Deallocation freeBDBCSRMatrix(A); free(x); free(z); free(y); partition_free(part_info); DPU_ASSERT(dpu_free(dpu_set)); return 0; }
cryptbreaker.c
#define _GNU_SOURCE /* See feature_test_macros(7) */ #include <crypt.h> #include <omp.h> #include <mpi.h> #include <crypt.h> #include <unistd.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #define byte unsigned char #define HASH_SIZE 14 #define VOCABULARY " ./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" int world_size = 1; int world_rank = 0; int vocabulary_size = 0; char* data = NULL; unsigned long data_size = 0; char** hash_list = 0; unsigned int hash_list_size = 0; const int root_id = 0; extern inline int count_vocabulary_size_is_ended() { return VOCABULARY[vocabulary_size] == '\0'; } void count_vocabulary_size() { if (!count_vocabulary_size_is_ended()) { vocabulary_size++; count_vocabulary_size(); } } unsigned long read_file_size(char* name) { FILE *file = fopen(name, "rb"); fseek(file, 0, SEEK_END); unsigned long file_size = ftell(file); fclose(file); return file_size; } char* read_all_file(char* name) { FILE *file = fopen(name, "rb"); fseek(file, 0, SEEK_END); unsigned long file_size = ftell(file); fseek(file, 0, SEEK_SET); char *large_buffer = malloc(sizeof(char)*(file_size + 1)); fread(large_buffer, 1, file_size, file); fclose(file); large_buffer[file_size] = 0; return large_buffer; } void read_data() { data = read_all_file("imput"); data_size = read_file_size("imput"); } int is_frontend() { return world_rank == root_id; } void print_rank_status() { char* status = is_frontend() ? 
"frontend" : "backend"; printf("rank %d of %d (is %s)\n", world_rank, world_size, status); } void print_vocabulary_status() { if (is_frontend()) printf("vocabulary: %d |> '%s'\n", vocabulary_size, VOCABULARY); } void alloc_data() { data = malloc(sizeof(char)*(data_size + 1)); } void broadcast_data() { MPI_Bcast(data, data_size, MPI_CHAR, root_id, MPI_COMM_WORLD); } void broadcast_data_size() { MPI_Bcast(&data_size, 1, MPI_UNSIGNED_LONG, root_id, MPI_COMM_WORLD); } void send_data() { broadcast_data_size(); broadcast_data(); } void recv_data() { broadcast_data_size(); alloc_data(); broadcast_data(); } void calcule_hash_list_size() { hash_list_size = (unsigned int)((data_size + 1) / HASH_SIZE); } void index_hash_list() { #pragma omp parallel for schedule(static) for(int index = 0; index < hash_list_size; index++) { hash_list[index] = &data[HASH_SIZE * index]; hash_list[index][HASH_SIZE - 1] = '\0'; } } void alloc_hash_list() { hash_list = (char**)malloc(sizeof(char*) * hash_list_size); } static int string_comparator(const void* a, const void* b) { return strcmp(*(const char**)a, *(const char**)b); } void sort(char** arr, int n) { qsort(arr, n, sizeof(const char*), string_comparator); } void sort_hash_list() { sort(hash_list, hash_list_size); } void organize_data() { calcule_hash_list_size(); alloc_hash_list(); index_hash_list(); sort_hash_list(); } void break_it(char password[9], int thread_id) { char salt[2] = {'\0', '\0'}; char* hash; struct crypt_data data; data.initialized = 0; for(int index; index < hash_list_size; index++) { char *indexed_hash_list = hash_list[index]; if (indexed_hash_list[0] == '\0' ) continue; if (indexed_hash_list[0] != salt[0] || indexed_hash_list[1] != salt[1]) { salt[0] = indexed_hash_list[0]; salt[1] = indexed_hash_list[1]; hash = crypt_r(password, salt, &data); } if (indexed_hash_list[12] == hash[12] && indexed_hash_list[11] == hash[11] && indexed_hash_list[10] == hash[10] && indexed_hash_list[9] == hash[9] && indexed_hash_list[8] == 
hash[8] && indexed_hash_list[7] == hash[7] && indexed_hash_list[6] == hash[6] && indexed_hash_list[5] == hash[5] && indexed_hash_list[4] == hash[4] && indexed_hash_list[3] == hash[3] && indexed_hash_list[2] == hash[2]) { indexed_hash_list[0] = '\0'; printf("decrypted (THREAD %d RANK %d) |> %s |> %s\n", thread_id, world_rank, hash, password); } } } void combine_1() { #pragma omp parallel for for (int n0 = world_rank; n0 < vocabulary_size; n0 += world_size) { int thread_id = omp_get_thread_num(); char password[9] = {'\0', '\0'}; password[0] = VOCABULARY[n0]; break_it(password, thread_id); } } void combine_2() { #pragma omp parallel for for (int n0 = world_rank; n0 < vocabulary_size; n0 += world_size) { int thread_id = omp_get_thread_num(); char password[9] = {'\0', '\0', '\0'}; password[0] = VOCABULARY[n0]; for (int n1 = 0; n1 < vocabulary_size; n1 ++) { password[1] = VOCABULARY[n1]; break_it(password, thread_id); } } } void combine_3() { #pragma omp parallel for for (int n0 = world_rank; n0 < vocabulary_size; n0 += world_size) { int thread_id = omp_get_thread_num(); char password[9] = {'\0', '\0', '\0', '\0'}; password[0] = VOCABULARY[n0]; for (int n1 = 0; n1 < vocabulary_size; n1 ++) { password[1] = VOCABULARY[n1]; for (int n2 = 0; n2 < vocabulary_size; n2 ++) { password[2] = VOCABULARY[n2]; break_it(password, thread_id); } } } } void combine_4() { #pragma omp parallel for for (int n0 = world_rank; n0 < vocabulary_size; n0 += world_size) { int thread_id = omp_get_thread_num(); char password[9] = {'\0', '\0', '\0', '\0', '\0'}; password[0] = VOCABULARY[n0]; for (int n1 = 0; n1 < vocabulary_size; n1 ++) { password[1] = VOCABULARY[n1]; for (int n2 = 0; n2 < vocabulary_size; n2 ++) { password[2] = VOCABULARY[n2]; for (int n3 = 0; n3 < vocabulary_size; n3 ++) { password[3] = VOCABULARY[n3]; break_it(password, thread_id); } } } } } void combine_5() { #pragma omp parallel for for (int n0 = world_rank; n0 < vocabulary_size; n0 += world_size) { int thread_id = 
omp_get_thread_num(); char password[9] = {'\0', '\0', '\0', '\0', '\0', '\0'}; password[0] = VOCABULARY[n0]; for (int n1 = 0; n1 < vocabulary_size; n1 ++) { password[1] = VOCABULARY[n1]; for (int n2 = 0; n2 < vocabulary_size; n2 ++) { password[2] = VOCABULARY[n2]; for (int n3 = 0; n3 < vocabulary_size; n3 ++) { password[3] = VOCABULARY[n3]; for (int n4 = 0; n4 < vocabulary_size; n4 ++) { password[4] = VOCABULARY[n4]; break_it(password, thread_id); } } } } } } void combine_6() { #pragma omp parallel for for (int n0 = world_rank; n0 < vocabulary_size; n0 += world_size) { int thread_id = omp_get_thread_num(); char password[9] = {'\0', '\0', '\0', '\0', '\0', '\0', '\0'}; password[0] = VOCABULARY[n0]; for (int n1 = 0; n1 < vocabulary_size; n1 ++) { password[1] = VOCABULARY[n1]; for (int n2 = 0; n2 < vocabulary_size; n2 ++) { password[2] = VOCABULARY[n2]; for (int n3 = 0; n3 < vocabulary_size; n3 ++) { password[3] = VOCABULARY[n3]; for (int n4 = 0; n4 < vocabulary_size; n4 ++) { password[4] = VOCABULARY[n4]; for (int n5 = 0; n5 < vocabulary_size; n5 ++) { password[5] = VOCABULARY[n5]; break_it(password, thread_id); } } } } } } } void combine_7() { #pragma omp parallel for for (int n0 = world_rank; n0 < vocabulary_size; n0 += world_size) { int thread_id = omp_get_thread_num(); char password[9] = {'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0'}; password[0] = VOCABULARY[n0]; for (int n1 = 0; n1 < vocabulary_size; n1 ++) { password[1] = VOCABULARY[n1]; for (int n2 = 0; n2 < vocabulary_size; n2 ++) { password[2] = VOCABULARY[n2]; for (int n3 = 0; n3 < vocabulary_size; n3 ++) { password[3] = VOCABULARY[n3]; for (int n4 = 0; n4 < vocabulary_size; n4 ++) { password[4] = VOCABULARY[n4]; for (int n5 = 0; n5 < vocabulary_size; n5 ++) { password[5] = VOCABULARY[n5]; for (int n6 = 0; n6 < vocabulary_size; n6 ++) { password[6] = VOCABULARY[n6]; break_it(password, thread_id); } } } } } } } } void combine_8() { #pragma omp parallel for for (int n0 = world_rank; n0 < vocabulary_size; 
n0 += world_size) { int thread_id = omp_get_thread_num(); char password[9] = {'\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0'}; password[0] = VOCABULARY[n0]; for (int n1 = 0; n1 < vocabulary_size; n1 ++) { password[1] = VOCABULARY[n1]; for (int n2 = 0; n2 < vocabulary_size; n2 ++) { password[2] = VOCABULARY[n2]; for (int n3 = 0; n3 < vocabulary_size; n3 ++) { password[3] = VOCABULARY[n3]; for (int n4 = 0; n4 < vocabulary_size; n4 ++) { password[4] = VOCABULARY[n4]; for (int n5 = 0; n5 < vocabulary_size; n5 ++) { password[5] = VOCABULARY[n5]; for (int n6 = 0; n6 < vocabulary_size; n6 ++) { password[6] = VOCABULARY[n6]; for (int n7 = 0; n7 < vocabulary_size; n7 ++) { password[7] = VOCABULARY[n7]; break_it(password, thread_id); } } } } } } } } } void combine() { combine_1(); combine_2(); combine_3(); combine_4(); combine_5(); combine_6(); combine_7(); combine_8(); } void front_end() { read_data(); send_data(); organize_data(); combine(); } void back_end() { recv_data(); organize_data(); combine(); } int main(int argc, char** argv) { MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &world_size); MPI_Comm_rank(MPI_COMM_WORLD, &world_rank); count_vocabulary_size(); print_rank_status(); print_vocabulary_status(); is_frontend() ? front_end() : back_end(); MPI_Finalize(); return 0; }
parallel.c
/*
 * Copyright (c) 2014 ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetsstrasse 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <bomp_internal.h>

/*
 * These functions implement the PARALLEL construct
 *
 * #pragma omp parallel
 * {
 * body;
 * }
 *
 * is translated into
 * void subfunction (void *data)
 * {
 * use data;
 * body;
 * }
 * setup data;
 * GOMP_parallel_start (subfunction, &data, num_threads);
 * subfunction (&data);
 * GOMP_parallel_end ();
 */

/* Entry point emitted by the compiler at the start of a parallel region.
 * fn/data: the outlined region body and its shared-data frame.
 * nthreads: requested team size; 0 means "let the runtime decide". */
void GOMP_parallel_start(void (*fn)(void *), void *data, unsigned nthreads)
{
    assert(g_bomp_state != NULL);
    /*
     * TODO:
     * 1) work out how many threads can be used for executing the parallel task
     * 2) create a new team for solving the task
     * 3) start the team work
     */

    /* Identify the number of threads that can be spawned and start the processing */
    if (!omp_in_parallel()) {
        /* Only the outermost region starts a team; nested regions fall
         * through and run serialized in the calling thread. */
        g_bomp_state->bomp_threads = omp_get_max_threads();
        /* Fall back to the backend maximum when the caller lets the runtime
         * decide (nthreads == 0) or when dynamic adjustment is enabled and
         * the current team cannot satisfy the request.
         * NOTE(review): raising nthreads to bomp_threads when
         * num_threads < nthreads looks inverted for "dynamic" semantics
         * (which normally shrink the team) — confirm intent. */
        if (nthreads == 0 || (g_bomp_state->behaviour_dynamic
                        && g_bomp_state->num_threads < nthreads)) {
            nthreads = g_bomp_state->bomp_threads;
        }
        g_bomp_state->backend.start_processing(fn, data, nthreads);
    }
    /* Track nesting depth; incremented even for serialized nested regions
     * so GOMP_parallel_end can tell when the outermost region finishes. */
    g_bomp_state->nested++;
}

/* Entry point emitted by the compiler at the end of a parallel region. */
void GOMP_parallel_end(void)
{
    /*
     * TODO:
     * 1)
     */
    assert(g_bomp_state != NULL);
    /* Only the outermost region tears the team down. */
    if (g_bomp_state->nested == 1) {
        g_bomp_state->backend.end_processing();
    }
    g_bomp_state->nested--;
}

/* Combined start/run/end entry point (GCC 4.9+ lowering). Not implemented:
 * the assert fires in debug builds; with NDEBUG the region body runs
 * serially in the calling thread and only the end bookkeeping executes. */
void GOMP_parallel(void (*fn)(void *), void *data, unsigned num_threads,
                   unsigned int flags)
{
    /*
     * TODO:
     * 1) work out how many threads
     * 2) allocate and start a new team
     * 3) call the function
     * 4) call parallel end
     */
    assert(!"NYI");
    fn(data);
    GOMP_parallel_end();
}

#if OMP_VERSION >= OMP_VERSION_40
/* OpenMP 4.0 cancellation support — not implemented. */
bool GOMP_cancel(int which, bool do_cancel)
{
    assert(!"NYI");
    return 0;
}

bool GOMP_cancellation_point(int which)
{
    assert(!"NYI");
    return 0;
}
#endif
variational_distance_calculation_process.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi // Ruben Zorrilla // // #if !defined(KRATOS_VARIATIONAL_DISTANCE_CALCULATION_PROCESS_INCLUDED ) #define KRATOS_VARIATIONAL_DISTANCE_CALCULATION_PROCESS_INCLUDED // System includes #include <string> #include <iostream> #include <algorithm> // External includes // Project includes #include "includes/define.h" #include "containers/model.h" #include "includes/kratos_flags.h" #include "elements/distance_calculation_element_simplex.h" #include "linear_solvers/linear_solver.h" #include "processes/process.h" #include "modeler/connectivity_preserve_modeler.h" #include "solving_strategies/builder_and_solvers/residualbased_block_builder_and_solver.h" #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h" #include "solving_strategies/strategies/residualbased_linear_strategy.h" #include "utilities/variable_utils.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// Short class definition. /**takes a model part full of SIMPLICIAL ELEMENTS (triangles and tetras) and recomputes a signed distance function mantaining as much as possible the position of the zero of the function prior to the call. 
This is achieved by minimizing the function ( 1 - norm( gradient( distance ) )**2 with the restriction that "distance" is a finite elment function */ template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver > class VariationalDistanceCalculationProcess : public Process { public: KRATOS_DEFINE_LOCAL_FLAG(PERFORM_STEP1); KRATOS_DEFINE_LOCAL_FLAG(DO_EXPENSIVE_CHECKS); KRATOS_DEFINE_LOCAL_FLAG(CALCULATE_EXACT_DISTANCES_TO_PLANE); ///@name Type Definitions ///@{ typedef Scheme< TSparseSpace, TDenseSpace > SchemeType; typedef typename SchemeType::Pointer SchemePointerType; typedef typename BuilderAndSolver<TSparseSpace,TDenseSpace,TLinearSolver>::Pointer BuilderSolverPointerType; typedef ImplicitSolvingStrategy< TSparseSpace, TDenseSpace, TLinearSolver > SolvingStrategyType; ///@} ///@name Pointer Definitions /// Pointer definition of VariationalDistanceCalculationProcess KRATOS_CLASS_POINTER_DEFINITION(VariationalDistanceCalculationProcess); ///@} ///@name Life Cycle ///@{ /**This process recomputed the distance function mantaining the zero of the existing distance distribution * for this reason the DISTANCE should be initialized to values distinct from zero in at least some portions of the domain * alternatively, the DISTANCE shall be fixed to zero at least on some nodes, and the process will compute a positive distance * respecting that zero * @param base_model_parr - is the model part on the top of which the calculation will be performed * @param plinear_solver - linear solver to be used internally * @max_iterations - maximum number of iteration to be employed in the nonlinear optimization process. 
* - can also be set to 0 if a (very) rough approximation is enough * * EXAMPLE OF USAGE FROM PYTHON: * class distance_linear_solver_settings: solver_type = "AMGCL" tolerance = 1E-3 max_iteration = 200 scaling = False krylov_type = "CG" smoother_type = "SPAI0" verbosity = 0 import linear_solver_factory distance_linear_solver = linear_solver_factory.ConstructSolver(distance_linear_solver_settings) max_iterations=1 distance_calculator = VariationalDistanceCalculationProcess2D(fluid_model_part, distance_linear_solver, max_iterations) distance_calculator.Execute() */ VariationalDistanceCalculationProcess( ModelPart& rBaseModelPart, typename TLinearSolver::Pointer pLinearSolver, unsigned int MaxIterations = 10, Flags Options = CALCULATE_EXACT_DISTANCES_TO_PLANE.AsFalse(), std::string AuxPartName = "RedistanceCalculationPart" ) : mDistancePartIsInitialized(false), mMaxIterations(MaxIterations), mrModel( rBaseModelPart.GetModel() ), mrBaseModelPart (rBaseModelPart), mOptions( Options ), mAuxModelPartName( AuxPartName ) { KRATOS_TRY ValidateInput(); // Generate an auxilary model part and populate it by elements of type DistanceCalculationElementSimplex ReGenerateDistanceModelPart(rBaseModelPart); auto p_builder_solver = Kratos::make_shared<ResidualBasedBlockBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> >(pLinearSolver); InitializeSolutionStrategy(p_builder_solver); KRATOS_CATCH("") } /// Constructor with custom Builder And Solver /** To be used in the trilinos version, since the trilinos builder and * solver needs additional data (the EpetraComm). * @param rBaseModelPart Reference ModelPart for distance calculation. * @param pLinearSolver Linear solver for the distance system. * @param MaxIterations Maximum number of non-linear optimization iterations. * @param Options Configuration flags for the procedure. * @param AuxPartName Name to be used for the internal distance calculation ModelPart. 
*/ VariationalDistanceCalculationProcess( ModelPart& rBaseModelPart, typename TLinearSolver::Pointer pLinearSolver, BuilderSolverPointerType pBuilderAndSolver, unsigned int MaxIterations = 10, Flags Options = CALCULATE_EXACT_DISTANCES_TO_PLANE.AsFalse(), std::string AuxPartName = "RedistanceCalculationPart" ) : mDistancePartIsInitialized(false), mMaxIterations(MaxIterations), mrModel( rBaseModelPart.GetModel() ), mrBaseModelPart (rBaseModelPart), mOptions( Options ), mAuxModelPartName( AuxPartName ) { KRATOS_TRY ValidateInput(); // Generate an auxilary model part and populate it by elements of type DistanceCalculationElementSimplex ReGenerateDistanceModelPart(rBaseModelPart); InitializeSolutionStrategy(pBuilderAndSolver); KRATOS_CATCH("") } /// Destructor. ~VariationalDistanceCalculationProcess() override { Clear(); }; ///@} ///@name Operators ///@{ void operator()() { Execute(); } ///@} ///@name Operations ///@{ void Execute() override { KRATOS_TRY; if(mDistancePartIsInitialized == false){ ReGenerateDistanceModelPart(mrBaseModelPart); } ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName ); // TODO: check flag PERFORM_STEP1 // Step1 - solve a poisson problem with a source term which depends on the sign of the existing distance function r_distance_model_part.pGetProcessInfo()->SetValue(FRACTIONAL_STEP,1); // Unfix the distances const int nnodes = static_cast<int>(r_distance_model_part.NumberOfNodes()); block_for_each(r_distance_model_part.Nodes(), [](Node<3>& rNode){ double& d = rNode.FastGetSolutionStepValue(DISTANCE); double& fix_flag = rNode.FastGetSolutionStepValue(FLAG_VARIABLE); // Free the DISTANCE values fix_flag = 1.0; rNode.Free(DISTANCE); // Save the distances rNode.SetValue(DISTANCE, d); if(d == 0){ d = 1.0e-15; fix_flag = -1.0; rNode.Fix(DISTANCE); } else { if(d > 0.0){ d = 1.0e15; // Set to a large number, to make sure that that the minimal distance is computed according to CaculateTetrahedraDistances } else { d = -1.0e15; } } 
}); block_for_each(r_distance_model_part.Elements(), [this](Element& rElem){ array_1d<double,TDim+1> distances; auto& geom = rElem.GetGeometry(); for(unsigned int i=0; i<TDim+1; i++){ distances[i] = geom[i].GetValue(DISTANCE); } const array_1d<double,TDim+1> original_distances = distances; // The element is cut by the interface if(this->IsSplit(distances)){ // Compute the unsigned distance using GeometryUtils if (mOptions.Is(CALCULATE_EXACT_DISTANCES_TO_PLANE)) { GeometryUtils::CalculateExactDistancesToPlane(geom, distances); } else { if(TDim==3){ GeometryUtils::CalculateTetrahedraDistances(geom, distances); } else { GeometryUtils::CalculateTriangleDistances(geom, distances); } } // Assign the sign using the original distance values for(unsigned int i = 0; i < TDim+1; ++i){ if(original_distances[i] < 0){ distances[i] = -distances[i]; } } for(unsigned int i = 0; i < TDim+1; ++i){ double &d = geom[i].FastGetSolutionStepValue(DISTANCE); double &fix_flag = geom[i].FastGetSolutionStepValue(FLAG_VARIABLE); geom[i].SetLock(); if(std::abs(d) > std::abs(distances[i])){ d = distances[i]; } fix_flag = -1.0; geom[i].Fix(DISTANCE); geom[i].UnSetLock(); } } }); // SHALL WE SYNCHRONIZE SOMETHING IN HERE?¿?¿??¿ WE'VE CHANGED THE NODAL DISTANCE VALUES FROM THE ELEMENTS... 
this->SynchronizeFixity(); this->SynchronizeDistance(); // Compute the maximum and minimum distance for the fixed nodes double max_dist = 0.0; double min_dist = 0.0; for(int i_node = 0; i_node < nnodes; ++i_node){ auto it_node = r_distance_model_part.NodesBegin() + i_node; if(it_node->IsFixed(DISTANCE)){ const double& d = it_node->FastGetSolutionStepValue(DISTANCE); if(d > max_dist){ max_dist = d; } if(d < min_dist){ min_dist = d; } } } // Synchronize the maximum and minimum distance values const auto &r_communicator = r_distance_model_part.GetCommunicator().GetDataCommunicator(); max_dist = r_communicator.MaxAll(max_dist); min_dist = r_communicator.MinAll(min_dist); // Assign the max dist to all of the non-fixed positive nodes // and the minimum one to the non-fixed negatives block_for_each(r_distance_model_part.Nodes(), [&min_dist, &max_dist](Node<3>& rNode){ if(!rNode.IsFixed(DISTANCE)){ double& d = rNode.FastGetSolutionStepValue(DISTANCE); if(d>0){ d = max_dist; } else { d = min_dist; } } }); mpSolvingStrategy->Solve(); // Step2 - minimize the target residual r_distance_model_part.pGetProcessInfo()->SetValue(FRACTIONAL_STEP,2); for(unsigned int it = 0; it<mMaxIterations; it++){ mpSolvingStrategy->Solve(); } // Unfix the distances VariableUtils().ApplyFixity(DISTANCE, false, r_distance_model_part.Nodes()); KRATOS_CATCH("") } void Clear() override { if(mrModel.HasModelPart( mAuxModelPartName )) mrModel.DeleteModelPart( mAuxModelPartName ); mDistancePartIsInitialized = false; mpSolvingStrategy->Clear(); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "VariationalDistanceCalculationProcess"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << "VariationalDistanceCalculationProcess"; } /// Print object's data. 
void PrintData(std::ostream& rOStream) const override { } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ bool mDistancePartIsInitialized; unsigned int mMaxIterations; Model& mrModel; ModelPart& mrBaseModelPart; Flags mOptions; std::string mAuxModelPartName; typename SolvingStrategyType::UniquePointer mpSolvingStrategy; ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ void ValidateInput() { const DataCommunicator& r_comm = mrBaseModelPart.GetCommunicator().GetDataCommunicator(); int num_elements = mrBaseModelPart.NumberOfElements(); int num_nodes = mrBaseModelPart.NumberOfNodes(); if (num_elements > 0) { const auto geometry_family = mrBaseModelPart.ElementsBegin()->GetGeometry().GetGeometryFamily(); KRATOS_ERROR_IF( (TDim == 2) && (geometry_family != GeometryData::KratosGeometryFamily::Kratos_Triangle) ) << "In 2D the element type is expected to be a triangle." << std::endl; KRATOS_ERROR_IF( (TDim == 3) && (geometry_family != GeometryData::KratosGeometryFamily::Kratos_Tetrahedra) ) << "In 3D the element type is expected to be a tetrahedron" << std::endl; } KRATOS_ERROR_IF(r_comm.SumAll(num_nodes) == 0) << "The model part has no nodes." << std::endl; KRATOS_ERROR_IF(r_comm.SumAll(num_elements) == 0) << "The model Part has no elements." 
<< std::endl; // Check that required nodal variables are present VariableUtils().CheckVariableExists<Variable<double > >(DISTANCE, mrBaseModelPart.Nodes()); VariableUtils().CheckVariableExists<Variable<double > >(FLAG_VARIABLE, mrBaseModelPart.Nodes()); } void InitializeSolutionStrategy(BuilderSolverPointerType pBuilderAndSolver) { // Generate a linear strategy auto p_scheme = Kratos::make_shared< ResidualBasedIncrementalUpdateStaticScheme< TSparseSpace,TDenseSpace > >(); ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName ); bool CalculateReactions = false; bool ReformDofAtEachIteration = false; bool CalculateNormDxFlag = false; mpSolvingStrategy = Kratos::make_unique<ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver> >( r_distance_model_part, p_scheme, pBuilderAndSolver, CalculateReactions, ReformDofAtEachIteration, CalculateNormDxFlag); // TODO: check flag DO_EXPENSIVE_CHECKS mpSolvingStrategy->Check(); } virtual void ReGenerateDistanceModelPart(ModelPart& rBaseModelPart) { KRATOS_TRY if(mrModel.HasModelPart( mAuxModelPartName )) mrModel.DeleteModelPart( mAuxModelPartName ); // Ensure that the nodes have distance as a DOF VariableUtils().AddDof<Variable<double> >(DISTANCE, rBaseModelPart); // Generate ModelPart& r_distance_model_part = mrModel.CreateModelPart( mAuxModelPartName ); Element::Pointer p_distance_element = Kratos::make_intrusive<DistanceCalculationElementSimplex<TDim> >(); r_distance_model_part.GetNodalSolutionStepVariablesList() = rBaseModelPart.GetNodalSolutionStepVariablesList(); ConnectivityPreserveModeler modeler; modeler.GenerateModelPart(rBaseModelPart, r_distance_model_part, *p_distance_element); // Using the conditions to mark the boundary with the flag boundary // Note that we DO NOT add the conditions to the model part VariableUtils().SetFlag<ModelPart::NodesContainerType>(BOUNDARY, false, r_distance_model_part.Nodes()); // Note that above we have assigned the same geometry. 
Thus the flag is // set in the distance model part despite we are iterating the base one for (auto it_cond = rBaseModelPart.ConditionsBegin(); it_cond != rBaseModelPart.ConditionsEnd(); ++it_cond){ Geometry< Node<3> >& geom = it_cond->GetGeometry(); for(unsigned int i=0; i<geom.size(); i++){ geom[i].Set(BOUNDARY,true); } } rBaseModelPart.GetCommunicator().SynchronizeOrNodalFlags(BOUNDARY); mDistancePartIsInitialized = true; KRATOS_CATCH("") } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ bool IsSplit(const array_1d<double,TDim+1> &rDistances){ unsigned int positives = 0, negatives = 0; for(unsigned int i = 0; i < TDim+1; ++i){ if(rDistances[i] >= 0){ ++positives; } else { ++negatives; } } if (positives > 0 && negatives > 0){ return true; } return false; } void SynchronizeDistance(){ ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName ); auto &r_communicator = r_distance_model_part.GetCommunicator(); // Only required in the MPI case if(r_communicator.TotalProcesses() != 1){ int nnodes = static_cast<int>(r_distance_model_part.NumberOfNodes()); // Set the distance absolute value #pragma omp parallel for for(int i_node = 0; i_node < nnodes; ++i_node){ auto it_node = r_distance_model_part.NodesBegin() + i_node; it_node->FastGetSolutionStepValue(DISTANCE) = std::abs(it_node->FastGetSolutionStepValue(DISTANCE)); } // Synchronize the unsigned value to minimum r_communicator.SynchronizeCurrentDataToMin(DISTANCE); // Set the distance sign again by retrieving it from the non-historical database #pragma omp parallel for for(int i_node = 0; i_node < nnodes; ++i_node){ auto it_node = r_distance_model_part.NodesBegin() + i_node; if(it_node->GetValue(DISTANCE) < 0.0){ it_node->FastGetSolutionStepValue(DISTANCE) = 
-it_node->FastGetSolutionStepValue(DISTANCE); } } } } void SynchronizeFixity(){ ModelPart& r_distance_model_part = mrModel.GetModelPart( mAuxModelPartName ); auto &r_communicator = r_distance_model_part.GetCommunicator(); // Only required in the MPI case if(r_communicator.TotalProcesses() != 1){ int nnodes = static_cast<int>(r_distance_model_part.NumberOfNodes()); // Synchronize the fixity flag variable to minium // (-1.0 means fixed and 1.0 means free) r_communicator.SynchronizeCurrentDataToMin(FLAG_VARIABLE); // Set the fixity according to the synchronized flag #pragma omp parallel for for(int i_node = 0; i_node < nnodes; ++i_node){ auto it_node = r_distance_model_part.NodesBegin() + i_node; const double &r_fix_flag = it_node->FastGetSolutionStepValue(FLAG_VARIABLE); if (r_fix_flag == -1.0){ it_node->Fix(DISTANCE); } } } } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. VariationalDistanceCalculationProcess& operator=(VariationalDistanceCalculationProcess const& rOther); /// Copy constructor. //VariationalDistanceCalculationProcess(VariationalDistanceCalculationProcess const& rOther); ///@} }; // Class VariationalDistanceCalculationProcess //avoiding using the macro since this has a template parameter. 
If there was no template plase use the KRATOS_CREATE_LOCAL_FLAG macro template< unsigned int TDim,class TSparseSpace, class TDenseSpace, class TLinearSolver > const Kratos::Flags VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>::PERFORM_STEP1(Kratos::Flags::Create(0)); template< unsigned int TDim,class TSparseSpace, class TDenseSpace, class TLinearSolver > const Kratos::Flags VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>::DO_EXPENSIVE_CHECKS(Kratos::Flags::Create(1)); template< unsigned int TDim,class TSparseSpace, class TDenseSpace, class TLinearSolver > const Kratos::Flags VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>::CALCULATE_EXACT_DISTANCES_TO_PLANE(Kratos::Flags::Create(2)); ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// input stream function template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver> inline std::istream& operator >> (std::istream& rIStream, VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>& rThis); /// output stream function template< unsigned int TDim, class TSparseSpace, class TDenseSpace, class TLinearSolver> inline std::ostream& operator << (std::ostream& rOStream, const VariationalDistanceCalculationProcess<TDim,TSparseSpace,TDenseSpace,TLinearSolver>& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} } // namespace Kratos. #endif // KRATOS_VARIATIONAL_DISTANCE_CALCULATION_PROCESS_INCLUDED defined
barrDownFork.c
/*
 * OpenMP barrier-matching test fixture.
 *
 * The parallel loop contains three barriers plus an early `break`.
 * Because `diff` is the constant 9, the `if (diff < 10)` branch is taken
 * by every thread on the first iteration, so each thread executes exactly
 * one `#pragma omp barrier` and then leaves the loop; the second and
 * third barriers are dead code.  A barrier-analysis / verification tool
 * must prove that all threads take the same exit path, otherwise it would
 * report a thread stuck forever at an unmatched barrier.
 *
 * NOTE(review): X and Y are shared and updated without atomics inside the
 * parallel region — presumably intentional for this fixture, since the
 * property under test is barrier matching, not data-race freedom.
 */
int main() {
  int X, Y;   /* shared across the team */
  X = 0;
  Y = 0;
#pragma omp parallel
  {
    int diff;   /* private per thread */
    diff = 9;   /* constant: guarantees the break below is always taken */
    while (X < 10) {
      // while (1) {
      X = X + 2;
#pragma omp barrier
      if (diff < 10) {
        break;  /* every thread exits here, after exactly one barrier */
      }
      /* Unreachable: diff < 10 always holds above. */
      Y = Y + 1;
      X = X + 1;
#pragma omp barrier
      Y = Y + 2;
#pragma omp barrier
    }
    X = Y + 10;
  }
}

/* Commented-out variant kept for reference: identical except the
 * `// while (1)` alternative loop header is not present. */
//int main() {
//  int X, Y;
//  X = 0;
//  Y = 0;
//#pragma omp parallel
//  {
//    int diff;
//    diff = 9;
//    while (X < 10) {
//      X = X + 2;
//#pragma omp barrier
//      if (diff < 10) {
//        break;
//      }
//      Y = Y + 1;
//      X = X + 1;
//#pragma omp barrier
//      Y = Y + 2;
//#pragma omp barrier
//    }
//    X = Y + 10;
//  }
//}
Utilities.h
// // Utilities.h // Gauss // // Created by David Levin on 1/31/17. // // #ifndef Utilities_h #define Utilities_h #ifdef GAUSS_OPENMP #include <omp.h> #endif #include "State.h" //Eigen Stuff #include <Eigen/Dense> #include <Eigen/Sparse> #define STRINGIFY(s) #s #define DataDir(s) STRINGIFY(s) #define Vert(f, c) V(F(f), c) //Handle raw vertex, face pointers #define Vec3f(x,y,z) std::array<float,3>({{x,y,z}}).data() #define Vec3d(x,y,z) std::array<double,3>({{x,y,z}}).data() //Random utilities and classes that I might need namespace Gauss { //Assignment operators to handle interoperability of differenty types template<typename Src, typename Dst> class increment { public: inline increment(Src &src, const Dst &dst) { std::cout<<"Default increment operator \n"; } protected: private: }; template<typename Src, typename Dst> class update { public: inline update(Src &src, const Dst &dst) { std::cout<<"Default increment operator \n"; } protected: private: }; //increment specializations //state and Eigen::Vectors //static if using templates taken from //http://baptiste-wicht.com/posts/2015/07/simulate-static_if-with-c11c14.html struct identity { template<typename T> inline T operator()(T&& x) const { return std::forward<T>(x); } }; template<bool Cond> struct statement { template<typename F> inline void then(const F& f){ f(identity()); } template<typename F> inline void else_(const F&){} }; template<> struct statement<false> { template<typename F> inline void then(const F&){} template<typename F> inline void else_(const F& f){ f(identity()); } }; template<bool Cond, typename F> inline statement<Cond> static_if(F const& f){ statement<Cond> if_; if_.then(f); return if_; } //deal with arrays template<typename ...T> class ArrayUint { public: inline ArrayUint(T... 
vals) : m_val{vals...} { } inline unsigned int operator[](unsigned int index) { assert(index < m_N); return m_val[index]; } constexpr unsigned int size() { return m_N; } protected: unsigned int m_N = sizeof...(T); unsigned int m_val[sizeof...(T)]; }; std::string dataDir(); std::string timeStampString(std::string toStamp); //Make handling parallel stuff easier template<bool IsParallel> struct forLoop { template <typename T, typename Assembler, typename Func> inline forLoop(T &iterateOver, Assembler &assembler, Func &&f) { //iterate for(auto &itr : iterateOver) { f(assembler, itr); } } }; //is parrallel checker template<typename Obj> struct IsParallel { public: constexpr static bool value = false; }; //Direct access into a tuple to run a designated function #if defined(_WIN32) || defined(_WIN64) || defined (WIN32) //Slow version that doesn't break Visual Studio Compiler template<unsigned int CheckIndex> class ApplyTuple { public: template<typename Tuple, typename Func, typename ...Params> inline static decltype(auto) apply(Tuple &tuple, unsigned int index, Func &func, Params ...params) { if(index == CheckIndex) { return func(std::get<CheckIndex>(tuple), params...); } return ApplyTuple<CheckIndex-1>::apply(tuple,index, func, params...); } }; template<> class ApplyTuple<0> { public: template<typename Tuple, typename Func, typename ...Params> inline static decltype(auto) apply(Tuple &tuple, unsigned int index, Func &func, Params ...params) { if(index == 0) { return func(std::get<0>(tuple), params...); } std::cout<<"Error in ApplyTuple, no index found \n"; exit(0); } }; template<typename Tuple, typename Func, typename ...Params> inline decltype(auto) apply(Tuple &tuple, unsigned int index, Func &func, Params ...params){ return ApplyTuple<std::tuple_size<Tuple>::value -1>::apply(tuple, index, func, params...); } #else //O(1) version for gcc and clang template <typename T> struct FunctionProperties { using ReturnType = void; }; template <typename CT, typename RT, 
typename... Args> struct FunctionProperties<RT(CT::*)(Args...) const > { using ReturnType = RT; }; template <typename CT, typename RT, typename... Args> struct FunctionProperties<RT(CT::*)(Args...)> { using ReturnType = RT; }; // for function pointers template <typename RT, typename... Args> struct FunctionProperties<RT (*)(Args...)> { using ReturnType = RT; }; //virutal function-like behaviour for tuples template<int N, class Tuple, class FunctionWrapper, typename ...Params> inline auto apply_one(Tuple & p, FunctionWrapper &func, Params ...params) { return func(std::get<N>(p), params...); } //define function table template<typename A, typename B, typename C,typename D, typename ...E> class FunctionTable { }; template<std::size_t... Is, typename Tuple, typename FunctionWrapper, typename ReturnType, typename ...Params> class FunctionTable<std::index_sequence<Is...>, Tuple, FunctionWrapper, ReturnType, Params...> { public: static ReturnType (*lookup_table[std::tuple_size<Tuple>::value])(Tuple&, FunctionWrapper &, Params ...); }; template<std::size_t... Is, typename Tuple, typename FunctionWrapper, typename ReturnType, typename ...Params> ReturnType (*FunctionTable<std::index_sequence<Is...>, Tuple, FunctionWrapper, ReturnType, Params...>::lookup_table[std::tuple_size<Tuple>::value])(Tuple&, FunctionWrapper &, Params ... ) = { &apply_one<Is, Tuple, FunctionWrapper, Params...>... }; template<typename Tuple, typename Func, typename ...Params> inline decltype(auto) apply(Tuple &tuple, unsigned int index, Func &func, Params ...params) { using ReturnType = decltype(func(std::get<0>(tuple), params...)); return FunctionTable<std::make_index_sequence<std::tuple_size<typename std::remove_reference<decltype(tuple)>::type>::value>, typename std::remove_reference<decltype(tuple)>::type, Func, ReturnType, Params... 
>::lookup_table[index](tuple, func, params...); } #endif #ifdef GAUSS_OPENMP //Parallel version template<> struct forLoop<true> { template <typename Func, typename Assembler, typename T> inline forLoop(T &iterateOver, Assembler &assembler, Func &&f) { #pragma omp parallel { #pragma omp for //iterate for(unsigned int ii=0; ii < iterateOver.size(); ++ii) { f(assembler.getImpl()[omp_get_thread_num()], iterateOver[ii]); } } } }; #endif } #endif /* Utilities_h */
fused_rowwise_nbit_conversion_ops.h
#pragma once #include <algorithm> #include <vector> #ifdef _OPENMP #include <omp.h> #endif #include "caffe2/core/context.h" #include "caffe2/core/logging.h" #include "caffe2/core/operator.h" // for param_search_greedy #include "caffe2/operators/fused_rowwise_nbitfake_conversion_ops.h" #include "caffe2/perfkernels/fused_nbit_rowwise_conversion.h" namespace caffe2 { template < int BIT_RATE, typename T, void (*convert)(float* dst, const T* src, size_t N), bool GREEDY = false> class FloatToFusedNBitRowwiseQuantizedOp final : public Operator<CPUContext> { public: FloatToFusedNBitRowwiseQuantizedOp(const OperatorDef& def, Workspace* ws) : Operator<CPUContext>(def, ws) {} ~FloatToFusedNBitRowwiseQuantizedOp() override {} bool RunOnDevice() override { CAFFE_ENFORCE(internal::is_little_endian(), "Unsupported endianness"); const auto& input = Input(DATA_FLOAT); CAFFE_ENFORCE_GT(input.dim(), 0, "Input's dimension must be at least 1"); const auto input_rows = input.size_to_dim(input.dim() - 1); const auto input_columns = input.size(input.dim() - 1); static_assert(8 % BIT_RATE == 0, "BIT_RATE must divide 8"); constexpr int NUM_ELEM_PER_BYTE = 8 / BIT_RATE; CAFFE_ENFORCE_EQ( input.dim(input.dim() - 1) % NUM_ELEM_PER_BYTE, 0, "FloatToFused" + caffe2::to_string(BIT_RATE) + "BitRowwiseQuantizedOp only works for the number of " "columns a multiple of " + caffe2::to_string(NUM_ELEM_PER_BYTE)); // The "fused" representation stores the scale and bias with the // row-wise quantized data in one tensor. // Since we represent the scale and bias in 16-bit float, we'll use the // last 4 bytes of each row for scale (2 bytes) and bias (2 bytes). // | ... quantized data ... 
| scale | bias | // | number_of_columns | 2B | 2B | auto output_dimensions = input.sizes().vec(); output_dimensions[input.dim() - 1] = static_cast<std::int64_t>( (input_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE + 2 * sizeof(at::Half)); auto* output = Output( DATA_FUSED_SCALE_BIAS, output_dimensions, at::dtype<std::uint8_t>()); const auto* input_data = input.template data<T>(); auto* output_data = output->template mutable_data<std::uint8_t>(); if (!GREEDY && std::is_same<T, float>::value) { // fast path CAFFE_ENFORCE( reinterpret_cast<void (*)(float*, const float*, std::size_t)>( convert) == internal::convertfp32fp32, "When T == float, convert must be convertfp32fp32"); FloatToFusedNBitRowwiseQuantizedSBHalf( BIT_RATE, reinterpret_cast<const float*>(input_data), input_rows, input_columns, output_data); } else { const auto output_columns = output->size(output->dim() - 1); #ifdef _OPENMP vector<float> tmp_vec( input_columns * (GREEDY ? omp_get_max_threads() : 1)); #else vector<float> tmp_vec(input_columns); #endif #pragma omp parallel for if (GREEDY) for (int row = 0; row < input_rows; ++row) { float* tmp = tmp_vec.data(); #ifdef _OPENMP if (GREEDY) { tmp = &tmp_vec[omp_get_thread_num() * input_columns]; } #endif convert(tmp, input_data + row * input_columns, input_columns); std::uint8_t* output_row = output_data + row * output_columns; at::Half* output_row_scale = reinterpret_cast<at::Half*>( output_row + (input_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE); at::Half* output_row_bias = reinterpret_cast<at::Half*>( output_row + (input_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE + sizeof(at::Half)); float Xmin = *std::min_element(tmp, tmp + input_columns); float Xmax = *std::max_element(tmp, tmp + input_columns); if (GREEDY) { internal::param_search_greedy( tmp, input_columns, 200, 0.16, Xmin, Xmax, BIT_RATE); } // Round Xmin to fp16 to match with dequantization that will use fp16 // for Xmin. 
Xmin = static_cast<at::Half>(Xmin); const float range = Xmax - Xmin; // Round scale to fp16 to match with dequantization that will use fp16 // for scale. // Set scale to 1.0f for the corner case of Xmax == Xmin . // Any non-zero scale would work because during quantization // (X - Xmin) / scale will be 0 for all X unless scale is 0. at::Half scale = range == 0 ? 1.0f : range / ((1 << BIT_RATE) - 1); float inverse_scale = scale == 0 ? 1.0f : 1.0f / scale; if (scale == 0 || std::isinf(inverse_scale)) { // Corner case handling when Xmax == Xmin // Any scale would work because X - Xmin will be 0 for all X scale = 1.0f; inverse_scale = 1.0f; } *output_row_scale = scale; *output_row_bias = Xmin; for (const auto col : c10::irange(input_columns)) { float X = tmp[col]; std::uint8_t quantized = std::max( 0, std::min<int>( std::lrintf((X - Xmin) * inverse_scale), (1 << BIT_RATE) - 1)); if (col % NUM_ELEM_PER_BYTE == 0) { output_row[col / NUM_ELEM_PER_BYTE] = quantized; } else { output_row[col / NUM_ELEM_PER_BYTE] |= (quantized << ((col % NUM_ELEM_PER_BYTE) * BIT_RATE)); } } } } // GREEDY || !std::is_same<T, float>::value return true; } private: INPUT_TAGS(DATA_FLOAT); OUTPUT_TAGS(DATA_FUSED_SCALE_BIAS); }; template < int BIT_RATE, typename T, void (*convert)(T* dst, const float* src, size_t N)> class FusedNBitRowwiseQuantizedToFloatOp final : public Operator<CPUContext> { public: FusedNBitRowwiseQuantizedToFloatOp(const OperatorDef& def, Workspace* ws) : Operator<CPUContext>(def, ws) {} ~FusedNBitRowwiseQuantizedToFloatOp() override {} bool RunOnDevice() override { CAFFE_ENFORCE(internal::is_little_endian(), "Unsupported endianness"); const auto& input = Input(DATA_FUSED_SCALE_BIAS); CAFFE_ENFORCE_GT(input.dim(), 0, "Input's dimension must be at least 1"); const auto input_rows = input.size_to_dim(input.dim() - 1); const auto input_columns = input.size(input.dim() - 1); static_assert(8 % BIT_RATE == 0, "BIT_RATE must divide 8"); constexpr int NUM_ELEM_PER_BYTE = 8 / BIT_RATE; 
// The last 4 bytes per row are two fp16 scale and bias. // The rest of input_columns is the number of values in the original row. auto output_dimensions = input.sizes().vec(); output_dimensions[input.dim() - 1] = static_cast<std::int64_t>(input_columns - 2 * sizeof(at::Half)) * NUM_ELEM_PER_BYTE; auto* output = Output(DATA_FLOAT, output_dimensions, at::dtype<T>()); const auto output_columns = output->size(output->dim() - 1); const auto* input_data = input.template data<std::uint8_t>(); T* output_data = output->template mutable_data<T>(); if (std::is_same<T, float>::value) { // fast path CAFFE_ENFORCE( reinterpret_cast<void (*)(float*, const float*, std::size_t)>( convert) == internal::convertfp32fp32, "When T == float, convert must be convertfp32fp32"); FusedNBitRowwiseQuantizedSBHalfToFloat( BIT_RATE, input_data, input_rows, input_columns, reinterpret_cast<float*>(output_data)); } else { std::vector<float> tmp(output_columns); // NOLINTNEXTLINE(clang-diagnostic-sign-compare) for (const auto row : c10::irange(input_rows)) { const std::uint8_t* input_row = input_data + row * input_columns; float scale = *reinterpret_cast<const at::Half*>( input_row + (output_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE); float bias = *reinterpret_cast<const at::Half*>( input_row + (output_columns + NUM_ELEM_PER_BYTE - 1) / NUM_ELEM_PER_BYTE + sizeof(at::Half)); for (const auto col : c10::irange(output_columns)) { std::uint8_t quantized = input_row[col / NUM_ELEM_PER_BYTE]; quantized >>= (col % NUM_ELEM_PER_BYTE) * BIT_RATE; quantized &= (1 << BIT_RATE) - 1; tmp[col] = scale * quantized + bias; } convert(output_data + row * output_columns, tmp.data(), output_columns); } } return true; } private: INPUT_TAGS(DATA_FUSED_SCALE_BIAS); OUTPUT_TAGS(DATA_FLOAT); }; } // namespace caffe2
elementwise_add_arm_func.h
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#ifdef ELEMENTWISEADD_OP

#pragma once

#include "operators/math/element_wise.h"
#include "operators/op_param.h"
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h>
#endif

namespace paddle_mobile {
namespace operators {

// CPU kernel entry point for the elementwise-add operator: unpacks the
// operator parameters and forwards to the shared math helper with the
// identity activation (plain add, no nonlinearity applied afterwards).
// `axis` is forwarded as-is; presumably it selects the broadcast axis
// when shapes differ — semantics live in math::AddElememtWise, not here.
template <typename T>
inline void ElementwiseAddCompute(const ElementwiseAddParam<CPU> &param) {
  const framework::Tensor *input_x = param.InputX();
  const framework::Tensor *input_y = param.InputY();
  framework::Tensor *output = param.Out();
  int axis = param.Axis();
  math::AddElememtWise<IDENTITY>(input_x, input_y, axis, output);
}

// Primary template: deliberate no-op fallback for element types that have
// no specialization below.
template <typename Dtype, ActivationType Act>
struct AddElememtWiseStruct {
  void operator()(const Tensor *X, const Tensor *Y, const int Axis,
                  Tensor *Out) {}
};

// int specialization: elementwise Out[i] = Active<Act>(input[i] + bias[i]),
// handled only when the two shapes are identical.
//
// NOTE(review): `Axis` is ignored here, and when x_dims != y_dims the body
// silently does nothing (no broadcast path, no error) — confirm callers
// never reach this specialization with mismatched shapes.
template <ActivationType Act>
struct AddElememtWiseStruct<int, Act> {
  void operator()(const Tensor *input, const Tensor *bias, const int Axis,
                  Tensor *output) {
    const auto &x_dims = input->dims();
    const auto &y_dims = bias->dims();
    const int *input_data = input->data<int>();
    const int *bias_data = bias->data<int>();
    int *output_data = output->mutable_data<int>();

    if (x_dims == y_dims) {
      // With elementwise_num fixed at 1, `channels` collapses the whole
      // shape into a flat element count, so the loops below reduce to a
      // single flat per-element pass: output[j] = Active(input[j] + bias[j]).
      size_t channels = 1;
      size_t elementwise_num = 1;
      // NOTE(review): signed `i`/`j` compared against unsigned sizes —
      // harmless for realistic tensor sizes but trips -Wsign-compare.
      for (int i = 0; i < y_dims.size(); ++i) {
        channels *= y_dims[i];
      }
#pragma omp parallel for
      for (int j = 0; j < channels; ++j) {
        // offset == j here (leading factor is 0, elementwise_num is 1);
        // the expression keeps the generic (batch, channel, inner) form.
        size_t offset = (0 * channels + j) * elementwise_num;
        const int *input = input_data + offset;
        const int bias = bias_data[j];
        int *output = output_data + offset;
        for (int k = 0; k < elementwise_num; ++k) {
          output[k] = math::Active<Act>(input[k] + bias);
        }
      }
    }
  }
};

// Explicit instantiation of the float CPU kernel.
template class ElementwiseAddKernel<CPU, float>;

}  // namespace operators
}  // namespace paddle_mobile

#endif
test.c
#include <stdio.h> #include <omp.h> #pragma omp requires unified_shared_memory #include "../utilities/check.h" #include "../utilities/utilities.h" #define TRIALS (1) #define N (992) #define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;}) #define ZERO(X) ZERO_ARRAY(N, X) // // TODO: Add the following runtime calls. // omp_set_num_threads // // All Lock Routines. // int main(void) { // CHECK: Able to use offloading! check_offloading(); int fail; double A[N], B[N], C[N], D[N], E[N]; INIT(); // // Test: omp_get_num_threads() // ZERO(A); TEST({ _Pragma("omp parallel if (0)") A[0] = omp_get_num_threads(); // 1 _Pragma("omp parallel num_threads(128)") { if (omp_get_thread_num() == 3) { A[0] += omp_get_num_threads(); // 128 } } }, VERIFY(0, 1, A[i], 129)); // // Test: omp_get_max_threads() (depends on device type) // ZERO(A); TEST({ _Pragma("omp parallel if(0)") A[0] = omp_get_max_threads(); _Pragma("omp parallel") { if (omp_get_thread_num() == 0) { A[0] += omp_get_max_threads(); // 1 A[1] = omp_get_num_threads(); } } }, if (!omp_is_initial_device()) VERIFY(0, 1, A[i], A[1] + 1)); // // Test: omp_get_num_procs() // ZERO(A); TEST({ _Pragma("omp parallel if(0)") A[0] = omp_get_num_procs(); _Pragma("omp parallel") { if (omp_get_thread_num() == 18) { A[0] += omp_get_num_procs(); A[1] = 2*omp_get_num_threads(); } } }, VERIFY(0, 1, A[i], A[1])); // // Test: omp_in_parallel() // ZERO(A); TEST({ _Pragma("omp distribute dist_schedule(static,1)") for (int i = 0; i < 1; ++i) { _Pragma("atomic write") A[0] = omp_in_parallel(); // 0 } // Serialized parallel _Pragma("omp parallel num_threads(32) if (A[0] == 1)") { _Pragma("atomic update") A[0] += omp_in_parallel(); // 0 } // Parallel execution _Pragma("omp parallel num_threads(32) if (A[0] == 0)") { if (omp_get_thread_num() == 0) { _Pragma("atomic update") A[0] += omp_in_parallel(); // 1 } } }, VERIFY(0, 1, A[i], 1)); // // Test: omp_set/get_dynamic() // ZERO(A); TEST({ _Pragma("omp distribute dist_schedule(static,1)") for (int i 
= 0; i < 1; ++i) { A[0] = omp_get_dynamic(); // 0 omp_set_dynamic(1); A[0] += omp_get_dynamic(); // 1 } _Pragma("omp parallel num_threads(19)") { if (omp_get_thread_num() == 0) { A[0] += omp_get_dynamic(); // 1 omp_set_dynamic(0); // Only for this parallel region. } } _Pragma("omp distribute dist_schedule(static,1)") for (int i = 0; i < 1; ++i) A[0] += omp_get_dynamic(); // 1 }, VERIFY(0, 1, A[i], 3)); // // Test: omp_get_cancellation() // FIXME: Rewrite test case once we have cancellation support. // ZERO(A); TEST({ _Pragma("omp parallel if(0)") A[0] = omp_get_cancellation(); // 0 _Pragma("omp parallel num_threads(19)") { if (omp_get_thread_num() == 0) { A[0] += omp_get_cancellation(); // 0 } } }, VERIFY(0, 1, A[i], 0)); // // Test: omp_set/get_nested(). Not used on the device currently. // ZERO(A); TEST({ _Pragma("omp parallel if(0)") { A[0] = omp_get_nested(); // 0 omp_set_nested(0); A[0] += omp_get_nested(); // 0 } _Pragma("omp parallel num_threads(19)") { if (omp_get_thread_num() == 18) { A[0] += omp_get_nested(); // 0 omp_set_nested(0); } } _Pragma("omp parallel if(0)") A[0] += omp_get_nested(); // 0 }, VERIFY(0, 1, A[i], 0)); // // Test: omp_set/get_schedule(). 
// ZERO(A); int result = 2 * (omp_sched_static + omp_sched_dynamic + omp_sched_guided) + omp_sched_static; result += 2 * (1110) + 10; TEST({ omp_sched_t t; int chunk_size; _Pragma("omp distribute dist_schedule(static,1)") for (int i = 0; i < 1; ++i) { t = omp_sched_static; chunk_size = 10; omp_set_schedule(t, chunk_size); t = omp_sched_auto; chunk_size = 0; omp_get_schedule(&t, &chunk_size); A[0] = t + chunk_size; t = omp_sched_dynamic; chunk_size = 100; omp_set_schedule(t, chunk_size); t = omp_sched_auto; chunk_size = 0; omp_get_schedule(&t, &chunk_size); A[0] += t + chunk_size; t = omp_sched_guided; chunk_size = 1000; omp_set_schedule(t, chunk_size); t = omp_sched_auto; chunk_size = 0; omp_get_schedule(&t, &chunk_size); A[0] += t + chunk_size; t = omp_sched_static; chunk_size = 10; omp_set_schedule(t, chunk_size); } _Pragma("omp parallel num_threads(19)") { if (omp_get_thread_num() == 0) { omp_sched_t t; int chunk_size; t = omp_sched_static; chunk_size = 10; omp_set_schedule(t, chunk_size); t = omp_sched_auto; chunk_size = 0; omp_get_schedule(&t, &chunk_size); A[0] += t + chunk_size; t = omp_sched_dynamic; chunk_size = 100; omp_set_schedule(t, chunk_size); t = omp_sched_auto; chunk_size = 0; omp_get_schedule(&t, &chunk_size); A[0] += t + chunk_size; t = omp_sched_guided; chunk_size = 1000; omp_set_schedule(t, chunk_size); t = omp_sched_auto; chunk_size = 0; omp_get_schedule(&t, &chunk_size); A[0] += t + chunk_size; } } t = omp_sched_auto; chunk_size = 0; _Pragma("omp parallel if(0)") omp_get_schedule(&t, &chunk_size); // should read 1, 10; A[0] += t + chunk_size; }, VERIFY(0, 1, A[i], result)); // // Test: omp_get_thread_limit() // ZERO(A); TEST({ _Pragma("omp distribute dist_schedule(static,1)") for (int i = 0; i < 1; ++i) A[0] = omp_get_thread_limit(); _Pragma("omp parallel") { if (omp_get_thread_num() == 0) { A[0] += omp_get_thread_limit(); A[1] = 2*omp_get_num_threads(); } } }, VERIFY(0, 1, A[i], A[1])); // // Test: omp_set/get_max_active_levels() // ZERO(A); 
TEST({ // Our runtime ignores this. _Pragma("omp parallel if(0)") { omp_set_max_active_levels(1); A[0] = omp_get_max_active_levels(); // 1 } _Pragma("omp parallel num_threads(19)") { if (omp_get_thread_num() == 0) { A[0] += omp_get_max_active_levels(); // 1 } } }, VERIFY(0, 1, A[i], 2)); // // Test: omp_get_level() // ZERO(A); TEST({ _Pragma("omp distribute dist_schedule(static,1)") for (int i = 0; i < 1; ++i) A[0] = omp_get_level(); // 0 _Pragma("omp parallel num_threads(19)") { if (omp_get_thread_num() == 0) { A[0] += omp_get_level(); // 1 } } }, VERIFY(0, 1, A[i], 1)); // // Test: omp_get_ancestor_thread_num() // ZERO(A); TEST({ _Pragma("omp parallel if(0)") A[0] = omp_get_ancestor_thread_num(0); // 0 _Pragma("omp parallel num_threads(19)") { if (omp_get_thread_num() == 0) { A[0] += omp_get_ancestor_thread_num(0) + omp_get_ancestor_thread_num(1); // 0 + 18 } } }, VERIFY(0, 1, A[i], 0)); // // Test: omp_get_team_size() // ZERO(A); TEST({ _Pragma("omp parallel if(0)") A[0] = omp_get_team_size(0) + omp_get_team_size(1); // 1 + 1 _Pragma("omp parallel num_threads(19)") { if (omp_get_thread_num() == 0) { A[0] += omp_get_team_size(0) + omp_get_team_size(1); // 1 + 19 } } }, if (!omp_is_initial_device()) VERIFY(0, 1, A[i], 22)); // TODO: fix host execution // // Test: omp_get_active_level() // ZERO(A); TEST({ _Pragma("omp parallel if(0)") A[0] = omp_get_active_level(); // 0 _Pragma("omp parallel num_threads(19)") { if (omp_get_thread_num() == 0) { if (omp_get_num_threads() == 1) A[0] += 1; else A[0] += omp_get_active_level(); // 1 } } }, VERIFY(0, 1, A[i], 1)); // // Test: omp_in_final() // ZERO(A); TEST({ _Pragma("omp parallel if(0)") A[0] = omp_in_final(); // 1 always returns true. _Pragma("omp parallel num_threads(19)") { if (omp_get_thread_num() == 0) { A[0] += omp_in_final(); // 1 always returns true. } } }, VERIFY(0, 1, A[i], omp_is_initial_device() ? 
0 : 2)); // // Test: omp_get_proc_bind() // ZERO(A); TEST({ _Pragma("omp parallel if(0)") A[0] = omp_get_proc_bind(); // 1 always returns omp_proc_bind_true. _Pragma("omp parallel num_threads(19)") { if (omp_get_thread_num() == 0) { A[0] += omp_get_proc_bind(); // 1 always returns omp_proc_bind_true. } } }, VERIFY(0, 1, A[i], omp_is_initial_device() ? 0 : 2)); #if 0 // // Test: Place routines (linking only). // ZERO(A); TEST({ _Pragma("omp parallel if(0)") { (void) omp_get_num_places(); (void) omp_get_place_num_procs(0); int *ids; omp_get_place_proc_ids(0, ids); (void) omp_get_place_num(); (void) omp_get_partition_num_places(); int *place_nums; omp_get_partition_place_nums(place_nums); } }, VERIFY(0, 1, A[i], 0)); #endif // // Test: omp_set/get_default_device() // ZERO(A); TEST({ _Pragma("omp parallel if(0)") { omp_set_default_device(0); // Not used on device. A[0] = omp_get_default_device(); // 0 always returns 0. } _Pragma("omp parallel num_threads(19)") { if (omp_get_thread_num() == 0) { A[0] += omp_get_default_device(); // 0 always returns 0. } } }, VERIFY(0, 1, A[i], 0)); // // Test: omp_get_num_devices(). Undefined on the target. // ZERO(A); TEST({ _Pragma("omp parallel if(0)") A[0] = omp_get_num_devices(); _Pragma("omp parallel num_threads(19)") { if (omp_get_thread_num() == 0) { A[1] = omp_get_num_devices(); } } }, VERIFY(0, 1, A[i], A[i] - A[1])); // // Test: omp_get_num_teams(), omp_get_team_num() // FIXME: Start teams region when supported. 
// ZERO(A); TEST({ A[0] = omp_get_num_teams(); // 1 A[0] += omp_get_team_num(); // 0 _Pragma("omp parallel num_threads(19)") { if (omp_get_thread_num() == 0) { A[0] += omp_get_num_teams(); // 1 A[0] += omp_get_team_num(); // 0 } } }, VERIFY(0, 1, A[i], 2)); // // Test: omp_is_initial_device() // ZERO(A); A[1] = omp_is_initial_device(); TEST({ _Pragma("omp parallel if(0)") A[0] = omp_is_initial_device(); // 0 _Pragma("omp parallel num_threads(19)") { if (omp_get_thread_num() == 0) { A[0] += omp_is_initial_device(); // 0 } } }, VERIFY(0, 1, A[i], omp_is_initial_device() ? A[1] - A[1] : 2.0)); return 0; #if 0 // // Test: omp_get_initial_device(). Unspecified behavior when // called from device. // ZERO(A); TEST({ _Pragma("omp parallel if(0)") A[0] = omp_get_initial_device(); _Pragma("omp parallel num_threads(19)") { if (omp_get_thread_num() == 18) { A[0] -= omp_get_initial_device(); } } }, VERIFY(0, 1, A[i], 0)); #endif #if 0 // // Test: omp_get_max_task_priority(). // TODO: Not used on the gpu at the moment. // ZERO(A); TEST({ _Pragma("omp parallel if(0)") A[0] = omp_get_max_task_priority(); _Pragma("omp parallel num_threads(19)") { if (omp_get_thread_num() == 18) { A[0] -= omp_get_max_task_priority(); } } }, VERIFY(0, 1, A[i], 0)); #endif // // Test: Timing Routines (linking only). // ZERO(A); TEST({ double precision; _Pragma("omp parallel if(0)") precision = omp_get_wtick(); double start; double end; _Pragma("omp parallel if(0)") { start = omp_get_wtime(); end = omp_get_wtime(); } }, VERIFY(0, 1, A[i], 0)); return 0; }
/* implicit_blender.c */
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) Blender Foundation
 * All rights reserved.
 */

/** \file
 * \ingroup bph
 *
 * Blender's built-in implicit cloth solver: a sparse block (3x3) matrix
 * library plus a filtered conjugate-gradient solver (Baraff/Witkin style).
 */

#include "implicit.h"

#ifdef IMPLICIT_SOLVER_BLENDER

#  include "MEM_guardedalloc.h"

#  include "DNA_scene_types.h"
#  include "DNA_object_types.h"
#  include "DNA_object_force_types.h"
#  include "DNA_meshdata_types.h"
#  include "DNA_texture_types.h"

#  include "BLI_math.h"
#  include "BLI_utildefines.h"

#  include "BKE_cloth.h"
#  include "BKE_collision.h"
#  include "BKE_effect.h"

#  include "BPH_mass_spring.h"

#  ifdef __GNUC__
#    pragma GCC diagnostic ignored "-Wtype-limits"
#  endif

#  ifdef _OPENMP
/* Below this vertex count the OpenMP fork/join overhead outweighs the gain. */
#    define CLOTH_OPENMP_LIMIT 512
#  endif

//#define DEBUG_TIME

#  ifdef DEBUG_TIME
#    include "PIL_time.h"
#  endif

/* Constant 3x3 identity and zero matrices, used as initializers throughout. */
static float I[3][3] = {{1, 0, 0}, {0, 1, 0}, {0, 0, 1}};
static float ZERO[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}};

#  if 0
#    define C99
#    ifdef C99
/* BUGFIX(review): was `# defineDO_INLINE inline` (missing space after `define`)
 * — an invalid directive that would fail to preprocess if this block were
 * ever enabled. */
#      define DO_INLINE inline
#    else
#      define DO_INLINE static
#    endif
#  endif /* if 0 */

struct Cloth;

//////////////////////////////////////////
/* fast vector / matrix library, enhancements are welcome :) -dg */
/////////////////////////////////////////

/* DEFINITIONS */
/* A "long vector": one float[3] per cloth vertex. */
typedef float lfVector[3];
typedef struct fmatrix3x3 {
  float m[3][3];     /* 3x3 matrix */
  unsigned int c, r; /* column and row number */
  /* int pinned; //
is this vertex allowed to move? */
  float n1, n2, n3;    /* three normal vectors for collision constrains */
  unsigned int vcount; /* vertex count */
  unsigned int scount; /* spring count */
} fmatrix3x3;

///////////////////////////
// float[3] vector
///////////////////////////
/* simple vector code */
/* STATUS: verified */

/* to = from * scalar (component-wise). `to` may alias `from`. */
DO_INLINE void mul_fvector_S(float to[3], float from[3], float scalar)
{
  to[0] = from[0] * scalar;
  to[1] = from[1] * scalar;
  to[2] = from[2] * scalar;
}

/* simple v^T * v product ("outer product") */
/* STATUS: HAS TO BE verified (*should* work) */
/* to[i][j] = vectorA[i] * vectorB[j] */
DO_INLINE void mul_fvectorT_fvector(float to[3][3], float vectorA[3], float vectorB[3])
{
  mul_fvector_S(to[0], vectorB, vectorA[0]);
  mul_fvector_S(to[1], vectorB, vectorA[1]);
  mul_fvector_S(to[2], vectorB, vectorA[2]);
}

/* simple v^T * v product with scalar ("outer product") */
/* STATUS: HAS TO BE verified (*should* work) */
/* to = (vectorA^T * vectorB) scaled by aS */
DO_INLINE void mul_fvectorT_fvectorS(float to[3][3], float vectorA[3], float vectorB[3], float aS)
{
  mul_fvectorT_fvector(to, vectorA, vectorB);

  mul_fvector_S(to[0], to[0], aS);
  mul_fvector_S(to[1], to[1], aS);
  mul_fvector_S(to[2], to[2], aS);
}

# if 0
/* printf vector[3] on console: for debug output */
static void print_fvector(float m3[3])
{
  printf("%f\n%f\n%f\n\n", m3[0], m3[1], m3[2]);
}

///////////////////////////
// long float vector float (*)[3]
///////////////////////////
/* print long vector on console: for debug output */
DO_INLINE void print_lfvector(float (*fLongVector)[3], unsigned int verts)
{
  unsigned int i = 0;
  for (i = 0; i < verts; i++) {
    print_fvector(fLongVector[i]);
  }
}
# endif

/* create long vector */
/* Zero-initialized; caller owns the memory and must release it with del_lfvector(). */
DO_INLINE lfVector *create_lfvector(unsigned int verts)
{
  /* TODO: check if memory allocation was successful */
  return (lfVector *)MEM_callocN(verts * sizeof(lfVector), "cloth_implicit_alloc_vector");
  // return (lfVector *)cloth_aligned_malloc(&MEMORY_BASE, verts * sizeof(lfVector));
}
/* delete long vector */
DO_INLINE void del_lfvector(float (*fLongVector)[3])
{
  if (fLongVector != NULL) {
    MEM_freeN(fLongVector);
    // cloth_aligned_free(&MEMORY_BASE, fLongVector);
  }
}
/* copy long vector */
DO_INLINE void cp_lfvector(float (*to)[3], float (*from)[3], unsigned int verts)
{
  memcpy(to, from, verts * sizeof(lfVector));
}
/* init long vector with float[3] */
/* Broadcast a single float[3] into every entry of the long vector. */
DO_INLINE void init_lfvector(float (*fLongVector)[3], float vector[3], unsigned int verts)
{
  unsigned int i = 0;
  for (i = 0; i < verts; i++) {
    copy_v3_v3(fLongVector[i], vector);
  }
}
/* zero long vector with float[3] */
/* NOTE(review): second memset argument is written as 0.0f; it is implicitly
 * converted to the int 0, which is what memset expects — harmless but odd. */
DO_INLINE void zero_lfvector(float (*to)[3], unsigned int verts)
{
  memset(to, 0.0f, verts * sizeof(lfVector));
}
/* multiply long vector with scalar*/
DO_INLINE void mul_lfvectorS(float (*to)[3], float (*fLongVector)[3], float scalar, unsigned int verts)
{
  unsigned int i = 0;

  for (i = 0; i < verts; i++) {
    mul_fvector_S(to[i], fLongVector[i], scalar);
  }
}
/* multiply long vector with scalar*/
/* A -= B * float */
DO_INLINE void submul_lfvectorS(float (*to)[3], float (*fLongVector)[3], float scalar, unsigned int verts)
{
  unsigned int i = 0;
  for (i = 0; i < verts; i++) {
    VECSUBMUL(to[i], fLongVector[i], scalar);
  }
}
/* dot product for big vector */
/* Returns sum over all vertices of dot(A[i], B[i]). Kept serial on purpose: */
DO_INLINE float dot_lfvector(float (*fLongVectorA)[3], float (*fLongVectorB)[3], unsigned int verts)
{
  long i = 0;
  float temp = 0.0;
  // XXX brecht, disabled this for now (first schedule line was already disabled),
  // due to non-commutative nature of floating point ops this makes the sim give
  // different results each time you run it!
// schedule(guided, 2)
//#pragma omp parallel for reduction(+: temp) if (verts > CLOTH_OPENMP_LIMIT)
  for (i = 0; i < (long)verts; i++) {
    temp += dot_v3v3(fLongVectorA[i], fLongVectorB[i]);
  }
  return temp;
}
/* A = B + C  --> for big vector */
DO_INLINE void add_lfvector_lfvector(float (*to)[3],
                                     float (*fLongVectorA)[3],
                                     float (*fLongVectorB)[3],
                                     unsigned int verts)
{
  unsigned int i = 0;

  for (i = 0; i < verts; i++) {
    add_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]);
  }
}
/* A = B + C * float --> for big vector */
DO_INLINE void add_lfvector_lfvectorS(float (*to)[3],
                                      float (*fLongVectorA)[3],
                                      float (*fLongVectorB)[3],
                                      float bS,
                                      unsigned int verts)
{
  unsigned int i = 0;

  for (i = 0; i < verts; i++) {
    VECADDS(to[i], fLongVectorA[i], fLongVectorB[i], bS);
  }
}
/* A = B * float + C * float --> for big vector */
DO_INLINE void add_lfvectorS_lfvectorS(float (*to)[3],
                                       float (*fLongVectorA)[3],
                                       float aS,
                                       float (*fLongVectorB)[3],
                                       float bS,
                                       unsigned int verts)
{
  unsigned int i = 0;

  for (i = 0; i < verts; i++) {
    VECADDSS(to[i], fLongVectorA[i], aS, fLongVectorB[i], bS);
  }
}
/* A = B - C * float --> for big vector */
DO_INLINE void sub_lfvector_lfvectorS(float (*to)[3],
                                      float (*fLongVectorA)[3],
                                      float (*fLongVectorB)[3],
                                      float bS,
                                      unsigned int verts)
{
  unsigned int i = 0;
  for (i = 0; i < verts; i++) {
    VECSUBS(to[i], fLongVectorA[i], fLongVectorB[i], bS);
  }
}
/* A = B - C --> for big vector */
DO_INLINE void sub_lfvector_lfvector(float (*to)[3],
                                     float (*fLongVectorA)[3],
                                     float (*fLongVectorB)[3],
                                     unsigned int verts)
{
  unsigned int i = 0;

  for (i = 0; i < verts; i++) {
    sub_v3_v3v3(to[i], fLongVectorA[i], fLongVectorB[i]);
  }
}
///////////////////////////
// 3x3 matrix
///////////////////////////
# if 0
/* printf 3x3 matrix on console: for debug output */
static void print_fmatrix(float m3[3][3])
{
  printf("%f\t%f\t%f\n", m3[0][0], m3[0][1], m3[0][2]);
  printf("%f\t%f\t%f\n", m3[1][0], m3[1][1], m3[1][2]);
  printf("%f\t%f\t%f\n\n", m3[2][0], m3[2][1], m3[2][2]);
}

static void
print_sparse_matrix(fmatrix3x3 *m)
{
  if (m) {
    unsigned int i;
    for (i = 0; i < m[0].vcount + m[0].scount; i++) {
      printf("%d:\n", i);
      print_fmatrix(m[i].m);
    }
  }
}
# endif

# if 0
static void print_lvector(lfVector *v, int numverts)
{
  int i;
  for (i = 0; i < numverts; ++i) {
    if (i > 0)
      printf("\n");

    printf("%f,\n", v[i][0]);
    printf("%f,\n", v[i][1]);
    printf("%f,\n", v[i][2]);
  }
}
# endif

# if 0
/* Dump the sparse block matrix as a dense size*size table (debug only). */
static void print_bfmatrix(fmatrix3x3 *m)
{
  int tot = m[0].vcount + m[0].scount;
  int size = m[0].vcount * 3;
  float *t = MEM_callocN(sizeof(float) * size * size, "bfmatrix");
  int q, i, j;

  for (q = 0; q < tot; ++q) {
    int k = 3 * m[q].r;
    int l = 3 * m[q].c;

    for (j = 0; j < 3; ++j) {
      for (i = 0; i < 3; ++i) {
        // if (t[k + i + (l + j) * size] != 0.0f) {
        //   printf("warning: overwriting value at %d, %d\n", m[q].r, m[q].c);
        // }

        if (k == l) {
          t[k + i + (k + j) * size] += m[q].m[i][j];
        }
        else {
          /* off-diagonal block: mirror into the transposed position too */
          t[k + i + (l + j) * size] += m[q].m[i][j];
          t[l + j + (k + i) * size] += m[q].m[j][i];
        }
      }
    }
  }

  for (j = 0; j < size; ++j) {
    if (j > 0 && j % 3 == 0)
      printf("\n");

    for (i = 0; i < size; ++i) {
      if (i > 0 && i % 3 == 0)
        printf(" ");

      implicit_print_matrix_elem(t[i + j * size]);
    }
    printf("\n");
  }

  MEM_freeN(t);
}
# endif

/* copy 3x3 matrix */
DO_INLINE void cp_fmatrix(float to[3][3], float from[3][3])
{
  // memcpy(to, from, sizeof (float) * 9);
  copy_v3_v3(to[0], from[0]);
  copy_v3_v3(to[1], from[1]);
  copy_v3_v3(to[2], from[2]);
}

/* copy 3x3 matrix */
/* Sets `to` to aS * identity. */
DO_INLINE void initdiag_fmatrixS(float to[3][3], float aS)
{
  cp_fmatrix(to, ZERO);

  to[0][0] = aS;
  to[1][1] = aS;
  to[2][2] = aS;
}

# if 0
/* calculate determinant of 3x3 matrix */
DO_INLINE float det_fmatrix(float m[3][3])
{
  return m[0][0] * m[1][1] * m[2][2] + m[1][0] * m[2][1] * m[0][2] + m[0][1] * m[1][2] * m[2][0] -
         m[0][0] * m[1][2] * m[2][1] - m[0][1] * m[1][0] * m[2][2] - m[2][0] * m[1][1] * m[0][2];
}

DO_INLINE void inverse_fmatrix(float to[3][3], float from[3][3])
{
  unsigned int i, j;
  float d;

  if ((d = det_fmatrix(from)) == 0) {
    printf("can't build inverse");
    exit(0);
  }
  for (i = 0; i < 3; i++) {
    for (j = 0; j < 3; j++) {
      int i1 = (i + 1) % 3;
      int i2 = (i + 2) % 3;
      int j1 = (j + 1) % 3;
      int j2 = (j + 2) % 3;
      /** Reverse indexes i&j to take transpose. */
      to[j][i] = (from[i1][j1] * from[i2][j2] - from[i1][j2] * from[i2][j1]) / d;
      /**
       * <pre>
       * if (i == j) {
       *   to[i][j] = 1.0f / from[i][j];
       * }
       * else {
       *   to[i][j] = 0;
       * }
       * </pre>
       */
    }
  }
}
# endif

/* 3x3 matrix multiplied by a scalar */
/* STATUS: verified */
DO_INLINE void mul_fmatrix_S(float matrix[3][3], float scalar)
{
  mul_fvector_S(matrix[0], matrix[0], scalar);
  mul_fvector_S(matrix[1], matrix[1], scalar);
  mul_fvector_S(matrix[2], matrix[2], scalar);
}

/* a vector multiplied by a 3x3 matrix */
/* STATUS: verified */
/* Row-vector times matrix: to = from * matrix (i.e. matrix^T * from). */
DO_INLINE void mul_fvector_fmatrix(float *to, float *from, float matrix[3][3])
{
  to[0] = matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2];
  to[1] = matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2];
  to[2] = matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2];
}

/* 3x3 matrix multiplied by a vector */
/* STATUS: verified */
DO_INLINE void mul_fmatrix_fvector(float *to, float matrix[3][3], float from[3])
{
  to[0] = dot_v3v3(matrix[0], from);
  to[1] = dot_v3v3(matrix[1], from);
  to[2] = dot_v3v3(matrix[2], from);
}

/* 3x3 matrix addition with 3x3 matrix */
DO_INLINE void add_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3])
{
  add_v3_v3v3(to[0], matrixA[0], matrixB[0]);
  add_v3_v3v3(to[1], matrixA[1], matrixB[1]);
  add_v3_v3v3(to[2], matrixA[2], matrixB[2]);
}

/* A -= B*x + C*y (3x3 matrix sub-addition with 3x3 matrix) */
DO_INLINE void subadd_fmatrixS_fmatrixS(
    float to[3][3], float matrixA[3][3], float aS, float matrixB[3][3], float bS)
{
  VECSUBADDSS(to[0], matrixA[0], aS, matrixB[0], bS);
  VECSUBADDSS(to[1], matrixA[1], aS, matrixB[1], bS);
  VECSUBADDSS(to[2], matrixA[2], aS, matrixB[2], bS);
}
/* A = B - C (3x3 matrix subtraction with 3x3 matrix) */
DO_INLINE void sub_fmatrix_fmatrix(float to[3][3], float matrixA[3][3], float matrixB[3][3])
{
  sub_v3_v3v3(to[0], matrixA[0], matrixB[0]);
  sub_v3_v3v3(to[1], matrixA[1], matrixB[1]);
  sub_v3_v3v3(to[2], matrixA[2], matrixB[2]);
}
/////////////////////////////////////////////////////////////////
// special functions
/////////////////////////////////////////////////////////////////

/* 3x3 matrix multiplied+added by a vector */
/* STATUS: verified */
/* to += matrix * from */
DO_INLINE void muladd_fmatrix_fvector(float to[3], float matrix[3][3], float from[3])
{
  to[0] += dot_v3v3(matrix[0], from);
  to[1] += dot_v3v3(matrix[1], from);
  to[2] += dot_v3v3(matrix[2], from);
}

/* to += matrix^T * from */
DO_INLINE void muladd_fmatrixT_fvector(float to[3], float matrix[3][3], float from[3])
{
  to[0] += matrix[0][0] * from[0] + matrix[1][0] * from[1] + matrix[2][0] * from[2];
  to[1] += matrix[0][1] * from[0] + matrix[1][1] * from[1] + matrix[2][1] * from[2];
  to[2] += matrix[0][2] * from[0] + matrix[1][2] * from[1] + matrix[2][2] * from[2];
}

BLI_INLINE void outerproduct(float r[3][3], const float a[3], const float b[3])
{
  mul_v3_v3fl(r[0], a, b[0]);
  mul_v3_v3fl(r[1], a, b[1]);
  mul_v3_v3fl(r[2], a, b[2]);
}

BLI_INLINE void cross_m3_v3m3(float r[3][3], const float v[3], float m[3][3])
{
  cross_v3_v3v3(r[0], v, m[0]);
  cross_v3_v3v3(r[1], v, m[1]);
  cross_v3_v3v3(r[2], v, m[2]);
}

/* Skew-symmetric cross-product matrix of v (so that r * x == v x x). */
BLI_INLINE void cross_v3_identity(float r[3][3], const float v[3])
{
  r[0][0] = 0.0f;
  r[1][0] = v[2];
  r[2][0] = -v[1];
  r[0][1] = -v[2];
  r[1][1] = 0.0f;
  r[2][1] = v[0];
  r[0][2] = v[1];
  r[1][2] = -v[0];
  r[2][2] = 0.0f;
}

/* r += m * f */
BLI_INLINE void madd_m3_m3fl(float r[3][3], float m[3][3], float f)
{
  r[0][0] += m[0][0] * f;
  r[0][1] += m[0][1] * f;
  r[0][2] += m[0][2] * f;
  r[1][0] += m[1][0] * f;
  r[1][1] += m[1][1] * f;
  r[1][2] += m[1][2] * f;
  r[2][0] += m[2][0] * f;
  r[2][1] += m[2][1] * f;
  r[2][2] += m[2][2] * f;
}
/////////////////////////////////////////////////////////////////

///////////////////////////
// SPARSE SYMMETRIC big matrix with 3x3 matrix entries
///////////////////////////
/*
printf a big matrix on console: for debug output */
# if 0
static void print_bfmatrix(fmatrix3x3 *m3)
{
  unsigned int i = 0;

  for (i = 0; i < m3[0].vcount + m3[0].scount; i++) {
    print_fmatrix(m3[i].m);
  }
}
# endif

BLI_INLINE void init_fmatrix(fmatrix3x3 *matrix, int r, int c)
{
  matrix->r = r;
  matrix->c = c;
}

/* create big matrix */
/* Layout: entries [0, verts) are the diagonal blocks (one per vertex);
 * entries [verts, verts+springs) are off-diagonal spring blocks.
 * Entry 0 additionally stores vcount/scount for the whole matrix. */
DO_INLINE fmatrix3x3 *create_bfmatrix(unsigned int verts, unsigned int springs)
{
  // TODO: check if memory allocation was successful */
  fmatrix3x3 *temp = (fmatrix3x3 *)MEM_callocN(sizeof(fmatrix3x3) * (verts + springs),
                                               "cloth_implicit_alloc_matrix");
  int i;

  temp[0].vcount = verts;
  temp[0].scount = springs;

  /* vertex part of the matrix is diagonal blocks */
  for (i = 0; i < verts; ++i) {
    init_fmatrix(temp + i, i, i);
  }

  return temp;
}
/* delete big matrix */
DO_INLINE void del_bfmatrix(fmatrix3x3 *matrix)
{
  if (matrix != NULL) {
    MEM_freeN(matrix);
  }
}

/* copy big matrix */
DO_INLINE void cp_bfmatrix(fmatrix3x3 *to, fmatrix3x3 *from)
{
  // TODO bounds checking
  memcpy(to, from, sizeof(fmatrix3x3) * (from[0].vcount + from[0].scount));
}

/* init big matrix */
// slow in parallel
DO_INLINE void init_bfmatrix(fmatrix3x3 *matrix, float m3[3][3])
{
  unsigned int i;

  for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) {
    cp_fmatrix(matrix[i].m, m3);
  }
}

/* init the diagonal of big matrix */
// slow in parallel
/* Diagonal blocks get m3; spring (off-diagonal) blocks are zeroed. */
DO_INLINE void initdiag_bfmatrix(fmatrix3x3 *matrix, float m3[3][3])
{
  unsigned int i, j;
  float tmatrix[3][3] = {{0, 0, 0}, {0, 0, 0}, {0, 0, 0}};

  for (i = 0; i < matrix[0].vcount; i++) {
    cp_fmatrix(matrix[i].m, m3);
  }
  for (j = matrix[0].vcount; j < matrix[0].vcount + matrix[0].scount; j++) {
    cp_fmatrix(matrix[j].m, tmatrix);
  }
}

/* SPARSE SYMMETRIC multiply big matrix with long vector*/
/* STATUS: verified */
/* to = from * fLongVector. Only the lower triangle is stored; the two OpenMP
 * sections accumulate the transposed (upper) and stored (lower+diagonal)
 * contributions into disjoint buffers (`to` and `temp`), summed at the end. */
DO_INLINE void mul_bfmatrix_lfvector(float (*to)[3], fmatrix3x3 *from, lfVector *fLongVector)
{
  unsigned int vcount = from[0].vcount;
  lfVector *temp = create_lfvector(vcount);

  zero_lfvector(to, vcount);

# pragma omp parallel sections if (vcount > CLOTH_OPENMP_LIMIT)
  {
# pragma omp section
    {
      for (unsigned int i = from[0].vcount; i < from[0].vcount + from[0].scount; i++) {
        /* This is the lower triangle of the sparse matrix,
         * therefore multiplication occurs with transposed submatrices. */
        muladd_fmatrixT_fvector(to[from[i].c], from[i].m, fLongVector[from[i].r]);
      }
    }
# pragma omp section
    {
      for (unsigned int i = 0; i < from[0].vcount + from[0].scount; i++) {
        muladd_fmatrix_fvector(temp[from[i].r], from[i].m, fLongVector[from[i].c]);
      }
    }
  }
  add_lfvector_lfvector(to, to, temp, from[0].vcount);

  del_lfvector(temp);
}

/* SPARSE SYMMETRIC sub big matrix with big matrix*/
/* A -= B * float + C * float --> for big matrix */
/* VERIFIED */
DO_INLINE void subadd_bfmatrixS_bfmatrixS(
    fmatrix3x3 *to, fmatrix3x3 *from, float aS, fmatrix3x3 *matrix, float bS)
{
  unsigned int i = 0;

  /* process diagonal elements */
  for (i = 0; i < matrix[0].vcount + matrix[0].scount; i++) {
    subadd_fmatrixS_fmatrixS(to[i].m, from[i].m, aS, matrix[i].m, bS);
  }
}

///////////////////////////////////////////////////////////////////
// simulator start
///////////////////////////////////////////////////////////////////

typedef struct Implicit_Data {
  /* inputs */
  fmatrix3x3 *bigI;        /* identity (constant) */
  fmatrix3x3 *tfm;         /* local coordinate transform */
  fmatrix3x3 *M;           /* masses */
  lfVector *F;             /* forces */
  fmatrix3x3 *dFdV, *dFdX; /* force jacobians */
  int num_blocks;          /* number of off-diagonal blocks (springs) */

  /* motion state data */
  lfVector *X, *Xnew; /* positions */
  lfVector *V, *Vnew; /* velocities */

  /* internal solver data */
  lfVector *B;          /* B for A*dV = B */
  fmatrix3x3 *A;        /* A for A*dV = B */
  lfVector *dV;         /* velocity change (solution of A*dV = B) */
  lfVector *z;          /* target velocity in constrained directions */
  fmatrix3x3 *S;        /* filtering matrix for constraints */
  fmatrix3x3 *P, *Pinv; /* pre-conditioning matrix */
} Implicit_Data;

/* Allocate all solver buffers; pair with BPH_mass_spring_solver_free(). */
Implicit_Data *BPH_mass_spring_solver_create(int numverts, int numsprings)
{
  Implicit_Data *id = (Implicit_Data *)MEM_callocN(sizeof(Implicit_Data), "implicit vecmat");

  /* process diagonal elements */
  id->tfm = create_bfmatrix(numverts, 0);
  id->A = create_bfmatrix(numverts, numsprings);
  id->dFdV = create_bfmatrix(numverts, numsprings);
  id->dFdX = create_bfmatrix(numverts, numsprings);
  id->S = create_bfmatrix(numverts, 0);
  id->Pinv = create_bfmatrix(numverts, numsprings);
  id->P = create_bfmatrix(numverts, numsprings);
  id->bigI = create_bfmatrix(numverts, numsprings);  // TODO 0 springs
  id->M = create_bfmatrix(numverts, numsprings);
  id->X = create_lfvector(numverts);
  id->Xnew = create_lfvector(numverts);
  id->V = create_lfvector(numverts);
  id->Vnew = create_lfvector(numverts);
  id->F = create_lfvector(numverts);
  id->B = create_lfvector(numverts);
  id->dV = create_lfvector(numverts);
  id->z = create_lfvector(numverts);

  initdiag_bfmatrix(id->bigI, I);

  return id;
}

void BPH_mass_spring_solver_free(Implicit_Data *id)
{
  del_bfmatrix(id->tfm);
  del_bfmatrix(id->A);
  del_bfmatrix(id->dFdV);
  del_bfmatrix(id->dFdX);
  del_bfmatrix(id->S);
  del_bfmatrix(id->P);
  del_bfmatrix(id->Pinv);
  del_bfmatrix(id->bigI);
  del_bfmatrix(id->M);

  del_lfvector(id->X);
  del_lfvector(id->Xnew);
  del_lfvector(id->V);
  del_lfvector(id->Vnew);
  del_lfvector(id->F);
  del_lfvector(id->B);
  del_lfvector(id->dV);
  del_lfvector(id->z);

  MEM_freeN(id);
}

/* ==== Transformation from/to root reference frames ==== */

BLI_INLINE void world_to_root_v3(Implicit_Data *data, int index, float r[3], const float v[3])
{
  copy_v3_v3(r, v);
  mul_transposed_m3_v3(data->tfm[index].m, r);
}

BLI_INLINE void root_to_world_v3(Implicit_Data *data, int index, float r[3], const float v[3])
{
  mul_v3_m3v3(r, data->tfm[index].m, v);
}

BLI_INLINE void world_to_root_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3])
{
  float trot[3][3];
  copy_m3_m3(trot, data->tfm[index].m);
  transpose_m3(trot);
  mul_m3_m3m3(r, trot, m);
}

BLI_INLINE void root_to_world_m3(Implicit_Data *data, int index, float r[3][3], float m[3][3])
{
  mul_m3_m3m3(r, data->tfm[index].m, m);
}

/* ================================ */

/* Project V onto the constraint null-space: V[i] = S[i] * V[i] per vertex. */
DO_INLINE void filter(lfVector *V, fmatrix3x3 *S)
{
  unsigned int i = 0;

  for (i = 0; i < S[0].vcount; i++) {
    mul_m3_v3(S[i].m, V[S[i].r]);
  }
}

/* this version of the CG algorithm does not work very well with partial constraints
 * (where S has non-zero elements). */
# if 0
static int cg_filtered(lfVector *ldV, fmatrix3x3 *lA, lfVector *lB, lfVector *z, fmatrix3x3 *S)
{
  // Solves for unknown X in equation AX=B
  unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100;
  float conjgrad_epsilon = 0.0001f /* , conjgrad_lasterror=0 */ /* UNUSED */;
  lfVector *q, *d, *tmp, *r;
  float s, starget, a, s_prev;
  unsigned int numverts = lA[0].vcount;
  q = create_lfvector(numverts);
  d = create_lfvector(numverts);
  tmp = create_lfvector(numverts);
  r = create_lfvector(numverts);

  // zero_lfvector(ldV, CLOTHPARTICLES);
  filter(ldV, S);

  add_lfvector_lfvector(ldV, ldV, z, numverts);

  // r = B - Mul(tmp, A, X);    // just use B if X known to be zero
  cp_lfvector(r, lB, numverts);
  mul_bfmatrix_lfvector(tmp, lA, ldV);
  sub_lfvector_lfvector(r, r, tmp, numverts);

  filter(r, S);

  cp_lfvector(d, r, numverts);

  s = dot_lfvector(r, r, numverts);
  starget = s * sqrtf(conjgrad_epsilon);

  while (s > starget && conjgrad_loopcount < conjgrad_looplimit) {
    // Mul(q, A, d); // q = A*d;
    mul_bfmatrix_lfvector(q, lA, d);

    filter(q, S);

    a = s / dot_lfvector(d, q, numverts);

    // X = X + d*a;
    add_lfvector_lfvectorS(ldV, ldV, d, a, numverts);

    // r = r - q*a;
    sub_lfvector_lfvectorS(r, r, q, a, numverts);

    s_prev = s;
    s = dot_lfvector(r, r, numverts);

    //d = r+d*(s/s_prev);
    add_lfvector_lfvectorS(d, r, d, (s / s_prev), numverts);

    filter(d, S);

    conjgrad_loopcount++;
  }
  /* conjgrad_lasterror = s; */ /* UNUSED */

  del_lfvector(q);
  del_lfvector(d);
  del_lfvector(tmp);
  del_lfvector(r);
  // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount);

  return conjgrad_loopcount <
         conjgrad_looplimit;  // true means we reached desired accuracy in given time - ie stable
}
# endif
/* Filtered conjugate-gradient solve of A * dV = B subject to the per-vertex
 * constraint filter S, with constrained target velocities z as starting guess.
 * Fills `result` with status/iterations/relative error; returns non-zero on
 * convergence within the iteration limit. */
static int cg_filtered(lfVector *ldV,
                       fmatrix3x3 *lA,
                       lfVector *lB,
                       lfVector *z,
                       fmatrix3x3 *S,
                       ImplicitSolverResult *result)
{
  // Solves for unknown X in equation AX=B
  unsigned int conjgrad_loopcount = 0, conjgrad_looplimit = 100;
  float conjgrad_epsilon = 0.01f;

  unsigned int numverts = lA[0].vcount;

  lfVector *fB = create_lfvector(numverts);
  lfVector *AdV = create_lfvector(numverts);
  lfVector *r = create_lfvector(numverts);
  lfVector *c = create_lfvector(numverts);
  lfVector *q = create_lfvector(numverts);
  lfVector *s = create_lfvector(numverts);
  float bnorm2, delta_new, delta_old, delta_target, alpha;

  cp_lfvector(ldV, z, numverts);

  /* d0 = filter(B)^T * P * filter(B) */
  cp_lfvector(fB, lB, numverts);
  filter(fB, S);
  bnorm2 = dot_lfvector(fB, fB, numverts);
  delta_target = conjgrad_epsilon * conjgrad_epsilon * bnorm2;

  /* r = filter(B - A * dV) */
  mul_bfmatrix_lfvector(AdV, lA, ldV);
  sub_lfvector_lfvector(r, lB, AdV, numverts);
  filter(r, S);

  /* c = filter(P^-1 * r) */
  cp_lfvector(c, r, numverts);
  filter(c, S);

  /* delta = r^T * c */
  delta_new = dot_lfvector(r, c, numverts);

# ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT
  printf("==== A ====\n");
  print_bfmatrix(lA);
  printf("==== z ====\n");
  print_lvector(z, numverts);
  printf("==== B ====\n");
  print_lvector(lB, numverts);
  printf("==== S ====\n");
  print_bfmatrix(S);
# endif

  while (delta_new > delta_target && conjgrad_loopcount < conjgrad_looplimit) {
    mul_bfmatrix_lfvector(q, lA, c);
    filter(q, S);

    alpha = delta_new / dot_lfvector(c, q, numverts);

    add_lfvector_lfvectorS(ldV, ldV, c, alpha, numverts);

    add_lfvector_lfvectorS(r, r, q, -alpha, numverts);

    /* s = P^-1 * r */
    cp_lfvector(s, r, numverts);

    delta_old = delta_new;
    delta_new = dot_lfvector(r, s, numverts);

    add_lfvector_lfvectorS(c, s, c, delta_new / delta_old, numverts);
    filter(c, S);

    conjgrad_loopcount++;
  }

# ifdef IMPLICIT_PRINT_SOLVER_INPUT_OUTPUT
  printf("==== dV ====\n");
  print_lvector(ldV, numverts);
  printf("========\n");
# endif

  del_lfvector(fB);
  del_lfvector(AdV);
  del_lfvector(r);
  del_lfvector(c);
  del_lfvector(q);
  del_lfvector(s);
  // printf("W/O conjgrad_loopcount: %d\n", conjgrad_loopcount);

  result->status = conjgrad_loopcount < conjgrad_looplimit ? BPH_SOLVER_SUCCESS :
                                                             BPH_SOLVER_NO_CONVERGENCE;
  result->iterations = conjgrad_loopcount;
  result->error = bnorm2 > 0.0f ? sqrtf(delta_new / bnorm2) : 0.0f;

  return conjgrad_loopcount <
         conjgrad_looplimit;  // true means we reached desired accuracy in given time - ie stable
}

# if 0
// block diagonalizer
DO_INLINE void BuildPPinv(fmatrix3x3 *lA, fmatrix3x3 *P, fmatrix3x3 *Pinv)
{
  unsigned int i = 0;

  // Take only the diagonal blocks of A
  // #pragma omp parallel for private(i) if (lA[0].vcount > CLOTH_OPENMP_LIMIT)
  for (i = 0; i < lA[0].vcount; i++) {
    // block diagonalizer
    cp_fmatrix(P[i].m, lA[i].m);
    inverse_fmatrix(Pinv[i].m, P[i].m);
  }
}

# if 0
// version 1.3
static int cg_filtered_pre(lfVector *dv,
                           fmatrix3x3 *lA,
                           lfVector *lB,
                           lfVector *z,
                           fmatrix3x3 *S,
                           fmatrix3x3 *P,
                           fmatrix3x3 *Pinv)
{
  unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100;
  float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0;
  float conjgrad_epsilon = 0.0001;  // 0.2 is dt for steps=5
  lfVector *r = create_lfvector(numverts);
  lfVector *p = create_lfvector(numverts);
  lfVector *s = create_lfvector(numverts);
  lfVector *h = create_lfvector(numverts);

  BuildPPinv(lA, P, Pinv);

  filter(dv, S);
  add_lfvector_lfvector(dv, dv, z, numverts);

  mul_bfmatrix_lfvector(r, lA, dv);
  sub_lfvector_lfvector(r, lB, r, numverts);
  filter(r, S);

  mul_prevfmatrix_lfvector(p, Pinv, r);
  filter(p, S);

  deltaNew = dot_lfvector(r, p, numverts);

  delta0 = deltaNew * sqrt(conjgrad_epsilon);

#  ifdef DEBUG_TIME
  double start = PIL_check_seconds_timer();
#  endif

  while ((deltaNew > delta0) && (iterations < conjgrad_looplimit)) {
    iterations++;

    mul_bfmatrix_lfvector(s, lA, p);
    filter(s, S);

    alpha = deltaNew / dot_lfvector(p, s, numverts);

    add_lfvector_lfvectorS(dv, dv, p, alpha, numverts);

    add_lfvector_lfvectorS(r, r, s, -alpha, numverts);

    mul_prevfmatrix_lfvector(h, Pinv, r);
    filter(h, S);

    deltaOld = deltaNew;
    deltaNew = dot_lfvector(r, h, numverts);

    add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts);

    filter(p, S);
  }

#  ifdef DEBUG_TIME
  double end = PIL_check_seconds_timer();
  printf("cg_filtered_pre time: %f\n", (float)(end - start));
#  endif

  del_lfvector(h);
  del_lfvector(s);
  del_lfvector(p);
  del_lfvector(r);

  printf("iterations: %d\n", iterations);

  return iterations < conjgrad_looplimit;
}
# endif

// version 1.4
static int cg_filtered_pre(lfVector *dv,
                           fmatrix3x3 *lA,
                           lfVector *lB,
                           lfVector *z,
                           fmatrix3x3 *S,
                           fmatrix3x3 *P,
                           fmatrix3x3 *Pinv,
                           fmatrix3x3 *bigI)
{
  unsigned int numverts = lA[0].vcount, iterations = 0, conjgrad_looplimit = 100;
  float delta0 = 0, deltaNew = 0, deltaOld = 0, alpha = 0, tol = 0;
  lfVector *r = create_lfvector(numverts);
  lfVector *p = create_lfvector(numverts);
  lfVector *s = create_lfvector(numverts);
  lfVector *h = create_lfvector(numverts);
  lfVector *bhat = create_lfvector(numverts);
  lfVector *btemp = create_lfvector(numverts);

  BuildPPinv(lA, P, Pinv);

  initdiag_bfmatrix(bigI, I);
  sub_bfmatrix_Smatrix(bigI, bigI, S);

  // x = Sx_0+(I-S)z
  filter(dv, S);
  add_lfvector_lfvector(dv, dv, z, numverts);

  // b_hat = S(b-A(I-S)z)
  mul_bfmatrix_lfvector(r, lA, z);
  mul_bfmatrix_lfvector(bhat, bigI, r);
  sub_lfvector_lfvector(bhat, lB, bhat, numverts);

  // r = S(b-Ax)
  mul_bfmatrix_lfvector(r, lA, dv);
  sub_lfvector_lfvector(r, lB, r, numverts);
  filter(r, S);

  // p = SP^-1r
  mul_prevfmatrix_lfvector(p, Pinv, r);
  filter(p, S);

  // delta0 = bhat^TP^-1bhat
  mul_prevfmatrix_lfvector(btemp, Pinv, bhat);
  delta0 = dot_lfvector(bhat, btemp, numverts);

  // deltaNew = r^TP
  deltaNew = dot_lfvector(r, p, numverts);

#  if 0
  filter(dv, S);
  add_lfvector_lfvector(dv, dv, z, numverts);

  mul_bfmatrix_lfvector(r, lA, dv);
  sub_lfvector_lfvector(r, lB, r, numverts);
  filter(r, S);

  mul_prevfmatrix_lfvector(p, Pinv, r);
  filter(p, S);

  deltaNew = dot_lfvector(r, p, numverts);

  delta0 = deltaNew * sqrt(conjgrad_epsilon);
#  endif

#  ifdef DEBUG_TIME
  double start = PIL_check_seconds_timer();
#  endif

  tol = (0.01 * 0.2);

  while ((deltaNew > delta0 * tol * tol) && (iterations < conjgrad_looplimit)) {
    iterations++;

    mul_bfmatrix_lfvector(s, lA, p);
    filter(s, S);

    alpha = deltaNew / dot_lfvector(p, s, numverts);

    add_lfvector_lfvectorS(dv, dv, p, alpha, numverts);

    add_lfvector_lfvectorS(r, r, s, -alpha, numverts);

    mul_prevfmatrix_lfvector(h, Pinv, r);
    filter(h, S);

    deltaOld = deltaNew;
    deltaNew = dot_lfvector(r, h, numverts);

    add_lfvector_lfvectorS(p, h, p, deltaNew / deltaOld, numverts);
    filter(p, S);
  }

#  ifdef DEBUG_TIME
  double end = PIL_check_seconds_timer();
  printf("cg_filtered_pre time: %f\n", (float)(end - start));
#  endif

  del_lfvector(btemp);
  del_lfvector(bhat);
  del_lfvector(h);
  del_lfvector(s);
  del_lfvector(p);
  del_lfvector(r);

  // printf("iterations: %d\n", iterations);

  return iterations < conjgrad_looplimit;
}
# endif

/* Build A = M - dt*dFdV - dt^2*dFdX and B = dt*(F + dt*dFdX*V), then solve
 * A*dV = B with the filtered CG solver (backward Euler velocity update). */
bool BPH_mass_spring_solve_velocities(Implicit_Data *data, float dt, ImplicitSolverResult *result)
{
  unsigned int numverts = data->dFdV[0].vcount;

  lfVector *dFdXmV = create_lfvector(numverts);
  zero_lfvector(data->dV, numverts);

  cp_bfmatrix(data->A, data->M);

  subadd_bfmatrixS_bfmatrixS(data->A, data->dFdV, dt, data->dFdX, (dt * dt));

  mul_bfmatrix_lfvector(dFdXmV, data->dFdX, data->V);

  add_lfvectorS_lfvectorS(data->B, data->F, dt, dFdXmV, (dt * dt), numverts);

# ifdef DEBUG_TIME
  double start = PIL_check_seconds_timer();
# endif

  cg_filtered(data->dV,
              data->A,
              data->B,
              data->z,
              data->S,
              result); /* conjugate gradient algorithm to solve Ax=b */
  // cg_filtered_pre(id->dV, id->A, id->B, id->z, id->S, id->P, id->Pinv, id->bigI);

# ifdef DEBUG_TIME
  double end = PIL_check_seconds_timer();
  printf("cg_filtered calc time: %f\n", (float)(end - start));
# endif

  // advance velocities
  add_lfvector_lfvector(data->Vnew, data->V, data->dV, numverts);

  del_lfvector(dFdXmV);

  return result->status == BPH_SOLVER_SUCCESS;
}

bool
/* Advance positions one step: Xnew = X + Vnew * dt.
 * (The return type is declared on the preceding line, outside this chunk.) */
BPH_mass_spring_solve_positions(Implicit_Data *data, float dt)
{
  int numverts = data->M[0].vcount;

  // advance positions
  add_lfvector_lfvectorS(data->Xnew, data->X, data->Vnew, dt, numverts);

  return true;
}

/* Commit the solver result: copy Xnew/Vnew into the current state X/V. */
void BPH_mass_spring_apply_result(Implicit_Data *data)
{
  int numverts = data->M[0].vcount;
  cp_lfvector(data->X, data->Xnew, numverts);
  cp_lfvector(data->V, data->Vnew, numverts);
}

/* Set the mass matrix block of a vertex to mass * identity. */
void BPH_mass_spring_set_vertex_mass(Implicit_Data *data, int index, float mass)
{
  unit_m3(data->M[index].m);
  mul_m3_fl(data->M[index].m, mass);
}

/* Store the per-vertex root-frame transform (identity when CLOTH_ROOT_FRAME is
 * disabled, in which case tfm is ignored). */
void BPH_mass_spring_set_rest_transform(Implicit_Data *data, int index, float tfm[3][3])
{
# ifdef CLOTH_ROOT_FRAME
  copy_m3_m3(data->tfm[index].m, tfm);
# else
  unit_m3(data->tfm[index].m);
  (void)tfm;
# endif
}

/* Set position and velocity of a vertex, converting from world to root space. */
void BPH_mass_spring_set_motion_state(Implicit_Data *data,
                                      int index,
                                      const float x[3],
                                      const float v[3])
{
  world_to_root_v3(data, index, data->X[index], x);
  world_to_root_v3(data, index, data->V[index], v);
}

void BPH_mass_spring_set_position(Implicit_Data *data, int index, const float x[3])
{
  world_to_root_v3(data, index, data->X[index], x);
}

void BPH_mass_spring_set_velocity(Implicit_Data *data, int index, const float v[3])
{
  world_to_root_v3(data, index, data->V[index], v);
}

/* Read back position/velocity in world space; either output may be NULL. */
void BPH_mass_spring_get_motion_state(struct Implicit_Data *data,
                                      int index,
                                      float x[3],
                                      float v[3])
{
  if (x)
    root_to_world_v3(data, index, x, data->X[index]);
  if (v)
    root_to_world_v3(data, index, v, data->V[index]);
}

void BPH_mass_spring_get_position(struct Implicit_Data *data, int index, float x[3])
{
  root_to_world_v3(data, index, x, data->X[index]);
}

void BPH_mass_spring_get_new_position(struct Implicit_Data *data, int index, float x[3])
{
  root_to_world_v3(data, index, x, data->Xnew[index]);
}

void BPH_mass_spring_set_new_position(struct Implicit_Data *data, int index, const float x[3])
{
  world_to_root_v3(data, index, data->Xnew[index], x);
}

void BPH_mass_spring_get_new_velocity(struct Implicit_Data *data, int index, float v[3])
{
  root_to_world_v3(data,
                   index, v, data->Vnew[index]);
}

void BPH_mass_spring_set_new_velocity(struct Implicit_Data *data, int index, const float v[3])
{
  world_to_root_v3(data, index, data->Vnew[index], v);
}

/* -------------------------------- */

/* Register an off-diagonal matrix block for the vertex pair (v1, v2) in each of
 * the big matrices; returns the index of the new block. */
static int BPH_mass_spring_add_block(Implicit_Data *data, int v1, int v2)
{
  int s = data->M[0].vcount + data->num_blocks; /* index from array start */
  BLI_assert(s < data->M[0].vcount + data->M[0].scount);
  ++data->num_blocks;

  /* tfm and S don't have spring entries (diagonal blocks only) */
  init_fmatrix(data->bigI + s, v1, v2);
  init_fmatrix(data->M + s, v1, v2);
  init_fmatrix(data->dFdX + s, v1, v2);
  init_fmatrix(data->dFdV + s, v1, v2);
  init_fmatrix(data->A + s, v1, v2);
  init_fmatrix(data->P + s, v1, v2);
  init_fmatrix(data->Pinv + s, v1, v2);

  return s;
}

/* Reset all constraints: filter S becomes identity (fully free), z becomes zero. */
void BPH_mass_spring_clear_constraints(Implicit_Data *data)
{
  int i, numverts = data->S[0].vcount;
  for (i = 0; i < numverts; ++i) {
    unit_m3(data->S[i].m);
    zero_v3(data->z[i]);
  }
}

/* Pin a vertex completely (zero remaining degrees of freedom); dV is the
 * allowed velocity change, converted to root space. */
void BPH_mass_spring_add_constraint_ndof0(Implicit_Data *data, int index, const float dV[3])
{
  zero_m3(data->S[index].m);

  world_to_root_v3(data, index, data->z[index], dV);
}

/* Leave one degree of freedom: remove the two (root-space) directions c1, c2
 * from the filter matrix S via I - c*c^T projections. */
void BPH_mass_spring_add_constraint_ndof1(
    Implicit_Data *data, int index, const float c1[3], const float c2[3], const float dV[3])
{
  float m[3][3], p[3], q[3], u[3], cmat[3][3];

  world_to_root_v3(data, index, p, c1);
  mul_fvectorT_fvector(cmat, p, p);
  sub_m3_m3m3(m, I, cmat);

  world_to_root_v3(data, index, q, c2);
  mul_fvectorT_fvector(cmat, q, q);
  sub_m3_m3m3(m, m, cmat);

  /* XXX not sure but multiplication should work here */
  copy_m3_m3(data->S[index].m, m);
  // mul_m3_m3m3(data->S[index].m, data->S[index].m, m);

  world_to_root_v3(data, index, u, dV);
  add_v3_v3(data->z[index], u);
}

/* Leave two degrees of freedom: remove only the direction c1. */
void BPH_mass_spring_add_constraint_ndof2(Implicit_Data *data,
                                          int index,
                                          const float c1[3],
                                          const float dV[3])
{
  float m[3][3], p[3], u[3], cmat[3][3];

  world_to_root_v3(data, index, p, c1);
  mul_fvectorT_fvector(cmat, p, p);
  sub_m3_m3m3(m, I, cmat);
copy_m3_m3(data->S[index].m, m);
  // mul_m3_m3m3(data->S[index].m, data->S[index].m, m);

  world_to_root_v3(data, index, u, dV);
  add_v3_v3(data->z[index], u);
}

/* Reset force vector and both force jacobians before a new evaluation pass. */
void BPH_mass_spring_clear_forces(Implicit_Data *data)
{
  int numverts = data->M[0].vcount;
  zero_lfvector(data->F, numverts);
  init_bfmatrix(data->dFdX, ZERO);
  init_bfmatrix(data->dFdV, ZERO);

  data->num_blocks = 0;
}

/* Fictitious forces from a non-inertial reference frame: Euler, Coriolis and
 * centrifugal terms, plus their position/velocity jacobians. Only active when
 * CLOTH_ROOT_FRAME is defined. */
void BPH_mass_spring_force_reference_frame(Implicit_Data *data,
                                           int index,
                                           const float acceleration[3],
                                           const float omega[3],
                                           const float domega_dt[3],
                                           float mass)
{
# ifdef CLOTH_ROOT_FRAME
  float acc[3], w[3], dwdt[3];
  float f[3], dfdx[3][3], dfdv[3][3];
  float euler[3], coriolis[3], centrifugal[3], rotvel[3];
  float deuler[3][3], dcoriolis[3][3], dcentrifugal[3][3], drotvel[3][3];

  world_to_root_v3(data, index, acc, acceleration);
  world_to_root_v3(data, index, w, omega);
  world_to_root_v3(data, index, dwdt, domega_dt);

  cross_v3_v3v3(euler, dwdt, data->X[index]);
  cross_v3_v3v3(coriolis, w, data->V[index]);
  mul_v3_fl(coriolis, 2.0f);
  cross_v3_v3v3(rotvel, w, data->X[index]);
  cross_v3_v3v3(centrifugal, w, rotvel);

  sub_v3_v3v3(f, acc, euler);
  sub_v3_v3(f, coriolis);
  sub_v3_v3(f, centrifugal);

  mul_v3_fl(f, mass); /* F = m * a */

  cross_v3_identity(deuler, dwdt);
  cross_v3_identity(dcoriolis, w);
  mul_m3_fl(dcoriolis, 2.0f);
  cross_v3_identity(drotvel, w);
  cross_m3_v3m3(dcentrifugal, w, drotvel);

  add_m3_m3m3(dfdx, deuler, dcentrifugal);
  negate_m3(dfdx);
  mul_m3_fl(dfdx, mass);

  copy_m3_m3(dfdv, dcoriolis);
  negate_m3(dfdv);
  mul_m3_fl(dfdv, mass);

  add_v3_v3(data->F[index], f);
  add_m3_m3m3(data->dFdX[index].m, data->dFdX[index].m, dfdx);
  add_m3_m3m3(data->dFdV[index].m, data->dFdV[index].m, dfdv);
# else
  (void)data;
  (void)index;
  (void)acceleration;
  (void)omega;
  (void)domega_dt;
# endif
}

void BPH_mass_spring_force_gravity(Implicit_Data *data, int index, float mass, const float g[3])
{
  /* force = mass * acceleration (in this case: gravity) */
  float f[3];
  world_to_root_v3(data, index, f, g);
  mul_v3_fl(f, mass);
  add_v3_v3(data->F[index], f);
}

/* Velocity-proportional drag on every vertex: F += -drag * V, with matching
 * velocity jacobian -drag * I. */
void BPH_mass_spring_force_drag(Implicit_Data *data, float drag)
{
  int i, numverts = data->M[0].vcount;
  for (i = 0; i < numverts; i++) {
    float tmp[3][3];

    /* NB: uses root space velocity, no need to transform */
    madd_v3_v3fl(data->F[i], data->V[i], -drag);

    copy_m3_m3(tmp, I);
    mul_m3_fl(tmp, -drag);
    add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tmp);
  }
}

/* Add an external force with explicit jacobians, converting force and both
 * jacobians from world space to root space. */
void BPH_mass_spring_force_extern(
    struct Implicit_Data *data, int i, const float f[3], float dfdx[3][3], float dfdv[3][3])
{
  float tf[3], tdfdx[3][3], tdfdv[3][3];
  world_to_root_v3(data, i, tf, f);
  world_to_root_m3(data, i, tdfdx, dfdx);
  world_to_root_m3(data, i, tdfdv, dfdv);

  add_v3_v3(data->F[i], tf);
  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, tdfdx);
  add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, tdfdv);
}

/* Unit normal of triangle (v1, v2, v3); returns the cross-product length
 * (used by callers as the area measure). */
static float calc_nor_area_tri(float nor[3],
                               const float v1[3],
                               const float v2[3],
                               const float v3[3])
{
  float n1[3], n2[3];
  sub_v3_v3v3(n1, v1, v2);
  sub_v3_v3v3(n2, v2, v3);

  cross_v3_v3v3(nor, n1, n2);
  return normalize_v3(nor);
}

/* XXX does not support force jacobians yet,
 * since the effector system does not provide them either. */
void BPH_mass_spring_force_face_wind(
    Implicit_Data *data, int v1, int v2, int v3, const float (*winvec)[3])
{
  const float effector_scale = 0.02f;
  float win[3], nor[3], area;
  float factor;

  /* calculate face normal and area */
  area = calc_nor_area_tri(nor, data->X[v1], data->X[v2], data->X[v3]);
  /* one third of the scaled area per corner vertex */
  factor = effector_scale * area / 3.0f;

  world_to_root_v3(data, v1, win, winvec[v1]);
  madd_v3_v3fl(data->F[v1], nor, factor * dot_v3v3(win, nor));

  world_to_root_v3(data, v2, win, winvec[v2]);
  madd_v3_v3fl(data->F[v2], nor, factor * dot_v3v3(win, nor));

  world_to_root_v3(data, v3, win, winvec[v3]);
  madd_v3_v3fl(data->F[v3], nor, factor * dot_v3v3(win, nor));
}

/* Wind force on one end of a cylindrical (hair) segment; dfdx/dfdv outputs are
 * currently unused. */
static void edge_wind_vertex(const float dir[3],
                             float length,
                             float radius,
                             const float wind[3],
                             float f[3],
                             float UNUSED(dfdx[3][3]),
                             float UNUSED(dfdv[3][3]))
{
  const float density = 0.01f; /* XXX
arbitrary value, corresponds to effect of air density */
  float cos_alpha, sin_alpha, cross_section;
  float windlen = len_v3(wind);

  if (windlen == 0.0f) {
    zero_v3(f);
    return;
  }

  /* angle of wind direction to edge */
  cos_alpha = dot_v3v3(wind, dir) / windlen;
  sin_alpha = sqrtf(1.0f - cos_alpha * cos_alpha);
  /* projected cross section of the cylinder against the wind */
  cross_section = radius * ((float)M_PI * radius * sin_alpha + length * cos_alpha);

  mul_v3_v3fl(f, wind, density * cross_section);
}

/* Wind force on an edge (hair segment), applied to both end points. */
void BPH_mass_spring_force_edge_wind(
    Implicit_Data *data, int v1, int v2, float radius1, float radius2, const float (*winvec)[3])
{
  float win[3], dir[3], length;
  float f[3], dfdx[3][3], dfdv[3][3];

  sub_v3_v3v3(dir, data->X[v1], data->X[v2]);
  length = normalize_v3(dir);

  world_to_root_v3(data, v1, win, winvec[v1]);
  edge_wind_vertex(dir, length, radius1, win, f, dfdx, dfdv);
  add_v3_v3(data->F[v1], f);

  world_to_root_v3(data, v2, win, winvec[v2]);
  edge_wind_vertex(dir, length, radius2, win, f, dfdx, dfdv);
  add_v3_v3(data->F[v2], f);
}

/* Direct wind force on a single vertex; radius is currently unused. */
void BPH_mass_spring_force_vertex_wind(Implicit_Data *data,
                                       int v,
                                       float UNUSED(radius),
                                       const float (*winvec)[3])
{
  const float density = 0.01f; /* XXX arbitrary value, corresponds to effect of air density */

  float wind[3];
  float f[3];

  world_to_root_v3(data, v, wind, winvec[v]);
  mul_v3_v3fl(f, wind, density);
  add_v3_v3(data->F[v], f);
}

/* Position jacobian of a linear spring force. */
BLI_INLINE void dfdx_spring(float to[3][3], const float dir[3], float length, float L, float k)
{
  // dir is unit length direction, rest is spring's restlength, k is spring constant.
  // return ( (I-outerprod(dir, dir))*Min(1.0f, rest/length) - I) * -k;
  outerproduct(to, dir, dir);
  sub_m3_m3m3(to, I, to);

  mul_m3_fl(to, (L / length));
  sub_m3_m3m3(to, to, I);
  mul_m3_fl(to, k);
}

/* unused */
# if 0
BLI_INLINE void dfdx_damp(float to[3][3],
                          const float dir[3],
                          float length,
                          const float vel[3],
                          float rest,
                          float damping)
{
  // inner spring damping vel is the relative velocity of the endpoints.
  // return (I-outerprod(dir, dir)) * (-damping * -(dot(dir, vel)/Max(length, rest)));
  mul_fvectorT_fvector(to, dir, dir);
  sub_fmatrix_fmatrix(to, I, to);
  mul_fmatrix_S(to, (-damping * -(dot_v3v3(dir, vel) / MAX2(length, rest))));
}
# endif

/* Velocity jacobian of the damping force along the spring direction. */
BLI_INLINE void dfdv_damp(float to[3][3], const float dir[3], float damping)
{
  // derivative of force wrt velocity
  outerproduct(to, dir, dir);
  mul_m3_fl(to, -damping);
}

/* Bending force polynomial from Choi & Ko, evaluated at x = length / L. */
BLI_INLINE float fb(float length, float L)
{
  float x = length / L;
  float xx = x * x;
  float xxx = xx * x;
  float xxxx = xxx * x;
  return (-11.541f * xxxx + 34.193f * xxx - 39.083f * xx + 23.116f * x - 9.713f);
}

/* Derivative of fb() with respect to x = length / L. */
BLI_INLINE float fbderiv(float length, float L)
{
  float x = length / L;
  float xx = x * x;
  float xxx = xx * x;
  return (-46.164f * xxx + 102.579f * xx - 78.166f * x + 23.116f);
}

/* Clamped bending force: maximum of the polynomial term and a linear term. */
BLI_INLINE float fbstar(float length, float L, float kb, float cb)
{
  float tempfb_fl = kb * fb(length, L);
  float fbstar_fl = cb * (length - L);

  if (tempfb_fl < fbstar_fl)
    return fbstar_fl;
  else
    return tempfb_fl;
}

// function to calculate bending spring force (taken from Choi & Co)
BLI_INLINE float fbstar_jacobi(float length, float L, float kb, float cb)
{
  float tempfb_fl = kb * fb(length, L);
  float fbstar_fl = cb * (length - L);

  if (tempfb_fl < fbstar_fl) {
    return -cb;
  }
  else {
    return -kb * fbderiv(length, L);
  }
}

/* calculate elongation: extent, unit direction, length and relative velocity
 * of the spring (i, j) */
BLI_INLINE bool spring_length(Implicit_Data *data,
                              int i,
                              int j,
                              float r_extent[3],
                              float r_dir[3],
                              float *r_length,
                              float r_vel[3])
{
  sub_v3_v3v3(r_extent, data->X[j], data->X[i]);
  sub_v3_v3v3(r_vel, data->V[j], data->V[i]);
  *r_length = len_v3(r_extent);

  if (*r_length > ALMOST_ZERO) {
# if 0
    if (length > L) {
      if ((clmd->sim_parms->flags & CSIMSETT_FLAG_TEARING_ENABLED) &&
          (((length - L) * 100.0f / L) > clmd->sim_parms->maxspringlen)) {
        // cut spring!
s->flags |= CSPRING_FLAG_DEACTIVATE;
        return false;
      }
    }
# endif
    mul_v3_v3fl(r_dir, r_extent, 1.0f / (*r_length));
  }
  else {
    zero_v3(r_dir);
  }

  return true;
}

/* Accumulate a pair force and its jacobians symmetrically on vertices i and j,
 * including the off-diagonal (i, j) matrix block. */
BLI_INLINE void apply_spring(
    Implicit_Data *data, int i, int j, const float f[3], float dfdx[3][3], float dfdv[3][3])
{
  int block_ij = BPH_mass_spring_add_block(data, i, j);

  add_v3_v3(data->F[i], f);
  sub_v3_v3(data->F[j], f);

  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx);
  add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfdx);
  sub_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfdx);

  add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv);
  add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfdv);
  sub_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfdv);
}

/* Linear stretch/compression spring between vertices i and j.
 * Returns false when the spring applies no force. */
bool BPH_mass_spring_force_spring_linear(Implicit_Data *data,
                                         int i,
                                         int j,
                                         float restlen,
                                         float stiffness_tension,
                                         float damping_tension,
                                         float stiffness_compression,
                                         float damping_compression,
                                         bool resist_compress,
                                         bool new_compress,
                                         float clamp_force)
{
  float extent[3], length, dir[3], vel[3];
  float f[3], dfdx[3][3], dfdv[3][3];
  float damping = 0;

  // calculate elongation
  spring_length(data, i, j, extent, dir, &length, vel);

  /* This code computes not only the force, but also its derivative.
   * Zero derivative effectively disables the spring for the implicit solver.
   * Thus length > restlen makes cloth unconstrained at the start of simulation. */
  if ((length >= restlen && length > 0) || resist_compress) {
    float stretch_force;

    damping = damping_tension;

    stretch_force = stiffness_tension * (length - restlen);
    if (clamp_force > 0.0f && stretch_force > clamp_force) {
      stretch_force = clamp_force;
    }
    mul_v3_v3fl(f, dir, stretch_force);

    dfdx_spring(dfdx, dir, length, restlen, stiffness_tension);
  }
  else if (new_compress) {
    /* This is based on the Choi and Ko bending model,
     * which works surprisingly well for compression.
     */
    float kb = stiffness_compression;
    float cb = kb; /* cb equal to kb seems to work, but a factor can be added if necessary */

    damping = damping_compression;

    mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb));

    outerproduct(dfdx, dir, dir);
    mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb));
  }
  else {
    return false;
  }

  madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir));
  dfdv_damp(dfdv, dir, damping);

  apply_spring(data, i, j, f, dfdx, dfdv);

  return true;
}

/* See "Stable but Responsive Cloth" (Choi, Ko 2005) */
bool BPH_mass_spring_force_spring_bending(
    Implicit_Data *data, int i, int j, float restlen, float kb, float cb)
{
  float extent[3], length, dir[3], vel[3];

  // calculate elongation
  spring_length(data, i, j, extent, dir, &length, vel);

  /* only resists compression (length < restlen) */
  if (length < restlen) {
    float f[3], dfdx[3][3], dfdv[3][3];

    mul_v3_v3fl(f, dir, fbstar(length, restlen, kb, cb));

    outerproduct(dfdx, dir, dir);
    mul_m3_fl(dfdx, fbstar_jacobi(length, restlen, kb, cb));

    /* XXX damping not supported */
    zero_m3(dfdv);

    apply_spring(data, i, j, f, dfdx, dfdv);

    return true;
  }
  else {
    return false;
  }
}

/* Average of the positions/velocities indexed by inds. */
BLI_INLINE void poly_avg(lfVector *data, int *inds, int len, float r_avg[3])
{
  float fact = 1.0f / (float)len;

  zero_v3(r_avg);

  for (int i = 0; i < len; i++) {
    madd_v3_v3fl(r_avg, data[inds[i]], fact);
  }
}

/* Normal of the triangle spanned by the edge (i, j) and the polygon midpoint. */
BLI_INLINE void poly_norm(lfVector *data, int i, int j, int *inds, int len, float r_dir[3])
{
  float mid[3];

  poly_avg(data, inds, len, mid);

  normal_tri_v3(r_dir, data[i], data[j], mid);
}

/* Midpoint of the edge (i, j). */
BLI_INLINE void edge_avg(lfVector *data, int i, int j, float r_avg[3])
{
  r_avg[0] = (data[i][0] + data[j][0]) * 0.5f;
  r_avg[1] = (data[i][1] + data[j][1]) * 0.5f;
  r_avg[2] = (data[i][2] + data[j][2]) * 0.5f;
}

/* Normalized direction of the edge (i, j). */
BLI_INLINE void edge_norm(lfVector *data, int i, int j, float r_dir[3])
{
  sub_v3_v3v3(r_dir, data[i], data[j]);
  normalize_v3(r_dir);
}

/* Signed angle between the two normals dir_a, dir_b around the edge dir_e. */
BLI_INLINE float bend_angle(float dir_a[3], float dir_b[3], float dir_e[3])
{
  float cos, sin;
  float tmp[3];

  cos = dot_v3v3(dir_a, dir_b);

  cross_v3_v3v3(tmp, dir_a, dir_b);
  sin =
dot_v3v3(tmp, dir_e);

  return atan2f(sin, cos);
}

/* Gather face directions, bend angle and relative velocities for the angular
 * spring around edge (i, j) with adjacent polygons i_a (len_a) and i_b (len_b). */
BLI_INLINE void spring_angle(Implicit_Data *data,
                             int i,
                             int j,
                             int *i_a,
                             int *i_b,
                             int len_a,
                             int len_b,
                             float r_dir_a[3],
                             float r_dir_b[3],
                             float *r_angle,
                             float r_vel_a[3],
                             float r_vel_b[3])
{
  float dir_e[3], vel_e[3];

  poly_norm(data->X, j, i, i_a, len_a, r_dir_a);
  poly_norm(data->X, i, j, i_b, len_b, r_dir_b);

  edge_norm(data->X, i, j, dir_e);

  *r_angle = bend_angle(r_dir_a, r_dir_b, dir_e);

  poly_avg(data->V, i_a, len_a, r_vel_a);
  poly_avg(data->V, i_b, len_b, r_vel_b);

  edge_avg(data->V, i, j, vel_e);

  /* velocities relative to the edge midpoint */
  sub_v3_v3(r_vel_a, vel_e);
  sub_v3_v3(r_vel_b, vel_e);
}

/* Angular springs roughly based on the bending model proposed by Baraff and Witkin in "Large Steps
 * in Cloth Simulation". */
bool BPH_mass_spring_force_spring_angular(Implicit_Data *data,
                                          int i,
                                          int j,
                                          int *i_a,
                                          int *i_b,
                                          int len_a,
                                          int len_b,
                                          float restang,
                                          float stiffness,
                                          float damping)
{
  float angle, dir_a[3], dir_b[3], vel_a[3], vel_b[3];
  float f_a[3], f_b[3], f_e[3];
  float force;
  int x;

  spring_angle(data, i, j, i_a, i_b, len_a, len_b, dir_a, dir_b, &angle, vel_a, vel_b);

  /* spring force */
  force = stiffness * (angle - restang);

  /* damping force */
  force += -damping * (dot_v3v3(vel_a, dir_a) + dot_v3v3(vel_b, dir_b));

  /* distribute force over the polygon vertices */
  mul_v3_v3fl(f_a, dir_a, force / len_a);
  mul_v3_v3fl(f_b, dir_b, force / len_b);

  for (x = 0; x < len_a; x++) {
    add_v3_v3(data->F[i_a[x]], f_a);
  }

  for (x = 0; x < len_b; x++) {
    add_v3_v3(data->F[i_b[x]], f_b);
  }

  /* counterforce on the edge vertices */
  mul_v3_v3fl(f_a, dir_a, force * 0.5f);
  mul_v3_v3fl(f_b, dir_b, force * 0.5f);

  add_v3_v3v3(f_e, f_a, f_b);

  sub_v3_v3(data->F[i], f_e);
  sub_v3_v3(data->F[j], f_e);

  return true;
}

/* Jacobian of a direction vector.
 * Basically the part of the differential orthogonal to the direction,
 * inversely proportional to the length of the edge.
*
 * dD_ij/dx_i = -dD_ij/dx_j = (D_ij * D_ij^T - I) / len_ij
 */
BLI_INLINE void spring_grad_dir(
    Implicit_Data *data, int i, int j, float edge[3], float dir[3], float grad_dir[3][3])
{
  float length;

  sub_v3_v3v3(edge, data->X[j], data->X[i]);
  length = normalize_v3_v3(dir, edge);

  if (length > ALMOST_ZERO) {
    outerproduct(grad_dir, dir, dir);
    sub_m3_m3m3(grad_dir, I, grad_dir);
    mul_m3_fl(grad_dir, 1.0f / length);
  }
  else {
    zero_m3(grad_dir);
  }
}

/* Hair bending force on vertex k of the segment chain (i, j, k), evaluated with
 * a position offset dx and velocity offset dv applied to vertex q — used by the
 * finite-difference jacobian estimates below. */
BLI_INLINE void spring_hairbend_forces(Implicit_Data *data,
                                       int i,
                                       int j,
                                       int k,
                                       const float goal[3],
                                       float stiffness,
                                       float damping,
                                       int q,
                                       const float dx[3],
                                       const float dv[3],
                                       float r_f[3])
{
  float edge_ij[3], dir_ij[3];
  float edge_jk[3], dir_jk[3];
  float vel_ij[3], vel_jk[3], vel_ortho[3];
  float f_bend[3], f_damp[3];
  float fk[3];
  float dist[3];

  zero_v3(fk);

  sub_v3_v3v3(edge_ij, data->X[j], data->X[i]);
  if (q == i)
    sub_v3_v3(edge_ij, dx);
  if (q == j)
    add_v3_v3(edge_ij, dx);
  normalize_v3_v3(dir_ij, edge_ij);

  sub_v3_v3v3(edge_jk, data->X[k], data->X[j]);
  if (q == j)
    sub_v3_v3(edge_jk, dx);
  if (q == k)
    add_v3_v3(edge_jk, dx);
  normalize_v3_v3(dir_jk, edge_jk);

  sub_v3_v3v3(vel_ij, data->V[j], data->V[i]);
  if (q == i)
    sub_v3_v3(vel_ij, dv);
  if (q == j)
    add_v3_v3(vel_ij, dv);

  sub_v3_v3v3(vel_jk, data->V[k], data->V[j]);
  if (q == j)
    sub_v3_v3(vel_jk, dv);
  if (q == k)
    add_v3_v3(vel_jk, dv);

  /* bending force */
  sub_v3_v3v3(dist, goal, edge_jk);
  mul_v3_v3fl(f_bend, dist, stiffness);

  add_v3_v3(fk, f_bend);

  /* damping force */
  madd_v3_v3v3fl(vel_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk));
  mul_v3_v3fl(f_damp, vel_ortho, damping);

  sub_v3_v3(fk, f_damp);

  copy_v3_v3(r_f, fk);
}

/* Finite Differences method for estimating the jacobian of the force */
BLI_INLINE void spring_hairbend_estimate_dfdx(Implicit_Data *data,
                                              int i,
                                              int j,
                                              int k,
                                              const float goal[3],
                                              float stiffness,
                                              float damping,
                                              int q,
                                              float dfdx[3][3])
{
  const float delta = 0.00001f;  // TODO find a good heuristic for this
  float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3];
float f[3];
  int a, b;

  zero_m3(dvec_null);
  unit_m3(dvec_pos);
  mul_m3_fl(dvec_pos, delta * 0.5f);
  copy_m3_m3(dvec_neg, dvec_pos);
  negate_m3(dvec_neg);

  /* XXX TODO offset targets to account for position dependency */

  /* central differences: (f(x + d/2) - f(x - d/2)) / d, per axis */
  for (a = 0; a < 3; ++a) {
    spring_hairbend_forces(
        data, i, j, k, goal, stiffness, damping, q, dvec_pos[a], dvec_null[a], f);
    copy_v3_v3(dfdx[a], f);

    spring_hairbend_forces(
        data, i, j, k, goal, stiffness, damping, q, dvec_neg[a], dvec_null[a], f);
    sub_v3_v3(dfdx[a], f);

    for (b = 0; b < 3; ++b) {
      dfdx[a][b] /= delta;
    }
  }
}

/* Finite Differences method for estimating the jacobian of the force */
BLI_INLINE void spring_hairbend_estimate_dfdv(Implicit_Data *data,
                                              int i,
                                              int j,
                                              int k,
                                              const float goal[3],
                                              float stiffness,
                                              float damping,
                                              int q,
                                              float dfdv[3][3])
{
  const float delta = 0.00001f;  // TODO find a good heuristic for this
  float dvec_null[3][3], dvec_pos[3][3], dvec_neg[3][3];
  float f[3];
  int a, b;

  zero_m3(dvec_null);
  unit_m3(dvec_pos);
  mul_m3_fl(dvec_pos, delta * 0.5f);
  copy_m3_m3(dvec_neg, dvec_pos);
  negate_m3(dvec_neg);

  /* XXX TODO offset targets to account for position dependency */

  /* central differences on velocity instead of position */
  for (a = 0; a < 3; ++a) {
    spring_hairbend_forces(
        data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_pos[a], f);
    copy_v3_v3(dfdv[a], f);

    spring_hairbend_forces(
        data, i, j, k, goal, stiffness, damping, q, dvec_null[a], dvec_neg[a], f);
    sub_v3_v3(dfdv[a], f);

    for (b = 0; b < 3; ++b) {
      dfdv[a][b] /= delta;
    }
  }
}

/* Angular spring that pulls the vertex toward the local target
 * See "Artistic Simulation of Curly Hair" (Pixar technical memo #12-03a)
 */
bool BPH_mass_spring_force_spring_bending_hair(Implicit_Data *data,
                                               int i,
                                               int j,
                                               int k,
                                               const float target[3],
                                               float stiffness,
                                               float damping)
{
  float goal[3];
  float fj[3], fk[3];
  float dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3];
  float dfj_dvi[3][3], dfj_dvj[3][3], dfk_dvi[3][3], dfk_dvj[3][3], dfk_dvk[3][3];

  const float vecnull[3] = {0.0f, 0.0f, 0.0f};

  int block_ij =
BPH_mass_spring_add_block(data, i, j);
  int block_jk = BPH_mass_spring_add_block(data, j, k);
  int block_ik = BPH_mass_spring_add_block(data, i, k);

  world_to_root_v3(data, j, goal, target);

  /* force on k, with equal and opposite counterforce on j */
  spring_hairbend_forces(data, i, j, k, goal, stiffness, damping, k, vecnull, vecnull, fk);
  negate_v3_v3(fj, fk); /* counterforce */

  /* position jacobians of fk, estimated by finite differences */
  spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, i, dfk_dxi);
  spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, j, dfk_dxj);
  spring_hairbend_estimate_dfdx(data, i, j, k, goal, stiffness, damping, k, dfk_dxk);
  copy_m3_m3(dfj_dxi, dfk_dxi);
  negate_m3(dfj_dxi);
  copy_m3_m3(dfj_dxj, dfk_dxj);
  negate_m3(dfj_dxj);

  /* velocity jacobians of fk, estimated by finite differences */
  spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, i, dfk_dvi);
  spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, j, dfk_dvj);
  spring_hairbend_estimate_dfdv(data, i, j, k, goal, stiffness, damping, k, dfk_dvk);
  copy_m3_m3(dfj_dvi, dfk_dvi);
  negate_m3(dfj_dvi);
  copy_m3_m3(dfj_dvj, dfk_dvj);
  negate_m3(dfj_dvj);

  /* add forces and jacobians to the solver data */

  add_v3_v3(data->F[j], fj);
  add_v3_v3(data->F[k], fk);

  add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj);
  add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk);

  add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi);
  add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj);
  add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi);

  add_m3_m3m3(data->dFdV[j].m, data->dFdV[j].m, dfj_dvj);
  add_m3_m3m3(data->dFdV[k].m, data->dFdV[k].m, dfk_dvk);

  add_m3_m3m3(data->dFdV[block_ij].m, data->dFdV[block_ij].m, dfj_dvi);
  add_m3_m3m3(data->dFdV[block_jk].m, data->dFdV[block_jk].m, dfk_dvj);
  add_m3_m3m3(data->dFdV[block_ik].m, data->dFdV[block_ik].m, dfk_dvi);

  /* XXX analytical calculation of derivatives below is incorrect.
   * This proved to be difficult, but for now just using the finite difference method for
   * estimating the jacobians should be sufficient.
*/
# if 0
  /* disabled analytical jacobian derivation, kept for reference (see XXX above) */
  float edge_ij[3], dir_ij[3], grad_dir_ij[3][3];
  float edge_jk[3], dir_jk[3], grad_dir_jk[3][3];
  float dist[3], vel_jk[3], vel_jk_ortho[3], projvel[3];
  float target[3];
  float tmp[3][3];

  float fi[3], fj[3], fk[3];
  float dfi_dxi[3][3], dfj_dxi[3][3], dfj_dxj[3][3], dfk_dxi[3][3], dfk_dxj[3][3], dfk_dxk[3][3];
  float dfdvi[3][3];

  // TESTING
  damping = 0.0f;

  zero_v3(fi);
  zero_v3(fj);
  zero_v3(fk);
  zero_m3(dfi_dxi);
  zero_m3(dfj_dxi);
  zero_m3(dfk_dxi);
  zero_m3(dfk_dxj);
  zero_m3(dfk_dxk);

  /* jacobian of direction vectors */
  spring_grad_dir(data, i, j, edge_ij, dir_ij, grad_dir_ij);
  spring_grad_dir(data, j, k, edge_jk, dir_jk, grad_dir_jk);

  sub_v3_v3v3(vel_jk, data->V[k], data->V[j]);

  /* bending force */
  mul_v3_v3fl(target, dir_ij, restlen);
  sub_v3_v3v3(dist, target, edge_jk);
  mul_v3_v3fl(fk, dist, stiffness);

  /* damping force */
  madd_v3_v3v3fl(vel_jk_ortho, vel_jk, dir_jk, -dot_v3v3(vel_jk, dir_jk));
  madd_v3_v3fl(fk, vel_jk_ortho, damping);

  /* XXX this only holds true as long as we assume straight rest shape!
   * eventually will become a bit more involved since the opposite segment
   * gets its own target, under condition of having equal torque on both sides.
   */
  copy_v3_v3(fi, fk);

  /* counterforce on the middle point */
  sub_v3_v3(fj, fi);
  sub_v3_v3(fj, fk);

  /* === derivatives === */
  madd_m3_m3fl(dfk_dxi, grad_dir_ij, stiffness * restlen);

  madd_m3_m3fl(dfk_dxj, grad_dir_ij, -stiffness * restlen);
  madd_m3_m3fl(dfk_dxj, I, stiffness);

  madd_m3_m3fl(dfk_dxk, I, -stiffness);

  copy_m3_m3(dfi_dxi, dfk_dxk);
  negate_m3(dfi_dxi);

  /* dfj_dfi == dfi_dfj due to symmetry,
   * dfi_dfj == dfk_dfj due to fi == fk
   * XXX see comment above on future bent rest shapes */
  copy_m3_m3(dfj_dxi, dfk_dxj);

  /* dfj_dxj == -(dfi_dxj + dfk_dxj) due to fj == -(fi + fk) */
  sub_m3_m3m3(dfj_dxj, dfj_dxj, dfj_dxi);
  sub_m3_m3m3(dfj_dxj, dfj_dxj, dfk_dxj);

  /* add forces and jacobians to the solver data */
  add_v3_v3(data->F[i], fi);
  add_v3_v3(data->F[j], fj);
  add_v3_v3(data->F[k], fk);

  add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfi_dxi);
  add_m3_m3m3(data->dFdX[j].m, data->dFdX[j].m, dfj_dxj);
  add_m3_m3m3(data->dFdX[k].m, data->dFdX[k].m, dfk_dxk);

  add_m3_m3m3(data->dFdX[block_ij].m, data->dFdX[block_ij].m, dfj_dxi);
  add_m3_m3m3(data->dFdX[block_jk].m, data->dFdX[block_jk].m, dfk_dxj);
  add_m3_m3m3(data->dFdX[block_ik].m, data->dFdX[block_ik].m, dfk_dxi);
# endif

  return true;
}

/* Goal spring pulling vertex i toward a world-space goal position/velocity.
 * Returns false when the vertex is already (almost) at the goal. */
bool BPH_mass_spring_force_spring_goal(Implicit_Data *data,
                                       int i,
                                       const float goal_x[3],
                                       const float goal_v[3],
                                       float stiffness,
                                       float damping)
{
  float root_goal_x[3], root_goal_v[3], extent[3], length, dir[3], vel[3];
  float f[3], dfdx[3][3], dfdv[3][3];

  /* goal is in world space */
  world_to_root_v3(data, i, root_goal_x, goal_x);
  world_to_root_v3(data, i, root_goal_v, goal_v);

  sub_v3_v3v3(extent, root_goal_x, data->X[i]);
  sub_v3_v3v3(vel, root_goal_v, data->V[i]);
  length = normalize_v3_v3(dir, extent);

  if (length > ALMOST_ZERO) {
    mul_v3_v3fl(f, dir, stiffness * length);

    // Ascher & Boxman, p.21: Damping only during elongation
    // something wrong with it...
    madd_v3_v3fl(f, dir, damping * dot_v3v3(vel, dir));

    dfdx_spring(dfdx, dir, length, 0.0f, stiffness);
    dfdv_damp(dfdv, dir, damping);

    add_v3_v3(data->F[i], f);
    add_m3_m3m3(data->dFdX[i].m, data->dFdX[i].m, dfdx);
    add_m3_m3m3(data->dFdV[i].m, data->dFdV[i].m, dfdv);

    return true;
  }
  else {
    return false;
  }
}

#endif /* IMPLICIT_SOLVER_BLENDER */
aggregate_ops.h
//
// @author raver119@gmail.com
//

#ifndef LIBND4J_AGGREGATE_OPS_H
#define LIBND4J_AGGREGATE_OPS_H

#include <ops/ops.h>
#include <templatemath.h>

#define HS_MAX_EXP 6.0f

#ifdef __CUDACC__
#define aggregate_def __device__ inline static
#else
#include <ops/gemm.h>
#define aggregate_def inline static
#endif

/*
 *
 *
 * Aggregate Ops are special things suited for CUDA mostly. They are meant to be executed within single block ONLY.
 * So, when batched, they should provide proper parallelism levels on poorly parallel tasks otherwise.
 *
 * On CPU aggregate ops are trying to minimize OpenMP multi-threading use, only SIMD is enforced
 *
 *
 */
namespace aggregateOps {

    // Batched GEMM aggregate: CPU path delegates to nd4j::blas::GEMM, CUDA path is a no-op.
    template<typename T>
    class GEMM {
        public:
#ifdef __CUDACC__
        aggregate_def void executeAggregateCuda(T **arguments, int numArguments, int **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
            // no-op
        }
#endif

#ifndef __CUDACC__
        // Map an ASCII order character ('c'/'C' row-major, 'f'/'F' column-major) to the CBLAS enum.
        static CBLAS_ORDER convertOrder(int from) {
            switch(from) {
                //'c'
                case 99:
                    return CblasRowMajor;
                //'C'
                case 67:
                    return CblasRowMajor;
                //'f'
                case 102:
                    return CblasColMajor;
                //'F'
                case 70:
                    return CblasColMajor;
                default:
                    return CblasColMajor;
            }
        }

        // Map an ASCII transpose character ('t'/'T', 'n'/'N', 'c'/'C') to the CBLAS enum.
        static CBLAS_TRANSPOSE convertTranspose(int from) {
            switch(from) {
                //'t'
                case 116:
                    return CblasTrans;
                //'T'
                case 84:
                    return CblasTrans;
                //'n'
                case 110:
                    return CblasNoTrans;
                //'N'
                case 78:
                    return CblasNoTrans;
                //'c'
                case 99:
                    return CblasConjTrans;
                //'C'
                case 67:
                    return CblasConjTrans;
                default:
                    return CblasNoTrans;
            }
        }
#endif

#ifndef __CUDACC__
        // indexArguments: [M, N, K, lda, ldb, ldc, TransA, TransB, Order];
        // realArguments: [alpha, beta]; arguments: [A, B, C].
        aggregate_def void executeAggregate(T **arguments, int numArguments, int **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
            int M = indexArguments[0];
            int N = indexArguments[1];
            int K = indexArguments[2];
            int lda = indexArguments[3];
            int ldb = indexArguments[4];
            int ldc = indexArguments[5];
            int TransA = indexArguments[6];
            int TransB = indexArguments[7];
            int Order = indexArguments[8];

            T alpha = realArguments[0];
            T beta = realArguments[1];

            T *A = arguments[0];
            T *B = arguments[1];
            T *C = arguments[2];

            nd4j::blas::GEMM<T>::op(convertOrder(Order), convertTranspose(TransA), convertTranspose(TransB),M,N,K,(T) alpha,A,lda,B,ldb,(T) beta,C,ldc);
        }
#else
        aggregate_def void executeAggregate(T **arguments, int numArguments, int **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
            // stub for nvcc
        }
#endif
    };

    /**
     * We don't include this class into ops directly, since it won't be ever used directly,
     * Only as part of SkipGram or CBOW
     */
    template<typename T>
    class HierarchicSoftmax {
        public:

        // One hierarchical-softmax step: dot(syn0, syn1) -> sigmoid lookup in expTable ->
        // gradient accumulated into neu1e and (unless inference) into syn1.
        aggregate_def void executeAggregate(T **arguments, int numArguments, int **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
            int vectorLength = indexArguments[0];
            int expLength = indexArguments[1];
            int code = indexArguments[2];
            int isInference = indexArguments[3];

            T *syn0 = arguments[0]; // we pass row pointer here
            T *syn1 = arguments[1]; // we pass row pointer here
            T *expTable = arguments[2];
            T *neu1e = arguments[3];

            T dot = (T) 0.0f;
            T g = (T) 0.0f;
            T f = (T) 0.0f;
            T alpha = realArguments[0];

            // dot
#pragma omp simd reduction(sumT:dot)
            for (int x = 0; x < vectorLength; x++) {
                dot += syn0[x] * syn1[x];
            }

            // gradient: skip when dot saturates the sigmoid table range
            if (dot < (T) - HS_MAX_EXP || dot >= (T) HS_MAX_EXP) {
                return;
            }

            int idx = (int) ((dot + HS_MAX_EXP) * ((T) expLength / HS_MAX_EXP / 2.0));

            if (idx >= expLength || idx < 0) {
                return;
            }

            f = expTable[idx];
            g = ((T) 1.0f - code - f) * alpha;

            // axpy1
#pragma omp simd
            for (int x = 0; x < vectorLength; x++) {
                neu1e[x] = g * syn1[x] + neu1e[x];
            }

            // axpy2
            if (!isInference) {
#pragma omp simd
                for (int x = 0; x < vectorLength; x++) {
                    syn1[x] = g * syn0[x] + syn1[x];
                }
} } #ifdef __CUDACC__ aggregate_def void executeAggregateCuda(T **arguments, int numArguments, int **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) { /* We know that syn0 & syn1 are 2D matrices, so we can just use offsets here */ __shared__ int vectorLength; __shared__ int expLength; __shared__ int code; __shared__ int isInference; T *syn0 = arguments[0]; T *syn1 = arguments[1]; T *expTable = arguments[2]; T *neu1e = arguments[3]; __shared__ T dot; __shared__ T g; __shared__ T f; __shared__ T alpha; if (threadIdx.x == 0) { vectorLength = indexArguments[0]; expLength = indexArguments[1]; code = indexArguments[2]; isInference = indexArguments[3]; dot = (T) 0.0f; alpha = realArguments[0]; } __syncthreads(); // TODO: it would be great to implement dot without atomicAdd call. like aggregateParticles, or something like that // dot for (int x = threadIdx.x; x < vectorLength; x+=blockDim.x) { T prod = syn0[x] * syn1[x]; nd4j::math::atomics::nd4j_atomicAdd<T>(&dot, prod); } // gradient __syncthreads(); if (dot < - (T) HS_MAX_EXP || dot >= (T) HS_MAX_EXP) return; int idx = (int) ((dot + HS_MAX_EXP) * ((T) expLength / (T) HS_MAX_EXP / 2.0)); if (idx >= expLength) return; if (threadIdx.x == 0) { // gradient calculation f = expTable[idx]; g = ((T) 1.0f - (T) code - f) * alpha; } __syncthreads(); // axpy1 for (int x = threadIdx.x; x < vectorLength; x+=blockDim.x) { neu1e[x] = g * syn1[x] + neu1e[x]; } // axpy2 if (!isInference) for (int x = threadIdx.x; x < vectorLength; x+=blockDim.x) { syn1[x] = g * syn0[x] + syn1[x]; } } #endif }; /** * We don't include this class into ops directly, since it won't be ever used directly, * Only as part of SkipGram or CBOW */ template<typename T> class NegativeSampling { public: aggregate_def void executeAggregate(T **arguments, int numArguments, int **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, 
int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
        int vectorLength = indexArguments[0];
        int expLength = indexArguments[1];
        int code = indexArguments[2];
        int isInference = indexArguments[3];

        T *syn0 = arguments[0]; // we pass row pointer here
        T *syn1Neg = arguments[1]; // we pass row pointer here
        T *expTable = arguments[2];
        T *neu1e = arguments[3];

        T dot = (T) 0.0f;
        T g = (T) 0.0f;
        T alpha = realArguments[0];

        // dot
#pragma omp simd reduction(sumT:dot)
        for (int x = 0; x < vectorLength; x++) {
            dot += syn0[x] * syn1Neg[x];
        }

        // Saturated regions: treat the prediction as 1 (or 0) and use (code - 1)
        // resp. (code - 0) directly; otherwise read expTable.
        if (dot > HS_MAX_EXP)
            g = (code - 1) * alpha;
        else if (dot < (T) - HS_MAX_EXP)
            g = (code - 0) * alpha;
        else {
            int idx = (int) ((dot + (T) HS_MAX_EXP) * ((T) expLength / HS_MAX_EXP / 2.0));
            if (idx >= expLength)
                return;

            if (idx < 0)
                return;

            g = ((T) code - expTable[idx]) * alpha;
        }

        // axpy1
#pragma omp simd
        for (int x = 0; x < vectorLength; x++) {
            neu1e[x] = g * syn1Neg[x] + neu1e[x];
        }

        // axpy2
        if (!isInference) {
#pragma omp simd
            for (int x = 0; x < vectorLength; x++) {
                syn1Neg[x] = g * syn0[x] + syn1Neg[x];
            }
        }
    }

#ifdef __CUDACC__
    // CUDA flavour: one thread block per row pair, block-wide atomic dot product.
    aggregate_def void executeAggregateCuda(T **arguments, int numArguments, int **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
        /* We know that syn0 & syn1 are 2D matrices, so we can just use offsets here */
        __shared__ int vectorLength;
        __shared__ int expLength;
        __shared__ int code;
        __shared__ int isInference;

        T *syn0 = arguments[0];
        T *syn1Neg = arguments[1];
        T *expTable = arguments[2];
        T *neu1e = arguments[3];

        __shared__ T dot;
        __shared__ T g;
        __shared__ T alpha;

        if (threadIdx.x == 0) {
            vectorLength = indexArguments[0];
            expLength = indexArguments[1];
            code = indexArguments[2];
            isInference = indexArguments[3];

            dot = (T) 0.0f;
            alpha = realArguments[0];
        }
        __syncthreads();

        // TODO: it would be great to implement dot without atomicAdd call. like aggregateParticles, or something like that
        // dot
        for (int x = threadIdx.x; x < vectorLength; x+=blockDim.x) {
            T prod = syn0[x] * syn1Neg[x];
            nd4j::math::atomics::nd4j_atomicAdd<T>(&dot, prod);
        }

        // gradient
        __syncthreads();

        // Bail out only when idx overflows the table AND dot is inside the
        // non-saturated range (saturated dots are handled without the table).
        int idx = (int) ((dot + (T) HS_MAX_EXP) * ((T) expLength / (T) HS_MAX_EXP / 2.0));
        if (idx >= expLength && dot <= (T) HS_MAX_EXP && dot >= (T) -HS_MAX_EXP)
            return;

        if (threadIdx.x == 0) {
            // gradient calculation
            if (dot > (T) HS_MAX_EXP)
                g = (code - 1) * alpha;
            else if (dot < (T) - HS_MAX_EXP)
                g = (code - 0) * alpha;
            else {
                g = ((T) code - expTable[idx]) * alpha;
            }
            // printf("dot: [%f]; g: [%f]\n", dot, g);
        }
        __syncthreads();

        // printf("before syn1Neg[%i]: [%f], dot: [%f]; g: [%f]; vectorLength: [%i]\n", threadIdx.x, syn1Neg[threadIdx.x], dot, g, vectorLength);

        // axpy1
        for (int x = threadIdx.x; x < vectorLength; x+=blockDim.x) {
            neu1e[x] = g * syn1Neg[x] + neu1e[x];
        }

        // axpy2
        if (!isInference)
            for (int x = threadIdx.x; x < vectorLength; x+=blockDim.x) {
                syn1Neg[x] = g * syn0[x] + syn1Neg[x];
            }

        // printf("after syn1Neg[%i]: [%f]\n", threadIdx.x, syn1Neg[threadIdx.x]);
    }
#endif
};

template<typename T>
class Dot {
public:

    // vecZ[0] = dot(vecX, vecY) over the first vectorLength elements.
    aggregate_def void executeAggregate(T **arguments, int numArguments, int **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
        T *vecX = arguments[0];
        T *vecY = arguments[1];
        T *vecZ = arguments[2];

        T dot = (T) 0.0f;

        int vectorLength = indexArguments[0];

#pragma omp simd reduction(sumT:dot)
        for (int x = 0; x < vectorLength; x++) {
            dot += vecX[x] * vecY[x];
        }

        vecZ[0] = dot;
    };

#ifdef __CUDACC__
    // CUDA flavour: block-strided products accumulated with an atomic add.
    aggregate_def void executeAggregateCuda(T **arguments, int numArguments, int **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
        T *vecX = arguments[0];
        T *vecY = arguments[1];
        T *vecZ = arguments[2];

        int vectorLength =
indexArguments[0];

        __shared__ T dot;

        if (threadIdx.x == 0)
            dot = (T) 0.0f;
        __syncthreads();

        for (int x = threadIdx.x; x < vectorLength; x+=blockDim.x) {
            T prod = vecX[x] * vecY[x];
            nd4j::math::atomics::nd4j_atomicAdd<T>(&dot, prod);
        }
        __syncthreads();

        if (threadIdx.x == 0)
            vecZ[0] = dot;
    }
#endif
};

template<typename T>
class Axpy {
public:

    // Classic axpy: vecY = alpha * vecX + vecY over vectorLength elements.
    aggregate_def void executeAggregate(T **arguments, int numArguments, int **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
        T *vecX = arguments[0];
        T *vecY = arguments[1];

        T alpha = realArguments[0];

        int vectorLength = indexArguments[0];

#pragma omp simd
        for (int x = 0; x < vectorLength; x++) {
            vecY[x] = alpha * vecX[x] + vecY[x];
        }
    };

#ifdef __CUDACC__
    aggregate_def void executeAggregateCuda(T **arguments, int numArguments, int **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
        T *vecX = arguments[0];
        T *vecY = arguments[1];

        T alpha = realArguments[0];

        int vectorLength = indexArguments[0];

        for (int x = threadIdx.x; x < vectorLength; x+=blockDim.x) {
            vecY[x] = alpha * vecX[x] + vecY[x];
        }
        __syncthreads();
    }
#endif
};

template<typename T>
class SkipGram {
public:

    /*
     * One SkipGram training step for a single syn0 row: run the requested number of
     * hierarchic-softmax and negative-sampling rounds against that row, accumulating
     * the gradient in neu1e, then apply it to syn0 (or the inference vector).
     *
     * indexArguments: [0] syn0 row, [1] vectorLength, [2] HS rounds, [3] NS rounds,
     *                 [4] expTable length, [5] vocab size, [6] NS starter word,
     *                 [7] negative table length, [8] isInference
     * realArguments:  [0] alpha, [1] RNG seed
     */
    aggregate_def void executeAggregate(T **arguments, int numArguments, int **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
        int syn0Row = indexArguments[0];
        int vectorLength = indexArguments[1];
        int hsRounds = indexArguments[2];
        int ngRounds = indexArguments[3];
        int expLength = indexArguments[4];
        int vocabSize = indexArguments[5];
        int ngStarter = indexArguments[6];
        int negTableLength = indexArguments[7];
        int isInference = indexArguments[8];

        // Per-call gradient accumulator; released at the end of this method.
        T *neu1e = new T[vectorLength];
        std::memset(neu1e, 0, sizeof(T) * vectorLength);

        T *args[4];
        int
int ngRounds;
        __shared__ int expLength;
        __shared__ int vocabSize;
        __shared__ int ngStarter;
        __shared__ int negTableLength;
        __shared__ int isInference;

        __shared__ T *neu1e;

        __shared__ T *args[4];
        __shared__ int idxArgs[4];

        __shared__ unsigned long long next_random;

        __shared__ T *negTable;

        T *syn1Neg = arguments[3];
        __shared__ T *inferenceVector;

        // Thread 0 unpacks the scalar parameters and wires up the sub-op argument
        // arrays; neu1e lives in dynamic shared memory.
        if (threadIdx.x == 0) {
            extern __shared__ unsigned char shmem[];
            neu1e = (T *) shmem;

            syn0Row = indexArguments[0];
            vectorLength = indexArguments[1];
            hsRounds = indexArguments[2];
            ngRounds = indexArguments[3];
            expLength = indexArguments[4];
            vocabSize = indexArguments[5];
            ngStarter = indexArguments[6];
            negTableLength = indexArguments[7];
            isInference = indexArguments[8];

            inferenceVector = arguments[5];

            next_random = (unsigned long long) realArguments[1];

            args[0] = isInference == 1 ? inferenceVector : arguments[0] + (syn0Row * vectorLength); // syn0
            args[1] = arguments[1]; // syn1
            args[2] = arguments[2]; // expTable
            args[3] = neu1e;

            negTable = arguments[4];

            idxArgs[0] = vectorLength; // vectorLength
            idxArgs[1] = expLength; // expLength
            idxArgs[3] = isInference;
        }
        __syncthreads();

        T *syn0 = isInference ? inferenceVector : arguments[0] + (syn0Row * vectorLength);

        // Zero the shared gradient accumulator cooperatively.
        for (int i = threadIdx.x; i < vectorLength; i+=blockDim.x) {
            neu1e[i] = (T) 0.0f;
        }

        int *idxSyn1 = intArrays[0];
        int *codes = intArrays[1];

        // Hierarchic-softmax rounds: one syn1 row + code bit per round.
        for (int r = 0; r < hsRounds; r++) {
            if (threadIdx.x == 0) {
                args[1] = arguments[1] + (idxSyn1[r] * vectorLength);// syn1 row
                idxArgs[2] = codes[r]; // code for row
            }
            __syncthreads();

            HierarchicSoftmax<T>::executeAggregateCuda(args, 4, nullptr, 0, idxArgs, 3, nullptr, 0, realArguments, 1);
        }
        __syncthreads();

        __shared__ int target;

        // Negative-sampling rounds; round 0 trains the positive target itself.
        if (ngRounds > 0)
            for (int r = 0; r < ngRounds + 1; r++) {
                if (threadIdx.x == 0) {
                    if (r == 0) {
                        // this line isn't a mistake
                        target = ngStarter;
                        idxArgs[2] = 1;
                    } else {
                        next_random = next_random * (unsigned long long)25214903917 + 11 + blockIdx.x;
                        target = negTable[(next_random >> 16) % negTableLength];
                        if (target <= 0 || target >= vocabSize) target = next_random % (vocabSize - 1) + 1;

                        idxArgs[2] = 0;
                    }

                    args[1] = syn1Neg + (target * vectorLength);
                }
                __syncthreads();

                // we put it here, to make sure all threads pick up continue call
                if (r != 0 && target == ngStarter)
                    continue;

                NegativeSampling<T>::executeAggregateCuda(args, 4, nullptr, 0, idxArgs, 3, nullptr, 0, realArguments, 1);
            }

        // final axpy with 1.0f as alpha
        if (!isInference)
            for (int x = threadIdx.x; x < vectorLength; x+= blockDim.x) {
                syn0[x] += neu1e[x];
            }
        else
            for (int x = threadIdx.x; x < vectorLength; x+= blockDim.x) {
                inferenceVector[x] += neu1e[x];
            }
    }
#endif
};

template<typename T>
class CBOW {
public:

    /*
     * One CBOW training step: average the context syn0 rows (plus the inference
     * vector when inferring), run the HS / NS rounds against the averaged vector,
     * then propagate the accumulated gradient back to the context rows.
     *
     * indexArguments: [0] vectorLength, [1] HS rounds, [2] NS rounds, [3] expTable
     * length, [4] vocab size, [5] NS starter, [6] negative table length,
     * [7] context length, [8] (unused initial idx), [9] numLabels, [10] trainWords,
     * [11] isInference.
     */
    aggregate_def void executeAggregate(T **arguments, int numArguments, int **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
        int vectorLength = indexArguments[0];
        int hsRounds = indexArguments[1];
        int ngRounds = indexArguments[2];
        int expLength = indexArguments[3];
        int vocabSize = indexArguments[4];
        int ngStarter = indexArguments[5];
        int negTableLength =
indexArguments[6];
        int idxSyn0Length = indexArguments[7];
        //int initialIdx = indexArguments[8];
        int numLabels = indexArguments[9];
        int trainWords = indexArguments[10];
        int isInference = indexArguments[11];

        int *idxSyn0 = intArrays[0];
        int *idxSyn1 = intArrays[1];
        int *codes = intArrays[2];

        // neu1: averaged context vector; neu1e: gradient accumulator. Both freed below.
        T *neu1 = new T[vectorLength];
        T *neu1e = new T[vectorLength];
        std::memset(neu1, 0, sizeof(T) * vectorLength);
        std::memset(neu1e, 0, sizeof(T) * vectorLength);

        T *syn0 = arguments[0];
        T *syn1 = arguments[1];
        T *expTable = arguments[2];
        T *syn1Neg = arguments[3];
        T *negTable = arguments[4];
        T *inferenceVector = arguments[5];

        T *args[4];
        int idxArgs[4];

        idxArgs[0] = vectorLength; // vectorLength
        idxArgs[1] = expLength; // expLength
        idxArgs[3] = isInference;

        unsigned long long next_random = (unsigned long long) realArguments[1];

        // building neu1 for current window
        for (int c = 0; c < idxSyn0Length; c++) {
            T *syn0word = syn0 + (idxSyn0[c] * vectorLength);

#pragma omp simd
            for (int i = 0; i < vectorLength; i++) {
                neu1[i] += syn0word[i];
            }
        }

        // for inference we use additional inference vector
        if (isInference) {
#pragma omp simd
            for (int i = 0; i < vectorLength; i++) {
                neu1[i] += inferenceVector[i];
            }
        }

        // average neu1 (the inference vector counts as one extra context entry)
        if (idxSyn0Length > 0) {
#pragma omp simd
            for (int i = 0; i < vectorLength; i++) {
                neu1[i] /= idxSyn0Length + isInference;
            }
        }

        args[0] = neu1;
        args[2] = expTable;
        args[3] = neu1e;

        if (hsRounds > 0)
            for (int i = 0; i < hsRounds; i++) {
                args[1] = syn1 + (idxSyn1[i] * vectorLength);
                idxArgs[2] = codes[i];

                HierarchicSoftmax<T>::executeAggregate((T **)args, 4, nullptr, 0, idxArgs, 3, nullptr, 0, realArguments, 2);
            }

        // Negative sampling; round 0 trains the positive target itself.
        int target = ngStarter;
        if (ngRounds > 0)
            for (int i = 0; i < ngRounds + 1; i++) {
                if (i == 0) {
                    idxArgs[2] = 1;
                } else {
                    // word2vec-style linear-congruential RNG step for sampling.
                    next_random = next_random * (unsigned long long) 25214903917 + 11;
                    target = negTable[(next_random >> 16) % negTableLength];
                    if (target <= 0 || target >= vocabSize) target = next_random % (vocabSize - 1) + 1;

                    if (target == ngStarter)
                        continue;
                    idxArgs[2] = 0;
                }

                args[1] = syn1Neg + (target * vectorLength); // syn1Neg instead of syn1

                //printf("Negative round: target: [%i]; code: [%i]; neu1e[0]: [%f]\n", target, idxArgs[4], neu1e[0]);

                NegativeSampling<T>::executeAggregate((T **)args, 4, nullptr, 0, idxArgs, 3, nullptr, 0, realArguments, 2);
            }

        // if we don't train words - we skip start of idxSyn0
        int starter = trainWords == 1 ? 0 : idxSyn0Length - numLabels;

        // propagate neu1e -> syn0
        if (!isInference) {
            for (int c = starter; c < idxSyn0Length; c++) {
                T *syn0word = arguments[0] + (idxSyn0[c] * vectorLength);

#pragma omp simd
                for (int i = 0; i < vectorLength; i++) {
                    syn0word[i] += neu1e[i];
                }
            }
        } else {
#pragma omp simd
            for (int i = 0; i < vectorLength; i++) {
                inferenceVector[i] += neu1e[i];
            }
        }

        delete[] neu1;
        delete[] neu1e;
    }

#ifdef __CUDACC__
    // CUDA flavour: neu1 and neu1e share one dynamic shared-memory allocation,
    // sub-ops are invoked block-wide.
    aggregate_def void executeAggregateCuda(T **arguments, int numArguments, int **shapeArguments, int numShapeArguments, int *indexArguments, int numIndexArguments, int **intArrays, int numIntArrays, T *realArguments, int numRealArguments) {
        __shared__ int vectorLength;
        __shared__ int hsRounds;
        __shared__ int ngRounds;
        __shared__ int expLength;
        __shared__ int vocabSize;
        __shared__ int ngStarter;
        __shared__ int negTableLength;
        __shared__ int idxSyn0Length;
        __shared__ int initialIdx;
        __shared__ int numLabels;
        __shared__ int trainWords;
        __shared__ int isInference;

        int *idxSyn0 = intArrays[0];
        int *idxSyn1 = intArrays[1];
        int *codes = intArrays[2];

        __shared__ T *neu1;
        __shared__ T *neu1e;

        __shared__ T *args[5];
        __shared__ int idxArgs[4];

        T *syn0 = arguments[0];
        T *syn1 = arguments[1];
        //T *expTable = arguments[2];
        T *syn1Neg = arguments[3];
        T *negTable = arguments[4];
        T *inferenceVector = arguments[5];

        if (threadIdx.x == 0) {
            vectorLength = indexArguments[0];
            hsRounds = indexArguments[1];
            ngRounds = indexArguments[2];
            expLength = indexArguments[3];
            vocabSize = indexArguments[4];
            ngStarter = indexArguments[5];
            negTableLength = indexArguments[6];
            idxSyn0Length =
indexArguments[7];
            initialIdx = indexArguments[8];
            numLabels = indexArguments[9];
            trainWords = indexArguments[10];
            isInference = indexArguments[11];

            // neu1 and neu1e are carved out of one dynamic shared-memory block.
            extern __shared__ unsigned char shmem[];
            neu1 = (T *) shmem;
            neu1e = neu1 + vectorLength;

            args[0] = neu1;
            args[2] = arguments[2]; //expTable
            args[3] = neu1e;

            idxArgs[0] = vectorLength; // vectorLength
            idxArgs[1] = expLength; // expLength
            idxArgs[3] = isInference;
        }
        __syncthreads();

        for (int i = threadIdx.x; i < vectorLength; i += blockDim.x) {
            neu1[i] = (T) 0.0f;
            neu1e[i] = (T) 0.0f;
        }

        unsigned long long next_random = (unsigned long long) realArguments[1];

        // Sum the context rows into neu1 cooperatively.
        for (int c = 0; c < idxSyn0Length; c++) {
            T *syn0word = syn0 + (idxSyn0[c] * vectorLength);

            for (int i = threadIdx.x; i < vectorLength; i += blockDim.x) {
                neu1[i] += syn0word[i];
            }
        }

        if (isInference)
            for (int i = threadIdx.x; i < vectorLength; i += blockDim.x) {
                neu1[i] += inferenceVector[i];
            }

        // average neu1
        if (idxSyn0Length > 0) {
            for (int i = threadIdx.x; i < vectorLength; i += blockDim.x) {
                // note: the stray '+ +' parses as a harmless unary plus on isInference
                neu1[i] /= idxSyn0Length + + isInference;
            }
        }
        __syncthreads();

        if (hsRounds > 0)
            for (int i = 0; i < hsRounds; i++) {
                if (threadIdx.x == 0) {
                    args[1] = syn1 + (idxSyn1[i] * vectorLength);
                    idxArgs[2] = codes[i];
                }
                __syncthreads();

                HierarchicSoftmax<T>::executeAggregateCuda(args, 4, nullptr, 0, idxArgs, 3, nullptr, 0, realArguments, 2);
            }

        __shared__ int target;

        if (ngRounds > 0)
            for (int i = 0; i < ngRounds + 1; i++) {
                if (threadIdx.x == 0) {
                    if (i == 0) {
                        target = ngStarter;
                    } else {
                        next_random = next_random * (unsigned long long) 25214903917 + 11;
                        target = negTable[(next_random >> 16) % negTableLength];
                        if (target <= 0 || target >= vocabSize) target = next_random % (vocabSize - 1) + 1;
                    }

                    args[1] = syn1Neg + (target * vectorLength); // syn1Neg instead of syn1
                    idxArgs[2] = i == 0 ? 1 : 0;
                }
                __syncthreads();

                // all threads must see the shared target before deciding to skip
                if (i != 0 && target == ngStarter)
                    continue;

                NegativeSampling<T>::executeAggregateCuda(args, 4, nullptr, 0, idxArgs, 3, nullptr, 0, realArguments, 2);

                //printf("Negative round: target: [%i]; code: [%i]; neu1[%i]: [%f]; neu1e[%i]: [%f]\n", target, idxArgs[2], threadIdx.x, neu1[threadIdx.x], threadIdx.x, neu1e[threadIdx.x]);
            }

        // if we don't train words - we skip start of idxSyn0
        int starter = trainWords == 1 ? 0 : idxSyn0Length - numLabels;

        if (!isInference)
            for (int c = starter; c < idxSyn0Length; c++) {
                T *syn0word = arguments[0] + (idxSyn0[c] * vectorLength);

                for (int i = threadIdx.x; i < vectorLength; i += blockDim.x) {
                    syn0word[i] += neu1e[i];
                }
            }
        else {
            for (int i = threadIdx.x; i < vectorLength; i += blockDim.x) {
                inferenceVector[i] += neu1e[i];
            }
        }
    }
#endif
};
}

#endif //LIBND4J_AGGREGATE_OPS_H
shear.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS H H EEEEE AAA RRRR % % SS H H E A A R R % % SSS HHHHH EEE AAAAA RRRR % % SS H H E A A R R % % SSSSS H H EEEEE A A R R % % % % % % MagickCore Methods to Shear or Rotate an Image by an Arbitrary Angle % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The XShearImage() and YShearImage() methods are based on the paper "A Fast % Algorithm for General Raster Rotation" by Alan W. Paeth, Graphics % Interface '86 (Vancouver). ShearRotateImage() is adapted from a similar % method based on the Paeth paper written by Michael Halle of the Spatial % Imaging Group, MIT Media Lab. % */ /* Include declarations. 
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/channel.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/matrix.h"
#include "MagickCore/memory_.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/resource_.h"
#include "MagickCore/shear.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/transform.h"

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C r o p T o F i t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropToFitImage() crops the sheared image as determined by the bounding box
%  as defined by width and height and shearing angles.
%
%  The format of the CropToFitImage method is:
%
%      MagickBooleanType CropToFitImage(Image **image,
%        const double x_shear,const double y_shear,
%        const double width,const double height,
%        const MagickBooleanType rotate,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o x_shear, y_shear, width, height: Defines a region of the image to crop.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType CropToFitImage(Image **image,
  const double x_shear,const double y_shear,
  const double width,const double height,
  const MagickBooleanType rotate,ExceptionInfo *exception)
{
  Image
    *crop_image;

  PointInfo
    extent[4],
    min,
    max;

  RectangleInfo
    geometry,
    page;

  register ssize_t
    i;

  /*
    Calculate the rotated image size.
  */
  extent[0].x=(double) (-width/2.0);
  extent[0].y=(double) (-height/2.0);
  extent[1].x=(double) width/2.0;
  extent[1].y=(double) (-height/2.0);
  extent[2].x=(double) (-width/2.0);
  extent[2].y=(double) height/2.0;
  extent[3].x=(double) width/2.0;
  extent[3].y=(double) height/2.0;
  /* Apply the shear transforms to each corner; rotate adds the second x-shear. */
  for (i=0; i < 4; i++)
  {
    extent[i].x+=x_shear*extent[i].y;
    extent[i].y+=y_shear*extent[i].x;
    if (rotate != MagickFalse)
      extent[i].x+=x_shear*extent[i].y;
    extent[i].x+=(double) (*image)->columns/2.0;
    extent[i].y+=(double) (*image)->rows/2.0;
  }
  /* Take the bounding box of the four transformed corners. */
  min=extent[0];
  max=extent[0];
  for (i=1; i < 4; i++)
  {
    if (min.x > extent[i].x)
      min.x=extent[i].x;
    if (min.y > extent[i].y)
      min.y=extent[i].y;
    if (max.x < extent[i].x)
      max.x=extent[i].x;
    if (max.y < extent[i].y)
      max.y=extent[i].y;
  }
  geometry.x=(ssize_t) ceil(min.x-0.5);
  geometry.y=(ssize_t) ceil(min.y-0.5);
  geometry.width=(size_t) floor(max.x-min.x+0.5);
  geometry.height=(size_t) floor(max.y-min.y+0.5);
  /* Temporarily clear the page geometry so CropImage() works in image space. */
  page=(*image)->page;
  (void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
  crop_image=CropImage(*image,&geometry,exception);
  if (crop_image == (Image *) NULL)
    return(MagickFalse);
  crop_image->page=page;
  *image=DestroyImage(*image);
  *image=crop_image;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s k e w I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DeskewImage() removes skew from the image.
Skew is an artifact that
%  occurs in scanned images because of the camera being misaligned,
%  imperfections in the scanning or surface, or simply because the paper was
%  not placed completely flat when scanned.
%
%  The result will be auto-cropped if the artifact "deskew:auto-crop" is
%  defined, while the amount the image is to be deskewed, in degrees is also
%  saved as the artifact "deskew:angle".
%
%  The format of the DeskewImage method is:
%
%      Image *DeskewImage(const Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: separate background from foreground.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  Butterfly-style Radon accumulation: each pass doubles the shear step,
  combining neighboring columns; the final loop scores each projection by the
  sum of squared differences between adjacent row sums.
*/
static void RadonProjection(const Image *image,MatrixInfo *source_matrixs,
  MatrixInfo *destination_matrixs,const ssize_t sign,size_t *projection)
{
  MatrixInfo
    *swap;

  register MatrixInfo
    *p,
    *q;

  register ssize_t
    x;

  size_t
    step;

  p=source_matrixs;
  q=destination_matrixs;
  for (step=1; step < GetMatrixColumns(p); step*=2)
  {
    for (x=0; x < (ssize_t) GetMatrixColumns(p); x+=2*(ssize_t) step)
    {
      register ssize_t
        i;

      ssize_t
        y;

      unsigned short
        element,
        neighbor;

      for (i=0; i < (ssize_t) step; i++)
      {
        for (y=0; y < (ssize_t) (GetMatrixRows(p)-i-1); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i+1,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i+1,y,&neighbor) == MagickFalse)
            continue;
        }
        /* Rows past the shifted partner: pair without the +1 offset. */
        for ( ; y < (ssize_t) (GetMatrixRows(p)-i); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (GetMatrixElement(p,x+i+step,y+i,&neighbor) == MagickFalse)
            continue;
          neighbor+=element;
          if (SetMatrixElement(q,x+2*i,y,&neighbor) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
            continue;
        }
        /* Remaining rows: copy through unchanged. */
        for ( ; y < (ssize_t) GetMatrixRows(p); y++)
        {
          if (GetMatrixElement(p,x+i,y,&element) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i,y,&element) == MagickFalse)
            continue;
          if (SetMatrixElement(q,x+2*i+1,y,&element) == MagickFalse)
            continue;
        }
      }
    }
    /* Ping-pong the source and destination matrices for the next pass. */
    swap=p;
    p=q;
    q=swap;
  }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) \
    magick_number_threads(image,image,GetMatrixColumns(p),1)
#endif
  for (x=0; x < (ssize_t) GetMatrixColumns(p); x++)
  {
    register ssize_t
      y;

    size_t
      sum;

    sum=0;
    for (y=0; y < (ssize_t) (GetMatrixRows(p)-1); y++)
    {
      ssize_t
        delta;

      unsigned short
        element,
        neighbor;

      if (GetMatrixElement(p,x,y,&element) == MagickFalse)
        continue;
      if (GetMatrixElement(p,x,y+1,&neighbor) == MagickFalse)
        continue;
      delta=(ssize_t) element-(ssize_t) neighbor;
      sum+=delta*delta;
    }
    /* sign selects which half of the projection array this direction fills. */
    projection[GetMatrixColumns(p)+sign*x-1]=sum;
  }
}

/*
  Build bit-count matrices from the thresholded image and accumulate Radon
  projections in both shear directions into projection[].
*/
static MagickBooleanType RadonTransform(const Image *image,
  const double threshold,size_t *projection,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MatrixInfo
    *destination_matrixs,
    *source_matrixs;

  MagickBooleanType
    status;

  size_t
    count,
    width;

  ssize_t
    j,
    y;

  unsigned char
    c;

  unsigned short
    bits[256];

  /* width = smallest power of two >= columns/8 (one matrix cell per byte). */
  for (width=1; width < ((image->columns+7)/8); width<<=1) ;
  source_matrixs=AcquireMatrixInfo(width,image->rows,sizeof(unsigned short),
    exception);
  destination_matrixs=AcquireMatrixInfo(width,image->rows,
    sizeof(unsigned short),exception);
  if ((source_matrixs == (MatrixInfo *) NULL) ||
      (destination_matrixs == (MatrixInfo *) NULL))
    {
      if (destination_matrixs != (MatrixInfo *) NULL)
        destination_matrixs=DestroyMatrixInfo(destination_matrixs);
      if (source_matrixs != (MatrixInfo *) NULL)
        source_matrixs=DestroyMatrixInfo(source_matrixs);
      return(MagickFalse);
    }
  if (NullMatrix(source_matrixs) == MagickFalse)
    {
      destination_matrixs=DestroyMatrixInfo(destination_matrixs);
      source_matrixs=DestroyMatrixInfo(source_matrixs);
      return(MagickFalse);
    }
  /* Precompute a popcount table for all byte values. */
  for (j=0; j < 256; j++)
  {
    c=(unsigned char) j;
    for (count=0; c != 0;
c>>=1)
      count+=c & 0x01;
    bits[j]=(unsigned short) count;
  }
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  /* First pass: pack thresholded pixels into bytes, filled right-to-left. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      i,
      x;

    size_t
      bit,
      byte;

    unsigned short
      value;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    bit=0;
    byte=0;
    i=(ssize_t) (image->columns+7)/8;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte<<=1;
      /* A pixel counts as foreground when any channel falls below threshold. */
      if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
          ((MagickRealType) GetPixelGreen(image,p) < threshold) ||
          ((MagickRealType) GetPixelBlue(image,p) < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          value=bits[byte];
          (void) SetMatrixElement(source_matrixs,--i,y,&value);
          bit=0;
          byte=0;
        }
      p+=GetPixelChannels(image);
    }
    if (bit != 0)
      {
        byte<<=(8-bit);
        value=bits[byte];
        (void) SetMatrixElement(source_matrixs,--i,y,&value);
      }
  }
  RadonProjection(image,source_matrixs,destination_matrixs,-1,projection);
  (void) NullMatrix(source_matrixs);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  /* Second pass: same packing, but filled left-to-right for the other sign. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      i,
      x;

    size_t
      bit,
      byte;

    unsigned short
      value;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    bit=0;
    byte=0;
    i=0;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      byte<<=1;
      if (((MagickRealType) GetPixelRed(image,p) < threshold) ||
          ((MagickRealType) GetPixelGreen(image,p) < threshold) ||
          ((MagickRealType) GetPixelBlue(image,p) < threshold))
        byte|=0x01;
      bit++;
      if (bit == 8)
        {
          value=bits[byte];
          (void) SetMatrixElement(source_matrixs,i++,y,&value);
          bit=0;
          byte=0;
        }
      p+=GetPixelChannels(image);
    }
    if (bit != 0)
      {
        byte<<=(8-bit);
        value=bits[byte];
        (void) SetMatrixElement(source_matrixs,i++,y,&value);
      }
  }
  RadonProjection(image,source_matrixs,destination_matrixs,1,projection);
  image_view=DestroyCacheView(image_view);
  destination_matrixs=DestroyMatrixInfo(destination_matrixs);
  source_matrixs=DestroyMatrixInfo(source_matrixs);
  return(MagickTrue);
}

/*
  Estimate the background color by averaging the pixels in an offset-wide
  border around the image, and store it in image->background_color.
*/
static void GetImageBackgroundColor(Image *image,const ssize_t offset,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  PixelInfo
    background;

  double
    count;

  ssize_t
    y;

  /*
    Compute average background color.
  */
  if (offset <= 0)
    return;
  GetPixelInfo(image,&background);
  count=0.0;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    /* Skip rows fully inside the border; only sample the frame. */
    if ((y >= offset) && (y < ((ssize_t) image->rows-offset)))
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      continue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x >= offset) && (x < ((ssize_t) image->columns-offset)))
        continue;
      background.red+=QuantumScale*GetPixelRed(image,p);
      background.green+=QuantumScale*GetPixelGreen(image,p);
      background.blue+=QuantumScale*GetPixelBlue(image,p);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        background.alpha+=QuantumScale*GetPixelAlpha(image,p);
      count++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  image->background_color.red=(double) ClampToQuantum(QuantumRange*
    background.red/count);
  image->background_color.green=(double) ClampToQuantum(QuantumRange*
    background.green/count);
  image->background_color.blue=(double) ClampToQuantum(QuantumRange*
    background.blue/count);
  if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
    image->background_color.alpha=(double) ClampToQuantum(QuantumRange*
background.alpha/count); } MagickExport Image *DeskewImage(const Image *image,const double threshold, ExceptionInfo *exception) { AffineMatrix affine_matrix; const char *artifact; double degrees; Image *clone_image, *crop_image, *deskew_image, *median_image; MagickBooleanType status; RectangleInfo geometry; register ssize_t i; size_t max_projection, *projection, width; ssize_t skew; /* Compute deskew angle. */ for (width=1; width < ((image->columns+7)/8); width<<=1) ; projection=(size_t *) AcquireQuantumMemory((size_t) (2*width-1), sizeof(*projection)); if (projection == (size_t *) NULL) ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); status=RadonTransform(image,threshold,projection,exception); if (status == MagickFalse) { projection=(size_t *) RelinquishMagickMemory(projection); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } max_projection=0; skew=0; for (i=0; i < (ssize_t) (2*width-1); i++) { if (projection[i] > max_projection) { skew=i-(ssize_t) width+1; max_projection=projection[i]; } } projection=(size_t *) RelinquishMagickMemory(projection); degrees=RadiansToDegrees(-atan((double) skew/width/8)); if (image->debug != MagickFalse) (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Deskew angle: %g",degrees); /* Deskew image. 
  */
  clone_image=CloneImage(image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return((Image *) NULL);
  {
    char
      angle[MagickPathExtent];

    /* Record the detected angle so callers can inspect it. */
    (void) FormatLocaleString(angle,MagickPathExtent,"%.20g",degrees);
    (void) SetImageArtifact(clone_image,"deskew:angle",angle);
  }
  (void) SetImageVirtualPixelMethod(clone_image,BackgroundVirtualPixelMethod,
    exception);
  /* Build a pure-rotation affine matrix for the correction angle. */
  affine_matrix.sx=cos(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.rx=sin(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.ry=(-sin(DegreesToRadians(fmod((double) degrees,360.0))));
  affine_matrix.sy=cos(DegreesToRadians(fmod((double) degrees,360.0)));
  affine_matrix.tx=0.0;
  affine_matrix.ty=0.0;
  artifact=GetImageArtifact(image,"deskew:auto-crop");
  if (IsStringTrue(artifact) == MagickFalse)
    {
      deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
      clone_image=DestroyImage(clone_image);
      return(deskew_image);
    }
  /*
    Auto-crop image.
  */
  GetImageBackgroundColor(clone_image,(ssize_t) StringToLong(artifact),
    exception);
  deskew_image=AffineTransformImage(clone_image,&affine_matrix,exception);
  clone_image=DestroyImage(clone_image);
  if (deskew_image == (Image *) NULL)
    return((Image *) NULL);
  /* Median-filter before computing the bounding box so isolated noise
     pixels do not defeat the crop. */
  median_image=StatisticImage(deskew_image,MedianStatistic,3,3,exception);
  if (median_image == (Image *) NULL)
    {
      deskew_image=DestroyImage(deskew_image);
      return((Image *) NULL);
    }
  geometry=GetImageBoundingBox(median_image,exception);
  median_image=DestroyImage(median_image);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule()," Deskew geometry: "
      "%.20gx%.20g%+.20g%+.20g",(double) geometry.width,(double)
      geometry.height,(double) geometry.x,(double) geometry.y);
  crop_image=CropImage(deskew_image,&geometry,exception);
  deskew_image=DestroyImage(deskew_image);
  return(crop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e g r a l R o t a t e I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IntegralRotateImage() rotates the image an integral of 90 degrees.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the rotated image.
%
%  The format of the IntegralRotateImage method is:
%
%      Image *IntegralRotateImage(const Image *image,size_t rotations,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o rotations: Specifies the number of 90 degree rotations.
%
*/
MagickExport Image *IntegralRotateImage(const Image *image,size_t rotations,
  ExceptionInfo *exception)
{
#define RotateImageTag "Rotate/Image"

  CacheView
    *image_view,
    *rotate_view;

  Image
    *rotate_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  /*
    Initialize rotated image attributes.
  */
  assert(image != (Image *) NULL);
  page=image->page;
  rotations%=4;
  /* 90/270 degree rotations transpose the canvas dimensions. */
  switch (rotations)
  {
    case 0:
    default:
    {
      rotate_image=CloneImage(image,0,0,MagickTrue,exception);
      break;
    }
    case 2:
    {
      rotate_image=CloneImage(image,image->columns,image->rows,MagickTrue,
        exception);
      break;
    }
    case 1:
    case 3:
    {
      rotate_image=CloneImage(image,image->rows,image->columns,MagickTrue,
        exception);
      break;
    }
  }
  if (rotate_image == (Image *) NULL)
    return((Image *) NULL);
  if (rotations == 0)
    return(rotate_image);
  /*
    Integral rotate the image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  rotate_view=AcquireAuthenticCacheView(rotate_image,exception);
  switch (rotations)
  {
    case 1:
    {
      size_t
        tile_height,
        tile_width;

      ssize_t
        tile_y;

      /*
        Rotate 90 degrees.
      */
      GetPixelCacheTileSize(image,&tile_width,&tile_height);
      /* Process whole rows per tile; only the height is tiled. */
      tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,rotate_image,image->rows/tile_height,1)
#endif
      for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t)
        tile_height)
      {
        register ssize_t
          tile_x;

        if (status == MagickFalse)
          continue;
        tile_x=0;
        for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t)
          tile_width)
        {
          MagickBooleanType
            sync;

          register const Quantum
            *magick_restrict p;

          register Quantum
            *magick_restrict q;

          register ssize_t
            y;

          size_t
            height,
            width;

          /* Clamp the tile at the right/bottom edge of the image. */
          width=tile_width;
          if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
            width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
          height=tile_height;
          if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
            height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
          p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
            exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          /* Each source column y becomes one destination row. */
          for (y=0; y < (ssize_t) width; y++)
          {
            register const Quantum
              *magick_restrict tile_pixels;

            register ssize_t
              x;

            if (status == MagickFalse)
              continue;
            q=QueueCacheViewAuthenticPixels(rotate_view,(ssize_t)
              (rotate_image->columns-(tile_y+height)),y+tile_x,height,1,
              exception);
            if (q == (Quantum *) NULL)
              {
                status=MagickFalse;
                continue;
              }
            /* Walk the source column bottom-to-top. */
            tile_pixels=p+((height-1)*width+y)*GetPixelChannels(image);
            for (x=0; x < (ssize_t) height; x++)
            {
              register ssize_t
                i;

              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image,
                  channel);
                if ((traits == UndefinedPixelTrait) ||
                    (rotate_traits == UndefinedPixelTrait))
                  continue;
                SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
              }
              tile_pixels-=width*GetPixelChannels(image);
              q+=GetPixelChannels(rotate_image);
            }
            sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
            if (sync == MagickFalse)
              status=MagickFalse;
          }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            /* NOTE(review): progress+=tile_height is not atomic here,
               unlike the increments in X/YShearImage -- confirm whether a
               race on the progress counter is acceptable. */
            proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      /* The page geometry rotates with the canvas. */
      Swap(page.width,page.height);
      Swap(page.x,page.y);
      if (page.width != 0)
        page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
      break;
    }
    case 2:
    {
      register ssize_t
        y;

      /*
        Rotate 180 degrees.
      */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,rotate_image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        /* Row y maps to row rows-y-1, written right-to-left. */
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        q=QueueCacheViewAuthenticPixels(rotate_view,0,(ssize_t) (image->rows-y-
          1),image->columns,1,exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        q+=GetPixelChannels(rotate_image)*image->columns;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          register ssize_t
            i;

          q-=GetPixelChannels(rotate_image);
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image,
              channel);
            if ((traits == UndefinedPixelTrait) ||
                (rotate_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(rotate_image,channel,p[i],q);
          }
          p+=GetPixelChannels(image);
        }
        sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,RotateImageTag,progress++,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      /* 180-degree rotation reflects the page offsets through the canvas. */
      if (page.width != 0)
        page.x=(ssize_t) (page.width-rotate_image->columns-page.x);
      if (page.height != 0)
        page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
      break;
    }
    case 3:
    {
      size_t
        tile_height,
        tile_width;

      ssize_t
        tile_y;

      /*
        Rotate 270 degrees.
      */
      GetPixelCacheTileSize(image,&tile_width,&tile_height);
      /* Process whole rows per tile; only the height is tiled. */
      tile_width=image->columns;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,rotate_image,image->rows/tile_height,1)
#endif
      for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t)
        tile_height)
      {
        register ssize_t
          tile_x;

        if (status == MagickFalse)
          continue;
        tile_x=0;
        for ( ; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t)
          tile_width)
        {
          MagickBooleanType
            sync;

          register const Quantum
            *magick_restrict p;

          register Quantum
            *magick_restrict q;

          register ssize_t
            y;

          size_t
            height,
            width;

          /* Clamp the tile at the right/bottom edge of the image. */
          width=tile_width;
          if ((tile_x+(ssize_t) tile_width) > (ssize_t) image->columns)
            width=(size_t) (tile_width-(tile_x+tile_width-image->columns));
          height=tile_height;
          if ((tile_y+(ssize_t) tile_height) > (ssize_t) image->rows)
            height=(size_t) (tile_height-(tile_y+tile_height-image->rows));
          p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,width,height,
            exception);
          if (p == (const Quantum *) NULL)
            {
              status=MagickFalse;
              break;
            }
          /* Each source column y becomes one destination row, top-down. */
          for (y=0; y < (ssize_t) width; y++)
          {
            register const Quantum
              *magick_restrict tile_pixels;

            register ssize_t
              x;

            if (status == MagickFalse)
              continue;
            q=QueueCacheViewAuthenticPixels(rotate_view,tile_y,(ssize_t) (y+
              rotate_image->rows-(tile_x+width)),height,1,exception);
            if (q == (Quantum *) NULL)
              {
                status=MagickFalse;
                continue;
              }
            tile_pixels=p+((width-1)-y)*GetPixelChannels(image);
            for (x=0; x < (ssize_t) height; x++)
            {
              register ssize_t
                i;
              for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
              {
                PixelChannel channel = GetPixelChannelChannel(image,i);
                PixelTrait traits = GetPixelChannelTraits(image,channel);
                PixelTrait rotate_traits = GetPixelChannelTraits(rotate_image,
                  channel);
                if ((traits == UndefinedPixelTrait) ||
                    (rotate_traits == UndefinedPixelTrait))
                  continue;
                SetPixelChannel(rotate_image,channel,tile_pixels[i],q);
              }
              tile_pixels+=width*GetPixelChannels(image);
              q+=GetPixelChannels(rotate_image);
            }
            /* NOTE(review): this sync is serialized by an omp critical while
               the identical sync in the 90-degree case is not -- confirm
               which is intended. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
            #pragma omp critical (MagickCore_IntegralRotateImage)
#endif
            sync=SyncCacheViewAuthenticPixels(rotate_view,exception);
            if (sync == MagickFalse)
              status=MagickFalse;
          }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,RotateImageTag,progress+=tile_height,
              image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,RotateImageTag,(MagickOffsetType)
        image->rows-1,image->rows);
      /* The page geometry rotates with the canvas. */
      Swap(page.width,page.height);
      Swap(page.x,page.y);
      if (page.height != 0)
        page.y=(ssize_t) (page.height-rotate_image->rows-page.y);
      break;
    }
    default:
      break;
  }
  rotate_view=DestroyCacheView(rotate_view);
  image_view=DestroyCacheView(image_view);
  rotate_image->type=image->type;
  rotate_image->page=page;
  if (status == MagickFalse)
    rotate_image=DestroyImage(rotate_image);
  return(rotate_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   X S h e a r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  XShearImage() shears the image in the X direction with a shear angle of
%  'degrees'.  Positive angles shear counter-clockwise (right-hand rule), and
%  negative angles shear clockwise.  Angles are measured relative to a vertical
%  Y-axis.  X shears will widen an image creating 'empty' triangles on the left
%  and right sides of the source image.
%
%  The format of the XShearImage method is:
%
%      MagickBooleanType XShearImage(Image *image,const double degrees,
%        const size_t width,const size_t height,
%        const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: A double representing the shearing angle along the X
%      axis.
%
%    o width, height, x_offset, y_offset: Defines a region of the image
%      to shear.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType XShearImage(Image *image,const double degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define XShearImageTag "XShear/Image"

  typedef enum
  {
    LEFT,
    RIGHT
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    background;

  ssize_t
    y;

  /*
    X shear image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  background=image->background_color;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,height,1)
#endif
  for (y=0; y < (ssize_t) height; y++)
  {
    PixelInfo
      pixel,
      source,
      destination;

    double
      area,
      displacement;

    register Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i;

    ShearDirection
      direction;

    ssize_t
      step;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewAuthenticPixels(image_view,0,y_offset+y,image->columns,1,
      exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    p+=x_offset*GetPixelChannels(image);
    /* Each row shifts proportionally to its distance from the center. */
    displacement=degrees*(double) (y-height/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=RIGHT;
    else
      {
        displacement*=(-1.0);
        direction=LEFT;
      }
    /* Split the displacement into a whole-pixel step and a fractional
       'area' used to anti-alias by blending adjacent pixels. */
    step=(ssize_t) floor((double) displacement);
    area=(double) (displacement-step);
    step++;
    pixel=background;
    GetPixelInfo(image,&source);
    GetPixelInfo(image,&destination);
    switch (direction)
    {
      case LEFT:
      {
        /*
          Transfer pixels left-to-right.
        */
        if (step > x_offset)
          break;
        q=p-step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) width; i++)
        {
          if ((x_offset+i) < step)
            {
              p+=GetPixelChannels(image);
              GetPixelInfoPixel(image,p,&pixel);
              q+=GetPixelChannels(image);
              continue;
            }
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,&destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(image);
        }
        /* Blend the last carried pixel against the background, then pad
           the vacated span with pure background. */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        SetPixelViaPixelInfo(image,&destination,q);
        q+=GetPixelChannels(image);
        for (i=0; i < (step-1); i++)
        {
          SetPixelViaPixelInfo(image,&background,q);
          q+=GetPixelChannels(image);
        }
        break;
      }
      case RIGHT:
      {
        /*
          Transfer pixels right-to-left.
        */
        p+=width*GetPixelChannels(image);
        q=p+step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) width; i++)
        {
          p-=GetPixelChannels(image);
          q-=GetPixelChannels(image);
          /* Skip destination positions that would fall off the canvas. */
          if ((size_t) (x_offset+width+step-i) > image->columns)
            continue;
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,&destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
        }
        /* Blend the carried pixel with the background, then pad the
           vacated span with pure background. */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        q-=GetPixelChannels(image);
        SetPixelViaPixelInfo(image,&destination,q);
        for (i=0; i < (step-1); i++)
        {
          q-=GetPixelChannels(image);
          SetPixelViaPixelInfo(image,&background,q);
        }
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,XShearImageTag,progress,height);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Y S h e a r I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  YShearImage shears the image in the Y direction with a shear angle of
%  'degrees'.  Positive angles shear counter-clockwise (right-hand rule), and
%  negative angles shear clockwise.  Angles are measured relative to a
%  horizontal X-axis.  Y shears will increase the height of an image creating
%  'empty' triangles on the top and bottom of the source image.
%
%  The format of the YShearImage method is:
%
%      MagickBooleanType YShearImage(Image *image,const double degrees,
%        const size_t width,const size_t height,
%        const ssize_t x_offset,const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: A double representing the shearing angle along the Y
%      axis.
%
%    o width, height, x_offset, y_offset: Defines a region of the image
%      to shear.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType YShearImage(Image *image,const double degrees,
  const size_t width,const size_t height,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define YShearImageTag "YShear/Image"

  typedef enum
  {
    UP,
    DOWN
  } ShearDirection;

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    background;

  ssize_t
    x;

  /*
    Y Shear image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=MagickTrue;
  progress=0;
  background=image->background_color;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,width,1)
#endif
  for (x=0; x < (ssize_t) width; x++)
  {
    double
      area,
      displacement;

    PixelInfo
      pixel,
      source,
      destination;

    register Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      i;

    ShearDirection
      direction;

    ssize_t
      step;

    if (status == MagickFalse)
      continue;
    /* Fetch one full source column; the shear moves pixels vertically. */
    p=GetCacheViewAuthenticPixels(image_view,x_offset+x,0,1,image->rows,
      exception);
    if (p == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    p+=y_offset*GetPixelChannels(image);
    /* Each column shifts proportionally to its distance from the center. */
    displacement=degrees*(double) (x-width/2.0);
    if (displacement == 0.0)
      continue;
    if (displacement > 0.0)
      direction=DOWN;
    else
      {
        displacement*=(-1.0);
        direction=UP;
      }
    /* Split the displacement into a whole-pixel step and a fractional
       'area' used to anti-alias by blending adjacent pixels. */
    step=(ssize_t) floor((double) displacement);
    area=(double) (displacement-step);
    step++;
    pixel=background;
    GetPixelInfo(image,&source);
    GetPixelInfo(image,&destination);
    switch (direction)
    {
      case UP:
      {
        /*
          Transfer pixels top-to-bottom.
        */
        if (step > y_offset)
          break;
        q=p-step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) height; i++)
        {
          if ((y_offset+i) < step)
            {
              p+=GetPixelChannels(image);
              GetPixelInfoPixel(image,p,&pixel);
              q+=GetPixelChannels(image);
              continue;
            }
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,
            &destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(image);
        }
        /* Blend the carried pixel with the background, then pad the
           vacated span with pure background. */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        SetPixelViaPixelInfo(image,&destination,q);
        q+=GetPixelChannels(image);
        for (i=0; i < (step-1); i++)
        {
          SetPixelViaPixelInfo(image,&background,q);
          q+=GetPixelChannels(image);
        }
        break;
      }
      case DOWN:
      {
        /*
          Transfer pixels bottom-to-top.
        */
        p+=height*GetPixelChannels(image);
        q=p+step*GetPixelChannels(image);
        for (i=0; i < (ssize_t) height; i++)
        {
          p-=GetPixelChannels(image);
          q-=GetPixelChannels(image);
          /* Skip destination positions that would fall off the canvas. */
          if ((size_t) (y_offset+height+step-i) > image->rows)
            continue;
          GetPixelInfoPixel(image,p,&source);
          CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
            &source,(double) GetPixelAlpha(image,p),area,
            &destination);
          SetPixelViaPixelInfo(image,&destination,q);
          GetPixelInfoPixel(image,p,&pixel);
        }
        /* Blend the carried pixel with the background, then pad the
           vacated span with pure background. */
        CompositePixelInfoAreaBlend(&pixel,(double) pixel.alpha,
          &background,(double) background.alpha,area,&destination);
        q-=GetPixelChannels(image);
        SetPixelViaPixelInfo(image,&destination,q);
        for (i=0; i < (step-1); i++)
        {
          q-=GetPixelChannels(image);
          SetPixelViaPixelInfo(image,&background,q);
        }
        break;
      }
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,YShearImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S h e a r   I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShearImage() creates a new image that is a shear_image copy of an existing
%  one.  Shearing slides one edge of an image along the X or Y axis, creating
%  a parallelogram.  An X direction shear slides an edge along the X axis,
%  while a Y direction shear slides an edge along the Y axis.  The amount of
%  the shear is controlled by a shear angle.  For X direction shears, x_shear
%  is measured relative to the Y axis, and similarly, for Y direction shears
%  y_shear is measured relative to the X axis.
  Empty triangles left over from
%  shearing the image are filled with the background color defined by member
%  'background_color' of the image..  ShearImage() allocates the memory
%  necessary for the new Image structure and returns a pointer to the new image.
%
%  ShearImage() is based on the paper "A Fast Algorithm for General Raster
%  Rotatation" by Alan W. Paeth.
%
%  The format of the ShearImage method is:
%
%      Image *ShearImage(const Image *image,const double x_shear,
%        const double y_shear,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o x_shear, y_shear: Specifies the number of degrees to shear the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShearImage(const Image *image,const double x_shear,
  const double y_shear,ExceptionInfo *exception)
{
  Image
    *integral_image,
    *shear_image;

  MagickBooleanType
    status;

  PointInfo
    shear;

  RectangleInfo
    border_info,
    bounds;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Multiples of 90 degrees have an infinite tangent; reject them. */
  if ((x_shear != 0.0) && (fmod(x_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  if ((y_shear != 0.0) && (fmod(y_shear,90.0) == 0.0))
    ThrowImageException(ImageError,"AngleIsDiscontinuous");
  /*
    Initialize shear angle.
  */
  integral_image=CloneImage(image,0,0,MagickTrue,exception);
  if (integral_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  shear.x=(-tan(DegreesToRadians(fmod(x_shear,360.0))));
  shear.y=tan(DegreesToRadians(fmod(y_shear,360.0)));
  if ((shear.x == 0.0) && (shear.y == 0.0))
    return(integral_image);
  if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
    {
      integral_image=DestroyImage(integral_image);
      return(integral_image);
    }
  if (integral_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception);
  /*
    Compute image size.
  */
  /* Enough extra width/height for the sheared parallelogram to fit. */
  bounds.width=image->columns+(ssize_t) floor(fabs(shear.x)*image->rows+0.5);
  bounds.x=(ssize_t) ceil((double) image->columns+((fabs(shear.x)*image->rows)-
    image->columns)/2.0-0.5);
  bounds.y=(ssize_t) ceil((double) image->rows+((fabs(shear.y)*bounds.width)-
    image->rows)/2.0-0.5);
  /*
    Surround image with border.
  */
  integral_image->border_color=integral_image->background_color;
  integral_image->compose=CopyCompositeOp;
  border_info.width=(size_t) bounds.x;
  border_info.height=(size_t) bounds.y;
  shear_image=BorderImage(integral_image,&border_info,image->compose,exception);
  integral_image=DestroyImage(integral_image);
  if (shear_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Shear the image.
  */
  if (shear_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(shear_image,OpaqueAlphaChannel,exception);
  /* Two-pass Paeth shear: X first, then Y, then crop to fit. */
  status=XShearImage(shear_image,shear.x,image->columns,image->rows,bounds.x,
    (ssize_t) (shear_image->rows-image->rows)/2,exception);
  if (status == MagickFalse)
    {
      shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  status=YShearImage(shear_image,shear.y,bounds.width,image->rows,(ssize_t)
    (shear_image->columns-bounds.width)/2,bounds.y,exception);
  if (status == MagickFalse)
    {
      shear_image=DestroyImage(shear_image);
      return((Image *) NULL);
    }
  status=CropToFitImage(&shear_image,shear.x,shear.y,(MagickRealType)
    image->columns,(MagickRealType) image->rows,MagickFalse,exception);
  shear_image->alpha_trait=image->alpha_trait;
  shear_image->compose=image->compose;
  shear_image->page.width=0;
  shear_image->page.height=0;
  if (status == MagickFalse)
    shear_image=DestroyImage(shear_image);
  return(shear_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S h e a r R o t a t e I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShearRotateImage() creates a new image that is a rotated copy of an existing
%  one.  Positive angles rotate counter-clockwise (right-hand rule), while
%  negative angles rotate clockwise.  Rotated images are usually larger than
%  the originals and have 'empty' triangular corners.  X axis.  Empty
%  triangles left over from shearing the image are filled with the background
%  color defined by member 'background_color' of the image.  ShearRotateImage
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  ShearRotateImage() is based on the paper "A Fast Algorithm for General
%  Raster Rotatation" by Alan W. Paeth.  ShearRotateImage is adapted from a
%  similar method based on the Paeth paper written by Michael Halle of the
%  Spatial Imaging Group, MIT Media Lab.
%
%  The format of the ShearRotateImage method is:
%
%      Image *ShearRotateImage(const Image *image,const double degrees,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o degrees: Specifies the number of degrees to rotate the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShearRotateImage(const Image *image,const double degrees,
  ExceptionInfo *exception)
{
  Image
    *integral_image,
    *rotate_image;

  MagickBooleanType
    status;

  MagickRealType
    angle;

  PointInfo
    shear;

  RectangleInfo
    border_info,
    bounds;

  size_t
    height,
    rotations,
    shear_width,
    width;

  /*
    Adjust rotation angle.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Reduce the angle to (-45, 45] plus a count of 90-degree turns. */
  angle=fmod(degrees,360.0);
  if (angle < -45.0)
    angle+=360.0;
  for (rotations=0; angle > 45.0; rotations++)
    angle-=90.0;
  rotations%=4;
  /*
    Calculate shear equations.
  */
  /* Handle the 90-degree turns losslessly, then shear the remainder. */
  integral_image=IntegralRotateImage(image,rotations,exception);
  if (integral_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  shear.x=(-tan((double) DegreesToRadians(angle)/2.0));
  shear.y=sin((double) DegreesToRadians(angle));
  if ((shear.x == 0.0) && (shear.y == 0.0))
    return(integral_image);
  if (SetImageStorageClass(integral_image,DirectClass,exception) == MagickFalse)
    {
      integral_image=DestroyImage(integral_image);
      return(integral_image);
    }
  if (integral_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(integral_image,OpaqueAlphaChannel,exception);
  /*
    Compute maximum bounds for 3 shear operations.
  */
  width=integral_image->columns;
  height=integral_image->rows;
  bounds.width=(size_t) floor(fabs((double) height*shear.x)+width+0.5);
  bounds.height=(size_t) floor(fabs((double) bounds.width*shear.y)+height+0.5);
  shear_width=(size_t) floor(fabs((double) bounds.height*shear.x)+
    bounds.width+0.5);
  bounds.x=(ssize_t) floor((double) ((shear_width > bounds.width) ? width :
    bounds.width-shear_width+2)/2.0+0.5);
  bounds.y=(ssize_t) floor(((double) bounds.height-height+2)/2.0+0.5);
  /*
    Surround image with a border.
  */
  integral_image->border_color=integral_image->background_color;
  integral_image->compose=CopyCompositeOp;
  border_info.width=(size_t) bounds.x;
  border_info.height=(size_t) bounds.y;
  rotate_image=BorderImage(integral_image,&border_info,image->compose,
    exception);
  integral_image=DestroyImage(integral_image);
  if (rotate_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  /*
    Rotate the image.
  */
  /* Paeth three-shear rotation: X shear, Y shear, X shear, then crop. */
  status=XShearImage(rotate_image,shear.x,width,height,bounds.x,(ssize_t)
    (rotate_image->rows-height)/2,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  status=YShearImage(rotate_image,shear.y,bounds.width,height,(ssize_t)
    (rotate_image->columns-bounds.width)/2,bounds.y,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  status=XShearImage(rotate_image,shear.x,bounds.width,bounds.height,(ssize_t)
    (rotate_image->columns-bounds.width)/2,(ssize_t) (rotate_image->rows-
    bounds.height)/2,exception);
  if (status == MagickFalse)
    {
      rotate_image=DestroyImage(rotate_image);
      return((Image *) NULL);
    }
  status=CropToFitImage(&rotate_image,shear.x,shear.y,(MagickRealType) width,
    (MagickRealType) height,MagickTrue,exception);
  rotate_image->alpha_trait=image->alpha_trait;
  rotate_image->compose=image->compose;
  rotate_image->page.width=0;
  rotate_image->page.height=0;
  if (status == MagickFalse)
    rotate_image=DestroyImage(rotate_image);
  return(rotate_image);
}
/* ==== begin concatenated file: 3d7pt.lbpar.c (stencil benchmark) ==== */
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 16; tile_size[3] = 128; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,12);t1++) { lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24)); ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(3*t1-3,4)),ceild(24*t2-Nz-12,16));t3<=min(min(min(floord(Nt+Ny-4,16),floord(12*t1+Ny+21,16)),floord(24*t2+Ny+20,16)),floord(24*t1-24*t2+Nz+Ny+19,16));t3++) { for (t4=max(max(max(0,ceild(3*t1-31,32)),ceild(24*t2-Nz-124,128)),ceild(16*t3-Ny-124,128));t4<=min(min(min(min(floord(Nt+Nx-4,128),floord(12*t1+Nx+21,128)),floord(24*t2+Nx+20,128)),floord(16*t3+Nx+12,128)),floord(24*t1-24*t2+Nz+Nx+19,128));t4++) { for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),16*t3-Ny+2),128*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),16*t3+14),128*t4+126),24*t1-24*t2+Nz+21);t5++) { for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(16*t3,t5+1);t7<=min(16*t3+15,t5+Ny-2);t7++) { lbv=max(128*t4,t5+1); ubv=min(128*t4+127,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } 
PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/Availability.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/LoopHint.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class VersionTuple; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. 
/// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. 
IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. mutable IdentifierInfo *Ident_instancetype; /// \brief Identifier for "introduced". IdentifierInfo *Ident_introduced; /// \brief Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// \brief Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// \brief Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// \brief Identifier for "message". IdentifierInfo *Ident_message; /// \brief Identifier for "strict". IdentifierInfo *Ident_strict; /// \brief Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++0x contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. 
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> CmNonstrictHandler; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. 
bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// \brief When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// \brief RAII class that manages the template parameter depth. class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } unsigned getDepth() const { return Depth; } }; /// Factory object for creating AttributeList objects. AttributeFactory AttrFactory; /// \brief Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; /// \brief Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. /// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; /// Whether to skip parsing of function bodies. /// /// This option can be used, for example, to speed up searches for /// declarations/definitions when indexing. 
bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. SourceLocation ExprStatementTokLoc; public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding. All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. 
SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.getKind() == tok::l_paren || Tok.getKind() == tok::r_paren; } /// isTokenBracket - Return true if the cur token is '[' or ']'. 
bool isTokenBracket() const { return Tok.getKind() == tok::l_square || Tok.getKind() == tok::r_square; } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.getKind() == tok::l_brace || Tok.getKind() == tok::r_brace; } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation(); } /// \brief Returns true if the current token is '=' or is a type of '='. /// For typos, give a fixit to '=' bool isTokenEqualOrEqualTypo(); /// \brief Return the current token to the token stream and make the given /// token the current token. void UnconsumeToken(Token &Consumed) { Token Next = Tok; PP.EnterToken(Consumed); PP.Lex(Tok); PP.EnterToken(Next); } SourceLocation ConsumeAnnotationToken() { assert(Tok.isAnnotation() && "wrong consume method"); SourceLocation Loc = Tok.getLocation(); PrevTokLocation = Tok.getAnnotationEndLoc(); PP.Lex(Tok); return Loc; } /// ConsumeParen - This consume method keeps the paren count up-to-date. /// SourceLocation ConsumeParen() { assert(isTokenParen() && "wrong consume method"); if (Tok.getKind() == tok::l_paren) ++ParenCount; else if (ParenCount) --ParenCount; // Don't let unbalanced )'s drive the count negative. PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBracket - This consume method keeps the bracket count up-to-date. /// SourceLocation ConsumeBracket() { assert(isTokenBracket() && "wrong consume method"); if (Tok.getKind() == tok::l_square) ++BracketCount; else if (BracketCount) --BracketCount; // Don't let unbalanced ]'s drive the count negative. 
PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBrace - This consume method keeps the brace count up-to-date. /// SourceLocation ConsumeBrace() { assert(isTokenBrace() && "wrong consume method"); if (Tok.getKind() == tok::l_brace) ++BraceCount; else if (BraceCount) --BraceCount; // Don't let unbalanced }'s drive the count negative. PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeStringToken - Consume the current 'peek token', lexing a new one /// and returning the token kind. This method is specific to strings, as it /// handles string literal concatenation, as per C99 5.1.1.2, translation /// phase #6. SourceLocation ConsumeStringToken() { assert(isTokenStringLiteral() && "Should only consume string literals with this method"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// \brief Consume the current code-completion token. /// /// This routine can be called to consume the code-completion token and /// continue processing in special cases where \c cutOffParsing() isn't /// desired, such as token caching or completion with lookahead. SourceLocation ConsumeCodeCompletionToken() { assert(Tok.is(tok::code_completion)); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } ///\ brief When we are consuming a code-completion token without having /// matched specific position in the grammar, provide code-completion results /// based on context. /// /// \returns the source location of the code-completion token. SourceLocation handleUnexpectedCodeCompletionToken(); /// \brief Abruptly cut off parsing; mainly used when we have reached the /// code-completion point. void cutOffParsing() { if (PP.isCodeCompletionEnabled()) PP.setCodeCompletionReached(); // Cut off parsing by acting as if we reached the end-of-file. Tok.setKind(tok::eof); } /// \brief Determine if we're at the end of the file or at a transition /// between modules. 
bool isEofOrEom() { tok::TokenKind Kind = Tok.getKind(); return Kind == tok::eof || Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include; } /// \brief Checks if the \p Level is valid for use in a fold expression. bool isFoldOperator(prec::Level Level) const; /// \brief Checks if the \p Kind is a valid operator for fold expressions. bool isFoldOperator(tok::TokenKind Kind) const; /// \brief Initialize all pragma handlers. void initializePragmaHandlers(); /// \brief Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// \brief Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// \brief Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// \brief Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// \brief Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); /// \brief Handle the annotation token produced for /// #pragma comment... void HandlePragmaMSComment(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// \brief Handle the annotation token produced for /// #pragma cm_nonstrict... void HandlePragmaCmNonstrict(); /// \brief Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// \brief Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// \brief Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// \brief Handle the annotation token produced for /// #pragma weak id = id... 
void HandlePragmaWeakAlias(); /// \brief Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// \brief Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// \brief Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// \brief Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// \brief Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. static ParsedType getTypeAnnotation(const Token &Tok) { return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, ParsedType T) { Tok.setAnnotationValue(T.getAsOpaquePtr()); } /// \brief Read an already-translated primary expression out of an annotation /// token. 
static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// \brief Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. 
/// This returns true if the token was replaced. bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. bool isObjCInstancetype() { assert(getLangOpts().ObjC1); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// \brief Get the TemplateIdAnnotation from the token. TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... 
/// TPA.Revert(); /// class TentativeParsingAction { Parser &P; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser& p) : P(p) { PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. 
class ObjCDeclContextSwitch {
  Parser &P;
  // The Objective-C container decl we temporarily exited, or null if we
  // were not inside one (in which case this object is a no-op).
  Decl *DC;
  // Restores Parser::ParsingInObjCContainer on destruction.
  SaveAndRestore<bool> WithinObjCContainer;

public:
  explicit ObjCDeclContextSwitch(Parser &p)
      : P(p), DC(p.getObjCDeclContext()),
        WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
    if (DC)
      P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
  }
  ~ObjCDeclContextSwitch() {
    // Re-enter the container we exited in the constructor, if any.
    if (DC)
      P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
  }
};

/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input.  If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
                      unsigned Diag = diag::err_expected,
                      StringRef DiagMsg = "");

/// \brief The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);

/// \brief The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
  OutsideFunction = 0,
  InsideStruct = 1,
  InstanceVariableList = 2,
  AfterMemberFunctionDefinition = 3
};

/// \brief Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, unsigned TST = TST_unspecified);

/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();

public:
  //===--------------------------------------------------------------------===//
  // Scope manipulation

  /// ParseScope - Introduces a new scope for parsing. The kind of
  /// scope is determined by ScopeFlags. Objects of this type should
  /// be created on the stack to coincide with the position where the
  /// parser enters the new scope, and this object's constructor will
  /// create that new scope. Similarly, once the object is destroyed
  /// the parser will exit the scope.
  class ParseScope {
    Parser *Self;  // Null when no scope was actually entered.
    ParseScope(const ParseScope &) = delete;
    void operator=(const ParseScope &) = delete;

  public:
    // ParseScope - Construct a new object to manage a scope in the
    // parser Self where the new Scope is created with the flags
    // ScopeFlags, but only when we aren't about to enter a compound statement.
    ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
               bool BeforeCompoundStmt = false)
        : Self(Self) {
      if (EnteredScope && !BeforeCompoundStmt)
        Self->EnterScope(ScopeFlags);
      else {
        if (BeforeCompoundStmt)
          Self->incrementMSManglingNumber();
        // Mark that no scope was entered so Exit() is a no-op.
        this->Self = nullptr;
      }
    }

    // Exit - Exit the scope associated with this object now, rather
    // than waiting until the object is destroyed.
    void Exit() {
      if (Self) {
        Self->ExitScope();
        Self = nullptr;
      }
    }

    ~ParseScope() { Exit(); }
  };

  /// EnterScope - Start a new scope.
  void EnterScope(unsigned ScopeFlags);

  /// ExitScope - Pop a scope off the scope stack.
  void ExitScope();

private:
  /// \brief RAII object used to modify the scope flags for the current scope.
  class ParseScopeFlags {
    Scope *CurScope;
    unsigned OldFlags;  // Flags to restore on destruction.
    ParseScopeFlags(const ParseScopeFlags &) = delete;
    void operator=(const ParseScopeFlags &) = delete;

  public:
    ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
    ~ParseScopeFlags();
  };

  //===--------------------------------------------------------------------===//
  // Diagnostic Emission and Error recovery.
public:
  /// Emit a diagnostic at the given location / at the given token's location.
  DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
  DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
  /// Convenience overload: diagnose at the current token.
  DiagnosticBuilder Diag(unsigned DiagID) {
    return Diag(Tok, DiagID);
  }

private:
  void SuggestParentheses(SourceLocation Loc, unsigned DK,
                          SourceRange ParenRange);
  void CheckNestedObjCContexts(SourceLocation AtLoc);

public:
  /// \brief Control flags for SkipUntil functions.
  enum SkipUntilFlags {
    StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
    /// \brief Stop skipping at specified token, but don't skip the token
    /// itself
    StopBeforeMatch = 1 << 1,
    StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
  };

  // Allow flags to be OR'd together while staying in the enum type.
  friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
                                            SkipUntilFlags R) {
    return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
                                       static_cast<unsigned>(R));
  }

  /// SkipUntil - Read tokens until we get to the specified token, then consume
  /// it (unless StopBeforeMatch is specified).  Because we cannot guarantee
  /// that the token will ever occur, this skips to the next token, or to some
  /// likely good stopping point.  If Flags has StopAtSemi flag, skipping will
  /// stop at a ';' character.
  ///
  /// If SkipUntil finds the specified token, it returns true, otherwise it
  /// returns false.
bool SkipUntil(tok::TokenKind T,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2};
  return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
  tok::TokenKind TokArray[] = {T1, T2, T3};
  return SkipUntil(TokArray, Flags);
}
// The general form the overloads above forward to.
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
               SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));

/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();

private:
  //===--------------------------------------------------------------------===//
  // Lexing and parsing of C++ inline methods.

  struct ParsingClass;

  /// [class.mem]p1: "... the class is regarded as complete within
  /// - function bodies
  /// - default arguments
  /// - exception-specifications (TODO: C++0x)
  /// - and brace-or-equal-initializers for non-static data members
  /// (including such things in nested classes)."
  /// LateParsedDeclarations build the tree of those elements so they can
  /// be parsed after parsing the top-level class.
  class LateParsedDeclaration {
  public:
    virtual ~LateParsedDeclaration();

    // Default implementations do nothing; subclasses override the ones
    // relevant to the construct they cache.
    virtual void ParseLexedMethodDeclarations();
    virtual void ParseLexedMemberInitializers();
    virtual void ParseLexedMethodDefs();
    virtual void ParseLexedAttributes();
  };

  /// Inner node of the LateParsedDeclaration tree that parses
  /// all its members recursively.
class LateParsedClass : public LateParsedDeclaration { public: LateParsedClass(Parser *P, ParsingClass *C); ~LateParsedClass() override; void ParseLexedMethodDeclarations() override; void ParseLexedMemberInitializers() override; void ParseLexedMethodDefs() override; void ParseLexedAttributes() override; private: Parser *Self; ParsingClass *Class; }; /// Contains the lexed tokens of an attribute with arguments that /// may reference member variables and so need to be parsed at the /// end of the class declaration after parsing all other member /// member declarations. /// FIXME: Perhaps we should change the name of LateParsedDeclaration to /// LateParsedTokens. struct LateParsedAttribute : public LateParsedDeclaration { Parser *Self; CachedTokens Toks; IdentifierInfo &AttrName; SourceLocation AttrNameLoc; SmallVector<Decl*, 2> Decls; explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name, SourceLocation Loc) : Self(P), AttrName(Name), AttrNameLoc(Loc) {} void ParseLexedAttributes() override; void addDecl(Decl *D) { Decls.push_back(D); } }; // A list of late-parsed attributes. Used by ParseGNUAttributes. class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> { public: LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { } bool parseSoon() { return ParseSoon; } private: bool ParseSoon; // Are we planning to parse these shortly after creation? }; /// Contains the lexed tokens of a member function definition /// which needs to be parsed at the end of the class declaration /// after parsing all other member declarations. struct LexedMethod : public LateParsedDeclaration { Parser *Self; Decl *D; CachedTokens Toks; /// \brief Whether this member function had an associated template /// scope. When true, D is a template declaration. /// otherwise, it is a member function declaration. 
bool TemplateScope;

explicit LexedMethod(Parser* P, Decl *MD)
    : Self(P), D(MD), TemplateScope(false) {}

void ParseLexedMethodDefs() override;
};

/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
  explicit LateParsedDefaultArgument(
      Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr)
      : Param(P), Toks(std::move(Toks)) {}

  /// Param - The parameter declaration for this parameter.
  Decl *Param;

  /// Toks - The sequence of tokens that comprises the default
  /// argument expression, not including the '=' or the terminating
  /// ')' or ','. This will be NULL for parameters that have no
  /// default argument.
  std::unique_ptr<CachedTokens> Toks;
};

/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
  explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
      : Self(P), Method(M), TemplateScope(false),
        ExceptionSpecTokens(nullptr) {}

  void ParseLexedMethodDeclarations() override;

  Parser* Self;

  /// Method - The method declaration.
  Decl *Method;

  /// \brief Whether this member function had an associated template
  /// scope. When true, D is a template declaration.
  /// otherwise, it is a member function declaration.
  bool TemplateScope;

  /// DefaultArgs - Contains the parameters of the function and
  /// their default arguments. At least one of the parameters will
  /// have a default argument, but all of the parameters of the
  /// method will be stored so that they can be reintroduced into
  /// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs; /// \brief The set of tokens that make up an exception-specification that /// has not yet been parsed. CachedTokens *ExceptionSpecTokens; }; /// LateParsedMemberInitializer - An initializer for a non-static class data /// member whose parsing must to be delayed until the class is completely /// defined (C++11 [class.mem]p2). struct LateParsedMemberInitializer : public LateParsedDeclaration { LateParsedMemberInitializer(Parser *P, Decl *FD) : Self(P), Field(FD) { } void ParseLexedMemberInitializers() override; Parser *Self; /// Field - The field declaration. Decl *Field; /// CachedTokens - The sequence of tokens that comprises the initializer, /// including any leading '='. CachedTokens Toks; }; /// LateParsedDeclarationsContainer - During parsing of a top (non-nested) /// C++ class, its method declarations that contain parts that won't be /// parsed until after the definition is completed (C++ [class.mem]p2), /// the method declarations and possibly attached inline definitions /// will be stored here with the tokens that will be parsed to create those /// entities. typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer; /// \brief Representation of a class that has been parsed, including /// any member function declarations or definitions that need to be /// parsed after the corresponding top-level class is complete. struct ParsingClass { ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : TopLevelClass(TopLevelClass), TemplateScope(false), IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { } /// \brief Whether this is a "top-level" class, meaning that it is /// not nested within another class. bool TopLevelClass : 1; /// \brief Whether this class had an associated template /// scope. When true, TagOrTemplate is a template declaration; /// othewise, it is a tag declaration. bool TemplateScope : 1; /// \brief Whether this class is an __interface. 
bool IsInterface : 1; /// \brief The class or class template whose definition we are parsing. Decl *TagOrTemplate; /// LateParsedDeclarations - Method declarations, inline definitions and /// nested classes that contain pieces whose parsing will be delayed until /// the top-level class is fully defined. LateParsedDeclarationsContainer LateParsedDeclarations; }; /// \brief The stack of classes that is currently being /// parsed. Nested and local classes will be pushed onto this stack /// when they are parsed, and removed afterward. std::stack<ParsingClass *> ClassStack; ParsingClass &getCurrentClass() { assert(!ClassStack.empty() && "No lexed method stacks!"); return *ClassStack.top(); } /// \brief RAII object used to manage the parsing of a class definition. class ParsingClassDefinition { Parser &P; bool Popped; Sema::ParsingClassState State; public: ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : P(P), Popped(false), State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) { } /// \brief Pop this class of the stack. void Pop() { assert(!Popped && "Nested class has already been popped"); Popped = true; P.PopParsingClass(State); } ~ParsingClassDefinition() { if (!Popped) P.PopParsingClass(State); } }; /// \brief Contains information about any template-specific /// information that has been parsed prior to parsing declaration /// specifiers. struct ParsedTemplateInfo { ParsedTemplateInfo() : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { } ParsedTemplateInfo(TemplateParameterLists *TemplateParams, bool isSpecialization, bool lastParameterListWasEmpty = false) : Kind(isSpecialization? 
ExplicitSpecialization : Template), TemplateParams(TemplateParams), LastParameterListWasEmpty(lastParameterListWasEmpty) { } explicit ParsedTemplateInfo(SourceLocation ExternLoc, SourceLocation TemplateLoc) : Kind(ExplicitInstantiation), TemplateParams(nullptr), ExternLoc(ExternLoc), TemplateLoc(TemplateLoc), LastParameterListWasEmpty(false){ } /// \brief The kind of template we are parsing. enum { /// \brief We are not parsing a template at all. NonTemplate = 0, /// \brief We are parsing a template declaration. Template, /// \brief We are parsing an explicit specialization. ExplicitSpecialization, /// \brief We are parsing an explicit instantiation. ExplicitInstantiation } Kind; /// \brief The template parameter lists, for template declarations /// and explicit specializations. TemplateParameterLists *TemplateParams; /// \brief The location of the 'extern' keyword, if any, for an explicit /// instantiation SourceLocation ExternLoc; /// \brief The location of the 'template' keyword, for an explicit /// instantiation. SourceLocation TemplateLoc; /// \brief Whether the last template parameter list was empty. 
bool LastParameterListWasEmpty; SourceRange getSourceRange() const LLVM_READONLY; }; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); static void LateTemplateParserCleanupCallback(void *P); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, AttributeList *AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers& VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool 
ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. struct ParsedAttributesWithRange : ParsedAttributes { ParsedAttributesWithRange(AttributeFactory &factory) : ParsedAttributes(factory) {} void clear() { ParsedAttributes::clear(); Range = SourceRange(); } SourceRange Range; }; DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc, if non-NULL, is filled with the location of the last token of // the simple-asm. 
ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr); ExprResult ParseAsmStringLiteral(); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. 
void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc, ParsedType type, bool consumeLastToken, SourceLocation &endLoc); void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey, Decl *CDecl); DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc, ParsedAttributes &prefixAttrs); struct ObjCImplParsingDataRAII { Parser &P; Decl *Dcl; bool HasCFunction; typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer; LateParsedObjCMethodContainer LateParsedObjCMethods; ObjCImplParsingDataRAII(Parser &parser, Decl *D) : P(parser), Dcl(D), HasCFunction(false) { P.CurParsedObjCImpl = this; Finished = false; } ~ObjCImplParsingDataRAII(); void finish(SourceRange AtEnd); bool isFinished() const { return Finished; } private: bool Finished; }; ObjCImplParsingDataRAII *CurParsedObjCImpl; void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl); DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc); DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd); Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc); Decl *ParseObjCPropertySynthesize(SourceLocation atLoc); Decl *ParseObjCPropertyDynamic(SourceLocation atLoc); IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation); // Definitions for Objective-c context sensitive keywords recognition. 
enum ObjCTypeQual { objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref, objc_nonnull, objc_nullable, objc_null_unspecified, objc_NumQuals }; IdentifierInfo *ObjCTypeQuals[objc_NumQuals]; bool isTokIdentifier_in() const; ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx, ParsedAttributes *ParamAttrs); void ParseObjCMethodRequirement(); Decl *ParseObjCMethodPrototype( tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition = true); Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType, tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition=true); void ParseObjCPropertyAttribute(ObjCDeclSpec &DS); Decl *ParseObjCMethodDefinition(); public: //===--------------------------------------------------------------------===// // C99 6.5: Expressions. /// TypeCastState - State whether an expression is or may be a type cast. enum TypeCastState { NotTypeCast = 0, MaybeTypeCast, IsTypeCast }; ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpressionInExprEvalContext( TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstraintExpression(); // Expr that doesn't include commas. 
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks, unsigned &NumLineToksConsumed, bool IsUnevaluated); private: ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc); ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc); ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast, bool isVectorLiteral = false); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast, bool isVectorLiteral = false); /// Returns true if the next token cannot start an expression. bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix. bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less); ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseCMMethodExpr(ExprResult LHS); ExprResult ParseCMAll(ExprResult LHS); ExprResult ParseCMAny(ExprResult LHS); ExprResult ParseCMColumn(ExprResult LHS); ExprResult ParseCMFormat(ExprResult LHS); ExprResult ParseCMGenxSelect(ExprResult LHS); ExprResult ParseCMIselect(ExprResult LHS); ExprResult ParseCMMerge(ExprResult LHS); ExprResult ParseCMNCols(ExprResult LHS); ExprResult ParseCMNElems(ExprResult LHS); ExprResult ParseCMNRows(ExprResult LHS); ExprResult ParseCMReplicate(ExprResult LHS); ExprResult ParseCMRow(ExprResult LHS); ExprResult ParseCMSelect(ExprResult LHS); ExprResult ParseCMSelectAll(ExprResult LHS); bool 
isCMMethodIdentifier(const IdentifierInfo &Id); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<Expr*, 20> ExprListTy; typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. bool ParseExpressionList( SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, llvm::function_ref<void()> Completer = llvm::function_ref<void()>()); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParseCMExpressionList - A simple comma-separated list of expressions in a /// context where '>' is not considered to be an operator. bool ParseCMExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... 
'}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext, bool *MayBePseudoDestructor = nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr, bool OnlyNamespace = false); //===--------------------------------------------------------------------===// // C++0x 5.1.2: Lambda expressions // [...] 
() -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro, bool *SkippedInits = nullptr); bool TryParseLambdaIntroducer(LambdaIntroducer &Intro); ExprResult ParseLambdaExpressionAfterIntroducer( LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. 
ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while condition expression. Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. /// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... 
ExprResult ParseInitializer() {
  // Brace-enclosed initializer lists get their own parser; everything
  // else is an assignment-expression.
  if (Tok.isNot(tok::l_brace))
    return ParseAssignmentExpression();
  return ParseBraceInitializer();
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator();

//===--------------------------------------------------------------------===//
// clang Expressions

ExprResult ParseBlockLiteralExpression();  // ^{...}

//===--------------------------------------------------------------------===//
// Objective-C Expressions

ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
                                          SourceLocation SuperLoc,
                                          ParsedType ReceiverType,
                                          Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
    SourceLocation LBracloc, SourceLocation SuperLoc,
    ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);

//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.

/// A SmallVector of statements, with stack size 32 (as that is the only one
/// used.)
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used.)
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector; StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr, bool AllowOpenMPStandalone = false); enum AllowedConstructsKind { /// \brief Allow any declarations, statements, OpenMP directives. ACK_Any, /// \brief Allow only statements and non-standalone OpenMP directives. ACK_StatementsOpenMPNonStandalone, /// \brief Allow statements and all executable OpenMP directives ACK_StatementsOpenMPAnyExecutable }; StmtResult ParseStatementOrDeclaration(StmtVector &Stmts, AllowedConstructsKind Allowed, SourceLocation *TrailingElseLoc = nullptr); StmtResult ParseStatementOrDeclarationAfterAttributes( StmtVector &Stmts, AllowedConstructsKind Allowed, SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs); StmtResult ParseExprStatement(); StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs); StmtResult ParseCaseStatement(bool MissingCase = false, ExprResult Expr = ExprResult()); StmtResult ParseDefaultStatement(); StmtResult ParseCompoundStatement(bool isStmtExpr = false); StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags); void ParseCompoundStatementLeadingPragmas(); StmtResult ParseCompoundStatementBody(bool isStmtExpr = false); bool ParseParenExprOrCondition(StmtResult *InitStmt, Sema::ConditionResult &CondResult, SourceLocation Loc, Sema::ConditionKind CK); StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc); StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc); StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc); StmtResult ParseDoStatement(); StmtResult ParseForStatement(SourceLocation *TrailingElseLoc); StmtResult ParseGotoStatement(); StmtResult ParseContinueStatement(); StmtResult ParseBreakStatement(); StmtResult ParseReturnStatement(); StmtResult ParseAsmStatement(bool &msAsm); StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc); StmtResult ParsePragmaLoopHint(StmtVector &Stmts, AllowedConstructsKind Allowed, SourceLocation 
*TrailingElseLoc, ParsedAttributesWithRange &Attrs); /// \brief Describes the behavior that should be taken for an __if_exists /// block. enum IfExistsBehavior { /// \brief Parse the block; this code is always used. IEB_Parse, /// \brief Skip the block entirely; this code is never used. IEB_Skip, /// \brief Parse the block as a dependent block, which may be used in /// some template instantiations but not others. IEB_Dependent }; /// \brief Describes the condition of a Microsoft __if_exists or /// __if_not_exists block. struct IfExistsCondition { /// \brief The location of the initial keyword. SourceLocation KeywordLoc; /// \brief Whether this is an __if_exists block (rather than an /// __if_not_exists block). bool IsIfExists; /// \brief Nested-name-specifier preceding the name. CXXScopeSpec SS; /// \brief The name we're looking for. UnqualifiedId Name; /// \brief The behavior of this __if_exists or __if_not_exists block /// should. IfExistsBehavior Behavior; }; bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result); void ParseMicrosoftIfExistsStatement(StmtVector &Stmts); void ParseMicrosoftIfExistsExternalDeclaration(); void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType, AccessSpecifier& CurAS); bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs, bool &InitExprsOk); bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names, SmallVectorImpl<Expr *> &Constraints, SmallVectorImpl<Expr *> &Exprs); //===--------------------------------------------------------------------===// // C++ 6: Statements and Blocks StmtResult ParseCXXTryBlock(); StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false); StmtResult ParseCXXCatchBlock(bool FnCatch = false); //===--------------------------------------------------------------------===// // MS: SEH Statements and Blocks StmtResult ParseSEHTryBlock(); StmtResult ParseSEHExceptBlock(SourceLocation Loc); StmtResult ParseSEHFinallyBlock(SourceLocation Loc); StmtResult 
ParseSEHLeaveStatement(); //===--------------------------------------------------------------------===// // Objective-C Statements StmtResult ParseObjCAtStatement(SourceLocation atLoc); StmtResult ParseObjCTryStmt(SourceLocation atLoc); StmtResult ParseObjCThrowStmt(SourceLocation atLoc); StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc); StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc); //===--------------------------------------------------------------------===// // C99 6.7: Declarations. /// A context for parsing declaration specifiers. TODO: flesh this /// out, there are other significant restrictions on specifiers than /// would be best implemented in the parser. enum class DeclSpecContext { DSC_normal, // normal context DSC_class, // class context, enables 'friend' DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list DSC_trailing, // C++11 trailing-type-specifier in a trailing return type DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration DSC_top_level, // top-level/namespace declaration context DSC_template_param, // template parameter context DSC_template_type_arg, // template type argument context DSC_objc_method_result, // ObjC method result context, enables 'instancetype' DSC_condition // condition declaration context }; /// Is this a context in which we are parsing just a type-specifier (or /// trailing-type-specifier)? 
static bool isTypeSpecifier(DeclSpecContext DSC) {
  switch (DSC) {
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;

  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return true;
  }
  // The switch above is exhaustive; reaching here means a DeclSpecContext
  // enumerator was added without updating this predicate.
  llvm_unreachable("Missing DeclSpecContext case");
}

/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
  switch (DSC) {
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_condition:
  case DeclSpecContext::DSC_type_specifier:
    return true;

  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return false;
  }
  // Exhaustive switch; see isTypeSpecifier above for the same pattern.
  llvm_unreachable("Missing DeclSpecContext case");
}

/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
  SourceLocation ColonLoc; // Location of the ':' separating decl from range.
  ExprResult RangeExpr;    // The range expression following the colon.

  // A valid ColonLoc is the evidence that a for-range-declaration was parsed.
  bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};

DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
                                SourceLocation &DeclEnd,
                                ParsedAttributesWithRange &attrs);
DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context,
                                      SourceLocation &DeclEnd,
                                      ParsedAttributesWithRange &attrs,
                                      bool RequireSemi,
                                      ForRangeInit *FRI = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
                              SourceLocation *DeclEnd = nullptr,
                              ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(
    Declarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
    Declarator &D,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);

/// \brief When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody(); bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC, ParsedAttributesWithRange &Attrs); DeclSpecContext getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context); void ParseDeclarationSpecifiers( DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal, LateParsedAttrList *LateAttrs = nullptr); bool DiagnoseMissingSemiAfterTagDefinition( DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext, LateParsedAttrList *LateAttrs = nullptr); void ParseSpecifierQualifierList( DeclSpec &DS, AccessSpecifier AS = AS_none, DeclSpecContext DSC = DeclSpecContext::DSC_normal); void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, DeclaratorContext Context); void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, DeclSpecContext DSC); void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl); void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType, Decl *TagDecl); void ParseStructDeclaration( ParsingDeclSpec &DS, llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback); bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false); bool isTypeSpecifierQualifier(); /// isKnownToBeTypeSpecifier - Return true if we know that the specified token /// is definitely a type-specifier. Return false if it isn't part of a type /// specifier or if we're not sure. bool isKnownToBeTypeSpecifier(const Token &Tok) const; /// \brief Return true if we know that we are definitely looking at a /// decl-specifier, and isn't part of an expression such as a function-style /// cast. Return false if it's no a decl-specifier, or we're not sure. 
bool isKnownToBeDeclarationSpecifier() {
  // In C++ this requires full tentative-parsing disambiguation; in C a
  // simple decl-specifier check suffices.
  if (getLangOpts().CPlusPlus)
    return isCXXDeclarationSpecifier() == TPResult::True;
  return isDeclarationSpecifier(true);
}

/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  if (getLangOpts().CPlusPlus)
    return isCXXDeclarationStatement();
  return isDeclarationSpecifier(true);
}

/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  // For-range declarations are only possible in C++.
  if (getLangOpts().CPlusPlus)
    return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
  return isDeclarationSpecifier(true);
}

/// \brief Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();

/// \brief Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();

/// \brief Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);

/// \brief Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
  TypeIdInParens,
  TypeIdUnambiguous,
  TypeIdAsTemplateArgument
};

/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdInParens, isAmbiguous);
  // In C the check is a simple specifier-qualifier test and is never
  // ambiguous.
  isAmbiguous = false;
  return isTypeSpecifierQualifier();
}
// Convenience overload for callers that do not care about ambiguity.
bool isTypeIdInParens() {
  bool isAmbiguous;
  return isTypeIdInParens(isAmbiguous);
}

/// \brief Checks if the current tokens form a type-id or an expression.
/// It is similar to isTypeIdInParens but does not assume that the type-id
/// is in parentheses.
bool isTypeIdUnambiguously() {
  bool IsAmbiguous;
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
  return isTypeSpecifierQualifier();
}

/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();

/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
  Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition. InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement. Error ///< Can't be any of the above! }; /// \brief Disambiguates between the different kinds of things that can happen /// after 'if (' or 'switch ('. This could be one of two different kinds of /// declaration (depending on whether there is a ';' later) or an expression. ConditionOrInitStatement isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt); bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous); bool isCXXTypeId(TentativeCXXTypeIdContext Context) { bool isAmbiguous; return isCXXTypeId(Context, isAmbiguous); } /// TPResult - Used as the result value for functions whose purpose is to /// disambiguate C++ constructs by "tentatively parsing" them. enum class TPResult { True, False, Ambiguous, Error }; /// \brief Based only on the given token kind, determine whether we know that /// we're at the start of an expression or a type-specifier-seq (which may /// be an expression, in C++). /// /// This routine does not attempt to resolve any of the trick cases, e.g., /// those involving lookup of identifiers. /// /// \returns \c TPR_true if this token starts an expression, \c TPR_false if /// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot /// tell. TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind); /// isCXXDeclarationSpecifier - Returns TPResult::True if it is a /// declaration specifier, TPResult::False if it is not, /// TPResult::Ambiguous if it could be either a decl-specifier or a /// function-style cast, and TPResult::Error if a parsing error was /// encountered. If it could be a braced C++11 function-style cast, returns /// BracedCastResult. /// Doesn't consume tokens. 
TPResult isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False, bool *HasMissingTypename = nullptr); /// Given that isCXXDeclarationSpecifier returns \c TPResult::True or /// \c TPResult::Ambiguous, determine whether the decl-specifier would be /// a type-specifier other than a cv-qualifier. bool isCXXDeclarationSpecifierAType(); /// \brief Determine whether an identifier has been tentatively declared as a /// non-type. Such tentative declarations should not be found to name a type /// during a tentative parse, but also should not be annotated as a non-type. bool isTentativelyDeclared(IdentifierInfo *II); // "Tentative parsing" functions, used for disambiguation. If a parsing error // is encountered they will return TPResult::Error. // Returning TPResult::True/False indicates that the ambiguity was // resolved and tentative parsing may stop. TPResult::Ambiguous indicates // that more tentative parsing is necessary for disambiguation. // They all consume tokens, so backtracking should be used after calling them. TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl); TPResult TryParseTypeofSpecifier(); TPResult TryParseProtocolQualifiers(); TPResult TryParsePtrOperatorSeq(); TPResult TryParseOperatorId(); TPResult TryParseInitDeclaratorList(); TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier=true); TPResult TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr, bool VersusTemplateArg = false); TPResult TryParseFunctionDeclarator(); TPResult TryParseBracketDeclarator(); TPResult TryConsumeDeclarationSpecifier(); public: TypeResult ParseTypeName(SourceRange *Range = nullptr, DeclaratorContext Context = DeclaratorContext::TypeNameContext, AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr, ParsedAttributes *Attrs = nullptr); private: void ParseBlockId(SourceLocation CaretLoc); /// Are [[]] attributes enabled? 
// Are [[]] attributes enabled in the current language options?
bool standardAttributesAllowed() const {
  const LangOptions &LO = getLangOpts();
  return LO.DoubleSquareBracketAttributes;
}

// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  // Only '[[' can begin a standard attribute-specifier.
  if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
    return false;
  return DiagnoseProhibitedCXX11Attribute();
}

bool DiagnoseProhibitedCXX11Attribute();

// Diagnose attributes that appear before the entity they appertain to,
// suggesting CorrectLocation as the place to move them.
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  // Only fire on '[[' or 'alignas' starts.
  if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
      Tok.isNot(tok::kw_alignas))
    return;
  DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                     SourceLocation CorrectLocation);

void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                    DeclSpec &DS, Sema::TagUseKind TUK);

// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  // No-op when no attributes were actually parsed.
  if (!attrs.Range.isValid())
    return;
  DiagnoseProhibitedAttributes(attrs, FixItLoc);
  attrs.clear();
}

void DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs,
                                  SourceLocation FixItLoc);

// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which the standard permits but we don't support yet, for example, attributes
// that appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                             unsigned DiagID);

/// \brief Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();

/// \brief Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();

/// \brief Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName,
                                  SourceLocation AttrNameLoc,
                                  ParsedAttributes &Attrs,
                                  SourceLocation *EndLoc,
                                  IdentifierInfo *ScopeName,
                                  SourceLocation ScopeLoc,
                                  AttributeList::Syntax Syntax);

// If the next token is '__attribute__', parse the GNU attribute list and
// attach it to the declarator D.
void MaybeParseGNUAttributes(Declarator &D,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.is(tok::kw___attribute)) {
    ParsedAttributes attrs(AttrFactory);
    SourceLocation endLoc;
    ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
    D.takeAttributes(attrs, endLoc);
  }
}
// Overload that accumulates directly into an attribute list.
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
                             SourceLocation *endLoc = nullptr,
                             LateParsedAttrList *LateAttrs = nullptr) {
  if (Tok.is(tok::kw___attribute))
    ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
                        SourceLocation *endLoc = nullptr,
                        LateParsedAttrList *LateAttrs = nullptr,
                        Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
                           SourceLocation AttrNameLoc,
                           ParsedAttributes &Attrs, SourceLocation *EndLoc,
                           IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                           AttributeList::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName,
                                 SourceLocation AttrNameLoc,
                                 ParsedAttributes &Attrs,
                                 SourceLocation *EndLoc,
                                 IdentifierInfo *ScopeName,
                                 SourceLocation ScopeLoc,
                                 AttributeList::Syntax Syntax);

// If a C++11 attribute-specifier follows, parse it and attach it to the
// declarator D.
void MaybeParseCXX11Attributes(Declarator &D) {
  if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
    ParsedAttributesWithRange attrs(AttrFactory);
    SourceLocation endLoc;
    ParseCXX11Attributes(attrs, &endLoc);
    D.takeAttributes(attrs, endLoc);
  }
}
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
                               SourceLocation *endLoc = nullptr) {
  if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
    ParsedAttributesWithRange attrsWithRange(AttrFactory);
    ParseCXX11Attributes(attrsWithRange, endLoc);
    attrs.takeAllFrom(attrsWithRange);
  }
}
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                               SourceLocation *endLoc = nullptr,
                               bool OuterMightBeMessageSend = false) {
  if (standardAttributesAllowed() &&
      isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
    ParseCXX11Attributes(attrs, endLoc);
}

void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
                                  SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
                          SourceLocation *EndLoc = nullptr);
/// \brief Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
                             SourceLocation AttrNameLoc,
                             ParsedAttributes &Attrs, SourceLocation *EndLoc,
                             IdentifierInfo *ScopeName,
                             SourceLocation ScopeLoc);

IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);

// Microsoft '[...]' attributes, gated on the MS extensions language option.
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
                                   SourceLocation *endLoc = nullptr) {
  if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
    ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
                              SourceLocation *endLoc = nullptr);
// '__declspec(...)', gated on the -fdeclspec language option.
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                                  SourceLocation *End = nullptr) {
  const auto &LO = getLangOpts();
  if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
    ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
                             SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
                                SourceLocation AttrNameLoc,
                                ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation
SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); /// \brief Parses opencl_unroll_hint attribute if language is OpenCL v2.0 /// or higher. /// \return false if error happens. bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) { if (getLangOpts().OpenCL) return ParseOpenCLUnrollHintAttribute(Attrs); return true; } /// \brief Parses opencl_unroll_hint attribute. /// \return false if error happens. bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, AttributeList::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, 
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
                               AttributeList::Syntax Syntax);

void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
void ParseCMTypeSpecifiers(DeclSpec &DS);

ExprResult ParseAlignArgument(SourceLocation Start,
                              SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
                             SourceLocation *endLoc = nullptr);

VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
// Convenience overload that inspects the current token.
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
  return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
                                        SourceLocation FriendLoc);

bool isCXX11FinalKeyword() const;

/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
  Parser &P;
  CXXScopeSpec &SS;
  bool EnteredScope; // True once ActOnCXXEnterDeclaratorScope succeeded.
  bool CreatedScope; // True once a parser scope was pushed via EnterScope.
public:
  DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
    : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}

  void EnterDeclaratorScope() {
    assert(!EnteredScope && "Already entered the scope!");
    assert(SS.isSet() && "C++ scope was not set!");

    CreatedScope = true;
    P.EnterScope(0); // Not a decl scope.

    // Only record entry if Sema accepted the scope; the destructor keys its
    // cleanup off these two flags independently.
    if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
      EnteredScope = true;
  }

  ~DeclaratorScopeObj() {
    if (EnteredScope) {
      assert(SS.isSet() && "C++ scope was cleared ?");
      P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
    }
    if (CreatedScope)
      P.ExitScope();
  }
};

/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( Declarator &D, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. 
CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); void ParseInnerNamespace(std::vector<SourceLocation> &IdentLoc, std::vector<IdentifierInfo *> &Ident, std::vector<SourceLocation> &NamespaceLoc, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; SourceLocation TemplateKWLoc; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = TemplateKWLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, 
SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, AttributeList *Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() 
const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// \brief Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// \brief Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// \brief Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// \brief Parses declarative or executable directive. /// /// \param Allowed ACK_Any, if any directives are allowed, /// ACK_StatementsOpenMPAnyExecutable - if any executable directives are /// allowed, ACK_StatementsOpenMPNonStandalone - if only non-standalone /// executable directives are allowed. 
/// StmtResult ParseOpenMPDeclarativeOrExecutableDirective(AllowedConstructsKind Allowed); /// \brief Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// \brief Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind); /// \brief Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind); /// \brief Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind); /// \brief Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind); /// \brief Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc); /// Data used for parsing list of variables in OpenMP clauses. 
struct OpenMPVarListDataTy { Expr *TailExpr = nullptr; SourceLocation ColonLoc; CXXScopeSpec ReductionIdScopeSpec; DeclarationNameInfo ReductionId; OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; OpenMPLinearClauseKind LinKind = OMPC_LINEAR_val; OpenMPMapClauseKind MapTypeModifier = OMPC_MAP_unknown; OpenMPMapClauseKind MapType = OMPC_MAP_unknown; bool IsMapTypeImplicit = false; SourceLocation DepLinMapLoc; }; /// Parses clauses with list. bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, ParsedType ObjectType, SourceLocation& TemplateKWLoc, UnqualifiedId &Result); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none, AttributeList *AccessAttrs = nullptr); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, AccessSpecifier AS, AttributeList *AccessAttrs); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, AccessSpecifier AS=AS_none, AttributeList *AccessAttrs = nullptr); bool ParseTemplateParameters(unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); bool isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, 
unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true); void AnnotateTemplateIdTokenAsType(bool IsClassName = false); bool IsTemplateArgumentList(unsigned Skip = 0); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits 
  [Type-Traits.html in the GCC manual]
  ExprResult ParseTypeTrait();

  //===--------------------------------------------------------------------===//
  // Embarcadero: Array and Expression Traits
  ExprResult ParseArrayTypeTrait();
  ExprResult ParseExpressionTrait();

  //===--------------------------------------------------------------------===//
  // Preprocessor code-completion pass-through
  void CodeCompleteDirective(bool InConditional) override;
  void CodeCompleteInConditionalExclusion() override;
  void CodeCompleteMacroName(bool IsDefinition) override;
  void CodeCompletePreprocessorExpression() override;
  void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
                                 unsigned ArgumentIndex) override;
  void CodeCompleteNaturalLanguage() override;
};

} // end namespace clang

#endif
GB_binop__copysign_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__copysign_fp32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__copysign_fp32) // A.*B function (eWiseMult): GB (_AemultB_03__copysign_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__copysign_fp32) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((node)) // C+=B function (dense accum): GB (_Cdense_accumB__copysign_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__copysign_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__copysign_fp32) // C=scalar+B GB (_bind1st__copysign_fp32) // C=scalar+B' GB (_bind1st_tran__copysign_fp32) // C=A+scalar GB (_bind2nd__copysign_fp32) // C=A'+scalar GB (_bind2nd_tran__copysign_fp32) // C type: float // A type: float // B,b type: float // BinaryOp: cij = copysignf (aij, bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 
// aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = copysignf (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_COPYSIGN || GxB_NO_FP32 || GxB_NO_COPYSIGN_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__copysign_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__copysign_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__copysign_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((node)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *restrict Cx = (float *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__copysign_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__copysign_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__copysign_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__copysign_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__copysign_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__copysign_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float *Cx = (float *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) 
for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; float bij = Bx [p] ; Cx [p] = copysignf (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__copysign_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; float *Cx = (float *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = Ax [p] ; Cx [p] = copysignf (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = copysignf (x, aij) ; \ } GrB_Info GB (_bind1st_tran__copysign_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = copysignf (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__copysign_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
pi-v3.c
/*
 * Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
 * between 0 and 1.
 *
 * parallel version using OpenMP
 */

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>   /* OpenMP */

/*
 * Default to a non-debug build unless _DEBUG_ is supplied on the compile
 * command line (e.g. -D_DEBUG_=1).  The previous
 *   #if _DEBUG_ / #define _DEBUG_ 1 / #else / #define _DEBUG_ 0
 * pattern failed to preprocess when _DEBUG_ was defined empty (-D_DEBUG_).
 */
#ifndef _DEBUG_
#define _DEBUG_ 0
#endif

int main(int argc, char *argv[])
{
    double x, sum = 0.0, pi = 0.0;
#if !_DEBUG_
    double start, end;
#endif
    int i;
    const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n";

    if (argc < 2) {
        fputs(Usage, stderr);   /* fputs: Usage is data, not a format string */
        exit(1);
    }

    int num_steps = atoi(argv[1]);
    if (num_steps <= 0) {       /* reject non-numeric, zero, or negative input */
        fputs(Usage, stderr);
        exit(1);
    }
    double step = 1.0 / (double) num_steps;

#if !_DEBUG_
    start = omp_get_wtime();
#endif

    /*
     * do computation -- using all available threads.
     *
     * reduction(+:sum) gives each thread a private accumulator that OpenMP
     * combines when the parallel region ends.  Without it every thread
     * updated the shared `sum` concurrently -- a data race that produced a
     * wrong (and nondeterministic) result.
     */
#pragma omp parallel private(i, x) reduction(+:sum)
    {
        int id = omp_get_thread_num();
        int num_threads = omp_get_num_threads();

        /* interleaved (cyclic) distribution of iterations among threads */
        for (i = id; i < num_steps; i = i + num_threads) {
            x = (i + 0.5) * step;
            sum += 4.0 / (1.0 + x * x);
#if _DEBUG_
            printf("thread id:%d it:%d\n", id, i);
#endif
        }
    }
    pi = step * sum;

#if !_DEBUG_
    end = omp_get_wtime();
    printf("Wall clock execution time = %.9f seconds\n", end - start);
#endif

    /* print results */
    printf("Value of pi = %12.10f\n", pi);
    return EXIT_SUCCESS;
}
DRB029-truedep1-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
 * DataRaceBench kernel DRB029 (truedep1-orig-yes).
 *
 * This program has data races due to true dependence within the loop at 63.
 * Data race pair: a[i+1]@64:5 vs. a[i]@64:12
 *
 * NOTE(review): the race is INTENTIONAL -- this is a "yes" (race-present)
 * benchmark used to evaluate data-race detectors.  Do not "fix" the loop;
 * a correct fix would invalidate the benchmark.
 */
#include <stdlib.h>
#include <stdio.h>

int main(int argc, char* argv[])
{
  int i;
  int len=100;   /* fixed problem size; matches the a[100] declaration below */
  int a[100];

  /* Sequential initialization: a[i] = i for every element. */
  for (i=0;i<len;i++)
    a[i]=i;
  /*
   * RACE: iteration i writes a[i+1] while iteration i+1 reads the same
   * element -- a loop-carried true (read-after-write) dependence, so the
   * iterations cannot legally run in parallel.  The printed a[50] is
   * therefore nondeterministic under OpenMP.
   */
#pragma omp parallel for
  for (i=0;i<len-1;i++)
    a[i+1]=a[i]+1;

  printf("a[50]=%d\n", a[50]);
  return 0;
}
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/property.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* Static declarations. 
*/ static const char *MinimalThresholdMap = "<?xml version=\"1.0\"?>" "<thresholds>" " <threshold map=\"threshold\" alias=\"1x1\">" " <description>Threshold 1x1 (non-dither)</description>" " <levels width=\"1\" height=\"1\" divisor=\"2\">" " 1" " </levels>" " </threshold>" " <threshold map=\"checks\" alias=\"2x1\">" " <description>Checkerboard 2x1 (dither)</description>" " <levels width=\"2\" height=\"2\" divisor=\"3\">" " 1 2" " 2 1" " </levels>" " </threshold>" "</thresholds>"; /* Forward declarations. */ static ThresholdMap *GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A d a p t i v e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AdaptiveThresholdImage() selects an individual threshold for each pixel % based on the range of intensity values in its local neighborhood. This % allows for thresholding of an image whose global intensity histogram % doesn't contain distinctive peaks. % % The format of the AdaptiveThresholdImage method is: % % Image *AdaptiveThresholdImage(const Image *image,const size_t width, % const size_t height,const double bias,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o width: the width of the local neighborhood. % % o height: the height of the local neighborhood. % % o bias: the mean bias. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AdaptiveThresholdImage(const Image *image, const size_t width,const size_t height,const double bias, ExceptionInfo *exception) { #define AdaptiveThresholdImageTag "AdaptiveThreshold/Image" CacheView *image_view, *threshold_view; Image *threshold_image; MagickBooleanType status; MagickOffsetType progress; MagickSizeType number_pixels; ssize_t y; /* Initialize threshold image attributes. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); threshold_image=CloneImage(image,0,0,MagickTrue, exception); if (threshold_image == (Image *) NULL) return((Image *) NULL); status=SetImageStorageClass(threshold_image,DirectClass,exception); if (status == MagickFalse) { threshold_image=DestroyImage(threshold_image); return((Image *) NULL); } /* Threshold image. */ status=MagickTrue; progress=0; number_pixels=(MagickSizeType) width*height; image_view=AcquireVirtualCacheView(image,exception); threshold_view=AcquireAuthenticCacheView(threshold_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,threshold_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double channel_bias[MaxPixelChannels], channel_sum[MaxPixelChannels]; register const Quantum *magick_restrict p, *magick_restrict pixels; register Quantum *magick_restrict q; register ssize_t i, x; ssize_t center, u, v; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t) (height/2L),image->columns+width,height,exception); q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+ GetPixelChannels(image)*(width/2); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image, channel); if ((traits == UndefinedPixelTrait) || 
(threshold_traits == UndefinedPixelTrait)) continue; if ((threshold_traits & CopyPixelTrait) != 0) { SetPixelChannel(threshold_image,channel,p[center+i],q); continue; } pixels=p; channel_bias[channel]=0.0; channel_sum[channel]=0.0; for (v=0; v < (ssize_t) height; v++) { for (u=0; u < (ssize_t) width; u++) { if (u == (ssize_t) (width-1)) channel_bias[channel]+=pixels[i]; channel_sum[channel]+=pixels[i]; pixels+=GetPixelChannels(image); } pixels+=GetPixelChannels(image)*image->columns; } } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double mean; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image, channel); if ((traits == UndefinedPixelTrait) || (threshold_traits == UndefinedPixelTrait)) continue; if ((threshold_traits & CopyPixelTrait) != 0) { SetPixelChannel(threshold_image,channel,p[center+i],q); continue; } channel_sum[channel]-=channel_bias[channel]; channel_bias[channel]=0.0; pixels=p; for (v=0; v < (ssize_t) height; v++) { channel_bias[channel]+=pixels[i]; pixels+=(width-1)*GetPixelChannels(image); channel_sum[channel]+=pixels[i]; pixels+=GetPixelChannels(image)*(image->columns+1); } mean=(double) (channel_sum[channel]/number_pixels+bias); SetPixelChannel(threshold_image,channel,(Quantum) ((double) p[center+i] <= mean ? 
          0 : QuantumRange),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(threshold_image);
    }
    if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveThresholdImage)
#endif
        proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_image->type=image->type;
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  /*
    On any row failure the partially thresholded image is destroyed and a
    NULL pointer is returned; callers must check for NULL.
  */
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);
  return(threshold_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o T h r e s h o l d I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoThresholdImage() automatically selects a threshold and replaces each
%  pixel in the image with a black pixel if the image intensity is less than
%  the selected threshold otherwise white.
%
%  The format of the AutoThresholdImage method is:
%
%      MagickBooleanType AutoThresholdImage(Image *image,
%        const AutoThresholdMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-threshold.
%
%    o method: choose from Kapur, OTSU, or Triangle.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  KapurThreshold() returns the threshold, as a percentage of MaxIntensity,
  that maximizes the sum of the entropies of the histogram populations below
  and above the candidate threshold (maximum-entropy thresholding).  Returns
  -1.0 on memory-allocation failure.
*/
static double KapurThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
#define MaxIntensity  255

  double
    *black_entropy,
    *cumulative_histogram,
    entropy,
    epsilon,
    maximum_entropy,
    *white_entropy;

  register ssize_t
    i,
    j;

  size_t
    threshold;

  /*
    Compute optimal threshold from the entropy of the histogram.
  */
  cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*cumulative_histogram));
  black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*black_entropy));
  white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*white_entropy));
  if ((cumulative_histogram == (double *) NULL) ||
      (black_entropy == (double *) NULL) || (white_entropy == (double *) NULL))
    {
      /*
        Release whichever buffers were successfully acquired.
      */
      if (white_entropy != (double *) NULL)
        white_entropy=(double *) RelinquishMagickMemory(white_entropy);
      if (black_entropy != (double *) NULL)
        black_entropy=(double *) RelinquishMagickMemory(black_entropy);
      if (cumulative_histogram != (double *) NULL)
        cumulative_histogram=(double *)
          RelinquishMagickMemory(cumulative_histogram);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Entropy for black and white parts of the histogram.
  */
  cumulative_histogram[0]=histogram[0];
  for (i=1; i <= MaxIntensity; i++)
    cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i];
  epsilon=MagickMinimumValue;
  for (j=0; j <= MaxIntensity; j++)
  {
    /*
      Black entropy.
    */
    black_entropy[j]=0.0;
    if (cumulative_histogram[j] > epsilon)
      {
        entropy=0.0;
        for (i=0; i <= j; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/cumulative_histogram[j]*
              log(histogram[i]/cumulative_histogram[j]);
        black_entropy[j]=entropy;
      }
    /*
      White entropy.
    */
    white_entropy[j]=0.0;
    if ((1.0-cumulative_histogram[j]) > epsilon)
      {
        entropy=0.0;
        for (i=j+1; i <= MaxIntensity; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/(1.0-cumulative_histogram[j])*
              log(histogram[i]/(1.0-cumulative_histogram[j]));
        white_entropy[j]=entropy;
      }
  }
  /*
    Find histogram bin with maximum entropy.
  */
  maximum_entropy=black_entropy[0]+white_entropy[0];
  threshold=0;
  for (j=1; j <= MaxIntensity; j++)
    if ((black_entropy[j]+white_entropy[j]) > maximum_entropy)
      {
        maximum_entropy=black_entropy[j]+white_entropy[j];
        threshold=(size_t) j;
      }
  /*
    Free resources.
  */
  white_entropy=(double *) RelinquishMagickMemory(white_entropy);
  black_entropy=(double *) RelinquishMagickMemory(black_entropy);
  cumulative_histogram=(double *)
    RelinquishMagickMemory(cumulative_histogram);
  return(100.0*threshold/MaxIntensity);
}

/*
  OTSUThreshold() returns the threshold, as a percentage of MaxIntensity,
  that maximizes the inter-class variance of the two histogram populations
  (Otsu's method).  Returns -1.0 on memory-allocation failure.
*/
static double OTSUThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
  double
    max_sigma,
    *myu,
    *omega,
    *probability,
    *sigma,
    threshold;

  register ssize_t
    i;

  /*
    Compute optimal threshold from maximization of inter-class variance.
  */
  myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu));
  omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega));
  probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*probability));
  sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma));
  if ((myu == (double *) NULL) || (omega == (double *) NULL) ||
      (probability == (double *) NULL) || (sigma == (double *) NULL))
    {
      /*
        Release whichever buffers were successfully acquired.
      */
      if (sigma != (double *) NULL)
        sigma=(double *) RelinquishMagickMemory(sigma);
      if (probability != (double *) NULL)
        probability=(double *) RelinquishMagickMemory(probability);
      if (omega != (double *) NULL)
        omega=(double *) RelinquishMagickMemory(omega);
      if (myu != (double *) NULL)
        myu=(double *) RelinquishMagickMemory(myu);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Calculate probability density.
  */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    probability[i]=histogram[i];
  /*
    Generate probability of graylevels and mean value for separation.
  */
  omega[0]=probability[0];
  myu[0]=0.0;
  for (i=1; i <= (ssize_t) MaxIntensity; i++)
  {
    omega[i]=omega[i-1]+probability[i];
    myu[i]=myu[i-1]+i*probability[i];
  }
  /*
    Sigma maximization: inter-class variance and compute optimal threshold.
  */
  threshold=0;
  max_sigma=0.0;
  for (i=0; i < (ssize_t) MaxIntensity; i++)
  {
    sigma[i]=0.0;
    if ((omega[i] != 0.0) && (omega[i] != 1.0))
      sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0-
        omega[i]));
    if (sigma[i] > max_sigma)
      {
        max_sigma=sigma[i];
        threshold=(double) i;
      }
  }
  /*
    Free resources.
  */
  myu=(double *) RelinquishMagickMemory(myu);
  omega=(double *) RelinquishMagickMemory(omega);
  probability=(double *) RelinquishMagickMemory(probability);
  sigma=(double *) RelinquishMagickMemory(sigma);
  return(100.0*threshold/MaxIntensity);
}

/*
  TriangleThreshold() returns the threshold, as a percentage of MaxIntensity,
  found by the triangle (Zack) algorithm: the bin with maximum perpendicular
  distance from the line joining the histogram peak to the far non-empty end.
*/
static double TriangleThreshold(const double *histogram,
  ExceptionInfo *exception)
{
  double
    a,
    b,
    c,
    count,
    distance,
    inverse_ratio,
    max_distance,
    segment,
    x1,
    x2,
    y1,
    y2;

  register ssize_t
    i;

  ssize_t
    end,
    max,
    start,
    threshold;

  /*
    Compute optimal threshold with triangle algorithm.
  */
  (void) exception;
  start=0;  /* find start bin, first bin not zero count */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > 0.0)
      {
        start=i;
        break;
      }
  end=0;  /* find end bin, last bin not zero count */
  for (i=(ssize_t) MaxIntensity; i >= 0; i--)
    if (histogram[i] > 0.0)
      {
        end=i;
        break;
      }
  max=0;  /* find max bin, bin with largest count */
  count=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > count)
      {
        max=i;
        count=histogram[i];
      }
  /*
    Compute threshold at split point.
  */
  x1=(double) max;
  y1=histogram[max];
  x2=(double) end;
  if ((max-start) >= (end-max))
    x2=(double) start;
  y2=0.0;
  a=y1-y2;
  b=x2-x1;
  c=(-1.0)*(a*x1+b*y1);
  inverse_ratio=1.0/sqrt(a*a+b*b+c*c);
  threshold=0;
  max_distance=0.0;
  /*
    Scan the longer tail of the histogram for the bin farthest from the line.
  */
  if (x2 == (double) start)
    for (i=start; i < max; i++)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment > 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  else
    for (i=end; i > max; i--)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment < 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  return(100.0*threshold/MaxIntensity);
}

MagickExport MagickBooleanType AutoThresholdImage(Image *image,
  const AutoThresholdMethod method,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  char
    property[MagickPathExtent];

  double
    gamma,
    *histogram,
    sum,
    threshold;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Form histogram.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double intensity = GetPixelIntensity(image,p);
      histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Normalize histogram.
  */
  sum=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    sum+=histogram[i];
  gamma=PerceptibleReciprocal(sum);
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    histogram[i]=gamma*histogram[i];
  /*
    Discover threshold from histogram.
  */
  switch (method)
  {
    case KapurThresholdMethod:
    {
      threshold=KapurThreshold(image,histogram,exception);
      break;
    }
    case OTSUThresholdMethod:
    default:
    {
      threshold=OTSUThreshold(image,histogram,exception);
      break;
    }
    case TriangleThresholdMethod:
    {
      threshold=TriangleThreshold(histogram,exception);
      break;
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    A negative threshold signals a memory-allocation failure in the helpers.
  */
  if (threshold < 0.0)
    status=MagickFalse;
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Threshold image.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
  (void) SetImageProperty(image,"auto-threshold:threshold",property,exception);
  return(BilevelImage(image,QuantumRange*threshold/100.0,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B i l e v e l I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BilevelImage() changes the value of individual pixels based on the
%  intensity of each pixel channel.  The result is a high-contrast image.
%
%  More precisely each channel value of the image is 'thresholded' so that if
%  it is equal to or less than the given value it is set to zero, while any
%  value greater than that give is set to it maximum or QuantumRange.
%
%  This function is what is used to implement the "-threshold" operator for
%  the command line API.
%
%  If the default channel setting is given the image is thresholded using just
%  the gray 'intensity' of the image, rather than the individual channels.
%
%  The format of the BilevelImage method is:
%
%      MagickBooleanType BilevelImage(Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: define the threshold values.
%
%    o exception: return any errors or warnings in this structure.
%
%  Aside: You can get the same results as operator using LevelImages()
%  with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Bilevel threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      /*
        With the default channel mask the gray intensity is thresholded;
        otherwise each selected channel is thresholded against its own value.
      */
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BilevelImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l a c k T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlackThresholdImage() is like ThresholdImage() but forces all pixels below
%  the threshold into black while leaving all pixels at or above the threshold
%  unchanged.
%
%  The format of the BlackThresholdImage method is:
%
%      MagickBooleanType BlackThresholdImage(Image *image,
%        const char *threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Parse the threshold geometry: rho applies to all channels unless
    per-channel sigma/xi/psi/chi values are supplied.
  */
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      /* In CMYK the fourth/fifth values map to black and alpha instead. */
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      /* Percent syntax: scale the values into the quantum range. */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    Black threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        /* Pixels below the threshold go to black; others are unchanged. */
        if (pixel < GetPixelInfoChannel(&threshold,channel))
          q[i]=(Quantum) 0;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlackThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l a m p I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampImage() set each pixel whose value is below zero to zero and any the
%  pixel whose value is above the quantum range to the quantum range (e.g.
%  65535) otherwise the pixel value remains unchanged.
%
%  The format of the ClampImage method is:
%
%      MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag  "Clamp/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /*
        PseudoClass images only need their colormap entries clamped; the
        pixel indexes are then re-synced from the colormap.
      */
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) ClampPixel(q->red);
        q->green=(double) ClampPixel(q->green);
        q->blue=(double) ClampPixel(q->blue);
        q->alpha=(double) ClampPixel(q->alpha);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Clamp image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampPixel((MagickRealType) q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClampImage)
#endif
        proceed=SetImageProgress(image,ClampImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y T h r e s h o l d M a p                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyThresholdMap() de-allocate the given ThresholdMap
%
%  The format of the ListThresholdMaps method is:
%
%      ThresholdMap *DestroyThresholdMap(Threshold *map)
%
%  A description of each parameter follows.
%
%    o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  assert(map != (ThresholdMap *) NULL);
  /* Free owned members first, then the map structure itself; returns NULL. */
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  map=(ThresholdMap *) RelinquishMagickMemory(map);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G e t T h r e s h o l d M a p                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMap() loads and searches one or more threshold map files for the
%  map matching the given name or alias.
%
%  The format of the GetThresholdMap method is:
%
%      ThresholdMap *GetThresholdMap(const char *map_id,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o map_id: ID of the map to look for.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  ThresholdMap
    *map;

  /*
    Search the built-in maps first, then each configured threshold.xml file
    until a map with a matching name or alias is found.
  */
  map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception);
  if (map != (ThresholdMap *) NULL)
    return(map);
#if !defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT)
  {
    const StringInfo
      *option;

    LinkedListInfo
      *options;

    options=GetConfigureOptions(ThresholdsFilename,exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
    while (option != (const StringInfo *) NULL)
    {
      map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
        GetStringInfoPath(option),map_id,exception);
      if (map != (ThresholdMap *) NULL)
        break;
      option=(const StringInfo *) GetNextValueInLinkedList(options);
    }
    options=DestroyConfigureOptions(options);
  }
#endif
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     G e t T h r e s h o l d M a p F i l e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMapFile() look for a given threshold map name or alias in the
%  given XML file data, and return the allocated the map when found.
%
%  The format of the ListThresholdMaps method is:
%
%      ThresholdMap *GetThresholdMap(const char *xml,const char *filename,
%        const char *map_id,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o xml: The threshold map list in XML format.
%
%    o filename: The threshold map XML filename.
%
%    o map_id: ID of the map to look for in XML list.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
  const char *map_id,ExceptionInfo *exception)
{
  char
    *p;

  const char
    *attribute,
    *content;

  double
    value;

  register ssize_t
    i;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  map=(ThresholdMap *) NULL;
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(map);
  /*
    Locate the <threshold> element whose map or alias attribute matches.
  */
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold=GetNextXMLTreeTag(threshold))
  {
    attribute=GetXMLTreeAttribute(threshold,"map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold,"alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    Allocate the map and copy the identifying attributes.
  */
  map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map));
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  /*
    Validate the levels geometry: width > 0, height > 0, divisor >= 2.
  */
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Pass the element count and element size separately so the allocator's
    count*size overflow check is effective (previously the size argument was
    height*sizeof(*levels), whose product could overflow unchecked).
  */
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) (map->width*
    map->height),sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  /*
    Parse exactly width*height level values; each must be in [0,divisor].
  */
  for (i=0; i < (ssize_t) (map->width*map->height); i++)
  {
    map->levels[i]=(ssize_t) strtol(content,&p,10);
    if (p == content)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
          (double) map->levels[i],map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    content=p;
  }
  /*
    Any trailing numeric content means the map declared too many values.
  */
  value=(double) strtol(content,&p,10);
  (void) value;
  if (p != content)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     L i s t T h r e s h o l d M a p F i l e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMapFile() lists the threshold maps and their descriptions
%  in the given XML file data.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,const char*xml,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file:  An pointer to the output FILE.
%
%    o xml:  The threshold map list in XML format.
%
%    o filename:  The threshold map XML filename.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias,
    *content,
    *map;

  XMLTreeInfo
    *description,
    *threshold,
    *thresholds;

  assert( xml != (char *) NULL );
  assert( file != (FILE *) NULL );
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  /*
    Emit one table row per <threshold> element; each needs a map attribute
    and a <description> child with content, otherwise listing is aborted.
  */
  threshold=GetXMLTreeChild(thresholds,"threshold");
  for ( ; threshold != (XMLTreeInfo *) NULL;
          threshold=GetNextXMLTreeTag(threshold))
  {
    map=GetXMLTreeAttribute(threshold,"map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias=GetXMLTreeAttribute(threshold,"alias");
    description=GetXMLTreeChild(threshold,"description");
    if (description == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"",map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if (content == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
      content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L i s t T h r e s h o l d M a p s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMaps() lists the threshold maps and their descriptions
%  as defined by "threshold.xml" to a file.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file:  An pointer to the output FILE.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  /*
    List the maps from every configured threshold file; status accumulates
    failures across files but listing continues.
  */
  option=(const StringInfo *) GetNextValueInLinkedList(options);
  while (option != (const StringInfo *) NULL)
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    status&=ListThresholdMapFile(file,(const char *)
      GetStringInfoDatum(option),GetStringInfoPath(option),exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     O r d e r e d   D i t h e r   I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedDitherImage() will perform an ordered dither based on a number
%  of pre-defined dithering threshold maps, but over multiple intensity
%  levels, which can be different for different channels, according to the
%  input argument.
%
%  The format of the OrderedDitherImage method is:
%
%      MagickBooleanType OrderedDitherImage(Image *image,
%        const char *threshold_map,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold_map: A string containing the name of the threshold dither
%      map to use, followed by zero or more numbers representing the number
%      of color levels to dither between.
%
%      Any level number less than 2 will be equivalent to 2, and means only
%      binary dithering will be applied to each color channel.
%
%      No numbers also means a 2 level (bitmap) dither will be applied to all
%      channels, while a single number is the number of levels applied to each
%      channel in sequence.  More numbers will be applied in turn to each of
%      the color channels.
%
%      For example: "o3x3,6" will generate a 6 level posterization of the
%      image with an ordered 3x3 diffused pixel dither being applied between
%      each level.  While checker,8,8,4 will produce a 332 colormapped image
%      with only a single checkerboard hash pattern (50% grey) between each
%      color level, to basically double the number of color levels with
%      a bare minimum of dithering.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType OrderedDitherImage(Image *image, const char *threshold_map,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; char token[MagickPathExtent]; const char *p; double levels[CompositePixelChannel]; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; ThresholdMap *map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (threshold_map == (const char *) NULL) return(MagickTrue); p=(char *) threshold_map; while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) && (*p != '\0')) p++; threshold_map=p; while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) && (*p != '\0')) { if ((p-threshold_map) >= (MagickPathExtent-1)) break; token[p-threshold_map]=(*p); p++; } token[p-threshold_map]='\0'; map=GetThresholdMap(token,exception); if (map == (ThresholdMap *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","ordered-dither",threshold_map); return(MagickFalse); } for (i=0; i < MaxPixelChannels; i++) levels[i]=2.0; p=strchr((char *) threshold_map,','); if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0)) { GetNextToken(p,&p,MagickPathExtent,token); for (i=0; (i < MaxPixelChannels); i++) levels[i]=StringToDouble(token,(char **) NULL); for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); levels[i]=StringToDouble(token,(char **) NULL); } } for (i=0; i < MaxPixelChannels; i++) if (fabs(levels[i]) >= 1) levels[i]-=1.0; if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; 
image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; ssize_t n; n=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { ssize_t level, threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (fabs(levels[n]) < MagickEpsilon) { n++; continue; } threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1)); level=threshold/(map->divisor-1); threshold-=level*(map->divisor-1); q[i]=ClampToQuantum((double) (level+(threshold >= map->levels[(x % map->width)+map->width*(y % map->height)]))* QuantumRange/levels[n]); n++; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OrderedDitherImage) #endif proceed=SetImageProgress(image,DitherImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); map=DestroyThresholdMap(map); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P e r c e p t i b l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PerceptibleImage() set each pixel whose value is less than |epsilon| to % epsilon or 
%  -epsilon (whichever is closer) otherwise the pixel value remains
%  unchanged.
%
%  The format of the PerceptibleImage method is:
%
%      MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Push a quantum away from zero: values whose magnitude is below epsilon are
  replaced by +/-epsilon (sign preserved); all other values pass through.
  The quantum may be negative in HDRI builds, hence the sign handling.
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  double
    sign;

  sign=(double) quantum < 0.0 ? -1.0 : 1.0;
  if ((sign*quantum) >= epsilon)
    return(quantum);
  return((Quantum) (sign*epsilon));
}

MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon,ExceptionInfo *exception)
{
#define PerceptibleImageTag  "Perceptible/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      /*
        Palette image: thresholding the colormap entries is sufficient.
      */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red),
          epsilon);
        q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green),
          epsilon);
        q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue),
          epsilon);
        q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha),
          epsilon);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Perceptible image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PerceptibleThreshold(q[i],epsilon);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PerceptibleImage)
#endif
        proceed=SetImageProgress(image,PerceptibleImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R a n d o m   T h r e s h o l d   I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RandomThresholdImage() changes the value of individual pixels based on the
%  intensity of each pixel compared to a random threshold.  The result is a
%  low-contrast, two color image.
%
%  The format of the RandomThresholdImage method is:
%
%      MagickBooleanType RandomThresholdImage(Image *image,
%        const double min_threshold,const double max_threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
% % o low,high: Specify the high and low thresholds. These values range from % 0 to QuantumRange. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RandomThresholdImage(Image *image, const double min_threshold, const double max_threshold,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo threshold; RandomInfo **magick_restrict random_info; ssize_t y; #if defined(MAGICKCORE_OPENMP_SUPPORT) unsigned long key; #endif assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); GetPixelInfo(image,&threshold); /* Random threshold image. 
*/ status=MagickTrue; progress=0; random_info=AcquireRandomInfoThreadSet(); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) key=GetRandomSecretKey(random_info[0]); #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,key == ~0UL) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if ((double) q[i] < min_threshold) threshold=min_threshold; else if ((double) q[i] > max_threshold) threshold=max_threshold; else threshold=(double) (QuantumRange* GetPseudoRandomValue(random_info[id])); q[i]=(double) q[i] <= threshold ? 
0 : QuantumRange; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RandomThresholdImage) #endif proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); random_info=DestroyRandomInfoThreadSet(random_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R a n g e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RangeThresholdImage() applies soft and hard thresholding. % % The format of the RangeThresholdImage method is: % % MagickBooleanType RangeThresholdImage(Image *image, % const double low_black,const double low_white,const double high_white, % const double high_black,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o low_black: Define the minimum threshold value. % % o low_white: Define the maximum threshold value. % % o high_white: Define the minimum threshold value. % % o low_white: Define the maximum threshold value. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType RangeThresholdImage(Image *image, const double low_black,const double low_white,const double high_white, const double high_black,ExceptionInfo *exception) { #define ThresholdImageTag "Threshold/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if (IsGrayColorspace(image->colorspace) != MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); /* Range threshold image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double pixel; register ssize_t i; pixel=GetPixelIntensity(image,q); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (image->channel_mask != DefaultChannels) pixel=(double) q[i]; if (pixel < low_black) q[i]=0; else if ((pixel >= low_black) && (pixel < low_white)) q[i]=ClampToQuantum(QuantumRange* PerceptibleReciprocal(low_white-low_black)*(pixel-low_black)); else if ((pixel >= low_white) && (pixel <= high_white)) q[i]=QuantumRange; else if ((pixel > high_white) && (pixel <= high_black)) 
q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal( high_black-high_white)*(high_black-pixel)); else if (pixel > high_black) q[i]=0; else q[i]=0; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_RangeThresholdImage) #endif proceed=SetImageProgress(image,ThresholdImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W h i t e T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WhiteThresholdImage() is like ThresholdImage() but forces all pixels above % the threshold into white while leaving all pixels at or below the threshold % unchanged. % % The format of the WhiteThresholdImage method is: % % MagickBooleanType WhiteThresholdImage(Image *image, % const char *threshold,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o threshold: Define the threshold value. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag  "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  GetPixelInfo(image,&threshold);
  /*
    Parse the threshold geometry: rho seeds every channel; sigma/xi/psi/chi
    override green/blue/alpha (and black for CMYK); a trailing '%' scales
    the values from percentages to quantum range.
  */
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      /* NOTE(review): for CMYK, psi supplies black and chi supplies alpha;
         if chi is absent, alpha keeps the psi value assigned above --
         confirm this asymmetry is intended. */
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    White threshold image: any updatable channel whose reference value
    exceeds its per-channel threshold is forced to QuantumRange.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* With an explicit channel mask, compare the channel value itself
           instead of the overall intensity. */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel > GetPixelInfoChannel(&threshold,channel))
          q[i]=QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WhiteThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
pngquant.c
/* pngquant.c - quantize the colors in an alphamap down to a specified number ** ** © 2009-2019 by Kornel Lesiński. ** © 1989, 1991 by Jef Poskanzer. ** © 1997-2002 by Greg Roelofs; based on an idea by Stefan Schneider. ** ** See COPYRIGHT file for license. */ #define PNGQUANT_VERSION LIQ_VERSION_STRING " (January 2019)" #define PNGQUANT_USAGE "\ usage: pngquant [options] [ncolors] -- pngfile [pngfile ...]\n\ pngquant [options] [ncolors] - >stdout <stdin\n\n\ options:\n\ --force overwrite existing output files (synonym: -f)\n\ --skip-if-larger only save converted files if they're smaller than original\n\ --output file destination file path to use instead of --ext (synonym: -o)\n\ --ext new.png set custom suffix/extension for output filenames\n\ --quality min-max don't save below min, use fewer colors below max (0-100)\n\ --speed N speed/quality trade-off. 1=slow, 4=default, 11=fast & rough\n\ --nofs disable Floyd-Steinberg dithering\n\ --posterize N output lower-precision color (e.g. for ARGB4444 output)\n\ --strip remove optional metadata (default on Mac)\n\ --verbose print status messages (synonym: -v)\n\ \n\ Quantizes one or more 32-bit RGBA PNGs to 8-bit (or smaller) RGBA-palette.\n\ The output filename is the same as the input name except that\n\ it ends in \"-fs8.png\", \"-or8.png\" or your custom extension (unless the\n\ input is stdin, in which case the quantized image will go to stdout).\n\ If you pass the special output path \"-\" and a single input file, that file\n\ will be processed and the quantized image will go to stdout.\n\ The default behavior if the output file exists is to skip the conversion;\n\ use --force to overwrite. 
See man page for full list of options.\n" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdarg.h> #include <stdbool.h> #include <math.h> #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) # include <fcntl.h> /* O_BINARY */ # include <io.h> /* setmode() */ #else # include <unistd.h> #endif #ifdef _OPENMP #include <omp.h> #else #define omp_get_max_threads() 1 #define omp_get_thread_num() 0 #endif #include "rwpng.h" /* typedefs, common macros, public prototypes */ #include "libimagequant.h" /* if it fails here, run: git submodule update; ./configure; or add -Ilib to compiler flags */ #include "pngquant_opts.h" static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform tag, png8_image *output_image); static void set_palette(liq_result *result, png8_image *output_image); static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose); static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options); static char *add_filename_extension(const char *filename, const char *newext); static bool file_exists(const char *outname); static void verbose_printf(struct pngquant_options *context, const char *fmt, ...) 
{ if (context->log_callback) { va_list va; va_start(va, fmt); int required_space = vsnprintf(NULL, 0, fmt, va)+1; // +\0 va_end(va); #if defined(_MSC_VER) char *buf = malloc(required_space); #else char buf[required_space]; #endif va_start(va, fmt); vsnprintf(buf, required_space, fmt, va); va_end(va); context->log_callback(context->liq, buf, context->log_callback_user_info); #if defined(_MSC_VER) free(buf); #endif } } static void log_callback(const liq_attr *attr, const char *msg, void* user_info) { fprintf(stderr, "%s\n", msg); } #ifdef _OPENMP #define LOG_BUFFER_SIZE 1300 struct buffered_log { int buf_used; char buf[LOG_BUFFER_SIZE]; }; static void log_callback_buferred_flush(const liq_attr *attr, void *context) { struct buffered_log *log = context; if (log->buf_used) { fwrite(log->buf, 1, log->buf_used, stderr); fflush(stderr); log->buf_used = 0; } } static void log_callback_buferred(const liq_attr *attr, const char *msg, void* context) { struct buffered_log *log = context; int len = strlen(msg); if (len > LOG_BUFFER_SIZE-2) len = LOG_BUFFER_SIZE-2; if (len > LOG_BUFFER_SIZE - log->buf_used - 2) log_callback_buferred_flush(attr, log); memcpy(&log->buf[log->buf_used], msg, len); log->buf_used += len+1; log->buf[log->buf_used-1] = '\n'; log->buf[log->buf_used] = '\0'; } #endif static void print_full_version(FILE *fd) { fprintf(fd, "pngquant, %s, by Kornel Lesinski, Greg Roelofs.\n" #ifndef NDEBUG " WARNING: this is a DEBUG (slow) version.\n" /* NDEBUG disables assert() */ #endif #if !USE_SSE && (defined(__SSE__) || defined(__amd64__) || defined(__X86_64__) || defined(__i386__)) " SSE acceleration disabled.\n" #endif #if _OPENMP " Compiled with OpenMP (multicore support).\n" #endif , PNGQUANT_VERSION); rwpng_version_info(fd); fputs("\n", fd); } static void print_usage(FILE *fd) { fputs(PNGQUANT_USAGE, fd); } /** * N = automatic quality, uses limit unless force is set (N-N or 0-N) * -N = no better than N (same as 0-N) * N-M = no worse than N, no better than M * N- = 
 * no worse than N, perfect if possible (same as N-100)
 *
 * where N,M are numbers between 0 (lousy) and 100 (perfect)
 */
static bool parse_quality(const char *quality, liq_attr *options, bool *min_quality_limit)
{
    long limit, target;
    const char *str = quality; char *end;

    long t1 = strtol(str, &end, 10);
    if (str == end) return false;
    str = end;

    if ('\0' == end[0] && t1 < 0) { // quality="-%d"
        target = -t1;
        limit = 0;
    } else if ('\0' == end[0]) { // quality="%d"
        target = t1;
        limit = t1*9/10;
    } else if ('-' == end[0] && '\0' == end[1]) { // quality="%d-"
        target = 100;
        limit = t1;
    } else { // quality="%d-%d"
        // the second strtol starts at the '-', so t2 parses as a negative
        // number; a positive t2 means the separator was missing/malformed
        long t2 = strtol(str, &end, 10);
        if (str == end || t2 > 0) return false;
        target = -t2;
        limit = t1;
    }

    *min_quality_limit = (limit > 0);
    return LIQ_OK == liq_set_quality(options, limit, target);
}

pngquant_error pngquant_main(struct pngquant_options *options);
static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options);

#ifndef PNGQUANT_NO_MAIN
int main(int argc, char *argv[])
{
    struct pngquant_options options = {
        .floyd = 1.f, // floyd-steinberg dithering
        .strip = false,
    };
    pngquant_error retval = pngquant_parse_options(argc, argv, &options);
    if (retval != SUCCESS) {
        return retval;
    }
    return pngquant_main(&options);
}
#endif

/*
 * Validates options, configures the shared liq_attr, then quantizes every
 * input file (in parallel under OpenMP). Returns the most recent per-file
 * error, or SUCCESS.
 */
pngquant_error pngquant_main(struct pngquant_options *options)
{
    if (options->print_version) {
        puts(PNGQUANT_VERSION);
        return SUCCESS;
    }

    if (options->missing_arguments) {
        print_full_version(stderr);
        print_usage(stderr);
        return MISSING_ARGUMENT;
    }

    if (options->print_help) {
        print_full_version(stdout);
        print_usage(stdout);
        return SUCCESS;
    }

    options->liq = liq_attr_create();
    if (!options->liq) {
        fputs("SSE-capable CPU is required for this build.\n", stderr);
        return WRONG_ARCHITECTURE;
    }

    if (options->verbose) {
        liq_set_log_callback(options->liq, log_callback, NULL);
        options->log_callback = log_callback;
    }

    if (options->quality && !parse_quality(options->quality, options->liq, &options->min_quality_limit)) {
        fputs("Quality should be in format min-max where min and max are numbers in range 0-100.\n", stderr);
        return INVALID_ARGUMENT;
    }

    if (options->iebug) {
        // opacities above 238 will be rounded up to 255, because IE6 truncates <255 to 0.
        liq_set_min_opacity(options->liq, 238);
        fputs("  warning: the workaround for IE6 is deprecated\n", stderr);
    }

    if (options->last_index_transparent) {
        liq_set_last_index_transparent(options->liq, true);
    }

    if (options->speed >= 10) {
        options->fast_compression = true;
        if (options->speed == 11) {
            options->floyd = 0;
            options->speed = 10;
        }
    }

    if (options->speed && LIQ_OK != liq_set_speed(options->liq, options->speed)) {
        fputs("Speed should be between 1 (slow) and 11 (fast).\n", stderr);
        return INVALID_ARGUMENT;
    }

    if (options->colors && LIQ_OK != liq_set_max_colors(options->liq, options->colors)) {
        fputs("Number of colors must be between 2 and 256.\n", stderr);
        return INVALID_ARGUMENT;
    }

    if (options->posterize && LIQ_OK != liq_set_min_posterization(options->liq, options->posterize)) {
        fputs("Posterization should be number of bits in range 0-4.\n", stderr);
        return INVALID_ARGUMENT;
    }

    if (options->extension && options->output_file_path) {
        fputs("--ext and --output options can't be used at the same time\n", stderr);
        return INVALID_ARGUMENT;
    }

    // new filename extension depends on options used. Typically basename-fs8.png
    if (options->extension == NULL) {
        options->extension = options->floyd > 0 ? "-fs8.png" : "-or8.png";
    }

    if (options->output_file_path && options->num_files != 1) {
        fputs("  error: Only one input file is allowed when --output is used. This error also happens when filenames with spaces are not in quotes.\n", stderr);
        return INVALID_ARGUMENT;
    }

    if (options->using_stdout && !options->using_stdin && options->num_files != 1) {
        fputs("  error: Only one input file is allowed when using the special output path \"-\" to write to stdout. This error also happens when filenames with spaces are not in quotes.\n", stderr);
        return INVALID_ARGUMENT;
    }

    if (options->map_file) {
        // a --map image supplies a fixed palette: quantize it once and pin
        // its colors on the fixed_palette_image
        png24_image tmp = {.width=0};
        if (SUCCESS != read_image(options->liq, options->map_file, false, &tmp, &options->fixed_palette_image, true, true, false)) {
            fprintf(stderr, "  error: unable to load %s", options->map_file);
            return INVALID_ARGUMENT;
        }
        liq_result *tmp_quantize = liq_quantize_image(options->liq, options->fixed_palette_image);
        const liq_palette *pal = liq_get_palette(tmp_quantize);
        if (!pal) {
            fprintf(stderr, "  error: unable to read colors from %s", options->map_file);
            return INVALID_ARGUMENT;
        }
        for(unsigned int i=0; i < pal->count; i++) {
            liq_image_add_fixed_color(options->fixed_palette_image, pal->entries[i]);
        }
        liq_result_destroy(tmp_quantize);
    }

    if (!options->num_files && !options->using_stdin) {
        fputs("No input files specified.\n", stderr);
        if (options->verbose) {
            print_full_version(stderr);
        }
        print_usage(stderr);
        return MISSING_ARGUMENT;
    }

#ifdef _OPENMP
    // if there's a lot of files, coarse parallelism can be used
    if (options->num_files > 2*omp_get_max_threads()) {
        omp_set_nested(0);
        omp_set_dynamic(1);
    } else {
        omp_set_nested(1);
    }
#endif

    unsigned int error_count=0, skipped_count=0, file_count=0;
    pngquant_error latest_error=SUCCESS;

    #pragma omp parallel for \
        schedule(static, 1) reduction(+:skipped_count) reduction(+:error_count) reduction(+:file_count) shared(latest_error)
    for(int i=0; i < options->num_files; i++) {
        const char *filename = options->using_stdin ? "stdin" : options->files[i];

        // each thread works on a private copy of the options/attr
        struct pngquant_options opts = *options;
        opts.liq = liq_attr_copy(options->liq);

#ifdef _OPENMP
        struct buffered_log buf = {0};
        if (opts.log_callback && omp_get_num_threads() > 1 && opts.num_files > 1) {
            liq_set_log_callback(opts.liq, log_callback_buferred, &buf);
            liq_set_log_flush_callback(opts.liq, log_callback_buferred_flush, &buf);
            opts.log_callback = log_callback_buferred;
            opts.log_callback_user_info = &buf;
        }
#endif

        pngquant_error retval = SUCCESS;

        const char *outname = opts.output_file_path;
        char *outname_free = NULL;
        if (!opts.using_stdout) {
            if (!outname) {
                outname = outname_free = add_filename_extension(filename, opts.extension);
            }
            if (!opts.force && file_exists(outname)) {
                fprintf(stderr, "  error: '%s' exists; not overwriting\n", outname);
                retval = NOT_OVERWRITING_ERROR;
            }
        }

        if (SUCCESS == retval) {
            retval = pngquant_file_internal(filename, outname, &opts);
        }

        free(outname_free);
        liq_attr_destroy(opts.liq);

        if (retval) {
            #pragma omp critical
            {
                latest_error = retval;
            }
            if (retval == TOO_LOW_QUALITY || retval == TOO_LARGE_FILE) {
                skipped_count++;
            } else {
                error_count++;
            }
        }
        ++file_count;
    }

    if (error_count) {
        verbose_printf(options, "There were errors quantizing %d file%s out of a total of %d file%s.",
                       error_count, (error_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s");
    }
    if (skipped_count) {
        verbose_printf(options, "Skipped %d file%s out of a total of %d file%s.",
                       skipped_count, (skipped_count == 1)? "" : "s", file_count, (file_count == 1)? "" : "s");
    }
    if (!skipped_count && !error_count) {
        verbose_printf(options, "Quantized %d image%s.",
                       file_count, (file_count == 1)? "" : "s");
    }

    if (options->fixed_palette_image) liq_image_destroy(options->fixed_palette_image);
    liq_attr_destroy(options->liq);

    return latest_error;
}

/// Don't hack this. Instead use https://github.com/ImageOptim/libimagequant/blob/f54d2f1a3e1cf728e17326f4db0d45811c63f063/example.c
static pngquant_error pngquant_file_internal(const char *filename, const char *outname, struct pngquant_options *options)
{
    pngquant_error retval = SUCCESS;

    verbose_printf(options, "%s:", filename);

    liq_image *input_image = NULL;
    png24_image input_image_rwpng = {.width=0};
    bool keep_input_pixels = options->skip_if_larger || (options->using_stdout && options->min_quality_limit); // original may need to be output to stdout
    if (SUCCESS == retval) {
        retval = read_image(options->liq, filename, options->using_stdin, &input_image_rwpng, &input_image, keep_input_pixels, options->strip, options->verbose);
    }

    int quality_percent = 90; // quality on 0-100 scale, updated upon successful remap
    png8_image output_image = {.width=0};
    if (SUCCESS == retval) {
        verbose_printf(options, "  read %luKB file", (input_image_rwpng.file_size+1023UL)/1024UL);

        if (RWPNG_ICCP == input_image_rwpng.input_color) {
            verbose_printf(options, "  used embedded ICC profile to transform image to sRGB colorspace");
        } else if (RWPNG_GAMA_CHRM == input_image_rwpng.input_color) {
            verbose_printf(options, "  used gAMA and cHRM chunks to transform image to sRGB colorspace");
        } else if (RWPNG_ICCP_WARN_GRAY == input_image_rwpng.input_color) {
            verbose_printf(options, "  warning: ignored ICC profile in GRAY colorspace");
        } else if (RWPNG_COCOA == input_image_rwpng.input_color) {
            // No comment
        } else if (RWPNG_SRGB == input_image_rwpng.input_color) {
            verbose_printf(options, "  passing sRGB tag from the input");
        } else if (input_image_rwpng.gamma != 0.45455) {
            verbose_printf(options, "  converted image from gamma %2.1f to gamma 2.2",
                           1.0/input_image_rwpng.gamma);
        }

        // when using image as source of a fixed palette the palette is extracted using regular quantization
        liq_result *remap;
        liq_error remap_error = liq_image_quantize(options->fixed_palette_image ? options->fixed_palette_image : input_image, options->liq, &remap);

        if (LIQ_OK == remap_error) {

            // fixed gamma ~2.2 for the web. PNG can't store exact 1/2.2
            // NB: can't change gamma here, because output_color is allowed to be an sRGB tag
            liq_set_output_gamma(remap, 0.45455);
            liq_set_dithering_level(remap, options->floyd);

            retval = prepare_output_image(remap, input_image, input_image_rwpng.output_color, &output_image);
            if (SUCCESS == retval) {
                if (LIQ_OK != liq_write_remapped_image_rows(remap, input_image, output_image.row_pointers)) {
                    retval = OUT_OF_MEMORY_ERROR;
                }

                set_palette(remap, &output_image);

                double palette_error = liq_get_quantization_error(remap);
                if (palette_error >= 0) {
                    quality_percent = liq_get_quantization_quality(remap);
                    verbose_printf(options, "  mapped image to new colors...MSE=%.3f (Q=%d)", palette_error, quality_percent);
                }
            }
            liq_result_destroy(remap);
        } else if (LIQ_QUALITY_TOO_LOW == remap_error) {
            retval = TOO_LOW_QUALITY;
        } else {
            retval = INVALID_ARGUMENT; // dunno
        }
    }

    if (SUCCESS == retval) {

        if (options->skip_if_larger) {
            // this is very rough approximation, but generally avoid losing more quality than is gained in file size.
            // Quality is raised to 1.5, because even greater savings are needed to justify big quality loss.
            // but >50% savings are considered always worthwile in order to allow low quality conversions to work at all
            const double quality = quality_percent/100.0;
            const double expected_reduced_size = pow(quality, 1.5);
            output_image.maximum_file_size = (input_image_rwpng.file_size-1) * (expected_reduced_size < 0.5 ? 0.5 : expected_reduced_size);
        }

        output_image.fast_compression = options->fast_compression;
        output_image.chunks = input_image_rwpng.chunks; input_image_rwpng.chunks = NULL;
        retval = write_image(&output_image, NULL, outname, options);
        if (TOO_LARGE_FILE == retval) {
            verbose_printf(options, "  file exceeded expected size of %luKB", (unsigned long)output_image.maximum_file_size/1024UL);
        }
        if (SUCCESS == retval && output_image.metadata_size > 0) {
            verbose_printf(options, "  copied %dKB of additional PNG metadata", (int)(output_image.metadata_size+999)/1000);
        }
    }

    if (options->using_stdout && keep_input_pixels && (TOO_LARGE_FILE == retval || TOO_LOW_QUALITY == retval)) {
        // when outputting to stdout it'd be nasty to create 0-byte file
        // so if quality is too low, output 24-bit original
        pngquant_error write_retval = write_image(NULL, &input_image_rwpng, outname, options);
        if (write_retval) {
            retval = write_retval;
        }
    }

    if (input_image) liq_image_destroy(input_image);
    rwpng_free_image24(&input_image_rwpng);
    rwpng_free_image8(&output_image);

    return retval;
}

/* Copy the quantized palette out of the liq_result into the output PNG. */
static void set_palette(liq_result *result, png8_image *output_image)
{
    const liq_palette *palette = liq_get_palette(result);

    output_image->num_palette = palette->count;
    for(unsigned int i=0; i < palette->count; i++) {
        const liq_color px = palette->entries[i];
        output_image->palette[i] = (rwpng_rgba){.r=px.r, .g=px.g, .b=px.b, .a=px.a};
    }
}

/* True when outname can be opened for reading (used for --force checks). */
static bool file_exists(const char *outname)
{
    FILE *outfile = fopen(outname, "rb");
    if ((outfile ) != NULL) {
        fclose(outfile);
        return true;
    }
    return false;
}

/* build the output filename from the input name by inserting "-fs8" or
 * "-or8" before the ".png" extension (or by appending that plus ".png" if
 * there isn't any extension), then make sure it doesn't exist already */
static char *add_filename_extension(const char *filename, const char *newext)
{
    size_t x = strlen(filename);

    char* outname = malloc(x+4+strlen(newext)+1);
    if (!outname) return NULL;

    strncpy(outname, filename, x);
    if
(strncmp(outname+x-4, ".png", 4) == 0 || strncmp(outname+x-4, ".PNG", 4) == 0) { strcpy(outname+x-4, newext); } else { strcpy(outname+x, newext); } return outname; } static char *temp_filename(const char *basename) { size_t x = strlen(basename); char *outname = malloc(x+1+4); if (!outname) return NULL; strcpy(outname, basename); strcpy(outname+x, ".tmp"); return outname; } static void set_binary_mode(FILE *fp) { #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) setmode(fp == stdout ? 1 : 0, O_BINARY); #endif } static const char *filename_part(const char *path) { const char *outfilename = strrchr(path, '/'); if (outfilename) { return outfilename+1; } else { return path; } } static bool replace_file(const char *from, const char *to, const bool force) { #if defined(_WIN32) || defined(WIN32) || defined(__WIN32__) if (force) { // On Windows rename doesn't replace unlink(to); } #endif return (0 == rename(from, to)); } static pngquant_error write_image(png8_image *output_image, png24_image *output_image24, const char *outname, struct pngquant_options *options) { FILE *outfile; char *tempname = NULL; if (options->using_stdout) { set_binary_mode(stdout); outfile = stdout; if (output_image) { verbose_printf(options, " writing %d-color image to stdout", output_image->num_palette); } else { verbose_printf(options, " writing truecolor image to stdout"); } } else { tempname = temp_filename(outname); if (!tempname) return OUT_OF_MEMORY_ERROR; if ((outfile = fopen(tempname, "wb")) == NULL) { fprintf(stderr, " error: cannot open '%s' for writing\n", tempname); free(tempname); return CANT_WRITE_ERROR; } if (output_image) { verbose_printf(options, " writing %d-color image as %s", output_image->num_palette, filename_part(outname)); } else { verbose_printf(options, " writing truecolor image as %s", filename_part(outname)); } } pngquant_error retval; #pragma omp critical (libpng) { if (output_image) { retval = rwpng_write_image8(outfile, output_image); } else { retval = 
rwpng_write_image24(outfile, output_image24); } } if (!options->using_stdout) { fclose(outfile); if (SUCCESS == retval) { // Image has been written to a temporary file and then moved over destination. // This makes replacement atomic and avoids damaging destination file on write error. if (!replace_file(tempname, outname, options->force)) { retval = CANT_WRITE_ERROR; } } if (retval) { unlink(tempname); } } free(tempname); if (retval && retval != TOO_LARGE_FILE) { fprintf(stderr, " error: failed writing image to %s (%d)\n", options->using_stdout ? "stdout" : outname, retval); } return retval; } static pngquant_error read_image(liq_attr *options, const char *filename, int using_stdin, png24_image *input_image_p, liq_image **liq_image_p, bool keep_input_pixels, bool strip, bool verbose) { FILE *infile; if (using_stdin) { set_binary_mode(stdin); infile = stdin; } else if ((infile = fopen(filename, "rb")) == NULL) { fprintf(stderr, " error: cannot open %s for reading\n", filename); return READ_ERROR; } pngquant_error retval; #pragma omp critical (libpng) { retval = rwpng_read_image24(infile, input_image_p, strip, verbose); } if (!using_stdin) { fclose(infile); } if (retval) { fprintf(stderr, " error: cannot decode image %s\n", using_stdin ? 
"from stdin" : filename_part(filename)); return retval; } *liq_image_p = liq_image_create_rgba_rows(options, (void**)input_image_p->row_pointers, input_image_p->width, input_image_p->height, input_image_p->gamma); if (!*liq_image_p) { return OUT_OF_MEMORY_ERROR; } if (!keep_input_pixels) { if (LIQ_OK != liq_image_set_memory_ownership(*liq_image_p, LIQ_OWN_ROWS | LIQ_OWN_PIXELS)) { return OUT_OF_MEMORY_ERROR; } input_image_p->row_pointers = NULL; input_image_p->rgba_data = NULL; } return SUCCESS; } static pngquant_error prepare_output_image(liq_result *result, liq_image *input_image, rwpng_color_transform output_color, png8_image *output_image) { output_image->width = liq_image_get_width(input_image); output_image->height = liq_image_get_height(input_image); output_image->gamma = liq_get_output_gamma(result); output_image->output_color = output_color; /* ** Step 3.7 [GRR]: allocate memory for the entire indexed image */ output_image->indexed_data = malloc(output_image->height * output_image->width); output_image->row_pointers = malloc(output_image->height * sizeof(output_image->row_pointers[0])); if (!output_image->indexed_data || !output_image->row_pointers) { return OUT_OF_MEMORY_ERROR; } for(size_t row = 0; row < output_image->height; row++) { output_image->row_pointers[row] = output_image->indexed_data + row * output_image->width; } const liq_palette *palette = liq_get_palette(result); // tRNS, etc. output_image->num_palette = palette->count; return SUCCESS; }
select_ci.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Select CI
 */

#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include "config.h"
#include <assert.h>
#include "vhf/fblas.h"
#include "np_helper/np_helper.h"
#include "fci.h"

// Block sizes used to tile the beta-string dimension in the contraction and
// RDM loops below.
#define BUFBASE 112
#define STRB_BLKSIZE 224

/* Binary search for occupation-bitstring `str` in the sorted table
 * `strsbook` of length `nstrs`.  Returns its index, or -1 if absent. */
int SCIstr2addr(uint64_t str, uint64_t *strsbook, int nstrs)
{
        int head = 0;
        int tail = nstrs;
        int mid;
        int addr = -1;
        while (head < tail) {
                mid = (head + tail) / 2;
                if (str == strsbook[mid]) {
                        addr = mid;
                        break;
                } else if (str < strsbook[mid]) {
                        tail = mid;
                } else {
                        head = mid + 1;
                }
        }
        return addr;
}

/* Split orbital indices [0, norb) of bitstring `str1` into occupied (`occ`,
 * bit set) and virtual (`vir`, bit clear) lists. */
static void make_occ_vir(int *occ, int *vir, uint64_t str1, int norb)
{
        int i, io, iv;
        for (i = 0, io = 0, iv = 0; i < norb; i++) {
                if (str1 & (1ULL<<i)) {
                        occ[io] = i;
                        io += 1;
                } else {
                        vir[iv] = i;
                        iv += 1;
                }
        }
}

/* Build the E_{ai} = a_a^+ a_i lookup table (link index) for the selected
 * string list `strs`.  Each entry is 4 ints: orbital pair (packed triangular
 * index when store_trilidx, else (a,i)), target string address, and sign.
 * Diagonal (i==i) entries are written first, then off-diagonal excitations
 * whose target string is present in `strs`. */
void SCIcre_des_linkstr(int *link_index, int norb, int nstrs, int nocc,
                        uint64_t *strs, int store_trilidx)
{
        int ninter = nstrs;
        int occ[norb];
        int vir[norb];
        int nvir = norb - nocc;
        int nlink = nocc * nvir + nocc;
        int str_id, i, a, k, ai, addr;
        uint64_t str0, str1;
        int *tab;

        for (str_id = 0; str_id < ninter; str_id++) {
                str1 = strs[str_id];
                make_occ_vir(occ, vir, str1, norb);

                tab = link_index + str_id * nlink * 4;
                if (store_trilidx) {
                        // diagonal terms: occupied orbital acting on itself
                        for (k = 0; k < nocc; k++) {
                                tab[k*4+0] = occ[k]*(occ[k]+1)/2+occ[k];
                                tab[k*4+2] = str_id;
                                tab[k*4+3] = 1;
                        }
                        // k continues past the diagonal entries
                        for (a = 0; a < nvir; a++) {
                        for (i = 0; i < nocc; i++) {
                                str0 = (str1^(1ULL<<occ[i])) | (1ULL<<vir[a]);
                                addr = SCIstr2addr(str0, strs, nstrs);
                                if (addr >= 0) {
                                        if (vir[a] > occ[i]) {
                                                ai = vir[a]*(vir[a]+1)/2+occ[i];
                                        } else {
                                                ai = occ[i]*(occ[i]+1)/2+vir[a];
                                        }
                                        tab[k*4+0] = ai;
                                        tab[k*4+2] = addr;
                                        tab[k*4+3] = FCIcre_des_sign(vir[a], occ[i], str1);
                                        k++;
                                }
                        } }

                } else {
                        for (k = 0; k < nocc; k++) {
                                tab[k*4+0] = occ[k];
                                tab[k*4+1] = occ[k];
                                tab[k*4+2] = str_id;
                                tab[k*4+3] = 1;
                        }
                        for (a = 0; a < nvir; a++) {
                        for (i = 0; i < nocc; i++) {
                                str0 = (str1^(1ULL<<occ[i])) | (1ULL<<vir[a]);
                                addr = SCIstr2addr(str0, strs, nstrs);
                                if (addr >= 0) {
                                        tab[k*4+0] = vir[a];
                                        tab[k*4+1] = occ[i];
                                        tab[k*4+2] = addr;
                                        tab[k*4+3] = FCIcre_des_sign(vir[a], occ[i], str1);
                                        k++;
                                }
                        } }
                }
        }
}

/* Build the double-annihilation link table: for each (nocc-2)-electron
 * intermediate string in `inter`, enumerate orbital pairs (i>j) whose
 * re-occupation yields a string found in `strs`.  Sign combines creation on
 * str1 followed by annihilation on str0.  Without trilidx, both (i,j) and
 * (j,i) orderings are stored with opposite signs. */
void SCIdes_des_linkstr(int *link_index, int norb, int nocc, int nstrs, int ninter,
                        uint64_t *strs, uint64_t *inter, int store_trilidx)
{
        int occ[norb];
        int vir[norb];
        int str_id, i, j, k, addr;
        uint64_t str0, str1;
        int sign;
        int nvir = norb - nocc + 2;
        int nlink = nvir * nvir;
        int *tab;

        for (str_id = 0; str_id < ninter; str_id++) {
                str1 = inter[str_id];
                make_occ_vir(occ, vir, str1, norb);

                tab = link_index + str_id * nlink * 4;
                if (store_trilidx) {
                        for (k = 0, i = 1; i < nvir; i++) {
                        for (j = 0; j < i; j++) {
                                str0 = str1 | (1ULL<<vir[i]) | (1ULL<<vir[j]);
                                addr = SCIstr2addr(str0, strs, nstrs);
                                if (addr >= 0) {
                                        sign = FCIcre_sign(vir[i], str1);
                                        sign*= FCIdes_sign(vir[j], str0);
                                        tab[k*4+0] = vir[i]*(vir[i]-1)/2+vir[j];;
                                        tab[k*4+2] = addr;
                                        tab[k*4+3] = sign;
                                        k++;
                                }
                        } }
                } else {
                        for (k = 0, i = 1; i < nvir; i++) {
                        for (j = 0; j < i; j++) {
                                str0 = str1 | (1ULL<<vir[i]) | (1ULL<<vir[j]);
                                addr = SCIstr2addr(str0, strs, nstrs);
                                if (addr >= 0) {
                                        sign = FCIcre_sign(vir[i], str1);
                                        sign*= FCIdes_sign(vir[j], str0);
                                        tab[k*4+0] = vir[i];
                                        tab[k*4+1] = vir[j];
                                        tab[k*4+2] = addr;
                                        tab[k*4+3] = sign;
                                        k++;
                                        tab[k*4+0] = vir[j];
                                        tab[k*4+1] = vir[i];
                                        tab[k*4+2] = addr;
                                        tab[k*4+3] =-sign;
                                        k++;
                                }
                        } }
                }
        }
}

/* Enumerate every single-annihilation product of the strings in `strs`.
 * NOTE(review): the output may contain duplicate strings — callers
 * presumably deduplicate; verify at call sites.  Returns the count. */
int SCIdes_uniq_strs(uint64_t *uniq_strs, uint64_t *strs,
                     int norb, int nocc, int nstrs)
{
        int str_id, i;
        uint64_t str0, str1;
        int ninter = 0;
        for (str_id = 0; str_id < nstrs; str_id++) {
                str0 = strs[str_id];
                for (i = 0; i < norb; i++) {
                        if (str0 & (1ULL<<i)) {
                                str1 = str0 ^ (1ULL<<i);
                                uniq_strs[ninter] = str1;
                                ninter++;
                        }
                }
        }
        return ninter;
}

/* Build the single-annihilation link table: for each intermediate string,
 * list the orbitals whose re-occupation maps back into `strs` (with sign). */
void SCIdes_linkstr(int *link_index, int norb, int nocc, int nstrs, int ninter,
                    uint64_t *strs, uint64_t *inter)
{
        int str_id, i, k, addr;
        uint64_t str0, str1;
        int nvir = norb - nocc + 1;
        int nlink = nvir;
        int *tab;

        for (str_id = 0; str_id < ninter; str_id++) {
                str1 = inter[str_id];
                tab = link_index + str_id * nlink * 4;
                for (k = 0, i = 0; i < norb; i++) {
                        if (!(str1 & (1ULL<<i))) {
                                str0 = str1 | (1ULL<<i);
                                addr = SCIstr2addr(str0, strs, nstrs);
                                if (addr >= 0) {
                                        tab[k*4+0] = 0;
                                        tab[k*4+1] = i;
                                        tab[k*4+2] = addr;
                                        tab[k*4+3] = FCIdes_sign(i, str0);
                                        k++;
                                }
                        }
                }
        }
}

/* Enumerate every single-creation product of the strings in `strs`.
 * Same caveat as SCIdes_uniq_strs regarding duplicates.  Returns the count. */
int SCIcre_uniq_strs(uint64_t *uniq_strs, uint64_t *strs,
                     int norb, int nocc, int nstrs)
{
        int str_id, i;
        uint64_t str0, str1;
        int ninter = 0;
        for (str_id = 0; str_id < nstrs; str_id++) {
                str0 = strs[str_id];
                for (i = 0; i < norb; i++) {
                        if (!(str0 & (1ULL<<i))) {
                                str1 = str0 | (1ULL<<i);
                                uniq_strs[ninter] = str1;
                                ninter++;
                        }
                }
        }
        return ninter;
}

/* Build the single-creation link table, mirror image of SCIdes_linkstr. */
void SCIcre_linkstr(int *link_index, int norb, int nocc, int nstrs,
                    int ninter, uint64_t *strs, uint64_t *inter)
{
        int str_id, i, k, addr;
        uint64_t str0, str1;
        int nlink = nocc + 1;
        int *tab;

        for (str_id = 0; str_id < ninter; str_id++) {
                str1 = inter[str_id];
                tab = link_index + str_id * nlink * 4;
                for (k = 0, i = 0; i < norb; i++) {
                        if (str1 & (1ULL<<i)) {
                                str0 = str1 ^ (1ULL<<i);
                                addr = SCIstr2addr(str0, strs, nstrs);
                                if (addr >= 0) {
                                        tab[k*4+0] = i;
                                        tab[k*4+1] = 0;
                                        tab[k*4+2] = addr;
                                        tab[k*4+3] = FCIcre_sign(i, str0);
                                        k++;
                                }
                        }
                }
        }
}

/* Heat-bath style selection: generate candidate strings connected to `strs`
 * by single (and screened double) excitations whose estimated contribution
 * |eri| * |c| exceeds select_cutoff.  `eri_pq_max` holds per-(p,q) maxima for
 * screening; `civec_max` the per-string CI amplitude maxima.  Returns the
 * number of candidates written to `inter` (may contain duplicates). */
int SCIselect_strs(uint64_t *inter, uint64_t *strs, double *eri, double *eri_pq_max,
                   double *civec_max, double select_cutoff,
                   int norb, int nocc, int nstrs)
{
        int nn = norb * norb;
        int n3 = norb * nn;
        int occ[norb];
        int vir[norb];
        int nvir = norb - nocc;
        int str_id, i, a, j, b;
        uint64_t str0, str1;
        double ca;
        double *peri;
        int ninter = 0;
        for (str_id = 0; str_id < nstrs; str_id++) {
                str0 = strs[str_id];
                make_occ_vir(occ, vir, str0, norb);
                ca = civec_max[str_id];
                for (i = 0; i < nocc; i++) {
                for (a = 0; a < nvir; a++) {
                        if (eri_pq_max[vir[a]*norb+occ[i]]*ca > select_cutoff) {
                                str1 = (str0 ^ (1ULL<<occ[i])) | (1ULL<<vir[a]);
                                inter[ninter] = str1;
                                ninter++;
                                if (occ[i] < nocc && vir[a] >= nocc) {
                                        peri = eri + n3 * vir[a] + nn * occ[i];
                                        for (j = 0; j < i; j++) {
                                        for (b = a+1; b < nvir; b++) {
                                                if (fabs(peri[vir[b]*norb+occ[j]])*ca > select_cutoff) {
                                                        inter[ninter] = (str1 ^ (1ULL<<occ[j])) | (1ULL<<vir[b]);
                                                        ninter++;
                                                }
                                        } }
                                }
                        }
                } }
        }
        return ninter;
}

/*
 ***********************************************************
 *
 * Need the permutation symmetry
 * h2e[i,j,k,l] = h2e[j,i,k,l] = h2e[i,j,l,k] = h2e[j,i,l,k]
 *
 ***********************************************************
 */

/* One (alpha-string, beta-block) tile of the bb|aa contraction:
 * gather t1 from ci0, multiply by eri (triangular-packed, via BLAS dgemm),
 * and scatter into ci1. */
static void ctr_bbaa_kern(double *eri, double *ci0, double *ci1,
                          double *ci1buf, double *t1buf,
                          int bcount, int stra_id, int strb_id,
                          int norb, int na, int nb, int nlinka, int nlinkb,
                          _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb)
{
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * (norb+1) / 2;
        double *t1 = t1buf;
        double *vt1 = t1buf + nnorb*bcount;
        memset(t1, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1, bcount, stra_id, strb_id,
                     norb, nb, nlinka, clink_indexa);
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1, &bcount, eri, &nnorb,
               &D0, vt1, &bcount);
        FCIspread_b_t1(ci1, vt1, bcount, stra_id, strb_id,
                       norb, nb, nlinkb, clink_indexb);
}

/* Contract the bb|aa part of the two-electron Hamiltonian with ci0,
 * accumulating into ci1.  Parallelized over alpha strings with a shared
 * `#pragma omp for` inside the beta-block loop (implicit barrier keeps the
 * blocks in lockstep). */
void SCIcontract_2e_bbaa(double *eri, double *ci0, double *ci1,
                         int norb, int na, int nb, int nlinka, int nlinkb,
                         int *link_indexa, int *link_indexb)
{
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na);
        _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb);
        FCIcompress_link_tril(clinka, link_indexa, na, nlinka);
        FCIcompress_link_tril(clinkb, link_indexb, nb, nlinkb);
#pragma omp parallel
{
        int strk, ib, blen;
        double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*norb*(norb+1)+2));
        double *ci1buf = NULL;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
#pragma omp for schedule(static)
                for (strk = 0; strk < na; strk++) {
                        ctr_bbaa_kern(eri, ci0, ci1, ci1buf, t1buf,
                                      blen, strk, ib, norb, na, nb,
                                      nlinka, nlinkb, clinka, clinkb);
                }
        }
        free(t1buf);
}
        free(clinka);
        free(clinkb);
}

/* One tile of the aa|aa contraction; results go into the per-thread
 * `ci1buf` (reduced by the caller), not directly into ci1. */
static void ctr_aaaa_kern(double *eri, double *ci0, double *ci1,
                          double *ci1buf, double *t1buf,
                          int bcount, int stra_id, int strb_id,
                          int norb, int na, int nb, int nlinka, int nlinkb,
                          _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb)
{
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * (norb-1) / 2;
        double *t1 = t1buf;
        double *vt1 = t1buf + nnorb*bcount;
        memset(t1, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1, bcount, stra_id, strb_id,
                     norb, nb, nlinka, clink_indexa);
        dgemm_(&TRANS_N, &TRANS_N, &bcount, &nnorb, &nnorb,
               &D1, t1, &bcount, eri, &nnorb,
               &D0, vt1, &bcount);
        FCIspread_a_t1(ci1buf, vt1, bcount, stra_id, 0,
                       norb, bcount, nlinka, clink_indexa);
}

/* Contract the aa|aa part over the `inter_na` intermediate alpha strings.
 * Per-thread buffers are reduced with NPomp_dsum_reduce_inplace, then the
 * master thread adds the block into ci1 (barrier keeps threads in sync). */
void SCIcontract_2e_aaaa(double *eri, double *ci0, double *ci1,
                         int norb, int na, int nb, int inter_na, int nlinka,
                         int *link_indexa)
{
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * inter_na);
        FCIcompress_link_tril(clinka, link_indexa, inter_na, nlinka);
        _LinkTrilT *clinkb = NULL;
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel
{
        int strk, ib, blen;
        double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*norb*norb+2));
        double *ci1buf = malloc(sizeof(double) * (na*STRB_BLKSIZE+2));
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
                memset(ci1buf, 0, sizeof(double) * na*blen);
#pragma omp for schedule(static)
                for (strk = 0; strk < inter_na; strk++) {
                        ctr_aaaa_kern(eri, ci0, ci1, ci1buf, t1buf,
                                      blen, strk, ib, norb, na, nb,
                                      nlinka, 0, clinka, clinkb);
                }
                NPomp_dsum_reduce_inplace(ci1bufs, blen*na);
#pragma omp master
                FCIaxpy2d(ci1+ib, ci1buf, na, nb, blen);
#pragma omp barrier
        }
        free(ci1buf);
        free(t1buf);
}
        free(clinka);
}

/*************************************************
 *
 * 2-particle DM
 *
 *************************************************/

/* Gather t1[(i,a), k] += sign * ci0[addr(a^+ i |stra>), strb_id+k] for the
 * 2-RDM accumulation.  A zero sign terminates the (compressed) link list. */
void SCIrdm2_a_t1ci(double *ci0, double *t1,
                    int bcount, int stra_id, int strb_id,
                    int norb, int nstrb, int nlinka, _LinkT *clink_indexa)
{
        ci0 += strb_id;
        int i, j, k, a, sign;
        size_t str1;
        const _LinkT *tab = clink_indexa + stra_id * nlinka;
        double *pt1, *pci;

        for (j = 0; j < nlinka; j++) {
                a    = EXTRACT_CRE (tab[j]);
                i    = EXTRACT_DES (tab[j]);
                str1 = EXTRACT_ADDR(tab[j]);
                sign = EXTRACT_SIGN(tab[j]);
                pci = ci0 + str1*nstrb;
                pt1 = t1 + (i*norb+a) * bcount;
                if (sign == 0) {
                        break;
                } else if (sign > 0) {
                        for (k = 0; k < bcount; k++) {
                                pt1[k] += pci[k];
                        }
                } else {
                        for (k = 0; k < bcount; k++) {
                                pt1[k] -= pci[k];
                        }
                }
        }
}

/* 2-RDM kernel: rdm2 -= t1^T * t1 for one (alpha, beta-block) tile.
 * NOTE(review): DN1 = -1 makes this an accumulated subtraction — presumably
 * the sign convention is fixed up by the Python caller; verify there. */
void SCIrdm2kern_aaaa(double *rdm2, double *bra, double *ket, double *buf,
                      int bcount, int stra_id, int strb_id,
                      int norb, int na, int nb, int nlinka, _LinkT *clink_indexa)
{
        const char TRANS_N = 'N';
        const char TRANS_T = 'T';
        const double D1 = 1;
        const double DN1 = -1;
        const int nnorb = norb * norb;

        memset(buf, 0, sizeof(double)*nnorb*bcount);
        SCIrdm2_a_t1ci(ket, buf, bcount, stra_id, strb_id,
                       norb, nb, nlinka, clink_indexa);
        dgemm_(&TRANS_T, &TRANS_N, &nnorb, &nnorb, &bcount,
               &DN1, buf, &bcount, buf, &bcount,
               &D1, rdm2, &nnorb);
}

/* Drive `dm2kernel` over all intermediate alpha strings and beta blocks,
 * accumulating per-thread partial 2-RDMs and summing them under a critical
 * section.  The final NPdtranspose_021 reorders axes (0,2,1) in the packed
 * (norb, nnorb, norb) view. */
void SCIrdm2_aaaa(void (*dm2kernel)(), double *rdm2, double *bra, double *ket,
                  int norb, int na, int nb, int inter_na, int nlinka,
                  int *link_indexa)
{
        const int nnorb = norb * norb;
        double *pdm2;
        memset(rdm2, 0, sizeof(double) * nnorb*nnorb);
        _LinkT *clinka = malloc(sizeof(_LinkT) * nlinka * inter_na);
        FCIcompress_link(clinka, link_indexa, norb, inter_na, nlinka);
#pragma omp parallel private(pdm2)
{
        int strk, i, ib, blen;
        double *buf = malloc(sizeof(double) * (nnorb*BUFBASE*2+2));
        pdm2 = calloc(nnorb*nnorb, sizeof(double));
#pragma omp for schedule(dynamic, 40)
        for (strk = 0; strk < inter_na; strk++) {
                for (ib = 0; ib < nb; ib += BUFBASE) {
                        blen = MIN(BUFBASE, nb-ib);
                        (*dm2kernel)(pdm2, bra, ket, buf, blen, strk, ib,
                                     norb, na, nb, nlinka, clinka);
                }
        }
#pragma omp critical
{
        for (i = 0; i < nnorb*nnorb; i++) {
                rdm2[i] += pdm2[i];
        }
}
        free(pdm2);
        free(buf);
}
        free(clinka);

        int shape[] = {norb, nnorb, norb};
        pdm2 = malloc(sizeof(double) * nnorb*nnorb);
        NPdtranspose_021(shape, rdm2, pdm2);
        memcpy(rdm2, pdm2, sizeof(double) * nnorb*nnorb);
        free(pdm2);
}

/***********************************************************************
 *
 * With symmetry
 *
 ***********************************************************************/

/* Symmetry-adapted variant of ctr_bbaa_kern: eri is block-diagonal by
 * irreducible representation, so one dgemm is issued per irrep block. */
static void ctr_bbaa_symm(double *eri, double *ci0, double *ci1,
                          double *ci1buf, double *t1buf,
                          int bcount, int stra_id, int strb_id,
                          int norb, int na, int nb, int nlinka, int nlinkb,
                          _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb,
                          int *dimirrep, int totirrep)
{
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * (norb+1) / 2;
        int ir, p0;
        double *t1 = t1buf;
        double *vt1 = t1buf + nnorb*bcount;
        memset(t1, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1, bcount, stra_id, strb_id,
                     norb, nb, nlinka, clink_indexa);
        for (ir = 0, p0 = 0; ir < totirrep; ir++) {
                dgemm_(&TRANS_N, &TRANS_N, &bcount, dimirrep+ir, dimirrep+ir,
                       &D1, t1+p0*bcount, &bcount, eri+p0*nnorb+p0, &nnorb,
                       &D0, vt1+p0*bcount, &bcount);
                p0 += dimirrep[ir];
        }
        FCIspread_b_t1(ci1, vt1, bcount, stra_id, strb_id,
                       norb, nb, nlinkb, clink_indexb);
}

/* Symmetry-adapted bb|aa contraction; structure mirrors SCIcontract_2e_bbaa. */
void SCIcontract_2e_bbaa_symm(double *eri, double *ci0, double *ci1,
                              int norb, int na, int nb, int nlinka, int nlinkb,
                              int *link_indexa, int *link_indexb,
                              int *dimirrep, int totirrep)
{
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * na);
        _LinkTrilT *clinkb = malloc(sizeof(_LinkTrilT) * nlinkb * nb);
        FCIcompress_link_tril(clinka, link_indexa, na, nlinka);
        FCIcompress_link_tril(clinkb, link_indexb, nb, nlinkb);
#pragma omp parallel
{
        int strk, ib, blen;
        double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*norb*(norb+1)+2));
        double *ci1buf = NULL;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
#pragma omp for schedule(static)
                for (strk = 0; strk < na; strk++) {
                        ctr_bbaa_symm(eri, ci0, ci1, ci1buf, t1buf,
                                      blen, strk, ib, norb, na, nb,
                                      nlinka, nlinkb, clinka, clinkb,
                                      dimirrep, totirrep);
                }
        }
        free(t1buf);
}
        free(clinka);
        free(clinkb);
}

/* Symmetry-adapted variant of ctr_aaaa_kern (per-irrep dgemm blocks). */
static void ctr_aaaa_symm(double *eri, double *ci0, double *ci1,
                          double *ci1buf, double *t1buf,
                          int bcount, int stra_id, int strb_id,
                          int norb, int na, int nb, int nlinka, int nlinkb,
                          _LinkTrilT *clink_indexa, _LinkTrilT *clink_indexb,
                          int *dimirrep, int totirrep)
{
        const char TRANS_N = 'N';
        const double D0 = 0;
        const double D1 = 1;
        const int nnorb = norb * (norb-1) / 2;
        int ir, p0;
        double *t1 = t1buf;
        double *vt1 = t1buf + nnorb*bcount;
        memset(t1, 0, sizeof(double)*nnorb*bcount);
        FCIprog_a_t1(ci0, t1, bcount, stra_id, strb_id,
                     norb, nb, nlinka, clink_indexa);
        for (ir = 0, p0 = 0; ir < totirrep; ir++) {
                dgemm_(&TRANS_N, &TRANS_N, &bcount, dimirrep+ir, dimirrep+ir,
                       &D1, t1+p0*bcount, &bcount, eri+p0*nnorb+p0, &nnorb,
                       &D0, vt1+p0*bcount, &bcount);
                p0 += dimirrep[ir];
        }
        FCIspread_a_t1(ci1buf, vt1, bcount, stra_id, 0,
                       norb, bcount, nlinka, clink_indexa);
}

/* Symmetry-adapted aa|aa contraction; structure mirrors SCIcontract_2e_aaaa. */
void SCIcontract_2e_aaaa_symm(double *eri, double *ci0, double *ci1,
                              int norb, int na, int nb, int inter_na, int nlinka,
                              int *link_indexa, int *dimirrep, int totirrep)
{
        _LinkTrilT *clinka = malloc(sizeof(_LinkTrilT) * nlinka * inter_na);
        FCIcompress_link_tril(clinka, link_indexa, inter_na, nlinka);
        _LinkTrilT *clinkb = NULL;
        double *ci1bufs[MAX_THREADS];
#pragma omp parallel
{
        int strk, ib, blen;
        double *t1buf = malloc(sizeof(double) * (STRB_BLKSIZE*norb*norb+2));
        double *ci1buf = malloc(sizeof(double) * (na*STRB_BLKSIZE+2));
        ci1bufs[omp_get_thread_num()] = ci1buf;
        for (ib = 0; ib < nb; ib += STRB_BLKSIZE) {
                blen = MIN(STRB_BLKSIZE, nb-ib);
                memset(ci1buf, 0, sizeof(double) * na*blen);
#pragma omp for schedule(static)
                for (strk = 0; strk < inter_na; strk++) {
                        ctr_aaaa_symm(eri, ci0, ci1, ci1buf, t1buf,
                                      blen, strk, ib, norb, na, nb,
                                      nlinka, 0, clinka, clinkb,
                                      dimirrep, totirrep);
                }
                NPomp_dsum_reduce_inplace(ci1bufs, blen*na);
#pragma omp master
                FCIaxpy2d(ci1+ib, ci1buf, na, nb, blen);
#pragma omp barrier
        }
        free(ci1buf);
        free(t1buf);
}
        free(clinka);
}
remarks_parallel_in_target_state_machine.c
// RUN: %clang_cc1 -verify=host -Rpass=openmp-opt -Rpass-analysis=openmp-opt -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -Rpass=openmp-opt -Rpass-analysis=openmp-opt -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out
// RUN: %clang_cc1 -fexperimental-new-pass-manager -verify -Rpass=openmp-opt -Rpass-analysis=openmp-opt -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out

// Checks the optimization remarks emitted by the OpenMPOpt pass when it
// specializes ("rewrites the state machine of") parallel regions reached
// from a single target region. Generic-mode kernels get remarks; "SPMD"
// regions (see spmd() below) must not.

// host-no-diagnostics

void baz(void) __attribute__((assume("omp_no_openmp")));

void bar(void) {
#pragma omp parallel // #1 \
  // expected-remark@#1 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} \
  // expected-remark@#1 {{Parallel region is used in unknown ways; will not attempt to rewrite the state machine.}}
  {
  }
}

void foo(void) {
#pragma omp target teams // #2 \
  // expected-remark@#2 {{Generic-mode kernel is executed with a customized state machine [3 known parallel regions] (good).}}
  // expected-remark@#2 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__1_wrapper, kernel ID: __omp_offloading}} \
  // expected-remark@#2 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__2_wrapper, kernel ID: __omp_offloading}}
  {
    baz(); // expected-remark {{Kernel will be executed in generic-mode due to this potential side-effect, consider to add `__attribute__((assume("ompx_spmd_amenable")))` to the called function '_Z3bazv'.}}
#pragma omp parallel // #3 \
  // expected-remark@#3 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} \
  // expected-remark@#3 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__1_wrapper, kernel ID: __omp_offloading}}
    {
    }
    bar();
#pragma omp parallel // #4 \
  // expected-remark@#4 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nested inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} \
  // expected-remark@#4 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__2_wrapper, kernel ID: __omp_offloading}}
    {
    }
  }
}

void spmd(void) {
  // Verify we do not emit the remarks above for "SPMD" regions.
#pragma omp target teams
#pragma omp parallel
  {
  }

#pragma omp target teams distribute parallel for
  for (int i = 0; i < 100; ++i) {
  }
}

// expected-remark@* {{OpenMP runtime call __kmpc_global_thread_num moved to beginning of OpenMP region}}
// expected-remark@* {{OpenMP runtime call __kmpc_global_thread_num deduplicated}}
zunmlq.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> s d c * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_types.h" #include "plasma_workspace.h" /***************************************************************************//** * * @ingroup plasma_unmlq * * Overwrites the general complex m-by-n matrix C with * * side = PlasmaLeft side = PlasmaRight * trans = PlasmaNoTrans Q * C C * Q * trans = Plasma_ConjTrans Q^H * C C * Q^H * * where Q is an orthogonal (or unitary) matrix defined as the product of k * elementary reflectors * * Q = H(1) H(2) . . . H(k) * * as returned by plasma_zgelqf. Q is of order m if side = PlasmaLeft * and of order n if side = PlasmaRight. * ******************************************************************************* * * @param[in] side * Intended usage: * - PlasmaLeft: apply Q or Q^H from the left; * - PlasmaRight: apply Q or Q^H from the right. * * @param[in] trans * Intended usage: * - PlasmaNoTrans: apply Q; * - Plasma_ConjTrans: apply Q^H. * * @param[in] m * The number of rows of the matrix C. m >= 0. * * @param[in] n * The number of columns of the matrix C. n >= 0. * * @param[in] k * The number of rows of elementary tile reflectors whose product * defines the matrix Q. * If side == PlasmaLeft, m >= k >= 0. * If side == PlasmaRight, n >= k >= 0. * * @param[in] pA * Details of the LQ factorization of the original matrix A as returned * by plasma_zgelqf. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,k). * * @param[in] T * Auxiliary factorization data, computed by plasma_zgelqf. * * @param[in,out] pC * On entry, pointer to the m-by-n matrix C. * On exit, C is overwritten by Q*C, Q^H*C, C*Q, or C*Q^H. * * @param[in] ldc * The leading dimension of the array C. ldc >= max(1,m). 
* ******************************************************************************* * * @retval PlasmaSuccess successful exit * @retval < 0 if -i, the i-th argument had an illegal value * ******************************************************************************* * * @sa plasma_omp_zunmlq * @sa plasma_cunmlq * @sa plasma_dormlq * @sa plasma_sormlq * @sa plasma_zgelqf * ******************************************************************************/ int plasma_zunmlq(plasma_enum_t side, plasma_enum_t trans, int m, int n, int k, plasma_complex64_t *pA, int lda, plasma_desc_t T, plasma_complex64_t *pC, int ldc) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_fatal_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if ((side != PlasmaLeft) && (side != PlasmaRight)) { plasma_error("illegal value of side"); return -1; } if ((trans != Plasma_ConjTrans) && (trans != PlasmaNoTrans)) { plasma_error("illegal value of trans"); return -2; } if (m < 0) { plasma_error("illegal value of m"); return -3; } if (n < 0) { plasma_error("illegal value of n"); return -4; } int an; if (side == PlasmaLeft) { an = m; } else { an = n; } if ((k < 0) || (k > an)) { plasma_error("illegal value of k"); return -5; } if (lda < imax(1, k)) { plasma_error("illegal value of lda"); return -7; } if (ldc < imax(1, m)) { plasma_error("illegal value of ldc"); return -10; } // quick return if (m == 0 || n == 0 || k == 0) return PlasmaSuccess; // Set tiling parameters. int ib = plasma->ib; int nb = plasma->nb; // Create tile matrices. 
plasma_desc_t A; plasma_desc_t C; int retval; retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, k, an, 0, 0, k, an, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb, m, n, 0, 0, m, n, &C); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); return retval; } // Allocate workspace. plasma_workspace_t work; size_t lwork = ib*nb; // unmlq: work retval = plasma_workspace_create(&work, lwork, PlasmaComplexDouble); if (retval != PlasmaSuccess) { plasma_error("plasma_workspace_create() failed"); return retval; } // Create sequence. plasma_sequence_t *sequence = NULL; retval = plasma_sequence_create(&sequence); if (retval != PlasmaSuccess) { plasma_error("plasma_sequence_create() failed"); return retval; } // Initialize request. plasma_request_t request = PlasmaRequestInitializer; // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_zge2desc(pA, lda, A, sequence, &request); plasma_omp_zge2desc(pC, ldc, C, sequence, &request); // Call the tile async function. plasma_omp_zunmlq(side, trans, A, T, C, work, sequence, &request); // Translate back to LAPACK layout. plasma_omp_zdesc2ge(C, pC, ldc, sequence, &request); } // implicit synchronization plasma_workspace_destroy(&work); // Free matrices in tile layout. plasma_desc_destroy(&A); plasma_desc_destroy(&C); // Return status. int status = sequence->status; plasma_sequence_destroy(sequence); return status; } /***************************************************************************//** * * @ingroup plasma_unmlq * * Non-blocking tile version of plasma_zunmlq(). * May return before the computation is finished. * Allows for pipelining of operations at runtime. 
* ******************************************************************************* * * @param[in] side * Intended usage: * - PlasmaLeft: apply Q or Q^H from the left; * - PlasmaRight: apply Q or Q^H from the right. * * @param[in] trans * Intended usage: * - PlasmaNoTrans: apply Q; * - Plasma_ConjTrans: apply Q^H. * * @param[in] A * Descriptor of matrix A stored in the tile layout. * Details of the QR factorization of the original matrix A as returned * by plasma_zgeqrf. * * @param[in] T * Descriptor of matrix T. * Auxiliary factorization data, computed by plasma_zgeqrf. * * @param[in,out] C * Descriptor of matrix C. * On entry, the m-by-n matrix C. * On exit, C is overwritten by Q*C, Q^H*C, C*Q, or C*Q^H. * * @param[in] work * Workspace for the auxiliary arrays needed by some coreblas kernels. * For multiplication by Q contains preallocated space for work * arrays. Allocated by the plasma_workspace_create function. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). * * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. The sequence->status and * request->status should never be set to PlasmaSuccess (the * initial values) since another async call may be setting a * failure value at the same time. * ******************************************************************************* * * @sa plasma_zunmlq * @sa plasma_omp_cunmlq * @sa plasma_omp_dormlq * @sa plasma_omp_sormlq * @sa plasma_omp_zgelqf * ******************************************************************************/ void plasma_omp_zunmlq(plasma_enum_t side, plasma_enum_t trans, plasma_desc_t A, plasma_desc_t T, plasma_desc_t C, plasma_workspace_t work, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. 
plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. if ((side != PlasmaLeft) && (side != PlasmaRight)) { plasma_error("invalid value of side"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if ((trans != Plasma_ConjTrans) && (trans != PlasmaNoTrans)) { plasma_error("invalid value of trans"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(A) != PlasmaSuccess) { plasma_error("invalid A"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(T) != PlasmaSuccess) { plasma_error("invalid T"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(C) != PlasmaSuccess) { plasma_error("invalid C"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (C.m == 0 || C.n == 0 || A.m == 0 || A.n == 0) return; // Call the parallel function. if (plasma->householder_mode == PlasmaTreeHouseholder) { plasma_pzunmlq_tree(side, trans, A, T, C, work, sequence, request); } else { plasma_pzunmlq(side, trans, A, T, C, work, sequence, request); } }
Gemm_MT_Loop5_MRxNRKernel_ver2.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include<immintrin.h> #define alpha( i,j ) A[ (j)*ldA + (i) ] // map alpha( i,j ) to array A #define beta( i,j ) B[ (j)*ldB + (i) ] // map beta( i,j ) to array B #define gamma( i,j ) C[ (j)*ldC + (i) ] // map gamma( i,j ) to array C #define min( x, y ) ( ( x ) < ( y ) ? x : y ) void LoopFive( int, int, int, double *, int, double *, int, double *, int ); void LoopFour( int, int, int, double *, int, double *, int, double *, int ); void LoopThree( int, int, int, double *, int, double *, double *, int ); void LoopTwo( int, int, int, double *, double *, double *, int ); void LoopOne( int, int, int, double *, double *, double *, int ); void Gemm_MRxNRKernel_Packed( int, double *, double *, double *, int ); void PackBlockA_MCxKC( int, int, double *, int, double * ); void PackPanelB_KCxNC( int, int, double *, int, double * ); void MyGemm( int m, int n, int k, double *A, int ldA, double *B, int ldB, double *C, int ldC ) { if ( m % MR != 0 || MC % MR != 0 ){ printf( "m and MC must be multiples of MR\n" ); exit( 0 ); } if ( n % NR != 0 || NC % NR != 0 ){ printf( "n and NC must be multiples of NR\n" ); exit( 0 ); } LoopFive( m, n, k, A, ldA, B, ldB, C, ldC ); } void LoopFive( int m, int n, int k, double *A, int ldA, double *B, int ldB, double *C, int ldC ) { int max_threads = omp_get_max_threads(); int loadbalanced_part = (n/(NC*max_threads)) * NC * max_threads; int remainder = n - loadbalanced_part; #pragma omp parallel for for ( int j=0; j< loadbalanced_part; j+=NC ) { int jb = NC; //min( NC, n-j ); /* Last loop may not involve a full block */ LoopFour( m, jb, k, A, ldA, &beta( 0,j ), ldB, &gamma( 0,j ), ldC ); } int remainder_per_thread = ((remainder / max_threads) / NR) * NR; if (remainder_per_thread == 0) remainder_per_thread = NR; #pragma omp parallel for for (int j = loadbalanced_part; j < n; j += remainder_per_thread) { int jb = min (remainder_per_thread, n-j ); LoopFour (m, jb, k , A, ldA, &beta(0, j ), ldB, 
&gamma(0, j ), ldC ); } } void LoopFour( int m, int n, int k, double *A, int ldA, double *B, int ldB, double *C, int ldC ) { double *Btilde = ( double * ) _mm_malloc( KC * NC * sizeof( double ), 64 ); for ( int p=0; p<k; p+=KC ) { int pb = min( KC, k-p ); /* Last loop may not involve a full block */ PackPanelB_KCxNC( pb, n, &beta( p, 0 ), ldB, Btilde ); LoopThree( m, n, pb, &alpha( 0, p ), ldA, Btilde, C, ldC ); } _mm_free( Btilde); } void LoopThree( int m, int n, int k, double *A, int ldA, double *Btilde, double *C, int ldC ) { double *Atilde = ( double * ) _mm_malloc( MC * KC * sizeof( double ), 64 ); for ( int i=0; i<m; i+=MC ) { int ib = min( MC, m-i ); /* Last loop may not involve a full block */ PackBlockA_MCxKC( ib, k, &alpha( i, 0 ), ldA, Atilde ); LoopTwo( ib, n, k, Atilde, Btilde, &gamma( i,0 ), ldC ); } _mm_free( Atilde); } void LoopTwo( int m, int n, int k, double *Atilde, double *Btilde, double *C, int ldC ) { for ( int j=0; j<n; j+=NR ) { int jb = min( NR, n-j ); LoopOne( m, jb, k, Atilde, &Btilde[ j*k ], &gamma( 0,j ), ldC ); } } void LoopOne( int m, int n, int k, double *Atilde, double *MicroPanelB, double *C, int ldC ) { for ( int i=0; i<m; i+=MR ) { int ib = min( MR, m-i ); Gemm_MRxNRKernel_Packed( k, &Atilde[ i*k ], MicroPanelB, &gamma( i,0 ), ldC ); } }
GB_unaryop__ainv_int8_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): changes should be made in the code generator, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_int8_fp32
// op(A') function:  GB_tran__ainv_int8_fp32

// C type:   int8_t
// A type:   float
// cast:     int8_t cij ; GB_CAST_SIGNED(cij,aij,8)
// unaryop:  cij = -aij

// input (A) entry type
#define GB_ATYPE \
    float

// output (C) entry type
#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: additive inverse
#define GB_OP(z, x)   \
    z = -x ;

// casting: float -> int8_t with saturation/wrap per GB_CAST_SIGNED
#define GB_CASTING(z, x)   \
    int8_t z ; GB_CAST_SIGNED(z,x,8) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise: Cx [p] = -(int8_t) Ax [p] for all anz entries, parallelized
// statically over nthreads.
GrB_Info GB_unop__ainv_int8_fp32
(
    int8_t *restrict Cx,        // output array, length anz
    const float *restrict Ax,   // input array, length anz
    int64_t anz,                // number of entries
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in GB_unaryop_transpose.c, which is
// textually included here with the GB_* macros above defining the op.
GrB_Info GB_tran__ainv_int8_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
gemm.c
#include "gemm.h"
#include "utils.h"
#include "cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>

/* C += A*B where A is a binary {0,1} matrix stored as chars: a 1 adds the
   B row, a 0 subtracts it. */
void gemm_bin(int M, int N, int K, float ALPHA,
        char  *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    for (int i = 0; i < M; ++i) {
        for (int k = 0; k < K; ++k) {
            char A_PART = A[i*lda + k];
            if (A_PART) {
                for (int j = 0; j < N; ++j) {
                    C[i*ldc + j] += B[k*ldb + j];
                }
            } else {
                for (int j = 0; j < N; ++j) {
                    C[i*ldc + j] -= B[k*ldb + j];
                }
            }
        }
    }
}

/* Allocate a rows x cols matrix filled with uniform [0,1] values.
   Caller owns and must free() the result. */
float *random_matrix(int rows, int cols)
{
    float *m = (float*)calloc(rows*cols, sizeof(float));
    if (!m) {                       /* fix: allocation was unchecked */
        fprintf(stderr, "random_matrix: calloc failed\n");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < rows*cols; ++i) {
        m[i] = (float)rand()/RAND_MAX;
    }
    return m;
}

/* Time 10 CPU GEMMs on random operands and print the elapsed wall time. */
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if (!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if (!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    clock_t start = clock(), end;
    for (int i = 0; i < 10; ++i) {
        gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    /* fix: the computed value is seconds, but the label said "ms" */
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}

/* Public entry point: C = ALPHA*op(A)*op(B) + BETA*C (CPU path). */
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    gemm_cpu( TA,  TB,  M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}

/* C += ALPHA * A * B (no transposes). Note: the loop indices are declared
   per-loop; the previous function-scope j,k were shared across OpenMP
   threads, which was a data race under "parallel for". */
void gemm_nn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    #pragma omp parallel for
    for (int i = 0; i < M; ++i) {
        for (int k = 0; k < K; ++k) {
            float A_PART = ALPHA*A[i*lda + k];
            for (int j = 0; j < N; ++j) {
                C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
}

/* C += ALPHA * A * B^T. */
void gemm_nt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    #pragma omp parallel for
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            float sum = 0;
            for (int k = 0; k < K; ++k) {
                sum += ALPHA*A[i*lda + k]*B[j*ldb + k];
            }
            C[i*ldc + j] += sum;
        }
    }
}

/* C += ALPHA * A^T * B. */
void gemm_tn(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    #pragma omp parallel for
    for (int i = 0; i < M; ++i) {
        for (int k = 0; k < K; ++k) {
            float A_PART = ALPHA*A[k*lda + i];
            for (int j = 0; j < N; ++j) {
                C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
}

/* C += ALPHA * A^T * B^T. */
void gemm_tt(int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    #pragma omp parallel for
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            float sum = 0;
            for (int k = 0; k < K; ++k) {
                sum += ALPHA*A[i + k*lda]*B[k + j*ldb];
            }
            C[i*ldc + j] += sum;
        }
    }
}

/* CPU GEMM dispatcher: scales C by BETA, then dispatches on the transpose
   flags to one of the four kernels above. */
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            C[i*ldc + j] *= BETA;
        }
    }
    if (!TA && !TB)
        gemm_nn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
    else if (TA && !TB)
        gemm_tn(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
    else if (!TA && TB)
        gemm_nt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
    else
        gemm_tt(M, N, K, ALPHA,A,lda, B, ldb,C,ldc);
}

#ifdef GPU

#include <math.h>

/* GPU GEMM via cuBLAS. cuBLAS is column-major, so the row-major product is
   computed as B*A with the operand roles (and TA/TB) swapped.
   A_gpu/B_gpu/C_gpu must be device pointers. */
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A_gpu, int lda,
        float *B_gpu, int ldb,
        float BETA,
        float *C_gpu, int ldc)
{
    cublasHandle_t handle = blas_handle();
    cublasStatus_t status = cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
            (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
    check_cublas_error(status);
}

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if (!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if (!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    clock_t start = clock(), end;
    for (int i = 0; i < 32; ++i) {
        /* FIXME(review): a, b, c are host pointers but gemm_gpu expects
           device memory — confirm intent; this likely fails at runtime. */
        gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}

/* Benchmark device-resident GEMM and report GFLOPS. */
void time_gpu(int TA, int TB, int m, int k, int n)
{
    int iter = 10;
    float *a = random_matrix(m,k);
    float *b = random_matrix(k,n);

    int lda = (!TA)?k:m;
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);

    float *a_cl = cuda_make_array(a, m*k);
    float *b_cl = cuda_make_array(b, k*n);
    float *c_cl = cuda_make_array(c, m*n);

    clock_t start = clock(), end;
    for (int i = 0; i < iter; ++i) {
        gemm_gpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
        /* NOTE(review): cudaThreadSynchronize is deprecated in favor of
           cudaDeviceSynchronize — kept for compatibility with old CUDA. */
        cudaThreadSynchronize();
    }
    double flop = ((double)m)*n*(2.*k + 2.)*iter;
    double gflop = flop/pow(10., 9);
    end = clock();
    double seconds = sec(end-start);
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
    cuda_free(a_cl);
    cuda_free(b_cl);
    cuda_free(c_cl);
    free(a);
    free(b);
    free(c);
}

/* Compare GPU vs CPU GEMM and print the mean squared difference. */
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
    srand(0);
    float *a;
    if (!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if (!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    float *c_gpu = random_matrix(m,n);
    memset(c, 0, m*n*sizeof(float));
    memset(c_gpu, 0, m*n*sizeof(float));
    //pm(m,k,b);
    /* FIXME(review): a, b, c_gpu are host pointers passed to the device
       path — confirm against gemm_gpu's contract. */
    gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
    //printf("GPU\n");
    //pm(m, n, c_gpu);

    gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    //printf("\n\nCPU\n");
    //pm(m, n, c);
    double sse = 0;
    for (int i = 0; i < m*n; ++i) {
        //printf("%f %f\n", c[i], c_gpu[i]);
        sse += pow(c[i]-c_gpu[i], 2);
    }
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
    free(a);
    free(b);
    free(c);
    free(c_gpu);
}

int test_gpu_blas()
{
    /*
       test_gpu_accuracy(0,0,10,576,75);

       test_gpu_accuracy(0,0,17,10,10);
       test_gpu_accuracy(1,0,17,10,10);
       test_gpu_accuracy(0,1,17,10,10);
       test_gpu_accuracy(1,1,17,10,10);

       test_gpu_accuracy(0,0,1000,10,100);
       test_gpu_accuracy(1,0,1000,10,100);
       test_gpu_accuracy(0,1,1000,10,100);
       test_gpu_accuracy(1,1,1000,10,100);

       test_gpu_accuracy(0,0,10,10,10);

       time_gpu(0,0,64,2916,363);
       time_gpu(0,0,64,2916,363);
       time_gpu(0,0,64,2916,363);
       time_gpu(0,0,192,729,1600);
       time_gpu(0,0,384,196,1728);
       time_gpu(0,0,256,196,3456);
       time_gpu(0,0,256,196,2304);
       time_gpu(0,0,128,4096,12544);
       time_gpu(0,0,128,4096,4096);
     */
    time_gpu(0,0,64,75,12544);
    time_gpu(0,0,64,75,12544);
    time_gpu(0,0,64,75,12544);
    time_gpu(0,0,64,576,12544);
    time_gpu(0,0,256,2304,784);
    time_gpu(1,1,2304,256,784);
    time_gpu(0,0,512,4608,196);
    time_gpu(1,1,4608,512,196);

    return 0;
}
#endif
ballAlg.c
#include "ballAlg.h" #include "../lib/genPoints.h" #include "../lib/msort.h" #include "pointArith.h" #include <mpi.h> #include <omp.h> #include <stdbool.h> #include <stdio.h> #include <stdlib.h> int nProcesses; int myRank; int nDims; int nNodes = 0; int nid = 1; int nodesCapacity; Node *nodes; MPI_Datatype mpiMedianInfo; int main(int argc, char *argv[]) { double execTime = -omp_get_wtime(); MPI_Init(&argc, &argv); defineMedianInfo(); MPI_Comm_size(MPI_COMM_WORLD, &nProcesses); MPI_Comm_rank(MPI_COMM_WORLD, &myRank); nid += myRank; int nPoints; double **points = getPoints(argc, argv, &nDims, &nPoints, myRank, nProcesses); double *pointsValues = *points; nodesCapacity = nPoints; nodes = (Node *) mallocSafe(sizeof(Node) * nodesCapacity); int nTeammates = nProcesses; int *teammatesRanks = nProcesses > 1 ? calcInitialTeammates(MY_STATE(nPoints), &nTeammates) : NULL; #pragma omp parallel #pragma omp single nTeammates == 1 ? buildTreeOMP(points, nPoints, omp_get_num_threads()) : buildTree(&points[nPoints], points, nPoints, teammatesRanks, nTeammates); if (myRank == 0) { if (nNodes > 0) nodes[nNodes - 1].nid = 0; execTime += omp_get_wtime(); fprintf(stderr, "%.1lf\n", execTime); fflush(stderr); } int nNodesGlobal = nNodes; if (nProcesses > 1) MPI_Reduce(&nNodes, &nNodesGlobal, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD); if (myRank == 0) { printf("%d %d\n", nDims, nNodesGlobal); fflush(stdout); } dumpTree(); if (nProcesses > 1) free(teammatesRanks); free(pointsValues); free(points); MPI_Finalize(); exit(EXIT_SUCCESS); } void defineMedianInfo() { MedianInfo dummyMedianInfo; int lengths[3] = {1, 2, 2}; MPI_Aint displacements[3]; MPI_Aint baseAddress; MPI_Get_address(&dummyMedianInfo, &baseAddress); MPI_Get_address(&dummyMedianInfo.medX, &displacements[0]); MPI_Get_address(dummyMedianInfo.medRanks, &displacements[1]); MPI_Get_address(dummyMedianInfo.medIdx, &displacements[2]); displacements[0] = MPI_Aint_diff(displacements[0], baseAddress); displacements[1] = 
MPI_Aint_diff(displacements[1], baseAddress); displacements[2] = MPI_Aint_diff(displacements[2], baseAddress); MPI_Datatype datatypes[3] = {MPI_DOUBLE, MPI_INT, MPI_INT}; MPI_Type_create_struct(3, lengths, displacements, datatypes, &mpiMedianInfo); MPI_Type_commit(&mpiMedianInfo); } int buildTree(double **initialP, double **points, int nPoints, const int *teammatesRanks, int nTeammates) { if (nPoints == 0) return -1; double *center = (double *) mallocSafe(sizeof(double) * nDims); if (nPoints == 1 && nTeammates == 1) { copy(points[0], center); return newNode(center, 0, -1, -1); } double *pA = calcFurthestPoint(points, nPoints, *initialP, teammatesRanks, nTeammates, POINT_A_TAG); double *pB = calcFurthestPoint(points, nPoints, pA, teammatesRanks, nTeammates, POINT_B_TAG); double *subBA = (double *) mallocSafe(sizeof(double) * nDims); double *projectionsXs = (double *) mallocSafe(sizeof(double) * nPoints); double *projectionsPoints = (double *) mallocSafe(sizeof(double) * nDims * nPoints); double **projections = (double **) mallocSafe(sizeof(double *) * nPoints); double **pointsTmp = (double **) mallocSafe(sizeof(double *) * nPoints); double **pointsL = (double **) mallocSafe(sizeof(double *) * nPoints); double **pointsR = (double **) mallocSafe(sizeof(double *) * nPoints); sub(pB, pA, subBA); const double squaredSubBA = innerProduct(subBA, subBA); for (int i = 0; i < nPoints; i++) { projections[i] = projectionsPoints + (i * nDims); projection(points[i], pA, subBA, squaredSubBA, projections[i]); } msort(projections, nPoints, pointsTmp); for (int i = 0; i < nPoints; i++) projectionsXs[i] = projections[i][0]; MedianInfo medInfo; if (teammatesRanks[0] == myRank) { medInfo = bcastMedianInfo(teammatesRanks, nTeammates, projectionsXs, nPoints); } else { medInfo = recvMedianInfo(teammatesRanks[0], projections, projectionsXs, &nPoints); } int nPointsL = 0; int nPointsR = 0; partitionTree(projectionsPoints, medInfo.medX, points, nPoints, pointsL, &nPointsL, pointsR, 
&nPointsR); pointsL = realloc(pointsL, sizeof(double *) * nPointsL); pointsR = realloc(pointsR, sizeof(double *) * nPointsR); double radius = -1; if (teammatesRanks[0] == myRank) { calcCenter(medInfo, projections, center); calcRadius(points, nPoints, center, teammatesRanks, nTeammates, &radius); } else { calcCandidateRadius(teammatesRanks[0], points, nPoints, center); } free(pA); free(pB); free(subBA); free(projectionsXs); free(projectionsPoints); free(projections); free(pointsTmp); int myNid = buildTreeLoop(initialP, center, radius, &pointsL, &nPointsL, &pointsR, &nPointsR, teammatesRanks, nTeammates); free(pointsL); free(pointsR); return myNid; } void bcastToMyTeam(void *buf, int bufSize, const int *teammatesRanks, int nTeammates, MPI_Datatype datatype, int TAG) { MPI_Request request; for (int i = 0; i < nTeammates; i++) { if (teammatesRanks[i] != myRank) { MPI_Isend(buf, bufSize, datatype, teammatesRanks[i], TAG, MPI_COMM_WORLD, &request); MPI_Request_free(&request); } } } double *calcFurthestPoint(double **points, int nPoints, const double *pivot, const int *teammatesRanks, int nTeammates, int TAG) { double *P = (double *) mallocSafe(sizeof(double) * nDims); double *pCmp = (double *) mallocSafe(sizeof(double) * nDims); double maxD; int iFurthest = calcFurthestIdx(points, nPoints, pivot, &maxD); copy(points[iFurthest], P); bcastToMyTeam(P, nDims, teammatesRanks, nTeammates, MPI_DOUBLE, TAG); for (int i = 0; i < nTeammates; i++) { if (teammatesRanks[i] != myRank) { MPI_Recv(pCmp, nDims, MPI_DOUBLE, teammatesRanks[i], TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE); double d = squareDistance(pivot, pCmp); if (d > maxD) { maxD = d; copy(pCmp, P); } } } free(pCmp); return P; } int calcFurthestIdx(double **points, int nPoints, const double *pivot, double *maxD) { int iFurthest = 0; *maxD = -1; for (int i = 0; i < nPoints; i++) { if (points[i] != pivot) { double d = squareDistance(points[i], pivot); if ((*maxD) < d) { *maxD = d; iFurthest = i; } } } return iFurthest; } 
MedianInfo bcastMedianInfo(const int *teammatesRanks, int nTeammates, double *projectionsXs, int nProjectionsXs) { int nTeammatesXsSum = 0; int *nTeammatesXs = (int *) calloc(nTeammates, sizeof(int)); int *iTeammatesXs = (int *) calloc(nTeammates, sizeof(int)); double **teammatesXs = (double **) mallocSafe(sizeof(double *) * nTeammates); teammatesXs[0] = projectionsXs; nTeammatesXs[0] = nProjectionsXs; nTeammatesXsSum += nTeammatesXs[0]; for (int i = 1; i < nTeammates; i++) { MPI_Recv(&nTeammatesXs[i], 1, MPI_INT, teammatesRanks[i], PROJECTIONS_LEN_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE); teammatesXs[i] = (double *) mallocSafe(sizeof(double) * nTeammatesXs[i]); MPI_Recv(teammatesXs[i], nTeammatesXs[i], MPI_DOUBLE, teammatesRanks[i], PROJECTIONS_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE); nTeammatesXsSum += nTeammatesXs[i]; } for (int i = 0; i < nTeammatesXsSum / 2 - 1; i++) { iTeammatesXs[teammateMinX(nTeammates, teammatesXs, iTeammatesXs, nTeammatesXs)]++; } MedianInfo medInfo; medInfo.medX = 0; for (int i = 0; i < 2; i++) { int teammateId = teammateMinX(nTeammates, teammatesXs, iTeammatesXs, nTeammatesXs); if (i == 1 || nTeammatesXsSum % 2 == 0) { medInfo.medX += teammatesXs[teammateId][iTeammatesXs[teammateId]]; } medInfo.medRanks[i] = teammatesRanks[teammateId]; medInfo.medIdx[i] = iTeammatesXs[teammateId]++; } if (nTeammatesXsSum % 2 == 0) medInfo.medX /= 2; else { medInfo.medRanks[0] = -1; medInfo.medIdx[0] = -1; } bcastToMyTeam(&medInfo, 1, teammatesRanks, nTeammates, mpiMedianInfo, MEDIAN_REQUEST); for (int i = 1; i < nTeammates; i++) free(teammatesXs[i]); free(nTeammatesXs); free(iTeammatesXs); free(teammatesXs); return medInfo; } int teammateMinX(int nTeammates, double **teammatesXs, const int *iTeammatesXs, const int *nTeammatesXs) { int iMinX = -1; for (int i = 0; i < nTeammates; i++) { if (iTeammatesXs[i] < nTeammatesXs[i] && (iMinX < 0 || teammatesXs[i][iTeammatesXs[i]] < teammatesXs[iMinX][iTeammatesXs[iMinX]])) { iMinX = i; } } return iMinX; } 
MedianInfo recvMedianInfo(int leaderRank, double **projections, const double *projectionsXs, int *nProjectionsXs) { MPI_Request request; MPI_Isend(nProjectionsXs, 1, MPI_INT, leaderRank, PROJECTIONS_LEN_TAG, MPI_COMM_WORLD, &request); MPI_Request_free(&request); MPI_Isend(projectionsXs, (*nProjectionsXs), MPI_DOUBLE, leaderRank, PROJECTIONS_TAG, MPI_COMM_WORLD, &request); MPI_Request_free(&request); MedianInfo medInfo; MPI_Recv(&medInfo, 1, mpiMedianInfo, leaderRank, MEDIAN_REQUEST, MPI_COMM_WORLD, MPI_STATUS_IGNORE); int i = 0; double *medPoints = (double *) mallocSafe(sizeof(double) * nDims * 2); for (int j = 0; j < 2; j++) { if (medInfo.medRanks[j] == myRank) copy(projections[medInfo.medIdx[j]], &medPoints[nDims * i++]); } if (i > 0) { MPI_Isend(medPoints, nDims * i, MPI_DOUBLE, leaderRank, MEDIAN_REPLY, MPI_COMM_WORLD, &request); MPI_Request_free(&request); } return medInfo; } void calcCenter(MedianInfo medInfo, double **projections, double *center) { double *medPoints = (double *) mallocSafe(sizeof(double) * nDims * 2); if (medInfo.medRanks[0] < 0) { if (medInfo.medRanks[1] == myRank) { copy(projections[medInfo.medIdx[1]], center); } else { MPI_Recv(medPoints, nDims, MPI_DOUBLE, medInfo.medRanks[1], MEDIAN_REPLY, MPI_COMM_WORLD, MPI_STATUS_IGNORE); copy(medPoints, center); } } else if (medInfo.medRanks[0] == medInfo.medRanks[1]) { if (medInfo.medRanks[0] == myRank) { middle(projections[medInfo.medIdx[0]], projections[medInfo.medIdx[1]], center); } else { MPI_Recv(medPoints, nDims * 2, MPI_DOUBLE, medInfo.medRanks[0], MEDIAN_REPLY, MPI_COMM_WORLD, MPI_STATUS_IGNORE); middle(medPoints, &medPoints[nDims], center); } } else { for (int i = 0; i < 2; i++) { if (medInfo.medRanks[i] == myRank) { copy(projections[medInfo.medIdx[i]], &medPoints[i * nDims]); } else { MPI_Recv(&medPoints[i * nDims], nDims, MPI_DOUBLE, medInfo.medRanks[i], MEDIAN_REPLY, MPI_COMM_WORLD, MPI_STATUS_IGNORE); } } middle(medPoints, &medPoints[nDims], center); } free(medPoints); } void 
calcRadius(double **points, int nPoints, double *center, const int *teammatesRanks, int nTeammates, double *radius) { bcastToMyTeam(center, nDims, teammatesRanks, nTeammates, MPI_DOUBLE, RADIUS_TAG); double maxD; int iFurthest = calcFurthestIdx(points, nPoints, center, &maxD); *radius = distance(center, points[iFurthest]); double candidateRadius; for (int i = 0; i < nTeammates; i++) { if (teammatesRanks[i] != myRank) { MPI_Recv(&candidateRadius, 1, MPI_DOUBLE, teammatesRanks[i], RADIUS_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE); if (candidateRadius > (*radius)) *radius = candidateRadius; } } } void calcCandidateRadius(int leaderRank, double **points, int nPoints, double *center) { MPI_Recv(center, nDims, MPI_DOUBLE, leaderRank, RADIUS_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE); double maxD; int iFurthest = calcFurthestIdx(points, nPoints, center, &maxD); double candidateRadius = distance(center, points[iFurthest]); MPI_Request request; MPI_Isend(&candidateRadius, 1, MPI_DOUBLE, leaderRank, RADIUS_TAG, MPI_COMM_WORLD, &request); MPI_Request_free(&request); } void partitionTree(const double *projectionsPoints, double medX, double **points, int nPoints, double **pointsL, int *nPointsL, double **pointsR, int *nPointsR) { for (int i = 0; i < nPoints; i++) { // projectionsPoints[i * nDims] == (projectionsPoints + (i * nDims))[0] if (projectionsPoints[i * nDims] < medX) { pointsL[(*nPointsL)++] = points[i]; } else { pointsR[(*nPointsR)++] = points[i]; } } } int buildTreeLoop(double **initialP, double *center, double radius, double ***pointsL, int *nPointsL, double ***pointsR, int *nPointsR, const int *teammatesRanks, int nTeammates) { int nidL = -1; int nidR = -1; if (nTeammates == 1) { int nThreads = omp_get_num_threads(); #pragma omp task shared(nidL) nidL = buildTreeOMP((*pointsL), (*nPointsL), nThreads / 2); nidR = buildTreeOMP((*pointsR), (*nPointsR), nThreads - nThreads / 2); #pragma omp taskwait return newNode(center, radius, nidL, nidR); } int teammateId = 0; for (int i 
= 0; i < nTeammates; i++) { if (teammatesRanks[i] == myRank) { teammateId = i; break; } } if (teammateId % 2 == 0) { if (teammateId == nTeammates - 1) { exchangePoints(pointsR, nPointsR, NULL, NULL, teammatesRanks[teammateId - 1], false); } else { exchangePoints(pointsR, nPointsR, pointsL, nPointsL, teammatesRanks[teammateId + 1], false); } } else { exchangePoints(pointsL, nPointsL, pointsR, nPointsR, teammatesRanks[teammateId - 1], true); if (teammateId == nTeammates - 2) { exchangePoints(NULL, NULL, pointsR, nPointsR, teammatesRanks[teammateId + 1], false); } } int myState = MY_STATE(teammateId % 2 == 0 ? (*nPointsL) : (*nPointsR)); bcastToMyTeam(&myState, 1, teammatesRanks, nTeammates, MPI_INT, TEAMMATE_STATE_TAG); if (myState == FINISHED && teammatesRanks[0] != myRank) return -1; int newNTeammates[2] = {0, 0}; int *newTeammatesRanks[2]; for (int i = 0; i < 2; i++) { newTeammatesRanks[i] = calcNewTeammates(myState, teammatesRanks, nTeammates, &newNTeammates[i], i); } if (myState == FINISHED && teammatesRanks[0] == myRank) { if (newNTeammates[0] > 0) MPI_Recv(&nidL, 1, MPI_INT, newTeammatesRanks[0][0], BRANCH_ID_LEFT, MPI_COMM_WORLD, MPI_STATUS_IGNORE); if (newNTeammates[1] > 0) MPI_Recv(&nidR, 1, MPI_INT, newTeammatesRanks[1][0], BRANCH_ID_RIGHT, MPI_COMM_WORLD, MPI_STATUS_IGNORE); for (int i = 0; i < 2; i++) free(newTeammatesRanks[i]); return newNode(center, radius, nidL, nidR); } if (newTeammatesRanks[teammateId % 2][0] == myRank) { *initialP = teammateId % 2 == 0 ? 
(*pointsL)[0] : (*pointsR)[0]; bcastToMyTeam(*initialP, nDims, newTeammatesRanks[teammateId % 2], newNTeammates[teammateId % 2], MPI_DOUBLE, INITIAL_POINT_TAG); } else { MPI_Recv(*initialP, nDims, MPI_DOUBLE, newTeammatesRanks[teammateId % 2][0], INITIAL_POINT_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE); } int myNid; if (teammateId % 2 == 0) { nidL = buildTree(initialP, (*pointsL), (*nPointsL), newTeammatesRanks[0], newNTeammates[0]); myNid = nidL; if (newTeammatesRanks[0][0] == myRank) { if (teammatesRanks[0] == myRank) { if (newNTeammates[1] > 0) { MPI_Recv(&nidR, 1, MPI_INT, newTeammatesRanks[1][0], BRANCH_ID_RIGHT, MPI_COMM_WORLD, MPI_STATUS_IGNORE); } myNid = newNode(center, radius, nidL, nidR); } else { MPI_Request request; MPI_Isend(&nidL, 1, MPI_INT, teammatesRanks[0], BRANCH_ID_LEFT, MPI_COMM_WORLD, &request); MPI_Request_free(&request); } } } else { nidR = buildTree(initialP, (*pointsR), (*nPointsR), newTeammatesRanks[1], newNTeammates[1]); myNid = nidR; if (newTeammatesRanks[1][0] == myRank) { MPI_Request request; MPI_Isend(&nidR, 1, MPI_INT, teammatesRanks[0], BRANCH_ID_RIGHT, MPI_COMM_WORLD, &request); MPI_Request_free(&request); } } for (int i = 0; i < 2; i++) free(newTeammatesRanks[i]); return myNid; } void exchangePoints(double ***pointsToSend, int *nPointsToSend, double ***pointsToRecv, int *nPointsToRecv, int teammateRank, bool toMergeLeft) { if (nPointsToSend != NULL) { MPI_Request request; int nFlattedPointsToSend = (*nPointsToSend) * nDims; MPI_Isend(&nFlattedPointsToSend, 1, MPI_INT, teammateRank, POINTS_LEN_TAG, MPI_COMM_WORLD, &request); MPI_Request_free(&request); double *flattedPointsToSend = (double *) mallocSafe(sizeof(double) * nFlattedPointsToSend); flat(pointsToSend, nPointsToSend, flattedPointsToSend); MPI_Isend(flattedPointsToSend, nFlattedPointsToSend, MPI_DOUBLE, teammateRank, POINTS_TAG, MPI_COMM_WORLD, &request); MPI_Request_free(&request); } if (nPointsToRecv != NULL) { int nFlattedPointsToRecv; MPI_Recv(&nFlattedPointsToRecv, 1, 
MPI_INT, teammateRank, POINTS_LEN_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE); double *flattedPointsToRecv = (double *) mallocSafe(sizeof(double) * nFlattedPointsToRecv); MPI_Recv(flattedPointsToRecv, nFlattedPointsToRecv, MPI_DOUBLE, teammateRank, POINTS_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE); unflat(pointsToRecv, nPointsToRecv, flattedPointsToRecv, nFlattedPointsToRecv, toMergeLeft); } } void flat(double ***points, int *nPoints, double *flattedPoints) { for (int i = 0; i < (*nPoints); i++) copy((*points)[i], &flattedPoints[i * nDims]); } void unflat(double ***points, int *nPoints, double *flattedPoints, int nFlattedPoints, bool toMergeLeft) { *points = (double **) realloc((*points), ((*nPoints) + nFlattedPoints / nDims) * sizeof(double *)); if (toMergeLeft) { for (int i = (*nPoints) - 1; i >= 0; i--) (*points)[i + nFlattedPoints / nDims] = (*points)[i]; for (int i = 0; i < nFlattedPoints / nDims; i++) (*points)[i] = &flattedPoints[i * nDims]; } else { for (int i = 0; i < nFlattedPoints / nDims; i++) (*points)[(*nPoints) + i] = &flattedPoints[i * nDims]; } *nPoints += nFlattedPoints / nDims; } int *calcInitialTeammates(int myState, int *nTeammates) { int *teammatesRanks = mallocSafe(sizeof(int) * (*nTeammates)); for (int i = 0; i < (*nTeammates); i++) teammatesRanks[i] = i; bcastToMyTeam(&myState, 1, teammatesRanks, (*nTeammates), MPI_INT, TEAMMATE_STATE_TAG); *nTeammates = calcWorkingTeammates(myState, teammatesRanks, (*nTeammates)); teammatesRanks = (int *) realloc(teammatesRanks, sizeof(int) * (*nTeammates)); return teammatesRanks; } int *calcNewTeammates(int myState, const int *teammatesRanks, int nTeammates, int *newNTeammates, int iParity) { int *newTeammatesRanks = (int *) mallocSafe(sizeof(int) * nTeammates); for (int i = 0; i < nTeammates; i++) { if (i % 2 == iParity) newTeammatesRanks[(*newNTeammates)++] = teammatesRanks[i]; } *newNTeammates = calcWorkingTeammates(myState, newTeammatesRanks, (*newNTeammates)); newTeammatesRanks = (int *) 
realloc(newTeammatesRanks, sizeof(int) * (*newNTeammates)); return newTeammatesRanks; } int calcWorkingTeammates(int myState, int *teammatesRanks, int nTeammates) { int teammateState; int nTeammatesWorking = 0; for (int i = 0; i < nTeammates; i++) { if (teammatesRanks[i] != myRank) { MPI_Recv(&teammateState, 1, MPI_INT, teammatesRanks[i], TEAMMATE_STATE_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE); } else { teammateState = myState; } if (teammateState == WORKING) teammatesRanks[nTeammatesWorking++] = teammatesRanks[i]; } return nTeammatesWorking; } int newNode(double *center, double radius, int nidL, int nidR) { int myNNodes, myNid; #pragma omp critical(newNode) { myNNodes = nNodes++; myNid = nid; nid += nProcesses; if (nNodes > nodesCapacity) { nodesCapacity *= 2; nodes = (Node *) realloc(nodes, sizeof(Node) * nodesCapacity); if (nodes == NULL) { fprintf(stderr, "FATAL: [realloc]!\n"); exit(EXIT_FAILURE); } } } Node *new = &nodes[myNNodes]; new->nid = myNid; new->center = center; new->radius = radius; new->nidL = nidL; new->nidR = nidR; return myNid; } void dumpTree() { if (myRank != 0) MPI_Recv(NULL, 0, MPI_INT, myRank - 1, PRINT_TAG, MPI_COMM_WORLD, MPI_STATUS_IGNORE); for (int i = 0; i < nNodes; i++) { printf("%d %d %d %.6lf", nodes[i].nid, nodes[i].nidL, nodes[i].nidR, nodes[i].radius); printPoint(nodes[i].center, nDims); free(nodes[i].center); } fflush(stdout); free(nodes); if (myRank != nProcesses - 1) MPI_Send(NULL, 0, MPI_INT, myRank + 1, PRINT_TAG, MPI_COMM_WORLD); } void *mallocSafe(size_t size) { void *allocBytes = malloc(size); if (allocBytes == NULL) { fprintf(stderr, "FATAL: [malloc]!\n"); exit(EXIT_FAILURE); } return allocBytes; }
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 24; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,16);t1++) { lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32)); ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(2*t1-2,3)),ceild(32*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(16*t1+Ny+29,24)),floord(32*t2+Ny+28,24)),floord(32*t1-32*t2+Nz+Ny+27,24));t3++) { for (t4=max(max(max(0,ceild(t1-127,128)),ceild(32*t2-Nz-2044,2048)),ceild(24*t3-Ny-2044,2048));t4<=min(min(min(min(floord(Nt+Nx-4,2048),floord(16*t1+Nx+29,2048)),floord(32*t2+Nx+28,2048)),floord(24*t3+Nx+20,2048)),floord(32*t1-32*t2+Nz+Nx+27,2048));t4++) { for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),24*t3-Ny+2),2048*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),24*t3+22),2048*t4+2046),32*t1-32*t2+Nz+29);t5++) { for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) { lbv=max(2048*t4,t5+1); ubv=min(2048*t4+2047,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ 
(-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
DataTypeConversions.h
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // Created by raver119 on 21.11.17. // #ifndef LIBND4J_DATATYPECONVERSIONS_H #define LIBND4J_DATATYPECONVERSIONS_H #include <pointercast.h> #include <helpers/logger.h> #include <op_boilerplate.h> #include <array/DataType.h> #include <types/float16.h> #include <helpers/BitwiseUtils.h> #include <loops/type_conversions.h> #include <dll.h> namespace nd4j { template <typename T> class ND4J_EXPORT DataTypeConversions { private: template <typename T2> static FORCEINLINE void rconv(bool isBe, bool canKeep, T *buffer, Nd4jLong length, void *src) { if (std::is_same<T, T2>::value && canKeep) { memcpy(buffer, src, length * sizeof(T)); } else { auto tmp = new T2[length]; memcpy(tmp, src, length * sizeof(T2)); #if __GNUC__ <= 4 if (!canKeep) for (Nd4jLong e = 0; e < length; e++) buffer[e] = BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e])); else TypeCast::convertGeneric<T2, T>(nullptr, tmp, length, buffer); #else #pragma omp parallel for simd schedule(guided) for (Nd4jLong e = 0; e < length; e++) buffer[e] = canKeep ? 
static_cast<T>(tmp[e]) : BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e])); #endif delete[] tmp; } } public: static FORCEINLINE void convertType(void* vbuffer, void* src, DataType dataType, ByteOrder order, Nd4jLong length) { auto buffer = reinterpret_cast<T *>(vbuffer); bool isBe = BitwiseUtils::isBE(); bool canKeep = (isBe && order == ByteOrder::BE) || (!isBe && order == ByteOrder::LE); switch (dataType) { case BOOL: { DataTypeConversions<T>::template rconv<bool>(isBe, canKeep, buffer, length, src); } break; case UINT8: { DataTypeConversions<T>::template rconv<uint8_t>(isBe, canKeep, buffer, length, src); } break; case INT8: { DataTypeConversions<T>::template rconv<int8_t>(isBe, canKeep, buffer, length, src); } break; case INT16: { DataTypeConversions<T>::template rconv<int16_t>(isBe, canKeep, buffer, length, src); } break; case INT32: { DataTypeConversions<T>::template rconv<int>(isBe, canKeep, buffer, length, src); } break; case INT64: { DataTypeConversions<T>::template rconv<Nd4jLong>(isBe, canKeep, buffer, length, src); } break; case FLOAT32: { if (std::is_same<T, float>::value && canKeep) { memcpy(buffer, src, length * sizeof(T)); } else { auto tmp = new float[length]; memcpy(tmp, src, length * sizeof(float)); #if __GNUC__ <= 4 if (!canKeep) for (Nd4jLong e = 0; e < length; e++) buffer[e] = BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e])); else TypeCast::convertGeneric<float, T>(nullptr, tmp, length, buffer); #else #pragma omp parallel for simd schedule(guided) for (Nd4jLong e = 0; e < length; e++) buffer[e] = canKeep ? 
static_cast<T>(tmp[e]) : BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e])); #endif delete[] tmp; } } break; case DOUBLE: { if (std::is_same<T, double>::value && canKeep) { memcpy(buffer, src, length * sizeof(T)); } else { auto tmp = new double[length]; memcpy(tmp, src, length * sizeof(double)); #if __GNUC__ <= 4 if (!canKeep) for (Nd4jLong e = 0; e < length; e++) buffer[e] = BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e])); else TypeCast::convertGeneric<double, T>(nullptr, tmp, length, buffer); #else #pragma omp parallel for schedule(static) for (Nd4jLong e = 0; e < length; e++) buffer[e] = canKeep ? static_cast<T>(tmp[e]) : BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e])); #endif delete[] tmp; } } break; case HALF: { if (std::is_same<T, float16>::value && canKeep) { memcpy(buffer, src, length * sizeof(T)); } else { auto tmp = new float16[length]; memcpy(tmp, src, length * sizeof(float16)); #if __GNUC__ <= 4 if (!canKeep) for (Nd4jLong e = 0; e < length; e++) buffer[e] = BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e])); else TypeCast::convertGeneric<float16, T>(nullptr, tmp, length, buffer); #else #pragma omp parallel for schedule(static) for (Nd4jLong e = 0; e < length; e++) buffer[e] = canKeep ? static_cast<T>(tmp[e]) : BitwiseUtils::swap_bytes<T>(static_cast<T>(tmp[e])); #endif delete[] tmp; } } break; default: { nd4j_printf("Unsupported DataType requested: [%i]\n", static_cast<int>(dataType)); throw std::runtime_error("Unsupported DataType"); } } } }; } #endif //LIBND4J_DATATYPECONVERSIONS_H
gi_topological_gradient_using_algorithms.h
/* * * Copyright (C) 2018 Attila Gyulassy <jediati@sci.utah.edu> * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD license. See the LICENSE file for details. */ #ifndef TOPOLOGICAL_GRADIENT_USING_ALGORITHMS_H #define TOPOLOGICAL_GRADIENT_USING_ALGORITHMS_H #include "gi_basic_types.h" #include "gi_discrete_gradient_labeling.h" //#include "gi_topological_explicit_mesh_function.h" //#include "gi_topological_regular_grid.h" //#include "gi_topological_regular_masked_restricted_grid.h" #include <vector> #include <queue> #include <map> #include <set> namespace GInt { template <class GridType, class MeshFunction, class LabelingType> class TopologicalGradientUsingAlgorithms { protected: MeshFunction* my_mesh_function; GridType* my_mesh_handler; LabelingType* my_grad_field; public: TopologicalGradientUsingAlgorithms( MeshFunction* mesh_function, GridType* mesh_handler, LabelingType* grad_field) : my_mesh_function(mesh_function), my_mesh_handler(mesh_handler), my_grad_field(grad_field) { } // trace "down" in gradient and fill in the result std::vector // with all cells that are found //void CheckGradientForLoops() { // DenseLabeling<char>* visited = new DenseLabeling<char>(my_mesh_handler->numCells()); // visited->SetAll(0); // typename GridType::AllCellsIterator allit(my_mesh_handler); // for (allit.begin(); allit.valid(); allit.advance()) { // INDEX_TYPE cellid = allit.value(); void _recCheckForLoops(INDEX_TYPE id, std::set<INDEX_TYPE>& current_path, DenseLabeling<char>* visited) { // this path down has been checked, so skip if (visited->GetLabel(id) != 0) return; if (my_grad_field->getCritical(id)) return; if (my_mesh_handler->dimension(my_grad_field->getPair(id) < my_mesh_handler->dimension(id))) return; current_path.insert(id); INDEX_TYPE head = my_grad_field->getPair(id); visited->SetLabel(id, 1); visited->SetLabel(head, 1); typename GridType::FacetsIterator fit(my_mesh_handler); for (fit.begin(id); fit.valid(); 
fit.advance()) { INDEX_TYPE fid = fit.value(); if (fid == id) continue; // skip going back to head if (my_mesh_handler->dimension(my_grad_field->getPair(fid) < my_mesh_handler->dimension(fid))) continue; // stick to dimension if (current_path.count(fid) != 0) { printf("cycle detected"); } _recCheckForLoops(fid, current_path, visited); } current_path.erase(id); } void CheckGradientForLoops() { DenseLabeling<char>* visited = new DenseLabeling<char>(my_mesh_handler->numCells()); visited->SetAll(0); // trace down typename GridType::AllCellsIterator allit(my_mesh_handler); for (allit.begin(); allit.valid(); allit.advance()) { INDEX_TYPE cellid = allit.value(); if (visited->GetLabel(cellid) != 0) continue; // skip critical points and heads of arrows, since we will search [tail->head->facets]* if (my_grad_field->getCritical(cellid)) continue; if (my_mesh_handler->dimension(my_grad_field->getPair(cellid) > my_mesh_handler->dimension(cellid))) continue; // start a path down std::set<INDEX_TYPE> path; _recCheckForLoops(cellid, path, visited); } } //} void CheckGradientConsistency() { typename GridType::AllCellsIterator allit(my_mesh_handler); for (allit.begin(); allit.valid(); allit.advance()) { INDEX_TYPE cellid = allit.value(); if (my_grad_field->getAssigned(cellid) == 0) { printf("CheckGradientConsistency(): error: cellid %d is not assigned\n",cellid); } if (my_grad_field->getCritical(cellid)) { } else { INDEX_TYPE pairid = my_grad_field->getPair(cellid); if (my_grad_field->getCritical(pairid)) { printf("CheckGradientConsistency(): error: cell %d is paired with critical cell %d\n", cellid, pairid); } else { INDEX_TYPE pairpair = my_grad_field->getPair(pairid); if (pairpair != cellid) { printf("CheckGradientConsistency(): error: pair pair is not cellid (%d -> %d -> %d)\n", cellid, pairid, pairpair); } } if (my_mesh_handler->dimension(pairid) != my_mesh_handler->dimension(cellid) - 1 && my_mesh_handler->dimension(pairid) != my_mesh_handler->dimension(cellid) + 1) { 
printf("CheckGradientConsistency(): error: dimensions of cell (%d) and pair (%d) dont match\n", my_mesh_handler->dimension(cellid), my_mesh_handler->dimension(pairid)); } } } } virtual void count_critical_points(int dim) { int* counts = new int[dim]; for (int i = 0; i < dim; i++) counts[i] = 0; for (INDEX_TYPE i = 0; i < my_mesh_handler->numCells(); i++) { if (my_grad_field->getCritical(i)) counts[my_mesh_handler->dimension(i)]++; } for (int i = 0; i < dim; i++) printf("index-%d=%d\n", i, counts[i]); } virtual void trace_down_cells(const INDEX_TYPE& cellid, std::vector<INDEX_TYPE>& result) { std::queue<INDEX_TYPE> cell_queue; cell_queue.push(cellid); result.clear(); std::set<INDEX_TYPE> cell_visited; while (!cell_queue.empty()) { INDEX_TYPE current = cell_queue.front(); cell_queue.pop(); cell_visited.insert(current); result.push_back(current); typename GridType::FacetsIterator fit(my_mesh_handler); for (fit.begin(current); fit.valid(); fit.advance()) { INDEX_TYPE temp_id = fit.value(); if (my_grad_field->getCritical(temp_id) && cell_visited.count(temp_id) == 0) { result.push_back(temp_id); cell_visited.insert(temp_id); } else if (cell_visited.count(temp_id) == 0) { INDEX_TYPE pair = my_grad_field->getPair(temp_id); result.push_back(temp_id); result.push_back(pair); cell_visited.insert(temp_id); cell_visited.insert(pair); cell_queue.push(pair); } } } } virtual void trace_up_cells(const INDEX_TYPE& cellid, std::vector<INDEX_TYPE>& result) const { std::queue<INDEX_TYPE> cell_queue; cell_queue.push(cellid); result.clear(); std::set<INDEX_TYPE> cell_visited; while (!cell_queue.empty()) { INDEX_TYPE current = cell_queue.front(); cell_queue.pop(); cell_visited.insert(current); result.push_back(current); typename GridType::CofacetsIterator cofacets(my_mesh_handler); for (cofacets.begin(current); cofacets.valid(); cofacets.advance()) { INDEX_TYPE temp_id = cofacets.value(); if (my_grad_field->getCritical(temp_id) && cell_visited.count(temp_id) == 0) { 
result.push_back(temp_id); cell_visited.insert(temp_id); } else if (cell_visited.count(temp_id) == 0) { INDEX_TYPE pair = my_grad_field->getPair(temp_id); result.push_back(temp_id); result.push_back(pair); cell_visited.insert(temp_id); cell_visited.insert(pair); cell_queue.push(pair); } } } } virtual void trace_down_cells_restricted(const INDEX_TYPE& cellid, std::vector<INDEX_TYPE>& result) { std::queue<INDEX_TYPE> cell_queue; cell_queue.push(cellid); DIM_TYPE temp_dim = my_grad_field->getDimAscMan(cellid) + 1; result.clear(); std::set<INDEX_TYPE> cell_visited; while (!cell_queue.empty()) { INDEX_TYPE current = cell_queue.front(); cell_queue.pop(); cell_visited.insert(current); result.push_back(current); typename GridType::FacetsIterator fit(my_mesh_handler); for (fit.begin(current); fit.valid(); fit.advance()) { INDEX_TYPE temp_id = fit.value(); if (my_grad_field->getCritical(temp_id) && cell_visited.count(temp_id) == 0) { result.push_back(temp_id); cell_visited.insert(temp_id); } else if (cell_visited.count(temp_id) == 0 && my_grad_field->getDimAscMan(temp_id) == temp_dim) { INDEX_TYPE pair = my_grad_field->getPair(temp_id); result.push_back(temp_id); result.push_back(pair); cell_visited.insert(temp_id); cell_visited.insert(pair); cell_queue.push(pair); } } } } virtual void trace_down_cells_restricted_counting(const INDEX_TYPE& cellid, std::vector<INDEX_TYPE>& result, std::vector<int>& counts) { std::queue<INDEX_TYPE> cell_queue; cell_queue.push(cellid); DIM_TYPE temp_dim = my_grad_field->getDimAscMan(cellid) + 1; result.clear(); counts.clear(); std::set<INDEX_TYPE> cell_visited; // build the graph std::map<INDEX_TYPE, std::set<INDEX_TYPE> > node_graph; std::map<INDEX_TYPE, int > visit_counts; while (!cell_queue.empty()) { INDEX_TYPE current = cell_queue.front(); cell_queue.pop(); std::set<INDEX_TYPE> neighbors; cell_visited.insert(current); typename GridType::FacetsIterator fit(my_mesh_handler); for (fit.begin(current); fit.valid(); fit.advance()) { INDEX_TYPE 
temp_id = fit.value(); if (my_grad_field->getCritical(temp_id)) { neighbors.insert(temp_id); if (visit_counts.count(temp_id) == 0) { visit_counts[temp_id] = 1; } else { visit_counts[temp_id]++; } cell_visited.insert(temp_id); } else if (my_grad_field->getDimAscMan(temp_id) == temp_dim) { INDEX_TYPE pair = my_grad_field->getPair(temp_id); if (current == pair) continue; neighbors.insert(pair); if (visit_counts.count(pair) == 0) { visit_counts[pair] = 1; } else { visit_counts[pair]++; } if (cell_visited.count(pair) == 0) { cell_queue.push(pair); } cell_visited.insert(temp_id); cell_visited.insert(pair); } } node_graph[current].insert(neighbors.begin(), neighbors.end()); } //print graph printf("\ngraph of %d:\n", cellid); for (std::map<INDEX_TYPE, std::set<INDEX_TYPE> >::iterator mit = node_graph.begin(); mit != node_graph.end(); mit++) { INDEX_TYPE tempid = (*mit).first; printf(" n=%d\n", tempid); for (std::set<INDEX_TYPE>::iterator sit = (*mit).second.begin(); sit != (*mit).second.end(); sit++) printf(" -->%d\n", *sit); } // traverse graph from root cell_queue.push(cellid); while (!cell_queue.empty()) { INDEX_TYPE current = cell_queue.front(); cell_queue.pop(); result.push_back(current); counts.push_back(0); for (std::set<INDEX_TYPE>::iterator it = node_graph[current].begin(); it != node_graph[current].end(); it++) { INDEX_TYPE tempid = *it; visit_counts[tempid]--; if (visit_counts[tempid] == 0) { cell_queue.push(tempid); } } } // the base case, 1 path from cell to itself visit_counts[cellid] = 1; for (int i = 0; i < result.size(); i++) { INDEX_TYPE current = result[i]; int temp_count = visit_counts[current]; counts[i] = temp_count; for (std::set<INDEX_TYPE>::iterator it = node_graph[current].begin(); it != node_graph[current].end(); it++) { INDEX_TYPE tempid = *it; visit_counts[tempid] += temp_count; } } } void rec_man_trace_up(INDEX_TYPE& cellid, std::set<INDEX_TYPE>& res) { res.insert(cellid); INDEX_TYPE current = cellid; DIM_TYPE cdim = 
this->my_mesh_handler->dimension(cellid); typename GridType::CofacetsIterator cofacets(my_mesh_handler); for (cofacets.begin(current); cofacets.valid(); cofacets.advance()) { INDEX_TYPE temp_id = cofacets.value(); if (this->my_grad_field->getCritical(temp_id) || !my_grad_field->getAssigned(temp_id)) continue; INDEX_TYPE temp_pair = my_grad_field->getPair(temp_id); if (temp_pair == cellid) continue; if (my_mesh_handler->dimension(temp_pair) != cdim) continue; rec_man_trace_up(temp_pair, res); } } protected: void rec_man_trace_up_marking(INDEX_TYPE& cellid, DIM_TYPE value) { if (my_grad_field->getDimAscMan(cellid) == value) return; my_grad_field->setDimAscMan(cellid, value); INDEX_TYPE current = cellid; // DIM_TYPE cdim = this->my_mesh_handler->dimension(cellid); typename GridType::CofacetsIterator cofacets(my_mesh_handler); for (cofacets.begin(current); cofacets.valid(); cofacets.advance()) { INDEX_TYPE temp_id = cofacets.value(); if (this->my_grad_field->getCritical(temp_id) || !my_grad_field->getAssigned(temp_id)) continue; INDEX_TYPE temp_pair = my_grad_field->getPair(temp_id); if (temp_pair == cellid) continue; //if (my_mesh_handler->dimension(temp_pair) != cdim) continue; if (my_grad_field->getDimAscMan(temp_id) == value) continue; my_grad_field->setDimAscMan(temp_id, value); rec_man_trace_up_marking(temp_pair, value); } } public: void setAscendingManifoldDimensions() { std::vector<INDEX_TYPE> criticals[4]; std::vector<INDEX_TYPE> topo_index_partition; int num_threads; #pragma omp parallel { #pragma omp single { num_threads = omp_get_num_threads(); ArrayIndexPartitioner::EvenChunkSplit(my_mesh_handler->numCells(), num_threads, topo_index_partition); } int thread_num = omp_get_thread_num(); typename GridType::AllCellsIterator all_cells_iterator(my_mesh_handler, topo_index_partition[thread_num], topo_index_partition[thread_num + 1]); for (all_cells_iterator.begin(); all_cells_iterator.valid(); all_cells_iterator.advance()) { INDEX_TYPE cell_id = 
all_cells_iterator.value(); //my_grad_field->setMark(cell_id, 0); my_grad_field->setDimAscMan(cell_id, 3); if (my_grad_field->getCritical(cell_id)) { DIM_TYPE tdim = my_mesh_handler->dimension(cell_id); #pragma omp critical { criticals[tdim].push_back(cell_id); } } } } // no now every cell is assigned to 3-manifold, and have list of critical points of each dimension //printf("found %d %d %d %d crits\n", criticals[0].size(), criticals[1].size(), criticals[2].size(), criticals[3].size()); INDEX_TYPE num_1s = criticals[1].size(); #pragma omp parallel for schedule(dynamic) for (INDEX_TYPE vid = 0; vid < num_1s; vid++) { INDEX_TYPE cid = criticals[1][vid]; rec_man_trace_up_marking(cid, 2); } INDEX_TYPE num_2s = criticals[2].size(); #pragma omp parallel for schedule(dynamic) for (INDEX_TYPE vid = 0; vid < num_2s; vid++) { INDEX_TYPE cid = criticals[2][vid]; rec_man_trace_up_marking(cid, 1); } INDEX_TYPE num_3s = criticals[3].size(); #pragma omp parallel for schedule(static) for (INDEX_TYPE vid = 0; vid < num_3s; vid++) { INDEX_TYPE cid = criticals[3][vid]; my_grad_field->setDimAscMan(cid, 0); } } }; } #endif
segment.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS EEEEE GGGG M M EEEEE N N TTTTT % % SS E G MM MM E NN N T % % SSS EEE G GGG M M M EEE N N N T % % SS E G G M M E N NN T % % SSSSS EEEEE GGGG M M EEEEE N N T % % % % % % MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means % % % % Software Design % % Cristy % % April 1993 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segment segments an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % c-means technique. The scale-space filter analyzes the histograms of % the three color components of the image and identifies a set of % classes. The extents of each class is used to coarsely segment the % image with thresholding. The color associated with each class is % determined by the mean color of all pixels within the extents of a % particular class. Finally, any unclassified pixels are assigned to % the closest class with the fuzzy c-means technique. % % The fuzzy c-Means algorithm can be summarized as follows: % % o Build a histogram, one for each color component of the image. 
%
%  o For each histogram, successively apply the scale-space filter and
%    build an interval tree of zero crossings in the second derivative
%    at each scale.  Analyze this scale-space ''fingerprint'' to
%    determine which peaks and valleys in the histogram are most
%    predominant.
%
%  o The fingerprint defines intervals on the axis of the histogram.
%    Each interval contains either a minima or a maxima in the original
%    signal.  If each color component lies within the maxima interval,
%    that pixel is considered ''classified'' and is assigned a unique
%    class number.
%
%  o Any pixel that fails to be classified in the above thresholding
%    pass is classified using the fuzzy c-Means technique.  It is
%    assigned to one of the classes discovered in the histogram analysis
%    phase.
%
%  The fuzzy c-Means technique attempts to cluster a pixel by finding
%  the local minima of the generalized within group sum of squared error
%  objective function.  A pixel is assigned to the closest class of
%  which the fuzzy membership has a maximum value.
%
%  Segment is strongly based on software written by Andy Gallo,
%  University of Delaware.
%
%  The following reference was used in creating this program:
%
%    Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation
%    Algorithm Based on the Thresholding and the Fuzzy c-Means
%    Techniques", Pattern Recognition, Volume 23, Number 9, pages
%    935-952, 1990.
% % */ #include "MagickCore/studio.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #define MaxDimension 3 #define DeltaTau 0.5f #if defined(FastClassify) #define WeightingExponent 2.0 #define SegmentPower(ratio) (ratio) #else #define WeightingExponent 2.5 #define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0))); #endif #define Tau 5.2f /* Typedef declarations. */ typedef struct _ExtentPacket { double center; ssize_t index, left, right; } ExtentPacket; typedef struct _Cluster { struct _Cluster *next; ExtentPacket red, green, blue; ssize_t count, id; } Cluster; typedef struct _IntervalTree { double tau; ssize_t left, right; double mean_stability, stability; struct _IntervalTree *sibling, *child; } IntervalTree; typedef struct _ZeroCrossing { double tau, histogram[256]; short crossings[256]; } ZeroCrossing; /* Constant declarations. */ static const int Blue = 2, Green = 1, Red = 0, SafeMargin = 3, TreeLength = 600; /* Method prototypes. 
*/ static double OptimalTau(const ssize_t *,const double,const double,const double, const double,short *); static ssize_t DefineRegion(const short *,ExtentPacket *); static void InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *), ScaleSpace(const ssize_t *,const double,double *), ZeroCrossHistogram(double *,const double,short *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Classify() defines one or more classes. Each pixel is thresholded to % determine which class it belongs to. If the class is not identified it is % assigned to the closest class based on the fuzzy c-Means technique. % % The format of the Classify method is: % % MagickBooleanType Classify(Image *image,short **extrema, % const double cluster_threshold, % const double weighting_exponent, % const MagickBooleanType verbose,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o cluster_threshold: This double represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o weighting_exponent: Specifies the membership weighting exponent. % % o verbose: A value greater than zero prints detailed information about % the identified classes. % % o exception: return any errors or warnings in this structure. 
% */ static MagickBooleanType Classify(Image *image,short **extrema, const double cluster_threshold, const double weighting_exponent,const MagickBooleanType verbose, ExceptionInfo *exception) { #define SegmentImageTag "Segment/Image" CacheView *image_view; Cluster *cluster, *head, *last_cluster, *next_cluster; ExtentPacket blue, green, red; MagickOffsetType progress; double *free_squares; MagickStatusType status; register ssize_t i; register double *squares; size_t number_clusters; ssize_t count, y; /* Form clusters. */ cluster=(Cluster *) NULL; head=(Cluster *) NULL; (void) ResetMagickMemory(&red,0,sizeof(red)); (void) ResetMagickMemory(&green,0,sizeof(green)); (void) ResetMagickMemory(&blue,0,sizeof(blue)); while (DefineRegion(extrema[Red],&red) != 0) { green.index=0; while (DefineRegion(extrema[Green],&green) != 0) { blue.index=0; while (DefineRegion(extrema[Blue],&blue) != 0) { /* Allocate a new class. */ if (head != (Cluster *) NULL) { cluster->next=(Cluster *) AcquireMagickMemory( sizeof(*cluster->next)); cluster=cluster->next; } else { cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); head=cluster; } if (cluster == (Cluster *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; } } } if (head == (Cluster *) NULL) { /* No classes were identified-- create one. */ cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); if (cluster == (Cluster *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; head=cluster; } /* Count the pixels for each cluster. 
*/ status=MagickTrue; count=0; progress=0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <= (cluster->blue.right+SafeMargin))) { /* Count this pixel. */ count++; cluster->red.center+=(double) ScaleQuantumToChar( GetPixelRed(image,p)); cluster->green.center+=(double) ScaleQuantumToChar( GetPixelGreen(image,p)); cluster->blue.center+=(double) ScaleQuantumToChar( GetPixelBlue(image,p)); cluster->count++; break; } p+=GetPixelChannels(image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_Classify) #endif proceed=SetImageProgress(image,SegmentImageTag,progress++,2* image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); /* Remove clusters that do not meet minimum cluster threshold. */ count=0; last_cluster=head; next_cluster=head; for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; if ((cluster->count > 0) && (cluster->count >= (count*cluster_threshold/100.0))) { /* Initialize cluster. 
*/ cluster->id=count; cluster->red.center/=cluster->count; cluster->green.center/=cluster->count; cluster->blue.center/=cluster->count; count++; last_cluster=cluster; continue; } /* Delete cluster. */ if (cluster == head) head=next_cluster; else last_cluster->next=next_cluster; cluster=(Cluster *) RelinquishMagickMemory(cluster); } number_clusters=(size_t) count; if (verbose != MagickFalse) { /* Print cluster statistics. */ (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n"); (void) FormatLocaleFile(stdout,"===================\n\n"); (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double) cluster_threshold); (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double) weighting_exponent); (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n", (double) number_clusters); /* Print the total number of points per cluster. */ (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n"); (void) FormatLocaleFile(stdout,"=============================\n\n"); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double) cluster->id,(double) cluster->count); /* Print the cluster extents. */ (void) FormatLocaleFile(stdout, "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension); (void) FormatLocaleFile(stdout,"================"); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double) cluster->id); (void) FormatLocaleFile(stdout, "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double) cluster->red.left,(double) cluster->red.right,(double) cluster->green.left,(double) cluster->green.right,(double) cluster->blue.left,(double) cluster->blue.right); } /* Print the cluster center values. 
*/ (void) FormatLocaleFile(stdout, "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension); (void) FormatLocaleFile(stdout,"====================="); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double) cluster->id); (void) FormatLocaleFile(stdout,"%g %g %g\n",(double) cluster->red.center,(double) cluster->green.center,(double) cluster->blue.center); } (void) FormatLocaleFile(stdout,"\n"); } if (number_clusters > 256) ThrowBinaryException(ImageError,"TooManyClusters",image->filename); /* Speed up distance calculations. */ squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares)); if (squares == (double *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); squares+=255; for (i=(-255); i <= 255; i++) squares[i]=(double) i*(double) i; /* Allocate image colormap. */ if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); i=0; for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char) (cluster->red.center+0.5)); image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char) (cluster->green.center+0.5)); image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char) (cluster->blue.center+0.5)); i++; } /* Do course grain classes. 
*/ image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Cluster *cluster; register const PixelInfo *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,0,q); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) <= (cluster->blue.right+SafeMargin))) { /* Classify this pixel. */ SetPixelIndex(image,(Quantum) cluster->id,q); break; } } if (cluster == (Cluster *) NULL) { double distance_squared, local_minima, numerator, ratio, sum; register ssize_t j, k; /* Compute fuzzy membership. 
*/ local_minima=0.0; for (j=0; j < (ssize_t) image->colors; j++) { sum=0.0; p=image->colormap+j; distance_squared=squares[(ssize_t) ScaleQuantumToChar( GetPixelRed(image,q))-(ssize_t) ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t) ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t) ScaleQuantumToChar(ClampToQuantum(p->blue))]; numerator=distance_squared; for (k=0; k < (ssize_t) image->colors; k++) { p=image->colormap+k; distance_squared=squares[(ssize_t) ScaleQuantumToChar( GetPixelRed(image,q))-(ssize_t) ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[ (ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t) ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[ (ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t) ScaleQuantumToChar(ClampToQuantum(p->blue))]; ratio=numerator/distance_squared; sum+=SegmentPower(ratio); } if ((sum != 0.0) && ((1.0/sum) > local_minima)) { /* Classify this pixel. */ local_minima=1.0/sum; SetPixelIndex(image,(Quantum) j,q); } } } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_Classify) #endif proceed=SetImageProgress(image,SegmentImageTag,progress++, 2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); status&=SyncImage(image,exception); /* Relinquish resources. 
*/ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; cluster=(Cluster *) RelinquishMagickMemory(cluster); } squares-=255; free_squares=squares; free_squares=(double *) RelinquishMagickMemory(free_squares); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n s o l i d a t e C r o s s i n g s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConsolidateCrossings() guarantees that an even number of zero crossings % always lie between two crossings. % % The format of the ConsolidateCrossings method is: % % ConsolidateCrossings(ZeroCrossing *zero_crossing, % const size_t number_crossings) % % A description of each parameter follows. % % o zero_crossing: Specifies an array of structures of type ZeroCrossing. % % o number_crossings: This size_t specifies the number of elements % in the zero_crossing array. % */ static void ConsolidateCrossings(ZeroCrossing *zero_crossing, const size_t number_crossings) { register ssize_t i, j, k, l; ssize_t center, correct, count, left, right; /* Consolidate zero crossings. */ for (i=(ssize_t) number_crossings-1; i >= 0; i--) for (j=0; j <= 255; j++) { if (zero_crossing[i].crossings[j] == 0) continue; /* Find the entry that is closest to j and still preserves the property that there are an even number of crossings between intervals. */ for (k=j-1; k > 0; k--) if (zero_crossing[i+1].crossings[k] != 0) break; left=MagickMax(k,0); center=j; for (k=j+1; k < 255; k++) if (zero_crossing[i+1].crossings[k] != 0) break; right=MagickMin(k,255); /* K is the zero crossing just left of j. */ for (k=j-1; k > 0; k--) if (zero_crossing[i].crossings[k] != 0) break; if (k < 0) k=0; /* Check center for an even number of crossings between k and j. 
*/ correct=(-1); if (zero_crossing[i+1].crossings[j] != 0) { count=0; for (l=k+1; l < center; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (center != k)) correct=center; } /* Check left for an even number of crossings between k and j. */ if (correct == -1) { count=0; for (l=k+1; l < left; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (left != k)) correct=left; } /* Check right for an even number of crossings between k and j. */ if (correct == -1) { count=0; for (l=k+1; l < right; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (right != k)) correct=right; } l=(ssize_t) zero_crossing[i].crossings[j]; zero_crossing[i].crossings[j]=0; if (correct != -1) zero_crossing[i].crossings[correct]=(short) l; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e f i n e R e g i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DefineRegion() defines the left and right boundaries of a peak region. % % The format of the DefineRegion method is: % % ssize_t DefineRegion(const short *extrema,ExtentPacket *extents) % % A description of each parameter follows. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o extents: This pointer to an ExtentPacket represent the extends % of a particular peak or valley of a color component. % */ static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents) { /* Initialize to default values. */ extents->left=0; extents->center=0.0; extents->right=255; /* Find the left side (maxima). */ for ( ; extents->index <= 255; extents->index++) if (extrema[extents->index] > 0) break; if (extents->index > 255) return(MagickFalse); /* no left side - no region exists */ extents->left=extents->index; /* Find the right side (minima). 
*/ for ( ; extents->index <= 255; extents->index++) if (extrema[extents->index] < 0) break; extents->right=extents->index-1; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e r i v a t i v e H i s t o g r a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DerivativeHistogram() determines the derivative of the histogram using % central differencing. % % The format of the DerivativeHistogram method is: % % DerivativeHistogram(const double *histogram, % double *derivative) % % A description of each parameter follows. % % o histogram: Specifies an array of doubles representing the number % of pixels for each intensity of a particular color component. % % o derivative: This array of doubles is initialized by % DerivativeHistogram to the derivative of the histogram using central % differencing. % */ static void DerivativeHistogram(const double *histogram, double *derivative) { register ssize_t i, n; /* Compute endpoints using second order polynomial interpolation. */ n=255; derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]); derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]); /* Compute derivative using central differencing. */ for (i=1; i < n; i++) derivative[i]=(histogram[i+1]-histogram[i-1])/2.0; return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e D y n a m i c T h r e s h o l d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDynamicThreshold() returns the dynamic threshold for an image. % % The format of the GetImageDynamicThreshold method is: % % MagickBooleanType GetImageDynamicThreshold(const Image *image, % const double cluster_threshold,const double smooth_threshold, % PixelInfo *pixel,ExceptionInfo *exception) % % A description of each parameter follows. 
%
%    o image: the image.
%
%    o cluster_threshold: This double represents the minimum number of
%      pixels contained in a hexahedra before it can be considered valid
%      (expressed as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the
%      second derivative of the histogram.  As the value is increased, you
%      can expect a smoother second derivative.
%
%    o pixel: return the dynamic threshold here.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  double
    threshold;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetPixelInfo(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    /*
      BUG FIX: the extrema buffers were sized with sizeof(**histogram)
      (sizeof(ssize_t)) although they hold shorts -- a benign over-allocation,
      but the wrong element type.
    */
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          BUG FIX: free the current (possibly half-allocated) pair too;
          RelinquishMagickMemory() accepts NULL.
        */
        extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
        histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram and find the peak/valley fingerprint for each
    channel.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters.
    NOTE(review): the allocation-failure paths below leak the histogram,
    extrema, and partially built cluster list -- pre-existing; confirm
    before fixing.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(double) ScaleQuantumToChar(
              GetPixelRed(image,p));
            cluster->green.center+=(double) ScaleQuantumToChar(
              GetPixelGreen(image,p));
            cluster->blue.center+=(double) ScaleQuantumToChar(
              GetPixelBlue(image,p));
            cluster->count++;
            break;
          }
      p+=GetPixelChannels(image);
    }
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    Pick the smallest cluster as the object and the largest as the
    background, then threshold halfway between their centers.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  if (background != (Cluster *) NULL)
    {
      threshold=(background->red.center+object->red.center)/2.0;
      pixel->red=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->green.center+object->green.center)/2.0;
      pixel->green=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
      threshold=(background->blue.center+object->blue.center)/2.0;
      pixel->blue=(double) ScaleCharToQuantum((unsigned char)
        (threshold+0.5));
    }
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  I n i t i a l i z e H i s t o g r a m                                      %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeHistogram() computes the histogram for an image.
%
%  The format of the InitializeHistogram method is:
%
%      InitializeHistogram(const Image *image,ssize_t **histogram)
%
%  A description of each parameter follows.
%
%    o image: Specifies a pointer to an Image structure;  returned from
%      ReadImage.
%
%    o histogram: Specifies an array of integers representing the number
%      of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Initialize histogram.
  */
  for (i=0; i <= 255; i++)
  {
    histogram[Red][i]=0;
    histogram[Green][i]=0;
    histogram[Blue][i]=0;
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(image,p))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p))]++;
      p+=GetPixelChannels(image);
    }
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+  I n i t i a l i z e I n t e r v a l T r e e                                %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeIntervalTree() initializes an interval tree from the lists of
%  zero crossings.
%
%  The format of the InitializeIntervalTree method is:
%
%      InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes,
%        IntervalTree *node)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
% */ static void InitializeList(IntervalTree **list,ssize_t *number_nodes, IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->child == (IntervalTree *) NULL) list[(*number_nodes)++]=node; InitializeList(list,number_nodes,node->sibling); InitializeList(list,number_nodes,node->child); } static void MeanStability(IntervalTree *node) { register IntervalTree *child; if (node == (IntervalTree *) NULL) return; node->mean_stability=0.0; child=node->child; if (child != (IntervalTree *) NULL) { register ssize_t count; register double sum; sum=0.0; count=0; for ( ; child != (IntervalTree *) NULL; child=child->sibling) { sum+=child->stability; count++; } node->mean_stability=sum/(double) count; } MeanStability(node->sibling); MeanStability(node->child); } static void Stability(IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->child == (IntervalTree *) NULL) node->stability=0.0; else node->stability=node->tau-(node->child)->tau; Stability(node->sibling); Stability(node->child); } static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing, const size_t number_crossings) { IntervalTree *head, **list, *node, *root; register ssize_t i; ssize_t j, k, left, number_nodes; /* Allocate interval tree. */ list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength, sizeof(*list)); if (list == (IntervalTree **) NULL) return((IntervalTree *) NULL); /* The root is the entire histogram. */ root=(IntervalTree *) AcquireMagickMemory(sizeof(*root)); root->child=(IntervalTree *) NULL; root->sibling=(IntervalTree *) NULL; root->tau=0.0; root->left=0; root->right=255; for (i=(-1); i < (ssize_t) number_crossings; i++) { /* Initialize list with all nodes with no children. */ number_nodes=0; InitializeList(list,&number_nodes,root); /* Split list. 
*/ for (j=0; j < number_nodes; j++) { head=list[j]; left=head->left; node=head; for (k=head->left+1; k < head->right; k++) { if (zero_crossing[i+1].crossings[k] != 0) { if (node == head) { node->child=(IntervalTree *) AcquireMagickMemory( sizeof(*node->child)); node=node->child; } else { node->sibling=(IntervalTree *) AcquireMagickMemory( sizeof(*node->sibling)); node=node->sibling; } node->tau=zero_crossing[i+1].tau; node->child=(IntervalTree *) NULL; node->sibling=(IntervalTree *) NULL; node->left=left; node->right=k; left=k; } } if (left != head->left) { node->sibling=(IntervalTree *) AcquireMagickMemory( sizeof(*node->sibling)); node=node->sibling; node->tau=zero_crossing[i+1].tau; node->child=(IntervalTree *) NULL; node->sibling=(IntervalTree *) NULL; node->left=left; node->right=head->right; } } } /* Determine the stability: difference between a nodes tau and its child. */ Stability(root->child); MeanStability(root->child); list=(IntervalTree **) RelinquishMagickMemory(list); return(root); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p t i m a l T a u % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OptimalTau() finds the optimal tau for each band of the histogram. % % The format of the OptimalTau method is: % % double OptimalTau(const ssize_t *histogram,const double max_tau, % const double min_tau,const double delta_tau, % const double smooth_threshold,short *extrema) % % A description of each parameter follows. % % o histogram: Specifies an array of integers representing the number % of pixels for each intensity of a particular color component. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. 
% */ static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes, IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->stability >= node->mean_stability) { list[(*number_nodes)++]=node; ActiveNodes(list,number_nodes,node->sibling); } else { ActiveNodes(list,number_nodes,node->sibling); ActiveNodes(list,number_nodes,node->child); } } static void FreeNodes(IntervalTree *node) { if (node == (IntervalTree *) NULL) return; FreeNodes(node->sibling); FreeNodes(node->child); node=(IntervalTree *) RelinquishMagickMemory(node); } static double OptimalTau(const ssize_t *histogram,const double max_tau, const double min_tau,const double delta_tau,const double smooth_threshold, short *extrema) { IntervalTree **list, *node, *root; MagickBooleanType peak; double average_tau, *derivative, *second_derivative, tau, value; register ssize_t i, x; size_t count, number_crossings; ssize_t index, j, k, number_nodes; ZeroCrossing *zero_crossing; /* Allocate interval tree. */ list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength, sizeof(*list)); if (list == (IntervalTree **) NULL) return(0.0); /* Allocate zero crossing list. */ count=(size_t) ((max_tau-min_tau)/delta_tau)+2; zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count, sizeof(*zero_crossing)); if (zero_crossing == (ZeroCrossing *) NULL) return(0.0); for (i=0; i < (ssize_t) count; i++) zero_crossing[i].tau=(-1.0); /* Initialize zero crossing list. 
*/ derivative=(double *) AcquireQuantumMemory(256,sizeof(*derivative)); second_derivative=(double *) AcquireQuantumMemory(256, sizeof(*second_derivative)); if ((derivative == (double *) NULL) || (second_derivative == (double *) NULL)) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDerivatives"); i=0; for (tau=max_tau; tau >= min_tau; tau-=delta_tau) { zero_crossing[i].tau=tau; ScaleSpace(histogram,tau,zero_crossing[i].histogram); DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); i++; } /* Add an entry for the original histogram. */ zero_crossing[i].tau=0.0; for (j=0; j <= 255; j++) zero_crossing[i].histogram[j]=(double) histogram[j]; DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); number_crossings=(size_t) i; derivative=(double *) RelinquishMagickMemory(derivative); second_derivative=(double *) RelinquishMagickMemory(second_derivative); /* Ensure the scale-space fingerprints form lines in scale-space, not loops. */ ConsolidateCrossings(zero_crossing,number_crossings); /* Force endpoints to be included in the interval. */ for (i=0; i <= (ssize_t) number_crossings; i++) { for (j=0; j < 255; j++) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]); for (j=255; j > 0; j--) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]); } /* Initialize interval tree. */ root=InitializeIntervalTree(zero_crossing,number_crossings); if (root == (IntervalTree *) NULL) return(0.0); /* Find active nodes: stability is greater (or equal) to the mean stability of its children. */ number_nodes=0; ActiveNodes(list,&number_nodes,root->child); /* Initialize extrema. 
*/ for (i=0; i <= 255; i++) extrema[i]=0; for (i=0; i < number_nodes; i++) { /* Find this tau in zero crossings list. */ k=0; node=list[i]; for (j=0; j <= (ssize_t) number_crossings; j++) if (zero_crossing[j].tau == node->tau) k=j; /* Find the value of the peak. */ peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue : MagickFalse; index=node->left; value=zero_crossing[k].histogram[index]; for (x=node->left; x <= node->right; x++) { if (peak != MagickFalse) { if (zero_crossing[k].histogram[x] > value) { value=zero_crossing[k].histogram[x]; index=x; } } else if (zero_crossing[k].histogram[x] < value) { value=zero_crossing[k].histogram[x]; index=x; } } for (x=node->left; x <= node->right; x++) { if (index == 0) index=256; if (peak != MagickFalse) extrema[x]=(short) index; else extrema[x]=(short) (-index); } } /* Determine the average tau. */ average_tau=0.0; for (i=0; i < number_nodes; i++) average_tau+=list[i]->tau; average_tau/=(double) number_nodes; /* Relinquish resources. */ FreeNodes(root); zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(average_tau); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S c a l e S p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleSpace() performs a scale-space filter on the 1D histogram. % % The format of the ScaleSpace method is: % % ScaleSpace(const ssize_t *histogram,const double tau, % double *scale_histogram) % % A description of each parameter follows. % % o histogram: Specifies an array of doubles representing the number % of pixels for each intensity of a particular color component. 
%
*/

/*
  ScaleSpace() convolves the 256-bin histogram with a Gaussian of width
  tau, writing the smoothed result to scale_histogram.  The Gaussian
  kernel is tabulated once per call and truncated where its weight drops
  below MagickEpsilon.
*/
static void ScaleSpace(const ssize_t *histogram,const double tau,
  double *scale_histogram)
{
  double
    amplitude,
    exponent,
    *kernel,
    total;

  ssize_t
    delta,
    n;

  kernel=(double *) AcquireQuantumMemory(256,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateGammaMap");
  /* 1/(tau*sqrt(2*pi)) normalization and -1/(2*tau^2) exponent factor */
  amplitude=PerceptibleReciprocal(tau*sqrt(2.0*MagickPI));
  exponent=(-1.0*PerceptibleReciprocal(2.0*tau*tau));
  for (n=0; n <= 255; n++)
    kernel[n]=0.0;
  /* tabulate the kernel; stop once weights become negligible */
  for (n=0; n <= 255; n++)
  {
    kernel[n]=exp((double) exponent*n*n);
    if (kernel[n] < MagickEpsilon)
      break;
  }
  /* convolve: each output bin is the weighted sum over all input bins */
  for (n=0; n <= 255; n++)
  {
    total=0.0;
    for (delta=0; delta <= 255; delta++)
      total+=(double) histogram[delta]*kernel[MagickAbsoluteValue(n-delta)];
    scale_histogram[n]=amplitude*total;
  }
  kernel=(double *) RelinquishMagickMemory(kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  S e g m e n t I m a g e                                                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SegmentImage() segment an image by analyzing the histograms of the color
%  components and identifying units that are homogeneous with the fuzzy
%  C-means technique.
%
%  The format of the SegmentImage method is:
%
%      MagickBooleanType SegmentImage(Image *image,
%        const ColorspaceType colorspace,const MagickBooleanType verbose,
%        const double cluster_threshold,const double smooth_threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%  o image: the image.
%
%  o colorspace: Indicate the colorspace.
%
%  o verbose: Set to MagickTrue to print detailed information about the
%    identified classes.
%
%  o cluster_threshold: This represents the minimum number of pixels
%    contained in a hexahedra before it can be considered valid (expressed
%    as a percentage).
%
%  o smooth_threshold: the smoothing threshold eliminates noise in the second
%    derivative of the histogram. As the value is increased, you can expect a
%    smoother second derivative.
%
%  o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold,
  ExceptionInfo *exception)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          fix: the original decremented i before freeing, leaking whichever
          of histogram[i]/extrema[i] did succeed at the failing index.
          RelinquishMagickMemory() accepts NULL, so free both unconditionally.
        */
        extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
        histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        for (i--; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace,exception);
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose,
    exception);
  (void) TransformImageColorspace(image,previous_colorspace,exception);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Z e r o C r o s s H i s t o g r a m                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroCrossHistogram() find the zero crossings in a histogram and marks
%  directions as: 1 is negative to positive; 0 is zero crossing; and -1
%  is positive to negative.
%
%  The format of the ZeroCrossHistogram method is:
%
%      ZeroCrossHistogram(double *second_derivative,
%        const double smooth_threshold,short *crossings)
%
%  A description of each parameter follows.
%
%  o second_derivative: Specifies an array of doubles representing the
%    second derivative of the histogram of a particular color component.
%
%  o crossings: This array of integers is initialized with
%    -1, 0, or 1 representing the slope of the first derivative of the
%    of a particular color component.
%
*/
static void ZeroCrossHistogram(double *second_derivative,
  const double smooth_threshold,short *crossings)
{
  register ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise.
  */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings: parity tracks the sign of the last non-zero bin.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);
        parity=1;
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;
          parity=(-1);
        }
  }
}
test_dma.c
/*******************************************************************************
 * Copyright 2019 UChicago Argonne, LLC.
 * (c.f. AUTHORS, LICENSE)
 *
 * This file is part of the AML project.
 * For more info, see https://github.com/anlsys/aml
 *
 * SPDX-License-Identifier: BSD-3-Clause
 ******************************************************************************/

#include "aml.h"

/* Pointer arithmetic helper: (ptr sign off), computed in byte units. */
#define PTR_OFF(ptr, sign, off) (void *)((intptr_t)(ptr)sign(intptr_t)(off))

/*
 * Round-trip a 1MiB buffer host -> area -> host through the dma with the
 * provided memcpy operator and verify the data is preserved byte-for-byte.
 */
void test_dma_memcpy(struct aml_area *area,
                     struct aml_area_mmap_options *area_opts,
                     struct aml_dma *dma,
                     aml_dma_operator memcpy_op)
{
	const size_t size = 1UL << 20; // 1MiB
	void *src_buf, *test_buf, *dma_buf;
	int err;

	src_buf = malloc(size);
	test_buf = malloc(size);
	dma_buf = aml_area_mmap(area, size, area_opts);
	assert(src_buf);
	assert(test_buf);
	assert(dma_buf);

	memset(src_buf, 1, size);
	memset(test_buf, 0, size);

	/*
	 * Fix: the DMA calls were performed inside assert(), so compiling
	 * with NDEBUG would remove the copies and the test would silently
	 * check nothing.  Perform the call, then assert on its result.
	 */
	err = aml_dma_copy_custom(dma, dma_buf, src_buf, memcpy_op,
	                          (void *)size);
	assert(err == AML_SUCCESS);
	err = aml_dma_copy_custom(dma, test_buf, dma_buf, memcpy_op,
	                          (void *)size);
	assert(err == AML_SUCCESS);
	assert(!memcmp(test_buf, src_buf, size));

	free(src_buf);
	free(test_buf);
	aml_area_munmap(area, dma_buf, size);
}

/*
 * Launch many asynchronous copies (optionally from several OpenMP
 * threads), wait on aml_dma_barrier(), copy the data back and verify it.
 */
void test_dma_barrier(struct aml_area *area,
                      struct aml_area_mmap_options *area_opts,
                      struct aml_dma *dma,
                      aml_dma_operator memcpy_op)
{
	const size_t n = 128;
	const size_t size = 1UL << 10; // 1KiB
	void *src_buf, *test_buf, *dma_buf[n];
	int err;

	// Initialization
	src_buf = malloc(size * n);
	test_buf = malloc(size * n);
	assert(src_buf);
	assert(test_buf);
	memset(test_buf, 0, size * n);
	for (size_t i = 0; i < n; i++) {
		/* distinct fill value per chunk so misrouted copies are caught */
		memset(PTR_OFF(src_buf, +, i * size), i + 1, size);
		dma_buf[i] = aml_area_mmap(area, size, area_opts);
		assert(dma_buf[i]);
	}

	// Copy to device area
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (size_t i = 0; i < n; i++) {
		/* fix: keep the side-effecting call out of assert() (NDEBUG) */
		int rc = aml_dma_async_copy_custom(dma, NULL, dma_buf[i],
		                                   PTR_OFF(src_buf, +, size * i),
		                                   memcpy_op, (void *)size);
		assert(rc == AML_SUCCESS);
	}

	// Wait all copies
	err = aml_dma_barrier(dma);
	assert(err == AML_SUCCESS);

	// Copy back from device area to host
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (size_t i = 0; i < n; i++) {
		int rc = aml_dma_async_copy_custom(dma, NULL,
		                                   PTR_OFF(test_buf, +, size * i),
		                                   dma_buf[i], memcpy_op,
		                                   (void *)size);
		assert(rc == AML_SUCCESS);
	}

	// Wait all copies
	err = aml_dma_barrier(dma);
	assert(err == AML_SUCCESS);

	// Byte wise comparison
	for (size_t i = 0; i < n; i++)
		assert(!memcmp(PTR_OFF(test_buf, +, size * i),
		               PTR_OFF(src_buf, +, size * i), size));

	// Cleanup
	free(src_buf);
	free(test_buf);
	for (size_t i = 0; i < n; i++)
		aml_area_munmap(area, dma_buf[i], size);
}
2218.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "atax.h" /* Array initialization. */ static void init_array (int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny)) { int i, j; for (i = 0; i < ny; i++) x[i] = i * M_PI; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) A[i][j] = ((DATA_TYPE) i*(j+1)) / nx; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int nx, DATA_TYPE POLYBENCH_1D(y,NX,nx)) { int i; for (i = 0; i < nx; i++) { fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]); if (i % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_atax(int nx, int ny, DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny), DATA_TYPE POLYBENCH_1D(x,NY,ny), DATA_TYPE POLYBENCH_1D(y,NY,ny), DATA_TYPE POLYBENCH_1D(tmp,NX,nx)) { int i, j; #pragma scop { #pragma omp target teams distribute for (i = 0; i < _PB_NY; i++) { y[i] = 0; } #pragma omp target teams distribute for (i = 0; i < _PB_NX; i++) { tmp[i] = 0; for (j = 0; j < _PB_NY; j++) tmp[i] = tmp[i] + A[i][j] * x[j]; for (j = 0; j < _PB_NY; j++) y[j] = y[j] + A[i][j] * tmp[i]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int nx = NX; int ny = NY; /* Variable declaration/allocation. 
*/ POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny); POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny); POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx); /* Initialize array(s). */ init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_atax (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x), POLYBENCH_ARRAY(y), POLYBENCH_ARRAY(tmp)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y))); /* Be clean. */ POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(x); POLYBENCH_FREE_ARRAY(y); POLYBENCH_FREE_ARRAY(tmp); return 0; }
2013.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "correlation.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < m; i++) for (j = 0; j < n; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_correlation(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m), DATA_TYPE POLYBENCH_1D(stddev,M,m)) { int i, j, j1, j2; DATA_TYPE eps = 0.1f; #define sqrt_of_array_cell(x,j) sqrt(x[j]) #pragma scop /* Determine mean of column vectors of input data matrix */ #pragma omp parallel private(i, j, j2) num_threads(#P11) #same issue as atax { #pragma omp parallel for schedule(dynamic, 4) simd num_threads(4) for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Determine standard deviations of column vectors of data matrix. 
*/ #pragma omp parallel for schedule(dynamic, 4) simd num_threads(4) for (j = 0; j < _PB_M; j++) { stddev[j] = 0.0; for (i = 0; i < _PB_N; i++) stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]); stddev[j] /= float_n; stddev[j] = sqrt_of_array_cell(stddev, j); /* The following in an inelegant but usual way to handle near-zero std. dev. values, which below would cause a zero- divide. */ stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j]; } /* Center and reduce the column vectors. */ #pragma omp parallel for schedule(dynamic, 4) simd num_threads(4) for (i = 0; i < _PB_N; i++) { #pragma omp parallel for schedule(dynamic, 4) simd num_threads(4) for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; data[i][j] /= sqrt(float_n) * stddev[j]; } } /* Calculate the m * m correlation matrix. */ #pragma omp parallel for schedule(dynamic, 4) simd num_threads(4) for (j1 = 0; j1 < _PB_M-1; j1++) { symmat[j1][j1] = 1.0; for (j2 = j1+1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += (data[i][j1] * data[i][j2]); symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop symmat[_PB_M-1][_PB_M-1] = 1.0; } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_correlation (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean), POLYBENCH_ARRAY(stddev)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. 
*/ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); POLYBENCH_FREE_ARRAY(stddev); return 0; }
oyranos_cmm_oyra_image_scale.c
/** @file oyranos_cmm_oyra_image.c
 *
 *  Oyranos is an open source Color Management System
 *
 *  @par Copyright:
 *            2013-2016 (C) Kai-Uwe Behrmann
 *
 *  @brief    modules for Oyranos
 *  @internal
 *  @author   Kai-Uwe Behrmann <ku.b@gmx.de>
 *  @par License:
 *            new BSD <http://www.opensource.org/licenses/BSD-3-Clause>
 *  @since    2013/06/10
 */

#include "oyCMMapi4_s.h"
#include "oyCMMapi7_s.h"
#include "oyCMMui_s.h"
#include "oyConnectorImaging_s.h"
#include "oyRectangle_s.h"
#include "oyRectangle_s_.h"

#include "oyranos_cmm.h"
#include "oyranos_cmm_oyra.h"
#include "oyranos_helper.h"
#include "oyranos_i18n.h"
#include "oyranos_string.h"

#include <math.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#ifdef HAVE_POSIX
#include <stdint.h>  /* UINT32_MAX */
#endif

/* OY_IMAGE_SCALE_REGISTRATION */

/* OY_IMAGE_SCALE_REGISTRATION ----------------------------------------------*/

/** @func    oyraFilter_ImageScaleRun
 *  @brief   implement oyCMMFilter_GetNext_f()
 *
 *  Nearest-neighbour image scaling filter: reads the filter node's
 *  "scale" option, requests a correspondingly enlarged/shrunk region of
 *  interest from the input node on a cloned ticket, and copies the
 *  selected source pixels channel-wise into the output array.  With a
 *  scale of 1.0 (or an invalid non-positive scale) the ticket is passed
 *  through to the input node unchanged.
 *
 *  Returns 0 on success, 1 when no image is attached to the socket, or a
 *  dirty/result code from the node graph.
 *
 *  @version Oyranos: 0.9.6
 *  @date    2016/04/04
 *  @since   2013/06/10 (Oyranos: 0.9.5)
 */
int      oyraFilter_ImageScaleRun    ( oyFilterPlug_s    * requestor_plug,
                                       oyPixelAccess_s   * ticket )
{
  int result = 0, error = 0;
  oyFilterSocket_s * socket = 0;
  oyFilterNode_s * input_node = 0,
                 * node = 0;
  oyFilterPlug_s * plug = 0;
  oyImage_s * image = 0;
  int image_width;
  int dirty = 0;

  socket = oyFilterPlug_GetSocket( requestor_plug );
  node = oyFilterSocket_GetNode( socket );
  image = (oyImage_s*)oyFilterSocket_GetData( socket );
  if(!image)
  {
    /* nothing to scale without a source image */
    result = 1;
    goto clean_scale1;
  }
  image_width = oyImage_GetWidth(image);

  if(oy_debug)
    oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
              "image [%d](%d)\n",OY_DBG_ARGS_,
              oyStruct_GetId((oyStruct_s*)image),oyImage_GetWidth(image) );

  {
    /* stack allocated rectangle; released implicitly with the scope */
    oyRectangle_s_ ticket_roi_pix_ = {oyOBJECT_RECTANGLE_S,0,0,0, 0,0,0,0};
    oyRectangle_s * ticket_roi_pix = (oyRectangle_s*)&ticket_roi_pix_;
    double scale = 1.0;
    oyOptions_s * node_opts = oyFilterNode_GetOptions( node, 0 );

    if(!node_opts)
      dirty = 1;

    if(dirty)
    {
      result = dirty;
      goto clean_scale2;
    }

    plug = oyFilterNode_GetPlug( node, 0 );

    /* select node */
    input_node = oyFilterNode_GetPlugNode( node, 0 );

    /* find filters own scale factor */
    error = oyOptions_FindDouble( node_opts,
                                  "//" OY_TYPE_STD "/scale/scale",
                                  0, &scale );
    if(error) WARNc2_S("%s %d", _("found issues"),error);

    oyPixelAccess_RoiToPixels( ticket, NULL, &ticket_roi_pix );
    if(oy_debug > 2)
      oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
                "%s",OY_DBG_ARGS_, oyPixelAccess_Show(ticket));

    if(scale != 1.0 && scale > 0)
    {
      oyImage_s * output_image = oyPixelAccess_GetOutputImage( ticket );
      int output_image_width = oyImage_GetWidth( output_image );
      oyRectangle_s_ new_ticket_array_roi_pix_ =
                                       {oyOBJECT_RECTANGLE_S,0,0,0, 0,0,0,0};
      oyRectangle_s * new_ticket_array_roi = NULL,
                    * new_ticket_array_roi_pix =
                                     (oyRectangle_s*)&new_ticket_array_roi_pix_,
                    * image_pix = oyRectangle_NewWith( 0,0,+
                                    oyImage_GetWidth(image),
                                    oyImage_GetHeight(image), 0);
      oyPixelAccess_s * new_ticket = 0;
      /* start_xy is defined relative to the tickets output image width */
      double start_x_src_pixel = oyPixelAccess_GetStart( ticket, 0 ) *
                                 output_image_width,
             start_y_src_pixel = oyPixelAccess_GetStart( ticket, 1 ) *
                                 output_image_width,
             start_x_dst_pixel,start_y_dst_pixel;
      int layout_src = oyImage_GetPixelLayout( image, oyLAYOUT ),
          layout_dst = oyImage_GetPixelLayout( output_image, oyLAYOUT );
      int channels_src = oyToChannels_m( layout_src );
      int channels_dst = oyToChannels_m( layout_dst );

      /* avoid division by zero */
      if(!channels_src) channels_src = 1;
      if(!channels_dst) channels_dst = 1;

      /* clone the ticket and point it at the (unscaled) source image */
      new_ticket = oyPixelAccess_Copy( ticket, ticket->oy_ );
      oyPixelAccess_SetArray( new_ticket, 0, 0 );
      oyPixelAccess_SetOutputImage( new_ticket, image );

      if(oy_debug)
      {
        oyArray2d_s * a_dest = oyPixelAccess_GetArray( ticket );
        int a_width_dest = oyArray2d_GetWidth( a_dest ) / channels_dst;
        oyra_msg( oyMSG_DBG, (oyStruct_s*)ticket, OY_DBG_FORMAT_
                  "output_image [%d](%d*%d)-array[%d](w%d) image [%d](%d*%d)\n",
                  OY_DBG_ARGS_,
                  oyStruct_GetId((oyStruct_s*)output_image),
                  oyImage_GetWidth(output_image),channels_dst,
                  oyStruct_GetId((oyStruct_s*)a_dest), a_width_dest,
                  oyStruct_GetId((oyStruct_s*)image),
                  oyImage_GetWidth(image), channels_src );
        oyArray2d_Release( &a_dest );
      }

      oyPixelAccess_RoiToPixels( ticket, NULL, &new_ticket_array_roi_pix );
      /* scale */
      oyRectangle_Scale( new_ticket_array_roi_pix, 1.0/scale );
      oyRectangle_Round( new_ticket_array_roi_pix );
      /* convert to new_ticket relative dimensions */
      oyPixelAccess_PixelsToRoi( new_ticket, new_ticket_array_roi_pix,
                                 &new_ticket_array_roi );

      /* adapt the access start and write relative to new tickets image width */
      start_x_dst_pixel = OY_ROUND(start_x_src_pixel / scale);
      start_y_dst_pixel = OY_ROUND(start_y_src_pixel / scale);
      oyPixelAccess_ChangeRectangle( new_ticket,
                                     start_x_dst_pixel / image_width,
                                     start_y_dst_pixel / image_width,
                                     new_ticket_array_roi );

      if(oy_debug)
        oyMessageFunc_p( oy_debug?oyMSG_DBG:oyMSG_WARN, (oyStruct_s*)ticket,
                  OY_DBG_FORMAT_
                  "ticket: %s image[%d](%d) -> [%d](%d) scale %f\n",
                  OY_DBG_ARGS_,
                  oyPixelAccess_Show( ticket ),
                  oyStruct_GetId((oyStruct_s*)image),oyImage_GetWidth(image),
                  oyStruct_GetId((oyStruct_s*)output_image),
                  oyImage_GetWidth(output_image),
                  scale );
      if(oy_debug)
      {
        char * troi;
        troi = strdup( oyRectangle_Show(ticket_roi_pix) );
        oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
                  "ticket_roi_pix: %s %s %f new_ticket_array_roi_pix: %s",
                  OY_DBG_ARGS_,
                  troi, "scale factor:", scale,
                  oyRectangle_Show(new_ticket_array_roi_pix) );
        if(troi) free(troi);
        oyra_msg( oyMSG_DBG, (oyStruct_s*)ticket, OY_DBG_FORMAT_
                  "image_pix: %s start_x:%g start_y:%g",OY_DBG_ARGS_,
                  oyRectangle_Show(image_pix),
                  oyPixelAccess_GetStart( new_ticket, 0 )*image_width,
                  oyPixelAccess_GetStart( new_ticket, 1 )*image_width );
      }

      if(oyRectangle_CountPoints( new_ticket_array_roi ) > 0)
      {
        int nw,nh,w,h,x,y,xs,ys;
        oyArray2d_s * array_in, * array_out;
        uint8_t ** array_in_data, ** array_out_data;

        /* get pixel layout infos for copying */
        oyDATATYPE_e data_type_in = oyToDataType_m( layout_src ),
                     data_type_out = oyToDataType_m( layout_dst );
        int bps_in = oyDataTypeGetSize( data_type_in ),
            bps_out = oyDataTypeGetSize( data_type_out );
        int issue = 0;

        /* get the source pixels */
        if(oy_debug > 2)
          oyra_msg( oyMSG_DBG, (oyStruct_s*)ticket, OY_DBG_FORMAT_
                    "%s %s",OY_DBG_ARGS_, "Run new_ticket",
                    oyPixelAccess_Show( new_ticket ) );
        result = oyFilterNode_Run( input_node, plug, new_ticket );

        /* prepare the current ticket */
        oyPixelAccess_SetArrayFocus( ticket, 0 );

        /* get the channel buffers */
        array_in = oyPixelAccess_GetArray( new_ticket );
        array_out = oyPixelAccess_GetArray( ticket );
        array_in_data = oyArray2d_GetData( array_in );
        array_out_data = oyArray2d_GetData( array_out );
        /* widths in pixels: array widths are in channels */
        w = oyArray2d_GetWidth ( array_out ) / channels_dst;
        h = oyArray2d_GetHeight( array_out );
        nw = oyArray2d_GetWidth ( array_in ) / channels_src;
        nh = oyArray2d_GetHeight( array_in );

        /* sanity check: the source array must cover the scaled region */
        if(nw < (int)OY_ROUND(w/scale)) issue |= 1;
        if(nh < (int)OY_ROUND(h/scale)) issue |= 2;
        if(issue || oy_debug > 2)
        {
          oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
                    "ticket: %s",OY_DBG_ARGS_, oyPixelAccess_Show(ticket));
          oyra_msg( oyMSG_WARN, (oyStruct_s*)ticket, OY_DBG_FORMAT_
                    "new_ti: %s",OY_DBG_ARGS_, oyPixelAccess_Show(new_ticket));
        }
        if(issue || oy_debug)
        {
          char *a,*b,*c;
          a = strdup(oyRectangle_Show( ticket_roi_pix ));
          b = strdup(oyRectangle_Show( image_pix ));
          c = strdup(oyRectangle_Show( new_ticket_array_roi_pix ));
          oyra_msg( issue?oyMSG_ERROR:oyMSG_DBG, (oyStruct_s*)ticket,
                    OY_DBG_FORMAT_
                    "node [%d] scale: %.02f old roi %s/%s(image) -> new roi %s array_in[%d](%dx%d)%dc w/scale=%g h/scale=%g-> array_out[%d](%dx%d)%dc"
                    "%s%s%s",OY_DBG_ARGS_,
                    oyStruct_GetId( (oyStruct_s*)node ), scale,
                    a,b,c,
                    oyStruct_GetId( (oyStruct_s*)array_in ), nw,nh,channels_src,
                    w/scale, h/scale,
                    oyStruct_GetId( (oyStruct_s*)array_out ), w,h, channels_dst,
                    issue?" found issue(s): too":"",
                    issue & 1 ? " wide":"",
                    issue & 2 ? " heigh":"" );
          if(a) {free(a);} if(b) {free(b);} if(c) {free(c);}
        }

        /* do the scaling while copying the channels */
#if defined(USE_OPENMP)
#pragma omp parallel for private(x,xs,ys)
#endif
        for(y = 0; y < h; ++y)
        {
          /* nearest-neighbour: pick the source row for this output row */
          ys = y/scale;
          if(OY_ROUNDp(ys) >= nh)
          {
            if(oy_debug || (OY_ROUNDp(ys) >= (nh + 1)))
              oyra_msg( oy_debug?oyMSG_DBG:oyMSG_ERROR, (oyStruct_s*)ticket,
                        OY_DBG_FORMAT_"scale:%g y:%d h:%d ys:%d/%g nh:%d\n",
                        OY_DBG_ARGS_, scale, y,h,ys,y/scale,nh);
          } else
          for(x = 0; x < w; ++x)
          {
            xs = x/scale;
            if(OY_ROUNDp(xs) < nw)
            {
#if 0
              /* optimisations which have not much benefit */
              int chars = channels_src*bps_in, b;
              uint32_t ** array_out_4 = (uint32_t**)array_out_data;
              uint32_t ** array_in_4 = (uint32_t**)array_in_data;
              if(bps_in == 4)
                for( b = 0; b < channels_src; ++b )
                  array_out_4[y] [x *channels_dst+b] =
                  array_in_4 [ys][xs *channels_src+b];
              else
                for( b = 0; b < chars; ++b )
                  array_out_data[y] [x *channels_dst*bps_out+b] =
                  array_in_data [ys][xs *channels_src*bps_in +b];
#else
              /* copy one whole source pixel (all channels) */
              memmove( &array_out_data[y] [x *channels_dst*bps_out],
                       &array_in_data [ys][xs *channels_src*bps_in],
                       channels_src*bps_in );
#endif
            }
          }
        }
        oyPixelAccess_Release( &new_ticket );
        oyArray2d_Release( &array_in );
        oyArray2d_Release( &array_out );
      }
      oyRectangle_Release( &new_ticket_array_roi );
      //oyRectangle_Release( &new_ticket_array_roi_pix );
      oyRectangle_Release( &image_pix );
    } else /* scale == 1.0 */
    {
      /* identity scale: just forward the ticket to the input node */
      result = oyFilterNode_Run( input_node, plug, ticket );
    }

    clean_scale2:
    oyOptions_Release( &node_opts );
    oyFilterPlug_Release( &plug );
    //oyRectangle_Release( &ticket_roi_pix );
    oyFilterNode_Release( &input_node );
  }

  clean_scale1:
  oyImage_Release( &image );
  oyFilterSocket_Release( &socket );
  oyFilterNode_Release( &node );

  return result;
}

#define OY_IMAGE_SCALE_REGISTRATION OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH "scale"

/** @brief    oyra oyCMMapi7_s implementation
 *
 *  a filter providing a scale image filter
 *
 *  @version Oyranos: 0.9.5
 *  @since   2013/06/14 (Oyranos: 0.9.5)
 *
@date 2013/06/14
 */
oyCMMapi_s * oyraApi7ImageScaleCreate(void)
{
  oyCMMapi7_s * scale7;
  /* module version and the module API version it was written against */
  int32_t cmm_version[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C},
          module_api[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C};
  /* pixel data types accepted on both connectors; zero terminated list,
   * only the first 6 entries are registered below */
  static oyDATATYPE_e data_types[7] = {oyUINT8, oyUINT16, oyUINT32, oyHALF,
                                       oyFLOAT, oyDOUBLE, 0};
  oyConnectorImaging_s * plug = oyConnectorImaging_New(0),
                       * socket = oyConnectorImaging_New(0);
  /* static: these arrays are handed to oyCMMapi7_Create and must outlive
   * this call */
  static oyConnectorImaging_s * plugs[2] = {0,0},
                              * sockets[2] = {0,0};

  plugs[0] = plug;
  sockets[0] = socket;

  /* describe the input (plug) connector */
  oyConnectorImaging_SetDataTypes( plug, data_types, 6 );
  oyConnectorImaging_SetReg( plug, "//" OY_TYPE_STD "/manipulator.data" );
  oyConnectorImaging_SetMatch( plug, oyFilterSocket_MatchImagingPlug );
  oyConnectorImaging_SetTexts( plug, oyCMMgetImageConnectorPlugText,
                               oy_image_connector_texts );
  oyConnectorImaging_SetIsPlug( plug, 1 );
  oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_OFFSET, -1 );
  oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MIN_CHANNELS_COUNT, 1 );
  oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_CHANNELS_COUNT, 255 );
  oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MIN_COLOR_COUNT, 1 );
  oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_COUNT, 255 );
  oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_INTERWOVEN, 1 );
  oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_PREMULTIPLIED_ALPHA, 1 );
  oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_CAN_NONPREMULTIPLIED_ALPHA, 1 );
  oyConnectorImaging_SetCapability( plug, oyCONNECTOR_IMAGING_CAP_ID, 1 );

  /* describe the output (socket) connector; capabilities mirror the plug */
  oyConnectorImaging_SetDataTypes( socket, data_types, 6 );
  oyConnectorImaging_SetReg( socket, "//" OY_TYPE_STD "/manipulator.data" );
  oyConnectorImaging_SetMatch( socket, oyFilterSocket_MatchImagingPlug );
  oyConnectorImaging_SetTexts( socket, oyCMMgetImageConnectorSocketText,
                               oy_image_connector_texts );
  oyConnectorImaging_SetIsPlug( socket, 0 );
  oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_OFFSET, -1 );
  oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MIN_CHANNELS_COUNT, 1 );
  oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_CHANNELS_COUNT, 255 );
  oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MIN_COLOR_COUNT, 1 );
  oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_MAX_COLOR_COUNT, 255 );
  oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_INTERWOVEN, 1 );
  oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_PREMULTIPLIED_ALPHA, 1 );
  oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_CAN_NONPREMULTIPLIED_ALPHA, 1 );
  oyConnectorImaging_SetCapability( socket, oyCONNECTOR_IMAGING_CAP_ID, 1 );

  /* assemble the processing (api7) object;
   * oyraFilter_ImageScaleRun implements the actual scaling */
  scale7 = oyCMMapi7_Create ( oyraCMMInit, oyraCMMMessageFuncSet,
                              OY_IMAGE_SCALE_REGISTRATION,
                              cmm_version, module_api,
                              NULL,
                              oyraFilter_ImageScaleRun,
                              (oyConnector_s**)plugs, 1, 0,
                              (oyConnector_s**)sockets, 1, 0,
                              0, 0 );
  return (oyCMMapi_s*) scale7;
}

/** UI text callback for the scale filter.
 *
 *  Serves the "name", "help" and "category" selectors with nick, name and
 *  description variants.  Returns 0 for unknown selector/type combinations.
 */
const char * oyraApi4UiImageScaleGetText (
                                       const char        * select,
                                       oyNAME_e            type,
                                       oyStruct_s        * context OY_UNUSED )
{
  if(strcmp(select,"name") == 0)
  {
    if(type == oyNAME_NICK)
      return "image_scale";
    else if(type == oyNAME_NAME)
      return _("Image[scale]");
    else if(type == oyNAME_DESCRIPTION)
      return _("Scale Image Filter Object");
  }
  else if(strcmp(select,"help") == 0)
  {
    if(type == oyNAME_NICK)
      return "help";
    else if(type == oyNAME_NAME)
      return _("The filter is used to reduce pixels.");
    else if(type == oyNAME_DESCRIPTION)
    {
      /* built once on first request and cached; intentionally never freed */
      static char * help_desc = NULL;
      if(!help_desc)
        oyStringAddPrintf( &help_desc, 0,0,
 "%s\n"
 " %s \n"
 " \n"
 " start_xy %s \n"
 " | / \n"
 " +-----|---------------/--------------+ \n"
 " | | / | \n"
 " | | / +--- %s \n"
 " | ---+------------/----------+ | \n"
 " | | / +---------- %s \n"
 " | | +------+--------+ | | \n"
 " | | | | | | \n"
 " | | | | | | \n"
 " | | +---------------+ | | \n"
 " | | | | \n"
 " | +-----------------------+ | \n"
 " | | \n"
 " +------------------------------------+ \n"
 "",
 _("The filter will expect a \"scale\" double option and will create, fill and process a according data version with a new job ticket. The new job tickets image, array and output_array_roi will be divided by the supplied \"scale\" factor. It's plug will request the divided image sizes from the source socket."),
 _("Relation of positional parameters:"),
 /* output image region of interest */
 _("output_array_roi"),
 _("source image"),
 _("output image")
        );
      return help_desc;
    }
  }
  else if(strcmp(select,"category") == 0)
  {
    if(type == oyNAME_NICK)
      return "category";
    else if(type == oyNAME_NAME)
      return _("Image/Simple Image[scale]");
    else if(type == oyNAME_DESCRIPTION)
      return _("The filter is used to reduce pixels.");
  }
  return 0;
}

/** @brief oyra oyCMMapi4_s implementation
 *
 *  a filter providing a scale image filter
 *
 *  @version Oyranos: 0.9.5
 *  @since   2013/06/14 (Oyranos: 0.9.5)
 *  @date    2013/06/14
 */
oyCMMapi_s * oyraApi4ImageScaleCreate(void)
{
  /* selectors served by oyraApi4UiImageScaleGetText; zero terminated */
  static const char * oyra_api4_ui_image_scale_texts[] =
    {"name", "help", "category", 0};
  oyCMMui_s * ui = oyCMMui_Create( "Image/Simple Image[scale]", /* category */
                                   oyraApi4UiImageScaleGetText,
                                   oyra_api4_ui_image_scale_texts, 0 );
  int32_t cmm_version[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C},
          module_api[3] = {OYRANOS_VERSION_A,OYRANOS_VERSION_B,OYRANOS_VERSION_C};

  /* the api4 object carries only UI/meta data; processing lives in api7 */
  oyCMMapi4_s * scale4 = oyCMMapi4_Create( oyraCMMInit, oyraCMMMessageFuncSet,
                                           OY_IMAGE_SCALE_REGISTRATION,
                                           cmm_version, module_api,
                                           NULL,
                                           NULL,
                                           NULL,
                                           ui,
                                           NULL );
  return (oyCMMapi_s*)scale4;
}
/* OY_IMAGE_SCALE_REGISTRATION ----------------------------------------------*/

/* ---------------------------------------------------------------------------*/
/* ===== eltwise.h — start of a second, unrelated file (MACE element-wise kernels); the bare filename above was a concatenation artifact ===== */
// Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef MACE_KERNELS_ELTWISE_H_
#define MACE_KERNELS_ELTWISE_H_

#include <algorithm>
#include <cmath>
#include <functional>
#include <memory>
#include <utility>
#include <vector>

#include "mace/core/future.h"
#include "mace/core/tensor.h"
#include "mace/kernels/kernel.h"
#include "mace/utils/quantize.h"

namespace mace {
namespace kernels {

// Supported element-wise operations.  Binary ops combine input0 with
// input1 (or a scalar); NEG and ABS are unary and ignore input1.
enum EltwiseType {
  SUM = 0,
  SUB = 1,
  PROD = 2,
  DIV = 3,
  MIN = 4,
  MAX = 5,
  NEG = 6,
  ABS = 7,
  SQR_DIFF = 8,
  POW = 9,
  EQUAL = 10,
  NONE = 11,
};

// True for ops whose result is logical (0/1) rather than numeric; callers
// use an integer destination type for these (see DoEltwise<int32_t> below).
// NOTE(review): `static` on a function in a header gives every including TU
// its own copy; `inline` would be the conventional choice here.
static bool IsLogicalType(EltwiseType type) { return type == EQUAL; }

// Maps a multi-dimensional index to a flat offset in `shape`, treating
// dimensions of size 1 as broadcast (they contribute nothing to the offset).
inline index_t GetIndex(const std::vector<index_t> &shape,
                        const std::vector<index_t> &index) {
  index_t idx = 0;
  for (size_t i = 0; i < shape.size(); ++i) {
    if (shape[i] > 1) {
      idx = idx * shape[i] + index[i];
    }
  }
  return idx;
}

// Advances `index` by one position in row-major order over `shape`,
// wrapping each dimension like an odometer.
inline void IncreaseIndex(const std::vector<index_t> &shape,
                          std::vector<index_t> *index) {
  for (index_t i = static_cast<index_t>(shape.size()) - 1; i >= 0; --i) {
    ++(*index)[i];
    if ((*index)[i] >= shape[i]) {
      (*index)[i] -= shape[i];
    } else {
      break;
    }
  }
}

// Fully general broadcasting element-wise kernel: walks the output in
// row-major order and resolves each operand offset per element via GetIndex.
// `swapped` indicates input0/input1 were exchanged by the caller, so the
// non-commutative ops (SUB, DIV, POW) and the SUM coefficients compensate.
// Single-threaded: the running `out_index` state prevents parallelization.
template <typename T, typename DstType>
inline void TensorGeneralBroadcastEltwise(
    const EltwiseType type,
    const T *input0,
    const T *input1,
    const std::vector<float> &coeff,
    const bool swapped,
    const std::vector<index_t> &input0_shape,
    const std::vector<index_t> &input1_shape,
    const std::vector<index_t> &output_shape,
    DstType *output) {
  const index_t output_size = std::accumulate(
      output_shape.begin(), output_shape.end(), 1, std::multiplies<index_t>());
  std::vector<index_t> out_index(output_shape.size(), 0);
  switch (type) {
    case SUM:
      if (coeff.empty()) {
        for (index_t i = 0; i < output_size; ++i) {
          const index_t idx0 = GetIndex(input0_shape, out_index);
          const index_t idx1 = GetIndex(input1_shape, out_index);
          output[i] = input0[idx0] + input1[idx1];
          IncreaseIndex(output_shape, &out_index);
        }
      } else {
        // weighted sum: coeff[0] belongs to the original first operand,
        // so undo the caller's swap before applying
        std::vector<float> coeff_copy = coeff;
        if (swapped) {
          std::swap(coeff_copy[0], coeff_copy[1]);
        }
        for (index_t i = 0; i < output_size; ++i) {
          const index_t idx0 = GetIndex(input0_shape, out_index);
          const index_t idx1 = GetIndex(input1_shape, out_index);
          output[i] =
              input0[idx0] * coeff_copy[0] + input1[idx1] * coeff_copy[1];
          IncreaseIndex(output_shape, &out_index);
        }
      }
      break;
    case SUB:
      if (!swapped) {
        for (index_t i = 0; i < output_size; ++i) {
          const index_t idx0 = GetIndex(input0_shape, out_index);
          const index_t idx1 = GetIndex(input1_shape, out_index);
          output[i] = input0[idx0] - input1[idx1];
          IncreaseIndex(output_shape, &out_index);
        }
      } else {
        for (index_t i = 0; i < output_size; ++i) {
          const index_t idx0 = GetIndex(input0_shape, out_index);
          const index_t idx1 = GetIndex(input1_shape, out_index);
          output[i] = input1[idx1] - input0[idx0];
          IncreaseIndex(output_shape, &out_index);
        }
      }
      break;
    case PROD:
      for (index_t i = 0; i < output_size; ++i) {
        const index_t idx0 = GetIndex(input0_shape, out_index);
        const index_t idx1 = GetIndex(input1_shape, out_index);
        output[i] = input0[idx0] * input1[idx1];
        IncreaseIndex(output_shape, &out_index);
      }
      break;
    case DIV:
      if (!swapped) {
        for (index_t i = 0; i < output_size; ++i) {
          const index_t idx0 = GetIndex(input0_shape, out_index);
          const index_t idx1 = GetIndex(input1_shape, out_index);
          output[i] = input0[idx0] / input1[idx1];
          IncreaseIndex(output_shape, &out_index);
        }
      } else {
        for (index_t i = 0; i < output_size; ++i) {
          const index_t idx0 = GetIndex(input0_shape, out_index);
          const index_t idx1 = GetIndex(input1_shape, out_index);
          output[i] = input1[idx1] / input0[idx0];
          IncreaseIndex(output_shape, &out_index);
        }
      }
      break;
    case MIN:
      for (index_t i = 0; i < output_size; ++i) {
        const index_t idx0 = GetIndex(input0_shape, out_index);
        const index_t idx1 = GetIndex(input1_shape, out_index);
        output[i] = std::min(input1[idx1], input0[idx0]);
        IncreaseIndex(output_shape, &out_index);
      }
      break;
    case MAX:
      for (index_t i = 0; i < output_size; ++i) {
        const index_t idx0 = GetIndex(input0_shape, out_index);
        const index_t idx1 = GetIndex(input1_shape, out_index);
        output[i] = std::max(input1[idx1], input0[idx0]);
        IncreaseIndex(output_shape, &out_index);
      }
      break;
    case SQR_DIFF:
      for (index_t i = 0; i < output_size; ++i) {
        const index_t idx0 = GetIndex(input0_shape, out_index);
        const index_t idx1 = GetIndex(input1_shape, out_index);
        output[i] = std::pow(input1[idx1] - input0[idx0], 2.f);
        IncreaseIndex(output_shape, &out_index);
      }
      break;
    case POW:
      if (!swapped) {
        for (index_t i = 0; i < output_size; ++i) {
          const index_t idx0 = GetIndex(input0_shape, out_index);
          const index_t idx1 = GetIndex(input1_shape, out_index);
          output[i] = std::pow(input0[idx0], input1[idx1]);
          IncreaseIndex(output_shape, &out_index);
        }
      } else {
        for (index_t i = 0; i < output_size; ++i) {
          const index_t idx0 = GetIndex(input0_shape, out_index);
          const index_t idx1 = GetIndex(input1_shape, out_index);
          output[i] = std::pow(input1[idx1], input0[idx0]);
          IncreaseIndex(output_shape, &out_index);
        }
      }
      break;
    case EQUAL:
      for (index_t i = 0; i < output_size; ++i) {
        const index_t idx0 = GetIndex(input0_shape, out_index);
        const index_t idx1 = GetIndex(input1_shape, out_index);
        output[i] = input1[idx1] == input0[idx0];
        IncreaseIndex(output_shape, &out_index);
      }
      break;
    // NOTE(review): NEG and ABS are not handled here (they fall through to
    // LOG(FATAL)); the callers route unary ops through the other kernels.
    default:
      LOG(FATAL) << "Eltwise op not support type " << type;
  }
}

// Broadcast kernel for the common "tail broadcast" layout: input0 has
// diff_size * common_size elements, input1 has common_size elements and is
// repeated along the leading (diff) dimension.  OpenMP-parallel per element.
template <typename T, typename DstType>
inline void TensorBroadcastEltwise(const EltwiseType type,
                                   const T *input0,
                                   const T *input1,
                                   const std::vector<float> &coeff,
                                   const index_t diff_size,
                                   const index_t common_size,
                                   const bool swapped,
                                   DstType *output) {
  switch (type) {
    case SUM:
      if (coeff.empty()) {
#pragma omp parallel for collapse(2)
        for (index_t d = 0; d < diff_size; ++d) {
          for (index_t i = 0; i < common_size; ++i) {
            output[i + d * common_size] =
                input0[i + d * common_size] + input1[i];
          }
        }
      } else {
        // coeff[0] belongs to the original first operand; undo the swap
        std::vector<float> coeff_copy = coeff;
        if (swapped) {
          std::swap(coeff_copy[0], coeff_copy[1]);
        }
#pragma omp parallel for collapse(2)
        for (index_t d = 0; d < diff_size; ++d) {
          for (index_t i = 0; i < common_size; ++i) {
            output[i + d * common_size] =
                input0[i + d * common_size] * coeff_copy[0] +
                input1[i] * coeff_copy[1];
          }
        }
      }
      break;
    case SUB:
      if (!swapped) {
#pragma omp parallel for collapse(2)
        for (index_t d = 0; d < diff_size; ++d) {
          for (index_t i = 0; i < common_size; ++i) {
            output[i + d * common_size] =
                input0[i + d * common_size] - input1[i];
          }
        }
      } else {
#pragma omp parallel for collapse(2)
        for (index_t d = 0; d < diff_size; ++d) {
          for (index_t i = 0; i < common_size; ++i) {
            output[i + d * common_size] =
                input1[i] - input0[i + d * common_size];
          }
        }
      }
      break;
    case PROD:
#pragma omp parallel for collapse(2)
      for (index_t d = 0; d < diff_size; ++d) {
        for (index_t i = 0; i < common_size; ++i) {
          output[i + d * common_size] = input0[i + d * common_size] * input1[i];
        }
      }
      break;
    case DIV:
      if (!swapped) {
#pragma omp parallel for collapse(2)
        for (index_t d = 0; d < diff_size; ++d) {
          for (index_t i = 0; i < common_size; ++i) {
            output[i + d * common_size] =
                input0[i + d * common_size] / input1[i];
          }
        }
      } else {
#pragma omp parallel for collapse(2)
        for (index_t d = 0; d < diff_size; ++d) {
          for (index_t i = 0; i < common_size; ++i) {
            output[i + d * common_size] =
                input1[i] / input0[i + d * common_size];
          }
        }
      }
      break;
    case MIN:
#pragma omp parallel for collapse(2)
      for (index_t d = 0; d < diff_size; ++d) {
        for (index_t i = 0; i < common_size; ++i) {
          output[i + d * common_size] =
              std::min(input0[i + d * common_size], input1[i]);
        }
      }
      break;
    case MAX:
#pragma omp parallel for collapse(2)
      for (index_t d = 0; d < diff_size; ++d) {
        for (index_t i = 0; i < common_size; ++i) {
          output[i + d * common_size] =
              std::max(input0[i + d * common_size], input1[i]);
        }
      }
      break;
    case SQR_DIFF:
#pragma omp parallel for collapse(2)
      for (index_t d = 0; d < diff_size; ++d) {
        for (index_t i = 0; i < common_size; ++i) {
          output[i + d * common_size] =
              std::pow(input0[i + d * common_size] - input1[i], 2.f);
        }
      }
      break;
    case POW:
      if (!swapped) {
#pragma omp parallel for collapse(2)
        for (index_t d = 0; d < diff_size; ++d) {
          for (index_t i = 0; i < common_size; ++i) {
            output[i + d * common_size] =
                std::pow(input0[i + d * common_size], input1[i]);
          }
        }
      } else {
#pragma omp parallel for collapse(2)
        for (index_t d = 0; d < diff_size; ++d) {
          for (index_t i = 0; i < common_size; ++i) {
            output[i + d * common_size] =
                std::pow(input1[i], input0[i + d * common_size]);
          }
        }
      }
      break;
    case NEG:
      // unary: input1 unused, operate over the whole flat buffer
#pragma omp parallel for
      for (index_t i = 0; i < diff_size * common_size; ++i) {
        output[i] = -input0[i];
      }
      break;
    case ABS:
      // unary: input1 unused
#pragma omp parallel for
      for (index_t i = 0; i < diff_size * common_size; ++i) {
        output[i] = std::fabs(input0[i]);
      }
      break;
    case EQUAL:
#pragma omp parallel for collapse(2)
      for (index_t d = 0; d < diff_size; ++d) {
        for (index_t i = 0; i < common_size; ++i) {
          output[i + d * common_size] =
              input0[i + d * common_size] == input1[i];
        }
      }
      break;
    default:
      LOG(FATAL) << "Eltwise op not support type " << type;
  }
}

// Multiplication is costly, so we specialize the following case.
// Same-size element-wise kernel: input0 and input1 have identical element
// counts, no broadcasting needed.  OpenMP-parallel over the flat buffer.
// `swapped` compensates for operand exchange in SUB/DIV/POW and the SUM
// coefficients.
template <typename T, typename DstType>
inline void TensorEltwise(const EltwiseType type,
                          const T *input0,
                          const T *input1,
                          const std::vector<float> &coeff,
                          const index_t size,
                          const bool swapped,
                          DstType *output) {
  switch (type) {
    case SUM:
      if (coeff.empty()) {
#pragma omp parallel for
        for (index_t i = 0; i < size; ++i) {
          output[i] = input0[i] + input1[i];
        }
      } else {
        // coeff[0] belongs to the original first operand; undo the swap
        std::vector<float> coeff_copy = coeff;
        if (swapped) {
          std::swap(coeff_copy[0], coeff_copy[1]);
        }
#pragma omp parallel for
        for (index_t i = 0; i < size; ++i) {
          output[i] = input0[i] * coeff_copy[0] + input1[i] * coeff_copy[1];
        }
      }
      break;
    case SUB:
      if (!swapped) {
#pragma omp parallel for
        for (index_t i = 0; i < size; ++i) {
          output[i] = input0[i] - input1[i];
        }
      } else {
#pragma omp parallel for
        for (index_t i = 0; i < size; ++i) {
          output[i] = input1[i] - input0[i];
        }
      }
      break;
    case PROD:
#pragma omp parallel for
      for (index_t i = 0; i < size; ++i) {
        output[i] = input0[i] * input1[i];
      }
      break;
    case DIV:
      if (!swapped) {
#pragma omp parallel for
        for (index_t i = 0; i < size; ++i) {
          output[i] = input0[i] / input1[i];
        }
      } else {
#pragma omp parallel for
        for (index_t i = 0; i < size; ++i) {
          output[i] = input1[i] / input0[i];
        }
      }
      break;
    case MIN:
#pragma omp parallel for
      for (index_t i = 0; i < size; ++i) {
        output[i] = std::min(input0[i], input1[i]);
      }
      break;
    case MAX:
#pragma omp parallel for
      for (index_t i = 0; i < size; ++i) {
        output[i] = std::max(input0[i], input1[i]);
      }
      break;
    case SQR_DIFF:
#pragma omp parallel for
      for (index_t i = 0; i < size; ++i) {
        output[i] = std::pow(input0[i] - input1[i], 2.f);
      }
      break;
    case POW:
      if (!swapped) {
#pragma omp parallel for
        for (index_t i = 0; i < size; ++i) {
          output[i] = std::pow(input0[i], input1[i]);
        }
      } else {
        // NOTE(review): this branch lacks the `#pragma omp parallel for`
        // that every sibling branch has — looks like an omission; confirm
        for (index_t i = 0; i < size; ++i) {
          output[i] = std::pow(input1[i], input0[i]);
        }
      }
      break;
    case NEG:
#pragma omp parallel for
      for (index_t i = 0; i < size; ++i) {
        output[i] = -input0[i];
      }
      break;
    case ABS:
#pragma omp parallel for
      for (index_t i = 0; i < size; ++i) {
        output[i] = std::fabs(input0[i]);
      }
      break;
    case EQUAL:
#pragma omp parallel for
      for (index_t i = 0; i < size; ++i) {
        output[i] = input0[i] == input1[i];
      }
      break;
    default:
      LOG(FATAL) << "Eltwise op not support type " << type;
  }
}

// Multiplication is costly, so we specialize the following case.
// Tensor-vs-scalar kernel: the second operand is a single value.
template <typename T, typename DstType>
inline void TensorScalarEltwise(const EltwiseType type,
                                const T *input0,
                                const T input1,
                                const std::vector<float> &coeff,
                                const index_t size,
                                const bool swapped,
                                DstType *output) {
  switch (type) {
    case SUM:
      if (coeff.empty()) {
#pragma omp parallel for
        for (index_t i = 0; i < size; ++i) {
          output[i] = input0[i] + input1;
        }
      } else {
        // coeff[0] belongs to the original first operand; undo the swap
        std::vector<float> coeff_copy = coeff;
        if (swapped) {
          std::swap(coeff_copy[0], coeff_copy[1]);
        }
#pragma omp parallel for
        for (index_t i = 0; i < size; ++i) {
          output[i] = input0[i] * coeff_copy[0] + input1 * coeff_copy[1];
        }
      }
      break;
    case SUB:
      if (!swapped) {
#pragma omp parallel for
        for (index_t i = 0; i < size; ++i) {
          output[i] = input0[i] - input1;
        }
      } else {
#pragma omp parallel for
        for (index_t i = 0; i < size; ++i) {
          output[i] = input1 - input0[i];
        }
      }
      break;
    case PROD:
#pragma omp parallel for
      for (index_t i = 0; i < size; ++i) {
        output[i] = input0[i] * input1;
      }
      break;
    case DIV:
      if (!swapped) {
#pragma omp parallel for
        for (index_t i = 0; i < size; ++i) {
          output[i] = input0[i] / input1;
        }
      } else {
#pragma omp parallel for
        for (index_t i = 0; i < size; ++i) {
          output[i] = input1 / input0[i];
        }
      }
      break;
    case MIN:
#pragma omp parallel for
      for (index_t i = 0; i < size; ++i) {
        output[i] = std::min(input0[i], input1);
      }
      break;
    case MAX:
#pragma omp parallel for
      for (index_t i = 0; i < size; ++i) {
        output[i] = std::max(input0[i], input1);
      }
      break;
    case SQR_DIFF:
#pragma omp parallel for
      for (index_t i = 0; i < size; ++i) {
        output[i] = std::pow(input0[i] - input1, 2.f);
      }
      break;
    case POW:
      if (!swapped) {
#pragma omp parallel for
        for (index_t i = 0; i < size; ++i) {
          output[i] = std::pow(input0[i], input1);
        }
      } else {
        // NOTE(review): missing `#pragma omp parallel for`, unlike every
        // sibling branch — looks like an omission; confirm
        for (index_t i = 0; i < size; ++i) {
          output[i] = std::pow(input1, input0[i]);
        }
      }
      break;
    case NEG:
#pragma omp parallel for
      for (index_t i = 0; i < size; ++i) {
        output[i] = -input0[i];
      }
      break;
    case ABS:
#pragma omp parallel for
      for (index_t i = 0; i < size; ++i) {
        output[i] = std::fabs(input0[i]);
      }
      break;
    case EQUAL:
#pragma omp parallel for
      for (index_t i = 0; i < size; ++i) {
        output[i] = input0[i] == input1;
      }
      break;
    default:
      LOG(FATAL) << "Eltwise op not support type " << type;
  }
}

// Per-channel kernel for NCHW data: input1 holds one value per channel
// (optionally per batch when batch1 > 1) that is applied across each
// channel's image_size plane of input0.
template <typename T, typename DstType>
inline void TensorEltwisePerChannel(const EltwiseType type,
                                    const T *input0,
                                    const T *input1,
                                    const std::vector<float> &coeff,
                                    const index_t batch0,
                                    const index_t batch1,
                                    const index_t channel,
                                    const index_t image_size,
                                    const bool swapped,
                                    DstType *output) {
  switch (type) {
    case SUM:
      if (coeff.empty()) {
#pragma omp parallel for collapse(2)
        for (index_t b = 0; b < batch0; ++b) {
          for (index_t c = 0; c < channel; ++c) {
            const T *in0_ptr = input0 + ((b * channel) + c) * image_size;
            // batch1 == 1 means input1 is shared across batches
            const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0);
            DstType *out_ptr = output + ((b * channel) + c) * image_size;
            for (index_t i = 0; i < image_size; ++i) {
              out_ptr[i] = in0_ptr[i] + in1_ptr[c];
            }
          }
        }
      } else {
        // coeff[0] belongs to the original first operand; undo the swap
        std::vector<float> coeff_copy = coeff;
        if (swapped) {
          std::swap(coeff_copy[0], coeff_copy[1]);
        }
#pragma omp parallel for collapse(2)
        for (index_t b = 0; b < batch0; ++b) {
          for (index_t c = 0; c < channel; ++c) {
            const T *in0_ptr = input0 + ((b * channel) + c) * image_size;
            const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0);
            DstType *out_ptr = output + ((b * channel) + c) * image_size;
            for (index_t i = 0; i < image_size; ++i) {
              out_ptr[i] =
                  in0_ptr[i] * coeff_copy[0] + in1_ptr[c] * coeff_copy[1];
            }
          }
        }
      }
      break;
    case SUB:
      if (!swapped) {
#pragma omp parallel for collapse(2)
        for (index_t b = 0; b < batch0; ++b) {
          for (index_t c = 0; c < channel; ++c) {
            const T *in0_ptr = input0 + ((b * channel) + c) * image_size;
            const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0);
            DstType *out_ptr = output + ((b * channel) + c) * image_size;
            for (index_t i = 0; i < image_size; ++i) {
              out_ptr[i] = in0_ptr[i] - in1_ptr[c];
            }
          }
        }
      } else {
#pragma omp parallel for collapse(2)
        for (index_t b = 0; b < batch0; ++b) {
          for (index_t c = 0; c < channel; ++c) {
            const T *in0_ptr = input0 + ((b * channel) + c) * image_size;
            const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0);
            DstType *out_ptr = output + ((b * channel) + c) * image_size;
            for (index_t i = 0; i < image_size; ++i) {
              out_ptr[i] = in1_ptr[c] - in0_ptr[i];
            }
          }
        }
      }
      break;
    case PROD:
#pragma omp parallel for collapse(2)
      for (index_t b = 0; b < batch0; ++b) {
        for (index_t c = 0; c < channel; ++c) {
          const T *in0_ptr = input0 + ((b * channel) + c) * image_size;
          const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0);
          DstType *out_ptr = output + ((b * channel) + c) * image_size;
          for (index_t i = 0; i < image_size; ++i) {
            out_ptr[i] = in0_ptr[i] * in1_ptr[c];
          }
        }
      }
      break;
    case DIV:
      if (!swapped) {
#pragma omp parallel for collapse(2)
        for (index_t b = 0; b < batch0; ++b) {
          for (index_t c = 0; c < channel; ++c) {
            const T *in0_ptr = input0 + ((b * channel) + c) * image_size;
            const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0);
            DstType *out_ptr = output + ((b * channel) + c) * image_size;
            for (index_t i = 0; i < image_size; ++i) {
              out_ptr[i] = in0_ptr[i] / in1_ptr[c];
            }
          }
        }
      } else {
#pragma omp parallel for collapse(2)
        for (index_t b = 0; b < batch0; ++b) {
          for (index_t c = 0; c < channel; ++c) {
            const T *in0_ptr = input0 + ((b * channel) + c) * image_size;
            const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0);
            DstType *out_ptr = output + ((b * channel) + c) * image_size;
            for (index_t i = 0; i < image_size; ++i) {
              out_ptr[i] = in1_ptr[c] / in0_ptr[i];
            }
          }
        }
      }
      break;
    case MIN:
#pragma omp parallel for collapse(2)
      for (index_t b = 0; b < batch0; ++b) {
        for (index_t c = 0; c < channel; ++c) {
          const T *in0_ptr = input0 + ((b * channel) + c) * image_size;
          const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0);
          DstType *out_ptr = output + ((b * channel) + c) * image_size;
          for (index_t i = 0; i < image_size; ++i) {
            out_ptr[i] = std::min(in0_ptr[i], in1_ptr[c]);
          }
        }
      }
      break;
    case MAX:
#pragma omp parallel for collapse(2)
      for (index_t b = 0; b < batch0; ++b) {
        for (index_t c = 0; c < channel; ++c) {
          const T *in0_ptr = input0 + ((b * channel) + c) * image_size;
          const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0);
          DstType *out_ptr = output + ((b * channel) + c) * image_size;
          for (index_t i = 0; i < image_size; ++i) {
            out_ptr[i] = std::max(in0_ptr[i], in1_ptr[c]);
          }
        }
      }
      break;
    case SQR_DIFF:
#pragma omp parallel for collapse(2)
      for (index_t b = 0; b < batch0; ++b) {
        for (index_t c = 0; c < channel; ++c) {
          const T *in0_ptr = input0 + ((b * channel) + c) * image_size;
          const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0);
          DstType *out_ptr = output + ((b * channel) + c) * image_size;
          for (index_t i = 0; i < image_size; ++i) {
            out_ptr[i] = std::pow(in0_ptr[i] - in1_ptr[c], 2.f);
          }
        }
      }
      break;
    case POW:
      if (!swapped) {
#pragma omp parallel for collapse(2)
        for (index_t b = 0; b < batch0; ++b) {
          for (index_t c = 0; c < channel; ++c) {
            const T *in0_ptr = input0 + ((b * channel) + c) * image_size;
            const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0);
            DstType *out_ptr = output + ((b * channel) + c) * image_size;
            for (index_t i = 0; i < image_size; ++i) {
              out_ptr[i] = std::pow(in0_ptr[i], in1_ptr[c]);
            }
          }
        }
      } else {
#pragma omp parallel for collapse(2)
        for (index_t b = 0; b < batch0; ++b) {
          for (index_t c = 0; c < channel; ++c) {
            const T *in0_ptr = input0 + ((b * channel) + c) * image_size;
            const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0);
            DstType *out_ptr = output + ((b * channel) + c) * image_size;
            for (index_t i = 0; i < image_size; ++i) {
              out_ptr[i] = std::pow(in1_ptr[c], in0_ptr[i]);
            }
          }
        }
      }
      break;
    case NEG:
      // unary: input1 unused, operate over the whole flat buffer
#pragma omp parallel for
      for (index_t i = 0; i < batch0 * channel * image_size; ++i) {
        output[i] = -input0[i];
      }
      break;
    case ABS:
      // unary: input1 unused
#pragma omp parallel for
      for (index_t i = 0; i < batch0 * channel * image_size; ++i) {
        output[i] = std::fabs(input0[i]);
      }
      break;
    case EQUAL:
#pragma omp parallel for collapse(2)
      for (index_t b = 0; b < batch0; ++b) {
        for (index_t c = 0; c < channel; ++c) {
          const T *in0_ptr = input0 + ((b * channel) + c) * image_size;
          const T *in1_ptr = input1 + (batch1 > 1 ? b * channel : 0);
          DstType *out_ptr = output + ((b * channel) + c) * image_size;
          for (index_t i = 0; i < image_size; ++i) {
            out_ptr[i] = in0_ptr[i] == in1_ptr[c];
          }
        }
      }
      break;
    default:
      LOG(FATAL) << "Eltwise op not support type " << type;
  }
}

// CPU element-wise functor: dispatches an op on two tensors (or a tensor
// and a scalar) to the specialized kernels above.
template <DeviceType D, typename T>
struct EltwiseFunctor : OpKernel {
  EltwiseFunctor(OpKernelContext *context,
                 const EltwiseType type,
                 const std::vector<float> &coeff,
                 const float scalar_input,  // float as it comes from arg
                 const int32_t scalar_input_index,
                 const DataFormat data_format)
      : OpKernel(context),
        type_(type),
        coeff_(coeff),
        scalar_input_(scalar_input),
        scalar_input_index_(scalar_input_index),
        data_format_(data_format) {}

  // Validates broadcast compatibility, resizes `output`, and selects the
  // cheapest applicable kernel.  DstType is int32_t for logical ops.
  template <typename DstType>
  MaceStatus DoEltwise(const Tensor *input0,
                       const Tensor *input1,
                       Tensor *output) {
    bool swapped = false;
    // normalize so input0 is always the larger operand; record the swap so
    // non-commutative kernels can compensate
    if (input0->size() < input1->size()) {
      std::swap(input0, input1);
      swapped = true;
    }
    if (scalar_input_index_ == 0) {
      swapped = !swapped;
    }

    // check if we can broadcast tensor
    uint32_t rank_diff =
        static_cast<uint32_t>(input0->dim_size() - input1->dim_size());
    if (data_format_ == NCHW) {
      MACE_CHECK(
          (input0->dim_size() == 4) &&
              ((input1->dim_size() == 0) ||
               (input1->dim_size() == 4 &&
                input1->dim(1) == input0->dim(1) &&
                (input1->dim(0) == input0->dim(0) ||
                 input1->dim(0) == 1)) ||
               (input1->dim_size() == 1 &&
                input1->dim(0) == input0->dim(1))),
          "only support broadcast channel dimension");
    } else {
      for (uint32_t i = 0; i < input1->dim_size(); ++i) {
        MACE_CHECK(input0->dim(rank_diff + i) == 1 || input1->dim(i) == 1 ||
                       input0->dim(rank_diff + i) == input1->dim(i),
                   "Element-Wise op only support tail dimensions broadcast");
      }
    }

    Tensor::MappingGuard input0_guard(input0);
    Tensor::MappingGuard input1_guard(input1);

    const T *input0_ptr = input0->data<T>();
    const T *input1_ptr = input1->data<T>();

    if (data_format_ == NCHW && input1->dim_size() > 0 &&
        input1->size() < input0->size()) {
      // NCHW per-channel broadcast path
      MACE_RETURN_IF_ERROR(output->ResizeLike(input0));
      Tensor::MappingGuard output_guard(output);
DstType *output_ptr = output->mutable_data<DstType>(); TensorEltwisePerChannel( type_, input0_ptr, input1_ptr, coeff_, input0->dim(0), input1->dim_size() == 1 ? 1 : input1->dim(0), input0->dim(1), input0->dim(2) * input0->dim(3), swapped, output_ptr); } else { const std::vector<index_t> &input0_shape = input0->shape(); std::vector<index_t> input1_shape(rank_diff, 1); input1_shape.insert(input1_shape.end(), input1->shape().begin(), input1->shape().end()); std::vector<index_t> output_shape(input0->dim_size(), 0); for (unsigned int i = 0; i < input0_shape.size(); ++i) { output_shape[i] = std::max(input0_shape[i], input1_shape[i]); } MACE_RETURN_IF_ERROR(output->Resize(output_shape)); Tensor::MappingGuard output_guard(output); DstType *output_ptr = output->mutable_data<DstType>(); bool need_general_broadcast = false; for (uint32_t i = 0; i < input1->dim_size(); ++i) { if ((input0->dim(rank_diff + i) == 1 && input1->dim(i) > 1) || (input0->dim(rank_diff + i) > 1 && input1->dim(i) == 1)) { need_general_broadcast = true; break; } } if (need_general_broadcast) { TensorGeneralBroadcastEltwise(type_, input0_ptr, input1_ptr, coeff_, swapped, input0_shape, input1_shape, output_shape, output_ptr); } else if (input1->size() == input0->size()) { TensorEltwise(type_, input0_ptr, input1_ptr, coeff_, input0->size(), swapped, output_ptr); } else if (input1->size() < input0->size()) { if (input1->size() > 1) { index_t common_size = input1->size(); index_t diff_size = input0->size() / common_size; TensorBroadcastEltwise(type_, input0_ptr, input1_ptr, coeff_, diff_size, common_size, swapped, output_ptr); } else { TensorScalarEltwise(type_, input0_ptr, input1_ptr[0], coeff_, input0->size(), swapped, output_ptr); } } } return MACE_SUCCESS; } MaceStatus operator()(const Tensor *input0, const Tensor *input1, Tensor *output, StatsFuture *future) { MACE_UNUSED(future); if (input1 == nullptr) { scalar_tensor_.Resize({}); Tensor::MappingGuard guard(&scalar_tensor_); auto scalar_data = 
scalar_tensor_.mutable_data<T>();
      scalar_data[0] = static_cast<T>(scalar_input_);
      input1 = &scalar_tensor_;
    }
    if (IsLogicalType(type_)) {
      // as we do not have bool-type tensor, we use int type
      return DoEltwise<int32_t>(input0, input1, output);
    } else {
      return DoEltwise<T>(input0, input1, output);
    }
  }

  // Configuration captured at construction time.
  EltwiseType type_;            // which element-wise operation to apply
  std::vector<float> coeff_;    // optional per-input coefficients
  float scalar_input_;          // scalar operand, used when one input is a scalar
  int32_t scalar_input_index_;  // input slot the scalar stands in for
  DataFormat data_format_;
  Tensor scalar_tensor_;        // scratch tensor holding the broadcast scalar
};

// Quantized (uint8) CPU specialization. Only element-wise SUM is
// implemented: both inputs are rescaled into a shared fixed-point domain,
// added, then requantized to the output's scale/zero-point.
template <>
struct EltwiseFunctor<DeviceType::CPU, uint8_t> : OpKernel {
  EltwiseFunctor(OpKernelContext *context,
                 const EltwiseType type,
                 const std::vector<float> &coeff,
                 const float scalar_input,  // float as it comes from arg
                 const int32_t scalar_input_index,
                 const DataFormat data_format)
      : OpKernel(context),
        type_(type),
        coeff_(coeff),
        scalar_input_(scalar_input),
        scalar_input_index_(scalar_input_index),
        data_format_(data_format) {}

  // Computes output = requantize(dequant(input0) + dequant(input1)).
  // Preconditions checked below: type_ == SUM, equal input sizes,
  // non-zero output scale.
  MaceStatus operator()(const Tensor *input0,
                        const Tensor *input1,
                        Tensor *output,
                        StatsFuture *future) {
    MACE_UNUSED(future);
    MACE_CHECK(type_ == SUM, "Only support Elementwise SUM now. ");
    MACE_CHECK(input0->size() == input1->size(),
               "input0 and input1 must have the same shape.");
    MACE_CHECK(output->scale() != 0);
    MACE_RETURN_IF_ERROR(output->Resize(input0->shape()));

    // Each scale is normalized against twice the larger input scale and
    // expressed as a 32-bit multiplier plus a power-of-two shift (via
    // QuantizeMultiplier). left_shift adds fixed-point headroom.
    constexpr int left_shift = 20;
    const double doubled_scale = 2 * std::max(input0->scale(), input1->scale());
    const double adjusted_input0_scale = input0->scale() / doubled_scale;
    const double adjusted_input1_scale = input1->scale() / doubled_scale;
    const double adjusted_output_scale =
        doubled_scale / ((1 << left_shift) * output->scale());

    int32_t input0_multiplier;
    int32_t input1_multiplier;
    int32_t output_multiplier;
    int32_t input0_shift;
    int32_t input1_shift;
    int32_t output_shift;
    QuantizeMultiplier(adjusted_input0_scale, &input0_multiplier, &input0_shift);
    QuantizeMultiplier(adjusted_input1_scale, &input1_multiplier, &input1_shift);
    QuantizeMultiplier(adjusted_output_scale, &output_multiplier, &output_shift);

    Tensor::MappingGuard input0_guard(input0);
    Tensor::MappingGuard input1_guard(input1);
    Tensor::MappingGuard output_guard(output);

    auto input0_ptr = input0->data<uint8_t>();
    auto input1_ptr = input1->data<uint8_t>();
    auto output_ptr = output->mutable_data<uint8_t>();

    index_t handled_output_size = 0;
#ifdef MACE_ENABLE_NEON
    // Vector path: 8 elements per iteration; the size % 8 tail is left
    // for the scalar loop below.
#pragma omp parallel for
    for (index_t i = handled_output_size; i <= output->size() - 8; i += 8) {
      const auto input0_val = vld1_u8(input0_ptr + i);
      const auto input1_val = vld1_u8(input1_ptr + i);
      // Widen u8 -> s16 and subtract each input's zero point.
      const auto input0_val_s16 = vreinterpretq_s16_u16(vmovl_u8(input0_val));
      const auto input1_val_s16 = vreinterpretq_s16_u16(vmovl_u8(input1_val));
      const auto offset_input0 =
          vaddq_s16(input0_val_s16, vdupq_n_s16(-input0->zero_point()));
      const auto offset_input1 =
          vaddq_s16(input1_val_s16, vdupq_n_s16(-input1->zero_point()));
      // Widen s16 -> s32 in low/high halves.
      auto input0_low_s32 = vmovl_s16(vget_low_s16(offset_input0));
      auto input0_high_s32 = vmovl_s16(vget_high_s16(offset_input0));
      auto input1_low_s32 = vmovl_s16(vget_low_s16(offset_input1));
      auto input1_high_s32 = vmovl_s16(vget_high_s16(offset_input1));
      const auto left_shift_dup = vdupq_n_s32(left_shift);
      input0_low_s32 = vshlq_s32(input0_low_s32, left_shift_dup);
      input0_high_s32 = vshlq_s32(input0_high_s32, left_shift_dup);
      input1_low_s32 = vshlq_s32(input1_low_s32, left_shift_dup);
      input1_high_s32 = vshlq_s32(input1_high_s32, left_shift_dup);
      // Rescale each input into the common fixed-point domain.
      input0_low_s32 = vqrdmulhq_n_s32(input0_low_s32, input0_multiplier);
      input0_high_s32 = vqrdmulhq_n_s32(input0_high_s32, input0_multiplier);
      input1_low_s32 = vqrdmulhq_n_s32(input1_low_s32, input1_multiplier);
      input1_high_s32 = vqrdmulhq_n_s32(input1_high_s32, input1_multiplier);
      const auto input0_shift_dup = vdupq_n_s32(input0_shift);
      const auto input1_shift_dup = vdupq_n_s32(input1_shift);
      input0_low_s32 = vshlq_s32(input0_low_s32, input0_shift_dup);
      input0_high_s32 = vshlq_s32(input0_high_s32, input0_shift_dup);
      input1_low_s32 = vshlq_s32(input1_low_s32, input1_shift_dup);
      input1_high_s32 = vshlq_s32(input1_high_s32, input1_shift_dup);
      auto sum_low = vaddq_s32(input0_low_s32, input1_low_s32);
      auto sum_high = vaddq_s32(input0_high_s32, input1_high_s32);
      // Requantize the sum to the output scale and add its zero point.
      sum_low = vqrdmulhq_n_s32(sum_low, output_multiplier);
      sum_high = vqrdmulhq_n_s32(sum_high, output_multiplier);
      sum_low = gemmlowp::RoundingDivideByPOT(sum_low, -output_shift);
      sum_high = gemmlowp::RoundingDivideByPOT(sum_high, -output_shift);
      const auto sum_low_s16 = vmovn_s32(sum_low);
      const auto sum_high_s16 = vmovn_s32(sum_high);
      const auto output_val =
          vaddq_s16(vcombine_s16(sum_low_s16, sum_high_s16),
                    vdupq_n_s16(output->zero_point()));
      // Saturating narrow s16 -> u8 and store.
      vst1_u8(output_ptr + i, vqmovun_s16(output_val));
    }
    handled_output_size = output->size() - output->size() % 8;
#endif  // NEON
    // Scalar path (entire range without NEON, only the tail with it);
    // mirrors the vector arithmetic exactly.
#pragma omp parallel for
    for (index_t i = handled_output_size; i < output->size(); ++i) {
      const int32_t offset_input0 = input0_ptr[i] - input0->zero_point();
      const int32_t offset_input1 = input1_ptr[i] - input1->zero_point();
      const int32_t shifted_input0 = offset_input0 * (1 << left_shift);
      const int32_t shifted_input1 = offset_input1 * (1 << left_shift);
      const int32_t multiplied_input0 = gemmlowp::RoundingDivideByPOT(
          gemmlowp::SaturatingRoundingDoublingHighMul(shifted_input0,
                                                      input0_multiplier),
          -input0_shift);
      const int32_t multiplied_input1 = gemmlowp::RoundingDivideByPOT(
          gemmlowp::SaturatingRoundingDoublingHighMul(shifted_input1,
                                                      input1_multiplier),
          -input1_shift);
      const int32_t sum = multiplied_input0 + multiplied_input1;
      const int32_t output_val =
          gemmlowp::RoundingDivideByPOT(
              gemmlowp::SaturatingRoundingDoublingHighMul(sum,
                                                          output_multiplier),
              -output_shift) +
          output->zero_point();
      output_ptr[i] = Saturate<uint8_t>(output_val);
    }
    return MACE_SUCCESS;
  }

  EltwiseType type_;
  std::vector<float> coeff_;
  float scalar_input_;
  int32_t scalar_input_index_;
  DataFormat data_format_;
  Tensor scalar_tensor_;
};

#ifdef MACE_ENABLE_OPENCL
// Abstract interface implemented by the OpenCL element-wise kernels.
class OpenCLEltwiseKernel {
 public:
  virtual MaceStatus Compute(
      OpKernelContext *context,
      const Tensor *input0,
      const Tensor *input1,
      Tensor *output,
      StatsFuture *future) = 0;
  MACE_VIRTUAL_EMPTY_DESTRUCTOR(OpenCLEltwiseKernel);
};

// GPU functor: declarations only; definitions live in the OpenCL sources.
template <typename T>
struct EltwiseFunctor<DeviceType::GPU, T> : OpKernel {
  EltwiseFunctor(OpKernelContext *context,
                 const EltwiseType type,
                 const std::vector<float> &coeff,
                 const float scalar_input,
                 const int32_t scalar_input_index,
                 const DataFormat data_format);

  MaceStatus operator()(const Tensor *input0,
                        const Tensor *input1,
                        Tensor *output,
                        StatsFuture *future);

  std::unique_ptr<OpenCLEltwiseKernel> kernel_;
};
#endif  // MACE_ENABLE_OPENCL

}  // namespace kernels
}  // namespace mace

#endif  // MACE_KERNELS_ELTWISE_H_
coll.h
#ifndef G24_LIB_COLL #define G24_LIB_COLL /********************************************** * * * BIBLIOTECA DE FUNÇÕES MULTI-DIMENSIONAIS * * * * Por: Gonçalo Vaz (87321) * * Nuno Fernandes (87342) * * [Antigo Grupo 24 de FC 2017-2018] * * [Antigo Grupo 25 de Programação 2016-2017] * * * **********************************************/ /*! \file coll.h \brief Defines `g24_lib::coll`, a mostly deprecated class that holds N-dimensional collections of objects. \author Nuno Fernandes (main coder, current maintainer) \author Gonçalo Vaz (auxiliary coder and tester) */ namespace g24_lib { /*! \brief Holds a run-time defined number of objects in a run-time defined number of dimensions. \tparam Type The type of the elements of the array. \tparam indexer The type that should be able to hold the size of the array. \pre \p indexer must satisfy the constraints placed on the choice of \ref g24_lib::Definitions::default_unsigned_indexer. \details Suppose we want to store something in `n+1` dimensions, with `k_0`, `k_1`, ... , `k_n` elements in each dimension. `g24_lib::coll` will allocate an array of `k_0 * k_1 * ... * k_n` elements. The elements at each dimension will be stored at decreasing strides, much in the same way as native multidimensional static arrays (such as `int x[10][11][12]`). \details The strides between each element in a given dimension will be: `k_1 * k_2 * ... * k_n`, `k_2 * ... * k_n`, ... , `k_n` , `1` (that is, in the last dimension the elements are stored contiguosly). \deprecated After seeing much use during the projects that originated the first versions of this library, the authors eventually concluded that the run-time defined number of dimensions was not a significant advantage, quite the opposite, since it forced the usage of the (also deprecated) `g24_lib::point` class and more dynamic allocations than desireable. 
A more reasonable alternative is to use `g24_lib::ndview` to specify a run-time number of elements in a compile-time number of dimensions and couple this with any array (not necessarily `g24_lib::simple_array`, though that would be the most natural choice for this library...) using `g24_lib::ndview::array_size()` to get the necessary number of elements and access ordered positions through `g24_lib::ndview`. */ template <class Type, class indexer = g24_lib::Definitions::default_unsigned_indexer> class coll { public: typedef Type value_type; typedef indexer indexer_type; private: /*! \brief The actual array. */ Type *a; /*! \brief The number of dimensions. */ indexer dim; /*! \brief The total size of the array. */ indexer Size; /*! \brief The number of elements per dimension. */ point<indexer, indexer> nums; /*! \brief The stride between the elements in each dimension. */ point<indexer, indexer> sep; public: /*! \brief Returns the number of dimensions of the `coll`. */ inline indexer dimension() const { return dim; } /*! \brief Returns the total number of elements of the `coll`. */ inline indexer size() const { return Size; } /*! \brief Returns a `g24_lib::point` that holds the number of elements in each dimension. */ inline point<indexer, indexer> numbers() const { return nums; } /*! \brief Returns the number of elements in the dimension \p i. */ inline indexer numbers(const indexer i) const { return nums[i]; } /*! \brief Returns a `g24_lib::point` that holds the stride between each element in each dimension. */ inline point<indexer, indexer> separation() const { return sep; } /*! \brief Returns the stride between each element in the dimension \p i. */ inline indexer separation(const indexer i) const { return sep[i]; } /*! \brief Gives direct access to the underlying array. \warning This gives actual access to the raw array. It shouldn't absolutely be used unless the user is really, really sure of what will be done. 
\deprecated This is kept for compatibility with older code, where copying to CUDA and so on wasn't as well abstracted as it is now. It really, really shouldn't be needed anymore. */ inline Type *& get_access_to_array() { return a; } /*! \brief Gives direct access to the underlying array. \warning This gives actual access to the raw array. It shouldn't absolutely be used unless the user is really, really sure of what will be done. \deprecated This is kept for compatibility with older code, where copying to CUDA and so on wasn't as well abstracted as it is now. It really, really shouldn't be needed anymore. */ inline const Type * get_access_to_array() const { return a; } /*! \brief Empty construct. */ coll() { dim = 0; Size = 0; a = nullptr; } /*! \brief Construct with \p d dimensions with \p n elements in each dimension and no initialization. */ coll(const indexer d, const indexer n): nums(d, n), sep(d) { indexer i, j; Size = fastpow(n,d); dim = d; j = Size; for (i = 0, j = Size; i < dim; ++i) { j /= n; sep[i] = j; }; a = new Type[Size]; } /*! \brief Construct with \p d dimensions with \p n elements in each dimension, with every element being initialized as \p def. */ coll(const indexer d, const indexer n, const Type &def): nums(d, n), sep(d) { indexer i, j; Size = fastpow(n,d); dim = d; j = Size; for (i = 0, j = Size; i < dim; ++i) { j /= n; sep[i] = j; }; a = new Type[Size]; #ifdef _OPENMP #pragma omp parallel for #endif for (indexer k = 0; k < Size; ++k) { a[k] = def; } } /*! \brief Construct with \p d dimensions with the number of elements in each dimension being given by \p ns, with no initialization. */ coll(const indexer d, const point<indexer, indexer> &ns) : nums(ns), sep(d) { indexer i, j; dim = d; Size = 1; for (i = 0; i < d; ++i) { Size *=ns[i]; }; for (i = 0, j = Size; i < dim; ++i) { j /= ns[i]; sep[i] = j; }; a = new Type[Size]; } /*! 
\brief Construct with \p d dimensions with the number of elements in each dimension being given by \p ns, with every element being initialized as \p def. */ coll(const indexer d, const point<indexer, indexer> &ns, const Type &def) : nums(ns), sep(d) { indexer i, j; dim = d; Size = 1; for (i = 0; i < d; ++i) { Size *=ns[i]; }; for (i = 0, j = Size; i < dim; ++i) { j /= ns[i]; sep[i] = j; }; a = new Type[Size]; #ifdef _OPENMP #pragma omp parallel for #endif for (indexer k = 0; k < Size; ++k) { a[k] = def; } } /*! \brief Copy constructor. */ coll(const coll &c): nums(c.nums), sep(c.sep) { Size = c.Size; dim = c.dim; a = new Type[c.Size]; memcpy(a, c.a, (c.Size) * sizeof(Type)); } /*! \brief Move constructor. */ coll(coll &&c): nums(c.nums), sep(c.sep) { Size = c.Size; dim = c.dim; a = c.a; c.a = nullptr; c.Size = 0; c.a.set_all(0); } coll& operator= (const coll& c) { if(&c == this) { return (*this); } nums = c.nums; sep = c.sep; Size = c.Size; dim = c.dim; if (a != nullptr) { delete[] a; } a = new Type[c.Size]; memcpy(a, c.a, (c.Size) * sizeof(Type)); return (*this); } coll& operator= (coll&& c) { if(&c == this) { return (*this); } nums = c.nums; sep = c.sep; Size = c.Size; dim = c.dim; if (a != nullptr) { delete[] a; } a = c.a; c.a = nullptr; return (*this); } /*! \brief Access operator through an index. */ inline Type& operator[](const indexer i) { return a[i]; } /*! \brief Access operator through an index. */ inline const Type& operator[](const indexer i) const { return a[i]; } /*! \brief Bounded access operator through an index. \throw In case the access is out of bounds, uses g24_lib::exceptions::throw_exception to possibly throw a g24_lib::exceptions::out_of_bounds_access. */ inline Type& operator()(const indexer i) { if (i >= Size) { exceptions::throw_exception(exceptions::out_of_bounds_access("Coll deprecated, but it's accessing outside.")); } return a[i]; } /*! \brief Bounded access operator through an index. 
\throw In case the access is out of bounds, uses g24_lib::exceptions::throw_exception to possibly throw a g24_lib::exceptions::out_of_bounds_access. */ inline const Type& operator()(const indexer i) const { if (i >= Size) { exceptions::throw_exception(exceptions::out_of_bounds_access("Coll deprecated, but it's accessing outside.")); } return a[i]; } /*! \brief Converts from an ordered position to an index. \remark In essence, this is the dot product between the ordered position (understood as a vector) and the stride at each dimension (as a vector as well). \remark To understand the algorithm, take for example a 3d collection of elements with `{11, 12, 13}` in each dimension. The strides will be `{156,13,1}`. \remark Suppose one wants to find the index that corresponds to the position `{4,5,6}`. Given the way positions are translated into indexes, one needs to compute: `4 * 156 + 5 * 13 + 1 * 6 = 624 + 65 + 6 = 695`. \sa g24_lib::coll::to_point */ inline indexer to_elem(const point<indexer, indexer> &p) const { indexer i, elem = 0; for (i = 0; i < dim; ++i) { elem += p[i]*sep[i]; } return elem; } /*! \brief Access operator with an ordered position. Has worse performance than through an index. \sa g24_lib::coll::to_elem */ inline Type& operator[] (const point<indexer, indexer> p) { return (*this)[to_elem(p)]; } /*! \brief Access operator with an ordered position. Has worse performance than through an index. \sa g24_lib::coll::to_elem */ inline const Type& operator[] (const point<indexer, indexer> p) const { return (*this)[to_elem(p)]; } /*! \brief Bounded access operator with an ordered position. Has worse performance than through an index. \throw In case the access is out of bounds, uses g24_lib::exceptions::throw_exception to possibly throw a g24_lib::exceptions::out_of_bounds_access. \sa g24_lib::coll::to_elem */ inline Type& operator() (const point<indexer, indexer> p) { return (*this)(to_elem(p)); } /*! \brief Bounded access operator with an ordered position. 
Has worse performance than through an index. \throw In case the access is out of bounds, uses g24_lib::exceptions::throw_exception to possibly throw a g24_lib::exceptions::out_of_bounds_access. \sa g24_lib::coll::to_elem */ inline const Type& operator() (const point<indexer, indexer> p) const { return (*this)(to_elem(p)); } /*! \brief Converts from an index to an ordered position. \remark To understand the algorithm, take for example a 3d collection of elements with `{11, 12, 13}` in each dimension. The strides will be `{156,13,1}`. \remark Suppose one wants to find the ordered position that corresponds to the index 123. Given the way positions are translated into indexes, one needs to compute: #- `123 / 156 = 0` (in integer arithmetic), `123 % 156 = 123`: The first component of the position is 0, we proceed with the number `123`. #- `123 / 13 = 9`, `123 % 13 = 6`: The second component of the position is 9, we proceed with the number `6` #- `6 / 1 = 6`: The third and last component of the position is `6`. \sa g24_lib::coll::to_elem */ inline point<indexer, indexer> to_point(const indexer elem) const { indexer j, m = elem; point<indexer, indexer> res(dim); for (j = 0; j < dim; ++j) { res[j] = m/sep[j]; m = m % sep[j]; } return res; } /*! \brief Returs the elements of the `coll` that are direct neighbors to the specified element. \param elem The index of the element whose neighbors will be returned. \param p The ordered position of the element whose neighbors will be returned. \pre \p elem and \p p must obviously refer to the same element. \returns An `std::vector` containing the neighbors of the specified element. For an element with an ordered position `(a,b,...,d)`, the return vector will be ordered as: `(a+1,b,...,d)`,`(a-1,b,...,d)`,`(a,b+1,...,d)`,`(a,b-1,...,d)`,...,`(a,b,...,d-1)`, with any invalid positions being ommitted (if the specified element is in the border of the collection). 
\remark Of course, this is extremely inefficient for large types, which is another reason for this class to be deprecated... */ std::vector<Type> get_near(const indexer elem, const point<indexer, indexer> &p) const { indexer i, j = 0; std::vector<Type> ret; ret.reserve(dim*2); for (i = 0; i < dim; ++i) { if (p[i] > 0) { ret.push_back(a[elem + sep[i]]); ++j; } if (p[i] > 0 && p[i] < nums[i] - 1) { ret.push_back(a[elem - sep[i]]); ++j; } } ret.shrink_to_fit(); return ret; } /*! \brief Returs the elements of the `coll` that are direct neighbors to the specified element. \param p The ordered position of the element whose neighbors will be returned. \returns An `std::vector` containing the neighbors of the specified element. For an element with an ordered position `(a,b,...,d)`, the return vector will be ordered as: `(a+1,b,...,d)`,`(a-1,b,...,d)`,`(a,b+1,...,d)`,`(a,b-1,...,d)`,...,`(a,b,...,d-1)`, with any invalid positions being ommitted (if the specified element is in the border of the collection). \remark Of course, this is extremely inefficient for large types, which is another reason for this class to be deprecated... */ inline std::vector<Type> get_near(const point<indexer, indexer> &p) const { return get_near(to_elem(p), p); } /*! \brief Returs the elements of the `coll` that are direct neighbors to the specified element. \param elem The index of the element whose neighbors will be returned. \returns An `std::vector` containing the neighbors of the specified element. For an element with an ordered position `(a,b,...,d)`, the return vector will be ordered as: `(a+1,b,...,d)`,`(a-1,b,...,d)`,`(a,b+1,...,d)`,`(a,b-1,...,d)`,...,`(a,b,...,d-1)`, with any invalid positions being ommitted (if the specified element is in the border of the collection). \remark Of course, this is extremely inefficient for large types, which is another reason for this class to be deprecated... 
*/ inline std::vector<Type> get_near(const indexer elem) const //Obtém os pontos mais próximos do elemento. { return get_near(elem, to_point(elem)); } /*! \brief Sets all of the elements in the border of the collection to \p v. \remark When the authors finished writing the algorithm for this function, though in retrospective it doesn't seem that impressive, they were very glad to have been able to figure out a way to select every element of the border in linear time and without repetition. */ void set_border(const Type &v) { indexer i,j, n; for (i = 0; i < dim; ++i) //i fixes the coordinate where we will consider 0 or max. { n = 0; for(j = 0; j < i; ++j) { n += sep[j]; } //To start at (1, ... , 1, 0, ...) and avoid repetition. while(n < Size - sep[0]) { #ifdef _OPENMP #pragma omp parallel for #endif for (indexer k = 0; k < sep[i]; ++k) { a[k+n] = v; a[k+n+(nums[i]-1)*sep[i]] = v; } if(i > 0) { n += sep[i-1]; } else { n = Size; } for(j = i - 1; j < i; --j) //This can come outside the conditional //since i = 0 invalidates j < i //and i = 1 is taken care of through Size - sep[0] { if(j > 0 && n % sep[j-1] == 0) { n += sep[j]; } else if(j > 0 && (n % sep[j-1])/sep[j] == (nums[j]-1)) { n += 2*sep[j]; } } } } } /*! \brief Returns the number of elements in the border of the collection. */ indexer count_border() const //Conta o número de elementos da fronteira. { indexer i, j, n, count = 0; for (i = 0; i < dim; ++i) //i fixes the coordinate where we will consider 0 or max. { n = 0; for(j = 0; j < i; ++j) { n += sep[j]; } //To start at (1, ... , 1, 0, ...) and avoid repetition. 
while(n < Size - sep[0]) { count += 2*sep[i]; if(i > 0) { n += sep[i-1]; } else { n = Size; } for(j = i - 1; j < i; --j) //This can come outside the conditional //since i = 0 invalidates j < i //and i = 1 is taken care of through Size - sep[0] { if(j > 0 && n % sep[j-1] == 0) { n += sep[j]; } else if(j > 0 && (n % sep[j-1])/sep[j] == (nums[j]-1)) { n += 2*sep[j]; } } } } return count; } /* void operate_on_border(Type (*f) (const Type &, const indexer, const coll<Type, indexer> &, void*), void *par = nullptr) { #ifdef _OPENMP #pragma omp parallel for #endif for (indexer i = 0; i < Size; ++i) { a[i] = f(a[i], i, (*this), par); } } void operate_on_all(Type (*f) (const Type &, const point<indexer, indexer> &, const coll<Type, indexer> &, void *), void *par = nullptr) { #ifdef _OPENMP #pragma omp parallel for for (indexer i = 0; i < Size; ++i) { point<indexer, indexer> p = to_point (i); a[i] = f(a[i], p, (*this), par); } #else point<indexer, indexer> p(dim,0); indexer i; for (i = 0; i < Size; ++i) { a[i] = f(a[i], p, (*this), par); p.inc_with_wrap(nums, dim-1, true, true); } #endif } */ /*! \brief Checks if an element belongs to the border of the collection. */ inline bool is_border(const point <indexer, indexer> &p) const { indexer i; for(i = 0; i < dim; ++i) { if (p[i] == 0 || p[i] == nums[i] - 1) { return true; } } return false; } /*! \brief Checks if an element belongs to the border of the collection. */ inline bool is_border(const indexer elem) const { return is_border(to_point(elem)); } /*! \brief Operates on all elements of the collection. \param f A function that takes the current element, its index, a reference to the current `coll` and an extra `void *` as arguments and returns the new element. \param par A pointer to any extra arguments. \remark This heavily foregoes type safety and leans too much towards C and not C++. The authors blame that on their lack of experience with the language at the point this was written. 
`g24_lib::ndview` implements this in a more generic and idiomatic way, using template parameter packs and other C++ syntax goodies. */ inline void operate_on_all(Type (*f) (const Type &, const indexer, const coll<Type, indexer> &, void*), void *par = nullptr) { #ifdef _OPENMP #pragma omp parallel for #endif for (indexer i = 0; i < Size; ++i) { a[i] = f(a[i], i, (*this), par); } } /*! \brief Operates on all elements of the collection. \param f A function that takes the current element, its ordered position, a reference to the current `coll` and an extra `void *` as arguments and returns the new element. \param par A pointer to any extra arguments. \remark This heavily foregoes type safety and leans too much towards C and not C++. The authors blame that on their lack of experience with the language at the point this was written. `g24_lib::ndview` implements this in a more generic and idiomatic way, using template parameter packs and other C++ syntax goodies. */ inline void operate_on_all(Type (*f) (const Type &, const point<indexer, indexer> &, const coll<Type, indexer> &, void *), void *par = nullptr) { #ifdef _OPENMP #pragma omp parallel for for (indexer i = 0; i < Size; ++i) { point<indexer, indexer> p = to_point (i); a[i] = f(a[i], p, (*this), par); } #else point<indexer, indexer> p(dim,(indexer) 0); indexer i; for (i = 0; i < Size; ++i) { a[i] = f(a[i], p, (*this), par); p.inc_with_wrap(nums, dim-1, true, true); } #endif } /*! \brief Sets all the elements with coordinate \p coord in dimension \p dms to \p v. */ void set_with_specific_coord(const Type &v, const indexer dms, const indexer coord) { indexer j = 0, n = coord*sep[dms]; while(n + j + sep[dms] - 1 < Size) { #ifdef _OPENMP #pragma omp parallel for #endif for (indexer i = 0; i < sep[dms]; ++i) { a[i + n + j] = v; } if(dms > 0) { j += sep[dms - 1]; } else { return; } } } ~coll() { delete[] a; } /*! \brief Prints a collection in a roughly understantable format. 
\warning For proper serialization, use `textual_output`. */ friend std::ostream& operator<< (std::ostream &s, const coll<Type, indexer> &c) //Imprime uma colecção. //Não é necessariamente uma forma de armazenar a colecção, //antes para um output visualmente compreensível. { indexer i; s << "\n"; if (c.Size) { for(i = 0; i < c.size(); ++i) { if(c.dim > 2 && i % c.separation(c.dim - 3) == 0) { s << "\n\n" << (c.to_point(i) >> 2) << "\n\n"; } if(c.dim > 1 && i % c.separation(c.dim-2) == 0) { s << "\n"; } s << c.a[i] << " "; } } return s; } /*! \brief Prints a `coll` to a stream in a futurely readable way. \details Prints the number of dimensions, followed by the number of elements in each dimension, followed by the stride in each dimenson, then the total size and finally the elements, all with `operator<<`. \deprecated Old way of printing the contents of the `coll`, provided for backwards compatibility reasons. Please use `textual_output` for proper serialization. */ template <class stream> void raw_print(stream &s) { s << dim << " "; nums.raw_print(s, false); s << " "; sep.raw_print(s, false); s << Size << " "; for(indexer i = 0; i < Size; ++i) { s << a[i] << " "; } } /*! \brief Reads a `coll` in the format \details Reads the number of dimensions, followed by the number of elements in each dimension, followed by the stride in each dimension, then the total size and finally the elements, all with `operator>>`. \deprecated Old way of reading the contents of the `coll`, provided for backwards compatibility reasons. Please use `textual_input` for proper serialization. 
*/ template <class stream> void raw_read(stream &s) { s >> dim; nums.raw_read(s, dim); sep.raw_read(s, dim); indexer temp; s >> temp; if (temp != Size) { delete[] a; a = new Type[temp]; Size = temp; } for(indexer i = 0; i < Size; ++i) { s >> a[i]; } } template<class stream, class str = std::basic_string<typename stream::char_type>> void textual_output(stream &s, const str& separator = " ") const { g24_lib::textual_output(s, nums, separator); s << separator; for (indexer i = 0; i < Size; ++i) { g24_lib::textual_output(s, a[i], separator); s << separator; } } template<class stream> void textual_input(stream &s) { point<Type, indexer> new_nums; g24_lib::textual_input(s, new_nums); if(s.fail()) { exceptions::throw_exception(exceptions::array_length_misread("")); } coll temp(new_nums.size(), new_nums); for (indexer i = 0; i < temp.size(); ++i) { g24_lib::textual_input(s, temp.a[i]); } (*this) = temp; } template<class stream> void binary_output(stream &s) const { g24_lib::binary_output(s, nums); for (indexer i = 0; i < Size; ++i) { g24_lib::binary_output(s, a[i]); } } template<class stream> void binary_input(stream &s) { point<Type, indexer> new_nums; g24_lib::binary_input(s, new_nums); if(s.fail()) { exceptions::throw_exception(exceptions::array_length_misread("")); } coll temp(new_nums.size(), new_nums); for (indexer i = 0; i < temp.size(); ++i) { g24_lib::binary_input(s, temp.arr[i]); } (*this) = temp; } /*! \brief An old kludge to enable output of `colls` of `g24_lib::fspoints` way before `serialization_helpers.h` was written. \deprecated Currently, it simply calls `g24_lib::coll::binary_output` since this already works with `g24_lib::fspoints`, but the function is kept here for backwards compatibility. 
*/ template <class stream> friend void binary_output_special(stream &s, const coll &c) { c.binary_output(s); /* s.write( (char*) &c.dim, sizeof(indexer)); s.write( (char*) c.nums.get_access_to_array(), c.dim*sizeof(indexer)); s.write( (char*) c.sep.get_access_to_array(), c.dim*sizeof(indexer)); s.write( (char*) &c.Size, sizeof(indexer)); for(indexer i = 0; i < c.Size; ++i) { s.write( (char*) c.a[i].get_access_to_array(), c.a[i].size() * sizeof(c.a[i][0])); } */ } /*! \brief An old kludge to enable input of `colls` of `g24_lib::fspoints` way before `serialization_helpers.h` was written. \deprecated Currently, it simply calls `g24_lib::coll::binary_input` since this already works with `g24_lib::fspoints`, but the function is kept here for backwards compatibility. */ template <class stream> friend void binary_input_special(stream &s, coll &c) { c.binary_input(s); /* s.read( (char*) &c.dim, sizeof(indexer)); c.nums.assure_size(c.dim); s.read( (char*) c.nums.get_access_to_array(), c.dim*sizeof(indexer)); c.sep.assure_size(c.dim); s.read( (char*) c.sep.get_access_to_array(), c.dim*sizeof(indexer)); indexer temp; s.read( (char*) &temp, sizeof(indexer)); if(temp != c.Size) { c.Size = temp; delete[] c.a; c.a = new Type[c.Size]; } for(indexer i = 0; i < c.Size; ++i) { s.read( (char*) c.a[i].get_access_to_array(), c.a[i].size() * sizeof(c.a[i][0])); } */ } }; } #endif
mem_managerRACBVH.h
#ifndef MEM_NANAGER_RACBVH_H #define MEM_NANAGER_RACBVH_H #include "App.h" #include "VDTActiveList.h" #include <math.h> #include "memory_map.h" // Application specific part template <class T> class RACBVH; template <class T> extern bool loadCluster(RACBVH<T> *pBVH, unsigned int CN, T* posCluster, long diskClusterOffset, int threadNum); // read only memory manager based on LRU template <class T> class CMemElementRACBVH{ public: int m_PageID; // Page idx in the original data int m_CachedPageID; // idx of cached Page T * m_Element; int m_NumElement; CMemElementRACBVH <T> * m_pNext, * m_pPrev; CMemElementRACBVH (void) { m_CachedPageID = m_PageID = -1; m_Element = NULL; m_pNext = m_pPrev = NULL; } CMemElementRACBVH (int PageID, int CachedPageID, int NumElement) { m_PageID = PageID; m_CachedPageID = CachedPageID; m_NumElement = NumElement; m_Element = new T [NumElement]; m_pNext = m_pPrev = NULL; } ~CMemElementRACBVH (void) { if (m_Element) { delete [] m_Element; m_Element = NULL; } } }; class CMemElementRACBVHCompressedCluster{ public: int m_PageID; // Page idx in the original data unsigned char *m_CachedCluster; // Pointer of cached compressed cluster int m_ClusterSize; CMemElementRACBVHCompressedCluster * m_pNext, * m_pPrev; CMemElementRACBVHCompressedCluster (void) { m_CachedCluster = (unsigned char*)-1; m_PageID = -1; m_pNext = m_pPrev = NULL; m_ClusterSize = 0; } CMemElementRACBVHCompressedCluster (int PageID, int ClusterSize) { m_PageID = PageID; m_ClusterSize = ClusterSize; m_CachedCluster = new unsigned char[ClusterSize]; m_pNext = m_pPrev = NULL; } ~CMemElementRACBVHCompressedCluster (void) { if (m_CachedCluster) { delete [] m_CachedCluster; m_CachedCluster = NULL; } } }; template <class T> class CMemManagerRACBVH// : public CMemoryMappedFile <T> { public: static bool IsPowerOfTwo (unsigned int Src, int & Power) { const int NumTests = 32; static const unsigned int powConst[NumTests] = { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 
32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, 67108864, 134217728, 268435456, 536870912, 1073741824, 2147483648U }; int i; for (i = 0;i < NumTests;i++) if (Src == powConst [i]) { Power = i; return true; } else if (Src < powConst [i]) { return false; } return false; } static float _log_2 (float x) { float Result = log (x) / log (float (2)); return Result; } int UNLOADED; char m_ObjName [255]; // Manager Name, for each debug int m_ObjID; // Manager ID int m_MaxNumPage; // maximum different Pages in the original data int m_NumCachedPage; // maximum cached Pages in the manager int m_CurNumCachedPage; // current # of cached Pages int m_PageSize; // Page size in terms of element int m_LocalIDMask, m_PageLocalBit; // bit mask and # of bit corresponding to slot size int m_LastAccessedPage[NUM_THREADS]; int * m_Loaded; // indicate idx of cached Page if loaded long * m_DiskClusterOffset; // disk offsets of compressed clusters CMemElementRACBVH <T> ** m_pPages; CActiveList <CMemElementRACBVH <T> * > m_LRUList; #ifdef USE_DM int m_MaxCachedMemCCluster; int m_UsedCacedMemCCluster; unsigned char **m_LoadedCCluster; CMemElementRACBVHCompressedCluster ** m_pPagesCCluster; CActiveList <CMemElementRACBVHCompressedCluster *> m_LRUListCCluster; #endif #ifdef _USE_OPENMP omp_lock_t *lck; #endif CMemManagerRACBVH (void) { m_Loaded = NULL; m_DiskClusterOffset = NULL; m_pPages = NULL; m_pRACBVH = NULL; UNLOADED = -1; } // PageSize should be power of two for efficiency bool Init (char * pName, int NumElement, int NumCachedPage, int PageSize) { bool Result = IsPowerOfTwo (PageSize, m_PageLocalBit); if (Result == false) { printf ("Page size (%d) is not power of two\n", PageSize); exit (-1); } m_NumCachedPage = NumCachedPage; m_MaxNumPage = int (ceil (float (NumElement) / float (PageSize))); if (m_MaxNumPage < m_NumCachedPage) m_NumCachedPage = m_MaxNumPage; m_LocalIDMask = PageSize - 1; m_CurNumCachedPage = -1; m_PageSize = PageSize; int i; 
for(i=0;i<NUM_THREADS;i++) m_LastAccessedPage[i] = -1; strcpy (m_ObjName, pName); m_Loaded = new int [m_MaxNumPage]; m_DiskClusterOffset = new long [m_MaxNumPage]; for (i = 0;i < m_MaxNumPage;i++) { m_Loaded [i] = UNLOADED; m_DiskClusterOffset [i] = 0; } m_pPages = new CMemElementRACBVH <T> * [m_NumCachedPage]; { // init LRU list CMemElementRACBVH <T> * pStartHead = new CMemElementRACBVH <T>; CMemElementRACBVH <T> * pEndHead = new CMemElementRACBVH <T>; m_LRUList.InitList (pStartHead, pEndHead); } fprintf (stderr, "%d (among %d) Pages created (total size = %dK)\n", m_NumCachedPage, m_MaxNumPage, PageSize * m_NumCachedPage * sizeof (T) / 1024); m_pRACBVH = NULL; #ifdef _USE_OPENMP lck = new omp_lock_t[m_MaxNumPage]; for(i=0;i<m_MaxNumPage;i++) { omp_init_lock(&lck[i]); } #endif return true; } ~CMemManagerRACBVH (void) { if (m_Loaded) { delete [] m_Loaded; m_Loaded = NULL; } if (m_DiskClusterOffset) { delete [] m_DiskClusterOffset; m_DiskClusterOffset = NULL; } if (m_pPages) { delete [] m_pPages; m_pPages = NULL; } #ifdef _USE_OPENMP int i; for(i=0;i<m_MaxNumPage;i++) { omp_destroy_lock(&lck[i]); } delete[] lck; #endif } const bool IsElementLoaded (unsigned int i) { int PageID = i >> m_PageLocalBit; if (m_Loaded [PageID] == UNLOADED) return false; return true; } bool SetPageAccessed (unsigned int PageID) { assert (PageID < m_MaxNumPage); assert (m_Loaded [PageID] != UNLOADED); int CachedPageID = m_Loaded [PageID]; CMemElementRACBVH <T> * pPage = m_pPages [CachedPageID]; /* if (PageID != m_LastAccessedPage) { // manage LRU list, already loaded. So put it front. 
m_LRUList.ForceAdd (pPage); m_LastAccessedPage = PageID; } */ return true; } T & operator [] (unsigned int i) { int PageID = i >> m_PageLocalBit; int LocalID = i & m_LocalIDMask; if (m_Loaded [PageID] == UNLOADED) { #ifdef _USE_OPENMP omp_set_lock(&lck[PageID]); //cout << "[" << omp_get_thread_num() << "] " << "lock setted (" << lck << ")" << endl; #endif if (m_Loaded [PageID] == UNLOADED) { if (m_CurNumCachedPage < m_NumCachedPage) { m_CurNumCachedPage++; int curNumCachedPage = m_CurNumCachedPage; m_pPages [curNumCachedPage] = new CMemElementRACBVH <T> (PageID, curNumCachedPage, m_PageSize); // require application specific load job Load (m_pPages [curNumCachedPage], PageID); m_LRUList.ForceAdd (m_pPages [curNumCachedPage]); m_Loaded [PageID] = curNumCachedPage; } else { CMemElementRACBVH <T> * pLeastUsed; #ifdef _USE_OPENMP #pragma omp critical #endif { pLeastUsed = m_LRUList.m_pEnd->m_pPrev; Unload (pLeastUsed); m_Loaded [pLeastUsed->m_PageID] = -1; } m_LRUList.ForceAdd (pLeastUsed); // require application specific load job // Map.Load (StartPos, m_AccessibleSize, m_FileSize); Load (pLeastUsed, PageID); m_Loaded [PageID] = pLeastUsed->m_CachedPageID; } } #ifdef _USE_OPENMP //cout << "[" << omp_get_thread_num() << "] " << "lock unsetted (" << lck << ")" << endl; omp_unset_lock(&lck[PageID]); #endif } int CachedPageID = m_Loaded [PageID]; CMemElementRACBVH <T> * pPage = m_pPages [CachedPageID]; #ifdef _USE_OPENMP int thread_num = omp_get_thread_num(); #else int thread_num = 0; #endif if (PageID != m_LastAccessedPage[thread_num]) { // manage LRU list, already loaded. So put it front. 
m_LRUList.ForceAdd (pPage); m_LastAccessedPage[thread_num] = PageID; } return pPage->m_Element [LocalID]; } T & GetReference (unsigned int i) { int PageID = i >> m_PageLocalBit; int LocalID = i & m_LocalIDMask; if (m_Loaded [PageID] == UNLOADED) { #ifdef _USE_OPENMP omp_set_lock(&lck[PageID]); #endif if (m_Loaded [PageID] == UNLOADED) { if (m_CurNumCachedPage < m_NumCachedPage) { m_CurNumCachedPage++; int curNumCachedPage = m_CurNumCachedPage; m_pPages [curNumCachedPage] = new CMemElementRACBVH <T> (PageID, curNumCachedPage); // require application specific load job Load (m_pPages [curNumCachedPage], PageID); m_LRUList.ForceAdd (m_pPages [curNumCachedPage]); m_Loaded [PageID] = curNumCachedPage; } else { CMemElementRACBVH <T> * pLeastUsed; #ifdef _USE_OPENMP #pragma omp critical #endif { pLeastUsed = m_LRUList.m_pEnd->m_pPrev; Unload (pLeastUsed); m_Loaded [pLeastUsed->m_PageID] = -1; } // require application specific load job // Map.Load (StartPos, m_AccessibleSize, m_FileSize); Load (pLeastUsed, PageID); m_LRUList.ForceAdd (pLeastUsed); m_Loaded [PageID] = pLeastUsed->m_CachedPageID; } } #ifdef _USE_OPENMP omp_unset_lock(&lck[PageID]); #endif } int CachedPageID = m_Loaded [PageID]; CMemElementRACBVH <T> * pPage = m_pPages [CachedPageID]; #ifdef _USE_OPENMP int thread_num = omp_get_thread_num(); #else int thread_num = 0; #endif if (PageID != m_LastAccessedPage[thread_num]) { // manage LRU list, already loaded. So put it front. 
m_LRUList.ForceAdd (pPage); m_LastAccessedPage[thread_num] = PageID; } return pPage->m_Element [LocalID]; } const T & GetConstRefWithoutLRU (unsigned int i) { int PageID = i >> m_PageLocalBit; int LocalID = i & m_LocalIDMask; if (m_Loaded [PageID] == UNLOADED) { fprintf (stderr, "GetConstRefWithoutLRU should not be called here\n"); exit (-1); } int CachedPageID = m_Loaded [PageID]; CMemElementRACBVH <T> * pPage = m_pPages [CachedPageID]; return pPage->m_Element [LocalID]; } T & GetReferenceWithoutLRU (unsigned int i) { int PageID = i >> m_PageLocalBit; int LocalID = i & m_LocalIDMask; if (m_Loaded [PageID] == UNLOADED) { fprintf (stderr, "GetReferenceWithoutLRU should not be called here\n"); exit (-1); } int CachedPageID = m_Loaded [PageID]; CMemElementRACBVH <T> * pPage = m_pPages [CachedPageID]; return pPage->m_Element [LocalID]; } // application specific data and functions // TODO, we can do this by inheriting and virtualization RACBVH<T> * m_pRACBVH; // class holding data bool Unload (CMemElementRACBVH <T> * pElement) { /* if (m_ObjID == 0) printf ("Obj ID = %d, Unload %d Page\n", m_ObjID, pElement->m_PageID); */ if (m_pRACBVH); else { //UnloadMapPage ((char *) pElement->m_Element); pElement->m_Element = NULL; } return true; } bool Load (CMemElementRACBVH <T> * pElement, int PageID) { pElement->m_PageID = PageID; if(m_pRACBVH) { int threadNum = 0; #ifdef _USE_OPENMP threadNum = omp_get_thread_num(); #endif loadCluster(m_pRACBVH, PageID, pElement->m_Element, m_DiskClusterOffset[PageID], threadNum); } else { /* if (m_UseFileMap) { __int64 StartPos; StartPos = (__int64) PageID * m_PageSize * sizeof (T); //printf ("load %d unit\n", WhichMap); pElement->m_Element = (T *) LoadPage (StartPos, m_PageSize * sizeof (T), m_FileSize, m_MappingMode); } */ } return true; } bool Flush (void) { return true; } }; #endif
// ===== common.h =====
/*! * Copyright (c) 2016 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_UTILS_COMMON_H_ #define LIGHTGBM_UTILS_COMMON_H_ #if ((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))) #include <LightGBM/utils/common_legacy_solaris.h> #endif #include <LightGBM/utils/json11.h> #include <LightGBM/utils/log.h> #include <LightGBM/utils/openmp_wrapper.h> #include <limits> #include <string> #include <algorithm> #include <chrono> #include <cmath> #include <cstdint> #include <cstdio> #include <cstdlib> #include <cstring> #include <functional> #include <iomanip> #include <iterator> #include <map> #include <memory> #include <sstream> #include <type_traits> #include <unordered_map> #include <utility> #include <vector> #if (!((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__)))) #define FMT_HEADER_ONLY #include "../../../external_libs/fmt/include/fmt/format.h" #endif #include "../../../external_libs/fast_double_parser/include/fast_double_parser.h" #ifdef _MSC_VER #include <intrin.h> #pragma intrinsic(_BitScanReverse) #endif #if defined(_MSC_VER) #include <malloc.h> #elif MM_MALLOC #include <mm_malloc.h> // https://gcc.gnu.org/onlinedocs/cpp/Common-Predefined-Macros.html // https://www.oreilly.com/library/view/mac-os-x/0596003560/ch05s01s02.html #elif defined(__GNUC__) && defined(HAVE_MALLOC_H) #include <malloc.h> #define _mm_malloc(a, b) memalign(b, a) #define _mm_free(a) free(a) #else #include <stdlib.h> #define _mm_malloc(a, b) malloc(a) #define _mm_free(a) free(a) #endif namespace LightGBM { namespace Common { using json11::Json; /*! * Imbues the stream with the C locale. 
*/ static void C_stringstream(std::stringstream &ss) { ss.imbue(std::locale::classic()); } inline static char tolower(char in) { if (in <= 'Z' && in >= 'A') return in - ('Z' - 'z'); return in; } inline static std::string Trim(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1); str.erase(0, str.find_first_not_of(" \f\n\r\t\v")); return str; } inline static std::string RemoveQuotationSymbol(std::string str) { if (str.empty()) { return str; } str.erase(str.find_last_not_of("'\"") + 1); str.erase(0, str.find_first_not_of("'\"")); return str; } inline static bool StartsWith(const std::string& str, const std::string prefix) { if (str.substr(0, prefix.size()) == prefix) { return true; } else { return false; } } inline static std::vector<std::string> Split(const char* c_str, char delimiter) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == delimiter) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> SplitBrackets(const char* c_str, char left_delimiter, char right_delimiter) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; bool open = false; while (pos < str.length()) { if (str[pos] == left_delimiter) { open = true; ++pos; i = pos; } else if (str[pos] == right_delimiter && open) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } open = false; ++pos; } else { ++pos; } } return ret; } inline static std::vector<std::string> SplitLines(const char* c_str) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { if (str[pos] == '\n' || str[pos] == '\r') { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } // skip the line endings while (str[pos] == '\n' || str[pos] == '\r') ++pos; // new begin i = pos; 
} else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) { std::vector<std::string> ret; std::string str(c_str); size_t i = 0; size_t pos = 0; while (pos < str.length()) { bool met_delimiters = false; for (int j = 0; delimiters[j] != '\0'; ++j) { if (str[pos] == delimiters[j]) { met_delimiters = true; break; } } if (met_delimiters) { if (i < pos) { ret.push_back(str.substr(i, pos - i)); } ++pos; i = pos; } else { ++pos; } } if (i < pos) { ret.push_back(str.substr(i)); } return ret; } inline static std::string GetFromParserConfig(std::string config_str, std::string key) { // parser config should follow json format. std::string err; Json config_json = Json::parse(config_str, &err); if (!err.empty()) { Log::Fatal("Invalid parser config: %s. Please check if follow json format.", err.c_str()); } return config_json[key].string_value(); } inline static std::string SaveToParserConfig(std::string config_str, std::string key, std::string value) { std::string err; Json config_json = Json::parse(config_str, &err); if (!err.empty()) { Log::Fatal("Invalid parser config: %s. 
Please check if follow json format.", err.c_str()); } CHECK(config_json.is_object()); std::map<std::string, Json> config_map = config_json.object_items(); config_map.insert(std::pair<std::string, Json>(key, Json(value))); return Json(config_map).dump(); } template<typename T> inline static const char* Atoi(const char* p, T* out) { int sign; T value; while (*p == ' ') { ++p; } sign = 1; if (*p == '-') { sign = -1; ++p; } else if (*p == '+') { ++p; } for (value = 0; *p >= '0' && *p <= '9'; ++p) { value = value * 10 + (*p - '0'); } *out = static_cast<T>(sign * value); while (*p == ' ') { ++p; } return p; } template<typename T> inline static double Pow(T base, int power) { if (power < 0) { return 1.0 / Pow(base, -power); } else if (power == 0) { return 1; } else if (power % 2 == 0) { return Pow(base*base, power / 2); } else if (power % 3 == 0) { return Pow(base*base*base, power / 3); } else { return base * Pow(base, power - 1); } } inline static const char* Atof(const char* p, double* out) { int frac; double sign, value, scale; *out = NAN; // Skip leading white space, if any. while (*p == ' ') { ++p; } // Get sign, if any. sign = 1.0; if (*p == '-') { sign = -1.0; ++p; } else if (*p == '+') { ++p; } // is a number if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') { // Get digits before decimal point or exponent, if any. for (value = 0.0; *p >= '0' && *p <= '9'; ++p) { value = value * 10.0 + (*p - '0'); } // Get digits after decimal point, if any. if (*p == '.') { double right = 0.0; int nn = 0; ++p; while (*p >= '0' && *p <= '9') { right = (*p - '0') + right * 10.0; ++nn; ++p; } value += right / Pow(10.0, nn); } // Handle exponent, if any. frac = 0; scale = 1.0; if ((*p == 'e') || (*p == 'E')) { uint32_t expon; // Get sign of exponent, if any. ++p; if (*p == '-') { frac = 1; ++p; } else if (*p == '+') { ++p; } // Get digits of exponent, if any. 
for (expon = 0; *p >= '0' && *p <= '9'; ++p) { expon = expon * 10 + (*p - '0'); } if (expon > 308) expon = 308; // Calculate scaling factor. while (expon >= 50) { scale *= 1E50; expon -= 50; } while (expon >= 8) { scale *= 1E8; expon -= 8; } while (expon > 0) { scale *= 10.0; expon -= 1; } } // Return signed and scaled floating point result. *out = sign * (frac ? (value / scale) : (value * scale)); } else { size_t cnt = 0; while (*(p + cnt) != '\0' && *(p + cnt) != ' ' && *(p + cnt) != '\t' && *(p + cnt) != ',' && *(p + cnt) != '\n' && *(p + cnt) != '\r' && *(p + cnt) != ':') { ++cnt; } if (cnt > 0) { std::string tmp_str(p, cnt); std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower); if (tmp_str == std::string("na") || tmp_str == std::string("nan") || tmp_str == std::string("null")) { *out = NAN; } else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) { *out = sign * 1e308; } else { Log::Fatal("Unknown token %s in data file", tmp_str.c_str()); } p += cnt; } } while (*p == ' ') { ++p; } return p; } // Use fast_double_parse and strtod (if parse failed) to parse double. inline static const char* AtofPrecise(const char* p, double* out) { const char* end = fast_double_parser::parse_number(p, out); if (end != nullptr) { return end; } // Rare path: Not in RFC 7159 format. Possible "inf", "nan", etc. Fallback to standard library: char* end2; errno = 0; // This is Required before calling strtod. *out = std::strtod(p, &end2); // strtod is locale aware. 
if (end2 == p) { Log::Fatal("no conversion to double for: %s", p); } if (errno == ERANGE) { Log::Warning("convert to double got underflow or overflow: %s", p); } return end2; } inline static bool AtoiAndCheck(const char* p, int* out) { const char* after = Atoi(p, out); if (*after != '\0') { return false; } return true; } inline static bool AtofAndCheck(const char* p, double* out) { const char* after = Atof(p, out); if (*after != '\0') { return false; } return true; } inline static const char* SkipSpaceAndTab(const char* p) { while (*p == ' ' || *p == '\t') { ++p; } return p; } inline static const char* SkipReturn(const char* p) { while (*p == '\n' || *p == '\r' || *p == ' ') { ++p; } return p; } template<typename T, typename T2> inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) { std::vector<T2> ret(arr.size()); for (size_t i = 0; i < arr.size(); ++i) { ret[i] = static_cast<T2>(arr[i]); } return ret; } template<typename T, bool is_float> struct __StringToTHelper { T operator()(const std::string& str) const { T ret = 0; Atoi(str.c_str(), &ret); return ret; } }; template<typename T> struct __StringToTHelper<T, true> { T operator()(const std::string& str) const { return static_cast<T>(std::stod(str)); } }; template<typename T> inline static std::vector<T> StringToArray(const std::string& str, char delimiter) { std::vector<std::string> strs = Split(str.c_str(), delimiter); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T> inline static std::vector<std::vector<T>> StringToArrayofArrays( const std::string& str, char left_bracket, char right_bracket, char delimiter) { std::vector<std::string> strs = SplitBrackets(str.c_str(), left_bracket, right_bracket); std::vector<std::vector<T>> ret; for (const auto& s : strs) { ret.push_back(StringToArray<T>(s, delimiter)); } return ret; } template<typename T> inline 
static std::vector<T> StringToArray(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } std::vector<std::string> strs = Split(str.c_str(), ' '); CHECK_EQ(strs.size(), static_cast<size_t>(n)); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } template<typename T, bool is_float> struct __StringToTHelperFast { const char* operator()(const char*p, T* out) const { return Atoi(p, out); } }; template<typename T> struct __StringToTHelperFast<T, true> { const char* operator()(const char*p, T* out) const { double tmp = 0.0f; auto ret = Atof(p, &tmp); *out = static_cast<T>(tmp); return ret; } }; template<typename T> inline static std::vector<T> StringToArrayFast(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } auto p_str = str.c_str(); __StringToTHelperFast<T, std::is_floating_point<T>::value> helper; std::vector<T> ret(n); for (int i = 0; i < n; ++i) { p_str = helper(p_str, &ret[i]); } return ret; } template<typename T> inline static std::string Join(const std::vector<T>& strs, const char* delimiter, const bool force_C_locale = false) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; if (force_C_locale) { C_stringstream(str_buf); } str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[0]; for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } template<> inline std::string Join<int8_t>(const std::vector<int8_t>& strs, const char* delimiter, const bool force_C_locale) { if (strs.empty()) { return std::string(""); } std::stringstream str_buf; if (force_C_locale) { C_stringstream(str_buf); } str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << static_cast<int16_t>(strs[0]); for (size_t i = 1; i < strs.size(); ++i) { str_buf << delimiter; str_buf << 
static_cast<int16_t>(strs[i]); } return str_buf.str(); } template<typename T> inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter, const bool force_C_locale = false) { if (end - start <= 0) { return std::string(""); } start = std::min(start, static_cast<size_t>(strs.size()) - 1); end = std::min(end, static_cast<size_t>(strs.size())); std::stringstream str_buf; if (force_C_locale) { C_stringstream(str_buf); } str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2); str_buf << strs[start]; for (size_t i = start + 1; i < end; ++i) { str_buf << delimiter; str_buf << strs[i]; } return str_buf.str(); } inline static int64_t Pow2RoundUp(int64_t x) { int64_t t = 1; for (int i = 0; i < 64; ++i) { if (t >= x) { return t; } t <<= 1; } return 0; } /*! * \brief Do inplace softmax transformation on p_rec * \param p_rec The input/output vector of the values. */ inline static void Softmax(std::vector<double>* p_rec) { std::vector<double> &rec = *p_rec; double wmax = rec[0]; for (size_t i = 1; i < rec.size(); ++i) { wmax = std::max(rec[i], wmax); } double wsum = 0.0f; for (size_t i = 0; i < rec.size(); ++i) { rec[i] = std::exp(rec[i] - wmax); wsum += rec[i]; } for (size_t i = 0; i < rec.size(); ++i) { rec[i] /= static_cast<double>(wsum); } } inline static void Softmax(const double* input, double* output, int len) { double wmax = input[0]; for (int i = 1; i < len; ++i) { wmax = std::max(input[i], wmax); } double wsum = 0.0f; for (int i = 0; i < len; ++i) { output[i] = std::exp(input[i] - wmax); wsum += output[i]; } for (int i = 0; i < len; ++i) { output[i] /= static_cast<double>(wsum); } } template<typename T> std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) { std::vector<const T*> ret; for (auto t = input.begin(); t !=input.end(); ++t) { ret.push_back(t->get()); } return ret; } template<typename T1, typename T2> inline static void SortForPair(std::vector<T1>* keys, 
std::vector<T2>* values, size_t start, bool is_reverse = false) { std::vector<std::pair<T1, T2>> arr; auto& ref_key = *keys; auto& ref_value = *values; for (size_t i = start; i < keys->size(); ++i) { arr.emplace_back(ref_key[i], ref_value[i]); } if (!is_reverse) { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first < b.first; }); } else { std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) { return a.first > b.first; }); } for (size_t i = start; i < arr.size(); ++i) { ref_key[i] = arr[i].first; ref_value[i] = arr[i].second; } } template <typename T> inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>* data) { std::vector<T*> ptr(data->size()); auto& ref_data = *data; for (size_t i = 0; i < data->size(); ++i) { ptr[i] = ref_data[i].data(); } return ptr; } template <typename T> inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) { std::vector<int> ret(data.size()); for (size_t i = 0; i < data.size(); ++i) { ret[i] = static_cast<int>(data[i].size()); } return ret; } inline static double AvoidInf(double x) { if (std::isnan(x)) { return 0.0; } else if (x >= 1e300) { return 1e300; } else if (x <= -1e300) { return -1e300; } else { return x; } } inline static float AvoidInf(float x) { if (std::isnan(x)) { return 0.0f; } else if (x >= 1e38) { return 1e38f; } else if (x <= -1e38) { return -1e38f; } else { return x; } } template<typename _Iter> inline static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) { return (0); } template<typename _RanIt, typename _Pr, typename _VTRanIt> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) { size_t len = _Last - _First; const size_t kMinInnerLen = 1024; int num_threads = OMP_NUM_THREADS(); if (len <= kMinInnerLen || num_threads <= 1) { std::sort(_First, _Last, _Pred); return; } size_t inner_size = (len + num_threads - 1) / 
num_threads; inner_size = std::max(inner_size, kMinInnerLen); num_threads = static_cast<int>((len + inner_size - 1) / inner_size); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < num_threads; ++i) { size_t left = inner_size*i; size_t right = left + inner_size; right = std::min(right, len); if (right > left) { std::sort(_First + left, _First + right, _Pred); } } // Buffer for merge. std::vector<_VTRanIt> temp_buf(len); _RanIt buf = temp_buf.begin(); size_t s = inner_size; // Recursive merge while (s < len) { int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2)); #pragma omp parallel for schedule(static, 1) for (int i = 0; i < loop_size; ++i) { size_t left = i * 2 * s; size_t mid = left + s; size_t right = mid + s; right = std::min(len, right); if (mid >= right) { continue; } std::copy(_First + left, _First + mid, buf + left); std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred); } s *= 2; } } template<typename _RanIt, typename _Pr> inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) { return ParallelSort(_First, _Last, _Pred, IteratorValType(_First)); } // Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not template <typename T> inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) { auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) { std::ostringstream os; os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]"; Log::Fatal(os.str().c_str(), callername, i); }; for (int i = 1; i < ny; i += 2) { if (y[i - 1] < y[i]) { if (y[i - 1] < ymin) { fatal_msg(i - 1); } else if (y[i] > ymax) { fatal_msg(i); } } else { if (y[i - 1] > ymax) { fatal_msg(i - 1); } else if (y[i] < ymin) { fatal_msg(i); } } } if (ny & 1) { // odd if (y[ny - 1] < ymin || y[ny - 1] > ymax) { fatal_msg(ny - 1); } } } // One-pass scan over array w with nw elements: find min, max and 
sum of elements; // this is useful for checking weight requirements. template <typename T1, typename T2> inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) { T1 minw; T1 maxw; T1 sumw; int i; if (nw & 1) { // odd minw = w[0]; maxw = w[0]; sumw = w[0]; i = 2; } else { // even if (w[0] < w[1]) { minw = w[0]; maxw = w[1]; } else { minw = w[1]; maxw = w[0]; } sumw = w[0] + w[1]; i = 3; } for (; i < nw; i += 2) { if (w[i - 1] < w[i]) { minw = std::min(minw, w[i - 1]); maxw = std::max(maxw, w[i]); } else { minw = std::min(minw, w[i]); maxw = std::max(maxw, w[i - 1]); } sumw += w[i - 1] + w[i]; } if (mi != nullptr) { *mi = minw; } if (ma != nullptr) { *ma = maxw; } if (su != nullptr) { *su = static_cast<T2>(sumw); } } inline static std::vector<uint32_t> EmptyBitset(int n) { int size = n / 32; if (n % 32 != 0) ++size; return std::vector<uint32_t>(size); } template<typename T> inline static void InsertBitset(std::vector<uint32_t>* vec, const T val) { auto& ref_v = *vec; int i1 = val / 32; int i2 = val % 32; if (static_cast<int>(vec->size()) < i1 + 1) { vec->resize(i1 + 1, 0); } ref_v[i1] |= (1 << i2); } template<typename T> inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) { std::vector<uint32_t> ret; for (int i = 0; i < n; ++i) { int i1 = vals[i] / 32; int i2 = vals[i] % 32; if (static_cast<int>(ret.size()) < i1 + 1) { ret.resize(i1 + 1, 0); } ret[i1] |= (1 << i2); } return ret; } template<typename T> inline static bool FindInBitset(const uint32_t* bits, int n, T pos) { int i1 = pos / 32; if (i1 >= n) { return false; } int i2 = pos % 32; return (bits[i1] >> i2) & 1; } inline static bool CheckDoubleEqualOrdered(double a, double b) { double upper = std::nextafter(a, INFINITY); return b <= upper; } inline static double GetDoubleUpperBound(double a) { return std::nextafter(a, INFINITY); } inline static size_t GetLine(const char* str) { auto start = str; while (*str != '\0' && *str != '\n' && *str != '\r') { ++str; } return 
str - start; } inline static const char* SkipNewLine(const char* str) { if (*str == '\r') { ++str; } if (*str == '\n') { ++str; } return str; } template <typename T> static int Sign(T x) { return (x > T(0)) - (x < T(0)); } template <typename T> static T SafeLog(T x) { if (x > 0) { return std::log(x); } else { return -INFINITY; } } inline bool CheckAllowedJSON(const std::string& s) { unsigned char char_code; for (auto c : s) { char_code = static_cast<unsigned char>(c); if (char_code == 34 // " || char_code == 44 // , || char_code == 58 // : || char_code == 91 // [ || char_code == 93 // ] || char_code == 123 // { || char_code == 125 // } ) { return false; } } return true; } inline int RoundInt(double x) { return static_cast<int>(x + 0.5f); } template <typename T, std::size_t N = 32> class AlignmentAllocator { public: typedef T value_type; typedef std::size_t size_type; typedef std::ptrdiff_t difference_type; typedef T* pointer; typedef const T* const_pointer; typedef T& reference; typedef const T& const_reference; inline AlignmentAllocator() throw() {} template <typename T2> inline AlignmentAllocator(const AlignmentAllocator<T2, N>&) throw() {} inline ~AlignmentAllocator() throw() {} inline pointer adress(reference r) { return &r; } inline const_pointer adress(const_reference r) const { return &r; } inline pointer allocate(size_type n) { return (pointer)_mm_malloc(n * sizeof(value_type), N); } inline void deallocate(pointer p, size_type) { _mm_free(p); } inline void construct(pointer p, const value_type& wert) { new (p) value_type(wert); } inline void destroy(pointer p) { p->~value_type(); } inline size_type max_size() const throw() { return size_type(-1) / sizeof(value_type); } template <typename T2> struct rebind { typedef AlignmentAllocator<T2, N> other; }; bool operator!=(const AlignmentAllocator<T, N>& other) const { return !(*this == other); } // Returns true if and only if storage allocated from *this // can be deallocated from other, and vice versa. 
// Always returns true for stateless allocators. bool operator==(const AlignmentAllocator<T, N>&) const { return true; } }; class Timer { public: Timer() { #ifdef TIMETAG int num_threads = OMP_NUM_THREADS(); start_time_.resize(num_threads); stats_.resize(num_threads); #endif // TIMETAG } ~Timer() { Print(); } #ifdef TIMETAG void Start(const std::string& name) { auto tid = omp_get_thread_num(); start_time_[tid][name] = std::chrono::steady_clock::now(); } void Stop(const std::string& name) { auto cur_time = std::chrono::steady_clock::now(); auto tid = omp_get_thread_num(); if (stats_[tid].find(name) == stats_[tid].end()) { stats_[tid][name] = std::chrono::duration<double, std::milli>(0); } stats_[tid][name] += cur_time - start_time_[tid][name]; } #else void Start(const std::string&) {} void Stop(const std::string&) {} #endif // TIMETAG void Print() const { #ifdef TIMETAG std::unordered_map<std::string, std::chrono::duration<double, std::milli>> stats(stats_[0].begin(), stats_[0].end()); for (size_t i = 1; i < stats_.size(); ++i) { for (auto it = stats_[i].begin(); it != stats_[i].end(); ++it) { if (stats.find(it->first) == stats.end()) { stats[it->first] = it->second; } else { stats[it->first] += it->second; } } } std::map<std::string, std::chrono::duration<double, std::milli>> ordered( stats.begin(), stats.end()); for (auto it = ordered.begin(); it != ordered.end(); ++it) { Log::Info("%s costs:\t %f", it->first.c_str(), it->second * 1e-3); } #endif // TIMETAG } #ifdef TIMETAG std::vector< std::unordered_map<std::string, std::chrono::steady_clock::time_point>> start_time_; std::vector<std::unordered_map<std::string, std::chrono::duration<double, std::milli>>> stats_; #endif // TIMETAG }; // Note: this class is not thread-safe, don't use it inside omp blocks class FunctionTimer { public: #ifdef TIMETAG FunctionTimer(const std::string& name, Timer& timer) : timer_(timer) { timer.Start(name); name_ = name; } ~FunctionTimer() { timer_.Stop(name_); } private: std::string 
name_; Timer& timer_; #else FunctionTimer(const std::string&, Timer&) {} #endif // TIMETAG }; } // namespace Common extern Common::Timer global_timer; /*! * Provides locale-independent alternatives to Common's methods. * Essential to make models robust to locale settings. */ namespace CommonC { template<typename T> inline static std::string Join(const std::vector<T>& strs, const char* delimiter) { return LightGBM::Common::Join(strs, delimiter, true); } template<typename T> inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) { return LightGBM::Common::Join(strs, start, end, delimiter, true); } inline static const char* Atof(const char* p, double* out) { return LightGBM::Common::Atof(p, out); } template<typename T, bool is_float> struct __StringToTHelperFast { const char* operator()(const char*p, T* out) const { return LightGBM::Common::Atoi(p, out); } }; /*! * \warning Beware that ``Common::Atof`` in ``__StringToTHelperFast``, * has **less** floating point precision than ``__StringToTHelper``. * Both versions are kept to maintain bit-for-bit the "legacy" LightGBM behaviour in terms of precision. * Check ``StringToArrayFast`` and ``StringToArray`` for more details on this. */ template<typename T> struct __StringToTHelperFast<T, true> { const char* operator()(const char*p, T* out) const { double tmp = 0.0f; auto ret = Atof(p, &tmp); *out = static_cast<T>(tmp); return ret; } }; template<typename T, bool is_float> struct __StringToTHelper { T operator()(const std::string& str) const { T ret = 0; LightGBM::Common::Atoi(str.c_str(), &ret); return ret; } }; /*! * \warning Beware that ``Common::Atof`` in ``__StringToTHelperFast``, * has **less** floating point precision than ``__StringToTHelper``. * Both versions are kept to maintain bit-for-bit the "legacy" LightGBM behaviour in terms of precision. * Check ``StringToArrayFast`` and ``StringToArray`` for more details on this. 
* \note It is possible that ``fast_double_parser::parse_number`` is faster than ``Common::Atof``. */ template<typename T> struct __StringToTHelper<T, true> { T operator()(const std::string& str) const { double tmp; const char* end = Common::AtofPrecise(str.c_str(), &tmp); if (end == str.c_str()) { Log::Fatal("Failed to parse double: %s", str.c_str()); } return static_cast<T>(tmp); } }; /*! * \warning Beware that due to internal use of ``Common::Atof`` in ``__StringToTHelperFast``, * this method has less precision for floating point numbers than ``StringToArray``, * which calls ``__StringToTHelper``. * As such, ``StringToArrayFast`` and ``StringToArray`` are not equivalent! * Both versions were kept to maintain bit-for-bit the "legacy" LightGBM behaviour in terms of precision. */ template<typename T> inline static std::vector<T> StringToArrayFast(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } auto p_str = str.c_str(); __StringToTHelperFast<T, std::is_floating_point<T>::value> helper; std::vector<T> ret(n); for (int i = 0; i < n; ++i) { p_str = helper(p_str, &ret[i]); } return ret; } /*! * \warning Do not replace calls to this method by ``StringToArrayFast``. * This method is more precise for floating point numbers. * Check ``StringToArrayFast`` for more details. */ template<typename T> inline static std::vector<T> StringToArray(const std::string& str, int n) { if (n == 0) { return std::vector<T>(); } std::vector<std::string> strs = LightGBM::Common::Split(str.c_str(), ' '); CHECK_EQ(strs.size(), static_cast<size_t>(n)); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } /*! * \warning Do not replace calls to this method by ``StringToArrayFast``. * This method is more precise for floating point numbers. * Check ``StringToArrayFast`` for more details. 
*/ template<typename T> inline static std::vector<T> StringToArray(const std::string& str, char delimiter) { std::vector<std::string> strs = LightGBM::Common::Split(str.c_str(), delimiter); std::vector<T> ret; ret.reserve(strs.size()); __StringToTHelper<T, std::is_floating_point<T>::value> helper; for (const auto& s : strs) { ret.push_back(helper(s)); } return ret; } #if (!((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__)))) /*! * Safely formats a value onto a buffer according to a format string and null-terminates it. * * \note It checks that the full value was written or forcefully aborts. * This safety check serves to prevent incorrect internal API usage. * Correct usage will never incur in this problem: * - The received buffer size shall be sufficient at all times for the input format string and value. */ template <typename T> inline static void format_to_buf(char* buffer, const size_t buf_len, const char* format, const T value) { auto result = fmt::format_to_n(buffer, buf_len, format, value); if (result.size >= buf_len) { Log::Fatal("Numerical conversion failed. Buffer is too small."); } buffer[result.size] = '\0'; } template<typename T, bool is_float, bool high_precision> struct __TToStringHelper { void operator()(T value, char* buffer, size_t buf_len) const { format_to_buf(buffer, buf_len, "{}", value); } }; template<typename T> struct __TToStringHelper<T, true, false> { void operator()(T value, char* buffer, size_t buf_len) const { format_to_buf(buffer, buf_len, "{:g}", value); } }; template<typename T> struct __TToStringHelper<T, true, true> { void operator()(T value, char* buffer, size_t buf_len) const { format_to_buf(buffer, buf_len, "{:.17g}", value); } }; /*! * Converts an array to a string with with values separated by the space character. * This method replaces Common's ``ArrayToString`` and ``ArrayToStringFast`` functionality * and is locale-independent. 
* * \note If ``high_precision_output`` is set to true, * floating point values are output with more digits of precision. */ template<bool high_precision_output = false, typename T> inline static std::string ArrayToString(const std::vector<T>& arr, size_t n) { if (arr.empty() || n == 0) { return std::string(""); } __TToStringHelper<T, std::is_floating_point<T>::value, high_precision_output> helper; const size_t buf_len = high_precision_output ? 32 : 16; std::vector<char> buffer(buf_len); std::stringstream str_buf; Common::C_stringstream(str_buf); helper(arr[0], buffer.data(), buf_len); str_buf << buffer.data(); for (size_t i = 1; i < std::min(n, arr.size()); ++i) { helper(arr[i], buffer.data(), buf_len); str_buf << ' ' << buffer.data(); } return str_buf.str(); } #endif // (!((defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__)))) } // namespace CommonC } // namespace LightGBM #endif // LIGHTGBM_UTILS_COMMON_H_
nested_targ.c
/* Checks that nested "#pragma omp declare target" regions are accepted by
   the compiler: foo() is declared inside the outer region only, bar()
   inside both the outer and the inner region.  The test only needs to
   compile and link cleanly; main() performs no work. */
#include <stdio.h>
#include "assert.h"
#include <unistd.h>

#pragma omp declare target
int foo();

#pragma omp declare target
int bar();
#pragma omp end declare target

#pragma omp end declare target

int main()
{
  return 0;
}
alloca-1.c
/* Offloading test for __builtin_alloca inside a target region: every
   iteration of the distributed loop allocates a small per-iteration stack
   buffer on the device, writes one byte ('a' + i) through it into a shared
   heap string, and the host verifies the result afterwards. */
#define size 10

/* File scope so the loop index survives default(none); j and k are unused
   here but kept as part of the original test fixture. */
int i, j, k;

int main ()
{
  char *out = __builtin_malloc (size + 1);

  #pragma omp target teams
  {
    #pragma omp distribute parallel for default(none) private(i) shared(out)
    for (i = 0; i < size; ++i)
      {
        /* Fresh device-stack scratch each iteration; only byte 5 is used. */
        char *scratch = __builtin_alloca (10);
        scratch[5] = 97 + i;      /* 'a' + i */
        out[i] = scratch[5];
      }
  }

  /* Host-side verification: abort on the first mismatch. */
  for (i = 0; i < size; ++i)
    if (out[i] != 97 + i)
      __builtin_abort ();

  return 0;
}
cblas_wrapper.c
/*
#
#     * The source code in this file is developed independently by NEC Corporation.
#
# # NLCPy License #
#
#     Copyright (c) 2020-2021 NEC Corporation
#     All rights reserved.
#
#     Redistribution and use in source and binary forms, with or without
#     modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright notice,
#       this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright notice,
#       this list of conditions and the following disclaimer in the documentation
#       and/or other materials provided with the distribution.
#     * Neither NEC Corporation nor the names of its contributors may be
#       used to endorse or promote products derived from this software
#       without specific prior written permission.
#
#     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#     ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#     WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#     DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#     FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#     (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#     LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#     ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#     (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#     SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
*/
#include <cblas.h>
#include "nlcpy.h"

/*
 * CBLAS wrappers called on the VE side.
 *
 * Fixes relative to the previous revision:
 *   - The dot-product wrappers used to execute `return` from inside a
 *     `#pragma omp single` structured block.  Branching out of an OpenMP
 *     structured block is non-conforming, so the error status is now
 *     recorded and returned only after the block completes.
 *   - wrapper_cblas_sgemm/_dgemm dereferenced the result of
 *     nlcpy__get_scalar() without a NULL check (the c/z variants did
 *     check).  All four GEMM wrappers now check alpha/beta uniformly.
 *   - The eight near-identical wrappers are generated from two shared
 *     macro bodies so the splitting/offset logic exists in one place.
 */

#ifdef _OPENMP
#define NLCPY_PRAGMA_OMP_SINGLE _Pragma("omp single")
#define NLCPY_NUM_THREADS()     omp_get_num_threads()
#define NLCPY_THREAD_ID()       omp_get_thread_num()
#else
#define NLCPY_PRAGMA_OMP_SINGLE
#define NLCPY_NUM_THREADS()     1
#define NLCPY_THREAD_ID()       0
#endif /* _OPENMP */

/*
 * Common body of the 1-D dot-product wrappers.
 *   DTYPE    : element type of x, y and z
 *   DOT_STMT : statement storing the reduction result through pz
 * The computation runs on a single thread (`omp single`); the other
 * threads of the enclosing parallel region wait at the implicit barrier.
 * On a NULL operand the status is recorded and returned after the block.
 */
#define NLCPY_DOT_WRAPPER_BODY(DTYPE, DOT_STMT)                             \
    uint64_t err = (uint64_t)NLCPY_ERROR_OK;                                \
NLCPY_PRAGMA_OMP_SINGLE                                                     \
    {                                                                       \
        ve_array *x = &(args->binary.x);                                    \
        ve_array *y = &(args->binary.y);                                    \
        ve_array *z = &(args->binary.z);                                    \
        DTYPE *px = (DTYPE *)x->ve_adr;                                     \
        if (px == NULL) px = (DTYPE *)nlcpy__get_scalar(x);                 \
        DTYPE *py = (DTYPE *)y->ve_adr;                                     \
        if (py == NULL) py = (DTYPE *)nlcpy__get_scalar(y);                 \
        DTYPE *pz = (DTYPE *)z->ve_adr;                                     \
        if (px == NULL || py == NULL || pz == NULL) {                       \
            err = (uint64_t)NLCPY_ERROR_MEMORY;                             \
        } else {                                                            \
            assert(x->ndim <= 1);                                           \
            assert(y->ndim <= 1);                                           \
            assert(z->ndim <= 1);                                           \
            assert(x->size == y->size);                                     \
            DOT_STMT;                                                       \
        }                                                                   \
    } /* omp single */                                                      \
    retrieve_fpe_flags(psw);                                                \
    return err;

/* z = dot(x, y), single precision real. */
uint64_t wrapper_cblas_sdot(ve_arguments *args, int32_t *psw)
{
    NLCPY_DOT_WRAPPER_BODY(float,
        *pz = cblas_sdot(x->size,
                         px, x->strides[0] / x->itemsize,
                         py, y->strides[0] / y->itemsize))
}

/* z = dot(x, y), double precision real. */
uint64_t wrapper_cblas_ddot(ve_arguments *args, int32_t *psw)
{
    NLCPY_DOT_WRAPPER_BODY(double,
        *pz = cblas_ddot(x->size,
                         px, x->strides[0] / x->itemsize,
                         py, y->strides[0] / y->itemsize))
}

/* z = unconjugated dot(x, y), single precision complex. */
uint64_t wrapper_cblas_cdotu_sub(ve_arguments *args, int32_t *psw)
{
    NLCPY_DOT_WRAPPER_BODY(float _Complex,
        cblas_cdotu_sub(x->size,
                        px, x->strides[0] / x->itemsize,
                        py, y->strides[0] / y->itemsize, pz))
}

/* z = unconjugated dot(x, y), double precision complex. */
uint64_t wrapper_cblas_zdotu_sub(ve_arguments *args, int32_t *psw)
{
    NLCPY_DOT_WRAPPER_BODY(double _Complex,
        cblas_zdotu_sub(x->size,
                        px, x->strides[0] / x->itemsize,
                        py, y->strides[0] / y->itemsize, pz))
}

/* Scalar adapters: s/d GEMM takes alpha/beta by value, c/z by pointer. */
#define NLCPY_SCAL_S(p)   (*(const float *)(p))
#define NLCPY_SCAL_D(p)   (*(const double *)(p))
#define NLCPY_SCAL_PTR(p) (p)

/*
 * Common body of the GEMM wrappers.
 *   DTYPE      : element type of A, B and C
 *   CBLAS_GEMM : cblas routine to invoke
 *   SCALAR(p)  : adapts the alpha/beta pointer to the argument form the
 *                routine expects (see NLCPY_SCAL_* above)
 * Every thread of the enclosing parallel region calls the wrapper and
 * computes a disjoint stripe of C: when n > nt the split is over the
 * columns (n), otherwise over the rows (m).  The iar/iac/ibr/ibc/icr/icc
 * values are the element strides used to offset each thread's stripe.
 */
#define NLCPY_GEMM_WRAPPER_BODY(DTYPE, CBLAS_GEMM, SCALAR)                  \
    const int32_t order  = args->gemm.order;                                \
    const int32_t transA = args->gemm.transA;                               \
    const int32_t transB = args->gemm.transB;                               \
    const int32_t m = args->gemm.m;                                         \
    const int32_t n = args->gemm.n;                                         \
    const int32_t k = args->gemm.k;                                         \
    const int32_t lda = args->gemm.lda;                                     \
    const int32_t ldb = args->gemm.ldb;                                     \
    const int32_t ldc = args->gemm.ldc;                                     \
    const void *alpha = nlcpy__get_scalar(&(args->gemm.alpha));             \
    const void *beta  = nlcpy__get_scalar(&(args->gemm.beta));              \
    DTYPE * const a = (DTYPE *)args->gemm.a.ve_adr;                         \
    DTYPE * const b = (DTYPE *)args->gemm.b.ve_adr;                         \
    DTYPE * const c = (DTYPE *)args->gemm.c.ve_adr;                         \
    if (alpha == NULL || beta == NULL) {                                    \
        return (uint64_t)NLCPY_ERROR_MEMORY;                                \
    }                                                                       \
    if (a == NULL || b == NULL || c == NULL) {                              \
        return (uint64_t)NLCPY_ERROR_MEMORY;                                \
    }                                                                       \
    const int32_t nt = NLCPY_NUM_THREADS();                                 \
    const int32_t it = NLCPY_THREAD_ID();                                   \
    const int32_t m_s = m * it / nt;                                        \
    const int32_t m_d = m * (it + 1) / nt - m_s;                            \
    const int32_t n_s = n * it / nt;                                        \
    const int32_t n_d = n * (it + 1) / nt - n_s;                            \
    const int32_t split_n = (n > nt); /* else split over m */               \
    const int32_t iar = (transA == CblasNoTrans) ? 1 : lda;                 \
    const int32_t iac = (transA == CblasNoTrans) ? lda : 1;                 \
    const int32_t ibr = (transB == CblasNoTrans) ? 1 : ldb;                 \
    const int32_t ibc = (transB == CblasNoTrans) ? ldb : 1;                 \
    const int32_t icr = (order == CblasColMajor) ? 1 : ldc;                 \
    const int32_t icc = (order == CblasColMajor) ? ldc : 1;                 \
    (void)ibr; (void)ibc; (void)iar; (void)iac;                             \
    if (order == CblasColMajor) {                                           \
        if (!split_n) {                                                     \
            CBLAS_GEMM(order, transA, transB, m_d, n, k, SCALAR(alpha),     \
                       a + m_s * iar, lda, b, ldb, SCALAR(beta),            \
                       c + m_s * icr, ldc);                                 \
        } else {                                                            \
            CBLAS_GEMM(order, transA, transB, m, n_d, k, SCALAR(alpha),     \
                       a, lda, b + n_s * ibc, ldb, SCALAR(beta),            \
                       c + n_s * icc, ldc);                                 \
        }                                                                   \
    } else {                                                                \
        if (!split_n) {                                                     \
            CBLAS_GEMM(order, transA, transB, m_d, n, k, SCALAR(alpha),     \
                       a + m_s * iac, lda, b, ldb, SCALAR(beta),            \
                       c + m_s * icr, ldc);                                 \
        } else {                                                            \
            CBLAS_GEMM(order, transA, transB, m, n_d, k, SCALAR(alpha),     \
                       a, lda, b + n_s * ibr, ldb, SCALAR(beta),            \
                       c + n_s * icc, ldc);                                 \
        }                                                                   \
    }                                                                       \
    retrieve_fpe_flags(psw);                                                \
    return (uint64_t)NLCPY_ERROR_OK;

/* C = alpha*op(A)*op(B) + beta*C, single precision real. */
uint64_t wrapper_cblas_sgemm(ve_arguments *args, int32_t *psw)
{
    NLCPY_GEMM_WRAPPER_BODY(float, cblas_sgemm, NLCPY_SCAL_S)
}

/* C = alpha*op(A)*op(B) + beta*C, double precision real. */
uint64_t wrapper_cblas_dgemm(ve_arguments *args, int32_t *psw)
{
    NLCPY_GEMM_WRAPPER_BODY(double, cblas_dgemm, NLCPY_SCAL_D)
}

/* C = alpha*op(A)*op(B) + beta*C, single precision complex. */
uint64_t wrapper_cblas_cgemm(ve_arguments *args, int32_t *psw)
{
    NLCPY_GEMM_WRAPPER_BODY(float _Complex, cblas_cgemm, NLCPY_SCAL_PTR)
}

/* C = alpha*op(A)*op(B) + beta*C, double precision complex. */
uint64_t wrapper_cblas_zgemm(ve_arguments *args, int32_t *psw)
{
    NLCPY_GEMM_WRAPPER_BODY(double _Complex, cblas_zgemm, NLCPY_SCAL_PTR)
}
GB_binop__isle_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__isle_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__isle_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__isle_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_uint32) // A*D function (colscale): GB (_AxD__isle_uint32) // D*A function (rowscale): GB (_DxB__isle_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__isle_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__isle_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_uint32) // C=scalar+B GB (_bind1st__isle_uint32) // C=scalar+B' GB (_bind1st_tran__isle_uint32) // C=A+scalar GB (_bind2nd__isle_uint32) // C=A'+scalar GB (_bind2nd_tran__isle_uint32) // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types 
of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_UINT32 || GxB_NO_ISLE_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__isle_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const 
int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_uint32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isle_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const 
GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isle_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
structure_factors_direct.h
#ifndef CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_H
#define CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_H

#include <cctbx/xray/scattering_type_registry.h>
#include <cctbx/xray/hr_ht_cache.h>
#include <cctbx/math/cos_sin_table.h>
#include <omptbx/omp_or_stubs.h>

#define CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_NO_PRAGMA_OMP

namespace cctbx { namespace xray { namespace structure_factors {

  //! Accumulates the structure factor F(h) of a single Miller index h by
  //! direct summation over the symmetry copies cached in hr_ht.
  /*! Usage: construct once per reflection, call add_contribution_of() for
      every scatterer, then read the accumulated total via f_calc().
   */
  template <typename CosSinType, typename ScattererType>
  struct direct_sum_over_equivalent_h
  {
    typedef typename ScattererType::float_type float_type;
    typedef std::complex<float_type> complex_type;

    /*! \param cos_sin_ evaluator of exp(2*pi*i*x) (exact or tabulated)
        \param space_group_ used to build the cache of (h*R, h*t) pairs
        \param h Miller index of the reflection
        \param d_star_sq_ 1/d^2 for h; consumed by the isotropic
               Debye-Waller factor in add_contribution_of()
     */
    direct_sum_over_equivalent_h(
      CosSinType const& cos_sin_,
      sgtbx::space_group const& space_group_,
      miller::index<> h,
      float_type d_star_sq_)
    :
      cos_sin(cos_sin_),
      hr_ht(cos_sin_, space_group_, h),
      d_star_sq(d_star_sq_),
      sum_f_calc(0,0)
    {}

    //! Adds one scatterer's contribution to sum_f_calc.
    /*! f0 is the form factor for this scatterer's type at d_star_sq;
        anomalous terms fp/fdp are taken from the scatterer itself.
     */
    void add_contribution_of(ScattererType const& scatterer, float_type f0)
    {
      typedef float_type f_t;
      typedef complex_type c_t;
      c_t f_calc(0,0);
      // Sum exp(2*pi*i*(h*R*x + h*t)) over the cached symmetry copies.
      for(std::size_t i=0;i<hr_ht.groups.size();i++) {
        hr_ht_group<f_t> const& g = hr_ht.groups[i];
        f_t hrx = g.hr * scatterer.site;
        c_t term = cos_sin.get(hrx + g.ht);
        if (scatterer.flags.use_u_aniso()) {
          // Anisotropic displacement: the Debye-Waller factor depends on
          // the rotated index g.hr; optional anharmonic correction on top.
          f_t dw = adptbx::debye_waller_factor_u_star(g.hr, scatterer.u_star);
          term *= dw;
          if (scatterer.anharmonic_adp) {
            term *= scatterer.anharmonic_adp->calculate(g.hr);
          }
        }
        f_calc += term;
      }
      if (hr_ht.is_origin_centric) {
        // Origin-centric case: imaginary parts cancel pairwise, so keep
        // twice the real part.
        f_calc = c_t(2*f_calc.real(),0);
      }
      else if (hr_ht.is_centric) {
        f_calc += std::conj(f_calc) * hr_ht.f_h_inv_t;
      }
      if (scatterer.flags.use_u_iso() && scatterer.u_iso != 0) {
        // Isotropic Debye-Waller factor; d_star_sq/4 = (sin(theta)/lambda)^2.
        f_t dw=adptbx::debye_waller_factor_u_iso(d_star_sq/4, scatterer.u_iso);
        f_calc *= dw;
      }
      // Apply occupancy weight and anomalous scattering: (f0 + fp) + i*fdp.
      f_t w = scatterer.weight();
      f_t f0p_w = (f0 + scatterer.fp) * w;
      f_t fdp_w = scatterer.fdp;
      if (fdp_w != 0) {
        fdp_w *= w;
        f_calc *= c_t(f0p_w, fdp_w);
      }
      else {
        f_calc *= f0p_w;
      }
      sum_f_calc += f_calc;
    }

    //! Accumulated structure factor, scaled by the lattice-translation factor.
    complex_type f_calc() { return sum_f_calc * hr_ht.ltr_factor; }

    CosSinType const &cos_sin;
    hr_ht_cache<float_type> hr_ht;
    float_type d_star_sq;
    complex_type sum_f_calc;
  };
template <class ScattererType=scatterer<> > class direct { public: typedef ScattererType scatterer_type; typedef typename ScattererType::float_type float_type; direct() {} direct( uctbx::unit_cell const& unit_cell, sgtbx::space_group const& space_group, af::const_ref<miller::index<> > const& miller_indices, af::const_ref<ScattererType> const& scatterers, xray::scattering_type_registry const& scattering_type_registry) { math::cos_sin_exact<float_type> cos_sin; compute(cos_sin, unit_cell, space_group, miller_indices, scatterers, scattering_type_registry); } template<class CosSinType> direct( CosSinType const& cos_sin, uctbx::unit_cell const& unit_cell, sgtbx::space_group const& space_group, af::const_ref<miller::index<> > const& miller_indices, af::const_ref<ScattererType> const& scatterers, xray::scattering_type_registry const& scattering_type_registry) { compute(cos_sin, unit_cell, space_group, miller_indices, scatterers, scattering_type_registry); } af::shared<std::complex<float_type> > const& f_calc() const { return f_calc_; } private: af::shared<std::complex<float_type> > f_calc_; template <typename CosSinType> void compute( CosSinType const& cos_sin, uctbx::unit_cell const& unit_cell, sgtbx::space_group const& space_group, af::const_ref<miller::index<> > const& miller_indices, af::const_ref<ScattererType> const& scatterers, xray::scattering_type_registry const& scattering_type_registry) { typedef float_type f_t; typedef std::complex<float_type> c_t; int n = static_cast<int>(miller_indices.size()); f_calc_ = af::shared<c_t>(n, af::init_functor_null<c_t>()); c_t *f_calc_beg = f_calc_.begin(); af::shared<std::size_t> scattering_type_indices = scattering_type_registry.unique_indices(scatterers); /* The OpenMP standard specifies that A throw executed inside a parallel region must cause execution to resume within the same parallel region, and it must be caught by the same thread that threw the exception. 
Since a std::runtime_error may be thrown during Debye-Waller computations (c.f. adptbx.h, function debye_waller_factor_exp) one must make sure it cannot escape the body of the parallelised loop. So we catch it inside the loop and then re-throw it immediately after the loop finished. */ boost::optional<std::runtime_error> error; #if !defined(CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_NO_PRAGMA_OMP) #if !defined(__DECCXX_VER) || (defined(_OPENMP) && _OPENMP > 199819) #pragma omp parallel for schedule(static) #endif #endif for(int i=0;i<n;i++) { try { miller::index<> h = miller_indices[i]; f_t d_star_sq = unit_cell.d_star_sq(h); af::shared<double> form_factors = scattering_type_registry.unique_form_factors_at_d_star_sq( d_star_sq); direct_sum_over_equivalent_h<CosSinType, ScattererType> sum(cos_sin, space_group, h, d_star_sq); for(std::size_t j=0; j<scatterers.size(); ++j) { sum.add_contribution_of(scatterers[j], form_factors[scattering_type_indices[j]]); } f_calc_beg[i] = sum.f_calc(); } catch (std::runtime_error e) { #pragma omp critical { // The first error will be recorded only. if (!error) error = e; } } } if (error) throw *error; } }; }}} // namespace cctbx::xray::structure_factors #endif // CCTBX_XRAY_STRUCTURE_FACTORS_DIRECT_H
computeP_mex.c
#include "mex.h"
#include "omp.h"
#include "math.h"
#include "string.h"

/*
 * MEX gateway: builds a cluster-membership matrix P from a relabelled
 * ensemble matrix E.
 *
 * Inputs  (nrhs == 2):
 *   prhs[0]  E, N-by-M double matrix (N data points, M ensemble members);
 *            entries > 0 are 1-based cluster labels, entries <= 0 ignored.
 *   prhs[1]  nClust, scalar: number of clusters (max label value).
 *
 * Outputs (nlhs == 2):
 *   plhs[0]  P, nClust-by-maxElem double matrix: row k lists the 0-based
 *            data-point indices assigned to cluster k (column-major fill).
 *   plhs[1]  nElemInCls, nClust-by-1: number of entries filled per cluster.
 *
 * P is grown geometrically (start N/2 columns, +20% per growth) and
 * trimmed to maxElem columns at the end.
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    // Inputs
    double *E; // relabelled ensemble matrix
    long long M, N;
    long long nClust, maxElem;

    // Outputs
    double *P;
    double *nElemInCls;

    // Internal
    long long i, Erow, label, Ntmp, Ninc, nbytes, destination_offset, bytes_to_copy;
    mxArray *Ptmp_mat, *newSpace_mat;
    double *Ptmp, *newSpace, *newptr;

    /* Check for proper number of input and output arguments */
    if (nrhs != 2) {
        mexErrMsgTxt("Two inputs argument required.");
    }
    if(nlhs != 2){
        mexErrMsgTxt("2 outputs required.");
    }

    // E matrix [M X N]
    N = mxGetM(prhs[0]); // number of data points
    M = mxGetN(prhs[0]); // number of ensemble members
    E = mxGetPr(prhs[0]);

    // Get size of ensemble M
    nClust = (long long)mxGetScalar(prhs[1]);

    // Create temp P matrix, half size of N; if we will need more space, we will allocate another matrix
    Ntmp = (long long)floor(N/2.0);
    Ninc = (long long)ceil(N*0.2); // 20% increase size if out of space
    Ptmp_mat = mxCreateDoubleMatrix(nClust,Ntmp,mxREAL);
    Ptmp = mxGetPr(Ptmp_mat);

    // Create vector for storing number of elements in each cluster
    plhs[1] = mxCreateDoubleMatrix(nClust,1,mxREAL);
    nElemInCls = mxGetPr(plhs[1]);

    //------------------------------------------------
    // Compute P and sum of cols
    maxElem = 0;
    // NOTE(review): parallel version left disabled -- the growth path and
    // nElemInCls[label]++ below are not thread-safe as written.
    //#pragma omp parallel for shared(E, N, M) private(i,Erow,label)
    for (i=0; i<N*M; i++){
        // E is column-major, so i % N recovers the 0-based data-point index.
        Erow = i % N;
        if(E[i]>0){
            label = (long long)E[i]-1; // convert 1-based label to 0-based row
            // Need to add some space to Ptmp?
            if(nElemInCls[label] >= Ntmp){
                // Grow Ptmp by Ninc columns: mxRealloc the data buffer,
                // then zero the new tail by copying from a freshly created
                // (zero-initialized) scratch matrix.
                newSpace_mat = mxCreateDoubleMatrix(nClust,Ninc,mxREAL);
                newSpace = mxGetPr(newSpace_mat);
                nbytes = (nClust) * (Ntmp + Ninc)* sizeof(double);//size of new array
                destination_offset = nClust * Ntmp; //start offset for copying
                bytes_to_copy = nClust * Ninc * sizeof(double);
                //ptr = mxGetPr(prhs[0]);
                newptr = (double *)mxRealloc(Ptmp, nbytes);//reallocate array
                mxSetPr(Ptmp_mat,newptr);
                //ptr = mxGetPr(prhs[1]);
                memcpy(newptr+destination_offset,newSpace,bytes_to_copy);//actual copy
                mxSetN(Ptmp_mat,Ntmp + Ninc);//fix dimension
                Ptmp = newptr;
                Ntmp += Ninc;
                mxDestroyArray(newSpace_mat);
            }
            // Column-major write: cluster `label` gets data point Erow in
            // its next free column.
            Ptmp[label+(long long)nElemInCls[label]*nClust] = (double)Erow;
            nElemInCls[label]++;
            if(nElemInCls[label] > maxElem){
                maxElem = (long long)nElemInCls[label];
            }
        }
    }

    // Create P matrix - trim Ptmp
    plhs[0] = mxCreateDoubleMatrix(nClust,maxElem,mxREAL);
    P = mxGetPr(plhs[0]);
    memcpy(P,Ptmp,maxElem*nClust*sizeof(double)); // copy contents of Ptmp, only relevant
    mxDestroyArray(Ptmp_mat);
}
GB_binop__times_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__times_int64 // A.*B function (eWiseMult): GB_AemultB__times_int64 // A*D function (colscale): GB_AxD__times_int64 // D*A function (rowscale): GB_DxB__times_int64 // C+=B function (dense accum): GB_Cdense_accumB__times_int64 // C+=b function (dense accum): GB_Cdense_accumb__times_int64 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__times_int64 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__times_int64 // C=scalar+B GB_bind1st__times_int64 // C=scalar+B' GB_bind1st_tran__times_int64 // C=A+scalar GB_bind2nd__times_int64 // C=A'+scalar GB_bind2nd_tran__times_int64 // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = (aij * bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ 
int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x * y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TIMES || GxB_NO_INT64 || GxB_NO_TIMES_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__times_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__times_int64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__times_int64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__times_int64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__times_int64 ( GrB_Matrix 
C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__times_int64 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *GB_RESTRICT Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__times_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__times_int64 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const 
GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__times_int64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t bij = Bx [p] ; Cx [p] = (x * bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__times_int64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; Cx [p] = (aij * y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) 
\ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (x * aij) ; \ } GrB_Info GB_bind1st_tran__times_int64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = Ax [pA] ; \ Cx [pC] = (aij * y) ; \ } GrB_Info GB_bind2nd_tran__times_int64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
taus88.h
// copyright 2016 john howard (orthopteroid@gmail.com)
// MIT license
//
// Homebrew OpenMP threadsafe Tausme88 PRNG
// http://www.iro.umontreal.ca/~lecuyer/myftp/papers/tausme.ps

#ifndef PSYCHICSNIFFLE_TAUS88_H
#define PSYCHICSNIFFLE_TAUS88_H

#include <cstdint>
#include <cstdlib>

#ifdef _OPENMP
#include <omp.h>
#endif

namespace util {

/*
// instance only one of these in the main application thread:
Taus88State taus88State;        // declare omp state
srand(int(time(NULL)));         // seed the single-thread state
taus88State.seed();             // seed the omp state from the single-thread state
*/

// Shared per-thread generator state. `block` holds one 4-word Taus88
// state per OpenMP thread, stored contiguously: words [4*t, 4*t+3]
// belong to thread t.
struct Taus88State
{
    uint32_t *block = 0;

    // non-copyable / non-assignable: owns the raw allocation
    Taus88State( const Taus88State& other ) = delete;
    Taus88State& operator=( Taus88State& other ) = delete;
    Taus88State& operator=( const Taus88State& other ) = delete;

#ifdef _OPENMP
    int ompMaxThreads() { return omp_get_max_threads(); }
    int ompThreadNum() { return omp_get_thread_num(); }
#else
    int ompMaxThreads() { return 1; }
    // BUGFIX: omp_get_thread_num() is 0-based, so the single-thread stub
    // must return 0.  Returning 1 made copyOut/copyIn read and write
    // block[4], one word past the 4-word single-thread allocation.
    int ompThreadNum() { return 0; }
#endif

    // Copy the calling thread's 4-word state out of the shared block.
    // BUGFIX: the per-thread stride is 4 (words per generator), not the
    // thread count m.  The old indexing block[t*m+k] overlapped thread
    // states when m < 4 and indexed past the end of `block` when m > 4.
    void copyOut( uint32_t* stale )
    {
        int t = ompThreadNum();
        stale[0] = block[t*4+0];
        stale[1] = block[t*4+1];
        stale[2] = block[t*4+2];
        stale[3] = block[t*4+3];
    }

    // Copy the calling thread's 4-word state back into the shared block.
    void copyIn( uint32_t* dirty )
    {
        int t = ompThreadNum();
        block[t*4+0] = dirty[0];
        block[t*4+1] = dirty[1];
        block[t*4+2] = dirty[2];
        block[t*4+3] = dirty[3];
    }

    // Seed every thread's state from the C rand() stream.  Call from the
    // main thread after srand().
    void seed()
    {
        int m = ompMaxThreads();
        for( int i=0; i<m*4; i++ )
            block[i] = ((uint32_t)rand() << 8) + (uint32_t)rand(); // 32 bits please
    }

    Taus88State()
    {
        int m = ompMaxThreads();
        block = new uint32_t[m*4];
    }

    virtual ~Taus88State()
    {
        delete[] block;
    }
};

/*
// instance one of these in each omp thread:
#pragma omp parallel // declare a parallel block
{
    Taus88 taus88(taus88State); // each thread will get it's own taus88 object, independently initialized
#pragma omp parallel for
    ...
} // when this scope closes, each taus88 object state is written back to the global omp state. no reseeding required.
*/

// Thread-local view of one generator: pulls its 4-word state from the
// master block on construction and writes it back on destruction.
struct Taus88
{
    Taus88State& master;
    uint32_t state[4]; // state[0..2] are the three Taus88 components, state[3] is scratch

    // uncopyable and unassignable
    Taus88() = delete;
    Taus88( const Taus88& other ) = delete;
    Taus88& operator=( Taus88& other ) = delete;
    Taus88& operator=( const Taus88& other ) = delete;

    // initialize local state from master block
    Taus88( Taus88State &master_ ) : master(master_)
    {
        master.copyOut(state);
    }

    // restore local state to master block
    virtual ~Taus88()
    {
        master.copyIn(state);
    }

    // permute local state each time () is called
    uint32_t operator()()
    {
        state[3] = (((state[0] << 13) ^ state[0]) >> 19);
        state[0] = (((state[0] & 0xFFFFFFFE) << 12) ^ state[3]);
        state[3] = (((state[1] << 2) ^ state[1]) >> 25);
        state[1] = (((state[1] & 0xFFFFFFF8) << 4) ^ state[3]);
        state[3] = (((state[2] << 3) ^ state[2]) >> 11);
        state[2] = (((state[2] & 0xFFFFFFF0) << 17) ^ state[3]);
        return state[0] ^ state[1] ^ state[2];
    }
};

}

#endif //PSYCHICSNIFFLE_TAUS88_H
GB_unop__identity_uint64_int32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint64_int32) // op(A') function: GB (_unop_tran__identity_uint64_int32) // C type: uint64_t // A type: int32_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = aij #define GB_ATYPE \ int32_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint64_t z = (uint64_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint64_t z = (uint64_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_INT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint64_int32) ( uint64_t *Cx, // Cx and Ax may be aliased const int32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { 
#pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int32_t aij = Ax [p] ; uint64_t z = (uint64_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int32_t aij = Ax [p] ; uint64_t z = (uint64_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint64_int32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
test1.c
/* program main implicit none integer :: a = 10 !$omp target data map(tofrom: a) !$omp target map(tofrom: a) a = a + 1 !$omp end target !$omp target update from(a) !$omp end target data a = -15 !$omp target update from(a) print *, a !<-- expect -15; actual 11 end program main 2) segfault $>cat t.f program main implicit none integer :: a = 10 !$omp target map(tofrom: a) a = a + 1 !$omp end target !$omp target update from(a) a = -15 !$omp target update from(a) print *, a !<-- expect -15; actual segfault end program main */ #include <stdio.h> #pragma omp requires unified_shared_memory #define TEST1 1 #define TEST2 1 int main() { int a; // test 1 #if TEST1 a = 10; #pragma omp target data map(tofrom: a) { #pragma omp target map(tofrom: a) { a = a + 1; } //printf("test 1: a is %d (after target)\n", a); #pragma omp target update from(a) } //printf("test 1: a is %d (after target data)\n", a); a = -15; #pragma omp target update from(a) printf("test 1: a is %d\n", a); #endif #if TEST2 // test 2 a = 10; #pragma omp target map(tofrom: a) { a = a + 1; } #pragma omp target update from(a) a = -15; #pragma omp target update from(a) #endif printf("test 2: a is %d\n", a); return 1; }
DRB067-restrictpointer1-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
restrict pointers: no aliasing
Array initialization using assignments.
C99 is needed to compile this code e.g. gcc -std=c99 -c Stress-1.c
*/
#include <stdlib.h>
typedef double real8;

/* Zero-fills the first `length` elements of both arrays in parallel.
   The `restrict` qualifiers assert that the two arrays do not alias, so
   the parallel loop is race-free -- this file is a DataRaceBench
   "no data race" fixture, and the exact pragma/loop shape is the test
   artifact; do not restructure it. */
void foo(real8 * restrict newSxx, real8 * restrict newSyy, int length)
{
  int i;
#pragma omp parallel for private (i) firstprivate (length) schedule(dynamic)
  for (i = 0; i <= length - 1; i += 1) {
    newSxx[i] = 0.0;
    newSyy[i] = 0.0;
  }
}

/* Driver: allocates two length-1000 arrays, zeroes them via foo(), frees
   them.  NOTE(review): malloc results are unchecked -- acceptable for a
   benchmark fixture, but foo() would dereference NULL on allocation
   failure. */
int main()
{
  int length=1000;
  real8* newSxx = malloc (length* sizeof (real8));
  real8* newSyy = malloc (length* sizeof (real8));
  foo(newSxx, newSyy, length);
  free (newSxx);
  free (newSyy);
  return 0;
}
if-2.c
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* Negative test for OpenMP `if` clause modifiers: each construct below is
   deliberately malformed and must produce exactly the diagnostic named in
   its dg-error directive.  Do not "fix" the pragmas. */
void
foo (int a, int b, int *p, int *q, int task)
{
  int i;
  #pragma omp parallel if (a) if (b) /* { dg-error "too many .if. clauses without modifier" } */
  ;
  #pragma omp parallel if (a) if (parallel: b) /* { dg-error "if any .if. clause has modifier, then all .if. clauses have to use modifier" } */
  ;
  #pragma omp parallel if (parallel: a) if (b) /* { dg-error "if any .if. clause has modifier, then all .if. clauses have to use modifier" } */
  ;
  #pragma omp parallel if (parallel:a) if (parallel:a) /* { dg-error "too many .if. clauses with .parallel. modifier" } */
  ;
  #pragma omp parallel if (task:a) /* { dg-error "expected .parallel. .if. clause modifier rather than .task." } */ \
    if (taskloop: b) /* { dg-error "expected .parallel. .if. clause modifier rather than .taskloop." } */
  ;
  #pragma omp parallel if (target update:a) /* { dg-error "expected .parallel. .if. clause modifier rather than .target update." } */
  ;
  #pragma omp parallel if (cancel:a) /* { dg-error "expected .parallel. .if. clause modifier rather than .cancel." } */
  ;
  #pragma omp parallel for simd if (target update: a) /* { dg-error "expected .parallel. .if. clause modifier rather than .target update." } */
  for (i = 0; i < 16; i++)
    ;
  #pragma omp task if (task)
  ;
  #pragma omp task if (task: task)
  ;
  #pragma omp task if (parallel: a) /* { dg-error "expected .task. .if. clause modifier rather than .parallel." } */
  ;
  #pragma omp simd if (cancel: a) /* { dg-error "expected .simd. .if. clause modifier rather than .cancel." } */
  for (i = 0; i < 16; i++)
    ;
  #pragma omp taskloop if (task : a) /* { dg-error "expected .taskloop. .if. clause modifier rather than .task." } */
  for (i = 0; i < 16; i++)
    ;
  #pragma omp target if (taskloop: a) /* { dg-error "expected .target. .if. clause modifier rather than .taskloop." } */
  ;
  #pragma omp target teams distribute parallel for simd if (target exit data : a) /* { dg-error "expected .target. .if. clause modifier" } */
  for (i = 0; i < 16; i++)
    ;
  #pragma omp target data if (target: a) map (p[0:2]) /* { dg-error "expected .target data. .if. clause modifier rather than .target." } */
  ;
  #pragma omp target enter data if (target data: a) map (to: p[0:2]) /* { dg-error "expected .target enter data. .if. clause modifier rather than .target data." } */
  #pragma omp target exit data if (target enter data: a) map (from: p[0:2]) /* { dg-error "expected .target exit data. .if. clause modifier rather than .target enter data." } */
  #pragma omp target update if (target exit data:a) to (q[0:3]) /* { dg-error "expected .target update. .if. clause modifier rather than .target exit data." } */
  #pragma omp for
  for (i = 0; i < 16; i++)
    {
      #pragma omp cancel for if (target exit data:a) /* { dg-error "expected .cancel. .if. clause modifier" } */
    }
}
saturnin-linear-trail-weight-three-rounds.c
#include "saturnin-common.h"
#include "../global-common.h"
#include "../convolution.h"
#include <math.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>   /* sprintf (used in main) */
#include <stdlib.h>  /* abs */
#include <omp.h>

#define NUMBER_OF_CANDIDATES 349
#define NUMBER_OF_ELEMENTS (1 << BOX_WIDTH)
#define DT_SIZE (NUMBER_OF_ELEMENTS*NUMBER_OF_ELEMENTS)
#define NUM_THREADS 36
#define BOUND 36

/* Linear correlation tables for the two alternating Saturnin S-boxes,
   filled by fill_correlation_table() in main(). Entry [a][b] (flattened
   as NUMBER_OF_ELEMENTS*a + b) is the correlation between input mask a
   and output mask b. */
static int correlation_table_even[DT_SIZE];
static int correlation_table_odd[DT_SIZE];

/*
 * Count the number of input masks whose correlation magnitude equals v in
 * the column of the correlation table corresponding to output mask b.
 *
 * c      out: the count, as a GMP integer (set to 0 first)
 * b      output mask selecting the table column
 * v      correlation magnitude to match (e.g. 16, 8, 4)
 * index  nibble position; even positions use the even S-box table,
 *        odd positions the odd one
 */
void count_input(mpz_t c, uint16_t b, int v, int index) {
    /* Select the table once instead of re-testing index % 2 every iteration. */
    const int *table = (index % 2 == 0) ? correlation_table_even
                                        : correlation_table_odd;
    mpz_set_ui(c, 0);
    for (int i = 0; i < NUMBER_OF_ELEMENTS; i++) {
        if (abs(table[NUMBER_OF_ELEMENTS*i + b]) == v) {
            mpz_add_ui(c, c, 1);
        }
    }
}

/*
 * Count the number of output masks whose correlation magnitude equals v in
 * the row of the correlation table corresponding to input mask a.
 * Parameters mirror count_input(), with a selecting the table row.
 */
void count_output(mpz_t c, uint16_t a, int v, int index) {
    const int *table = (index % 2 == 0) ? correlation_table_even
                                        : correlation_table_odd;
    mpz_set_ui(c, 0);
    for (int i = 0; i < NUMBER_OF_ELEMENTS; i++) {
        if (abs(table[NUMBER_OF_ELEMENTS*a + i]) == v) {
            mpz_add_ui(c, c, 1);
        }
    }
}

/*
 * b is an encoding of a 4x4 rectangle of bits as a flat 16-bit array where
 * (i, j) is mapped to (4*i+j). This function swaps bit positions i and j.
 */
uint16_t swap(const uint16_t b, const unsigned long i, const unsigned long j) {
    uint16_t x = ((b >> i) ^ (b >> j)) & 1U; // 1 iff bits i and j differ
    return b ^ ((x << i) | (x << j));
}

/*
 * An activity pattern is represented by a 4x4 rectangle where the position of an activity bit corresponding to a nibble with a given index
 * is as follows:
 * 3 2 1 0
 * 6 5 4 7
 * 9 8 11 10
 * 12 15 14 13
 * For example, the activity bit of a nibble with index 4 is in position (1,1) in the rectangle.
 * The actual entries consist of 0's and 1's, indicating whether a nibble is active or not.
 * In this representation a row in the rectangle corresponds to the input of mix columns and a column to
 * the input of the inverse of mix columns.
 *
 * The rectangle is stored as 16 bits, where (i, j) is mapped to (4*i+j)
 *
 * This function converts this representation to the sequential one:
 * 0 1 2 3
 * 4 5 6 7
 * 8 9 10 11
 * 12 13 14 15
 */
uint16_t convert(const uint16_t b) {
    /* Each swap() exchanges one pair of bit positions that differ between
       the two layouts; positions not listed already coincide. */
    uint16_t r = swap(b, 0, 3);
    r = swap(r, 1, 2);
    r = swap(r, 4, 6);
    r = swap(r, 8, 9);
    r = swap(r, 10, 11);
    return swap(r, 13, 15);
}

/*
 * Enumerate all three-round linear trails of Saturnin (per slice) with
 * weight at most BOUND, and write per-candidate and total weight
 * histograms to data/ as text files.
 */
int main(void) {
    // We know that the number of candidate masks is NUMBER_OF_CANDIDATES, because we used the code below
    // to compute and print them.
    uint16_t candidate_masks[NUMBER_OF_CANDIDATES];
    // Consider a single slice, consisting of 16 nibbles.
    // Associated with this are 2^16 possible activity patterns. Each activity pattern is represented by a rectangle as
    // described above where rows correspond to mix columns input and columns to inverse mix columns input.
    // We step through each activity pattern.
    for (long x = 0, k = 0; x <= UINT16_MAX; x++) {
        /* Lower bound on the trail's box weight for this activity pattern. */
        long min_box_weight = hamming_weight16(x);
        // Consider each column
        for (long j = 0; j < 4; j++) {
            long active = 0;
            for (long i = 0; i < 4; i++) {
                if ((x & (1U << (4*i+j))) != 0) {
                    ++active;
                }
            }
            if (active != 0) {
                // Mix columns ensures at least 5 active nibbles
                min_box_weight += 5-active;
            }
        }
        // Consider each row
        for (long i = 0; i < 4; i++) {
            long active = 0;
            for (long j = 0; j < 4; j++) {
                if ((x & (1U << (4*i+j))) != 0) {
                    ++active;
                }
            }
            if (active != 0) {
                // Mix columns inverse ensures at least 5 active nibbles
                min_box_weight += 5-active;
            }
        }
        // Each active nibble contributes at least a weight of 2, so we check whether this lower bound is below
        // the upper bound that we set.
        if (min_box_weight <= BOUND/2) {
            // convert to sequential representation.
            candidate_masks[k] = convert(x);
            k++;
        }
    }
    fill_correlation_table(correlation_table_even, sbox_even, BOX_WIDTH);
    fill_correlation_table(correlation_table_odd, sbox_odd, BOX_WIDTH);
    /* total_weight aggregates across all candidates; weights[i] is the
       per-candidate weight histogram (index = trail weight, 0..BOUND). */
    Table_fixed *total_weight = table_fixed_create(1+BOUND);
    Table_fixed *weights[NUMBER_OF_CANDIDATES];
    for (long i = 0; i < NUMBER_OF_CANDIDATES; i++) {
        weights[i] = table_fixed_create(1+BOUND);
    }
    // The number of iterations of the main loop is bounded from above by 2^9
    omp_set_num_threads(NUM_THREADS);
    #pragma omp parallel for schedule(dynamic)
    // Consider each candidate mask
    for (long i = 0; i < NUMBER_OF_CANDIDATES; i++) {
        mpz_t val;          /* thread-local GMP scratch value */
        mpz_init(val);
        uint16_t x = candidate_masks[i];
        int box_weight_a = hamming_weight16(x);
        // The number of iterations of loop is bounded from above by 2^16, since w <= 4
        // Step over all possible masks after the second Sbox-layer,
        // having box-activity pattern equal to x
        for (uint64_t a = 0; a < (uint64_t) pow(2, 4*box_weight_a); a++) {
            if (box_weight(a, box_weight_a, 4) != box_weight_a) {
                continue;
            }
            // Build the mask before the third Sbox-layer
            /* Scatter the packed nibbles of a into the active positions of x. */
            uint64_t state1 = 0;
            for (long j = 0, m = 0; j < 16 && m < box_weight_a; j++) {
                if ((x & (1U << j)) != 0) {
                    state1 |= ((a >> 4*m) & 0xf) << 4*j;
                    m++;
                }
            }
            // Build the mask after the second Sbox-layer
            state1 = permute_slice(state1);
            state1 = mix_columns_slice_transposed_inverse(state1);
            state1 = permute_slice_inverse(state1);
            // Compute the convolution of the histograms of the third Sbox-layer
            /* bases[16..31] hold per-nibble weight histograms for state1;
               bases[0..15] are created later, per inner mask b. */
            Table_fixed *bases[32];
            for (long j = 16; j < 32; j++) {
                bases[j] = table_fixed_create(5);
            }
            for (long j = 0; j < 16; j++) {
                /* Correlation magnitudes 16/8/4 map to linear weights 0/2/4. */
                count_output(val, (state1 >> 4*j) & 0xf, 16, j);
                table_fixed_insert_and_merge(bases[16+j], 0, val, &mpz_add);
                count_output(val, (state1 >> 4*j) & 0xf, 8, j);
                table_fixed_insert_and_merge(bases[16+j], 2, val, &mpz_add);
                count_output(val, (state1 >> 4*j) & 0xf, 4, j);
                table_fixed_insert_and_merge(bases[16+j], 4, val, &mpz_add);
            }
            Table_fixed *convolved_state1 = table_fixed_copy(bases[16]);
            for (long j = 17; j < 32; j++) {
                Table_fixed *tmp = convolve_fixed(convolved_state1, bases[j], 0, BOUND);
                table_fixed_destroy(convolved_state1);
                convolved_state1 = tmp;
            }
            // For a fixed mask AFTER the second Sbox-layer, consider all the masks BEFORE the second Sbox-layer,
            // the number of which is bounded from above by 2^12 (empirically verified)
            for (uint64_t b = 0; b < (uint64_t) pow(2, 4*box_weight_a); b++) {
                bool valid = true;
                long linear_weight_middle = 0;
                for (long j = 0, m = 0; j < 16 && m < box_weight_a; j++) {
                    if ((x & (1U << j)) != 0) {
                        long row_index = (b >> 4*m) & 0xf;
                        long column_index = (a >> 4*m) & 0xf;
                        long count;
                        if (j % 2 == 0) {
                            count = correlation_table_even[16*row_index+column_index];
                        } else {
                            count = correlation_table_odd[16*row_index+column_index];
                        }
                        if (count == 0) {
                            /* Zero correlation: this (b, a) transition is impossible. */
                            valid = false;
                            break;
                        }
                        linear_weight_middle += 2*(4 - (long) log2((double) abs(count)));
                        m++;
                    }
                }
                if (valid) {
                    uint64_t state0 = 0;
                    for (long j = 0, m = 0; j < 16 && m < box_weight_a; j++) {
                        if ((x & (1U << j)) != 0) {
                            state0 |= ((b >> 4*m) & 0xf) << 4*j;
                            m++;
                        }
                    }
                    state0 = mix_columns_slice_transposed(state0);
                    // Filter anything that will exceed the bound to save us the work of doing the convolution
                    if (box_weight_a + box_weight(state0, 16, BOX_WIDTH) + box_weight(state1, 16, BOX_WIDTH) > BOUND/2) {
                        continue;
                    }
                    // at this point, we can do the convolution
                    // use b1 and b2 to determine active sboxes and
                    // convolve
                    for (long j = 0; j < 16; j++) {
                        /* linear weight 0, 2, and 4 */
                        bases[j] = table_fixed_create(5);
                    }
                    for (long j = 0; j < 16; j++) {
                        count_input(val, (state0 >> 4*j) & 0xf, 16, j);
                        table_fixed_insert_and_merge(bases[j], 0, val, &mpz_add);
                        count_input(val, (state0 >> 4*j) & 0xf, 8, j);
                        table_fixed_insert_and_merge(bases[j], 2, val, &mpz_add);
                        count_input(val, (state0 >> 4*j) & 0xf, 4, j);
                        table_fixed_insert_and_merge(bases[j], 4, val, &mpz_add);
                    }
                    Table_fixed *convolved_state_total = table_fixed_copy(convolved_state1);
                    for (long j = 0; j < 16; j++) {
                        Table_fixed *tmp = convolve_fixed(convolved_state_total, bases[j], 0, BOUND-linear_weight_middle);
                        table_fixed_destroy(convolved_state_total);
                        convolved_state_total = tmp;
                    }
                    /* NOTE(review): j is size_t but BOUND-linear_weight_middle is a
                       signed long; if linear_weight_middle could exceed BOUND the
                       comparison would wrap. Presumably the bound filter above
                       prevents that -- TODO confirm against convolve_fixed. */
                    for (size_t j = 0; j <= BOUND-linear_weight_middle; j++) {
                        table_fixed_insert_and_merge(weights[i], j+linear_weight_middle, convolved_state_total->head[j], &mpz_add);
                    }
                    for (long j = 0; j < 16; j++) {
                        table_fixed_destroy(bases[j]);
                    }
                    table_fixed_destroy(convolved_state_total);
                }
            }
            for (long j = 16; j < 32; j++) {
                table_fixed_destroy(bases[j]);
            }
            table_fixed_destroy(convolved_state1);
        }
        mpz_clear(val);
        /* One output file per candidate mask (hex-named). */
        char filename[60];
        sprintf(filename, "data/saturnin-linear-trail-three-rounds_%04x.txt", x);
        table_fixed_print(filename, weights[i]);
    }
    /* Aggregate all per-candidate histograms into the total. */
    for (long i = 0; i <= BOUND; i++) {
        for (long j = 0; j < NUMBER_OF_CANDIDATES; j++) {
            table_fixed_insert_and_merge(total_weight, i, weights[j]->head[i], &mpz_add);
        }
    }
    table_fixed_print("data/saturnin-linear-trail-three-rounds_total.txt", total_weight);
    for (long i = 0; i < NUMBER_OF_CANDIDATES; i++) {
        table_fixed_destroy(weights[i]);
    }
    table_fixed_destroy(total_weight);
    return 0;
}
For_Paralelo.c
#include <stdio.h>
#include <stdlib.h>

#ifdef _OPENMP
#include <omp.h>
#define TRUE 1
#define FALSE 0
#else
/* Serial fallback: without OpenMP every "thread" reports id 0. */
#define omp_get_thread_num() 0
#endif

/*
 * OpenMP demo: a parallel region with a work-shared loop where each
 * thread prints which iteration it executes.
 */
int main() {
#ifdef _OPENMP
    /* Pin the team size: disable dynamic thread adjustment, then ask for 4. */
    (void) omp_set_dynamic(FALSE);
    if (omp_get_dynamic()) {
        printf("Warning: dynamic adjustment of threads has been set\n");
    }
    (void) omp_set_num_threads(4);
#endif

    int i, n = 9;

#pragma omp parallel default(none) shared(n) private(i)
    {
#pragma omp for
        for (i = 0; i < n; i++)
            printf("El hilo %d ejecuta la iteracion %d\n", omp_get_thread_num(), i);
    } /* end of the parallel region */

    return 0;
}
omp.c
/*
 *  Copyright 2019-2020 SALMON developers
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */
/* Hand-Code Vector processing for Knights Corner */

#include <complex.h>
#include <assert.h>
#include "./glue.h"

/*
 * Apply a 4th-order finite-difference stencil to the complex field E,
 * writing F = A*E + B.*E + sum over x/y/z of (G-weighted symmetric sums
 * and D-weighted antisymmetric differences of 4 neighbours each side),
 * using 512-bit Knights Corner intrinsics (2 complex doubles per vector,
 * 4 per iteration).
 *
 * PNL* are padded (allocated) extents, NL* the computed lattice extents,
 * NH* the halo widths; mod* are index tables implementing periodic
 * wrap-around for neighbour offsets.  C is folded into G = -C/2 up front.
 */
void zstencil_tuned_omp_imp( const int PNLx
                           , const int PNLy
                           , const int PNLz
                           , const int NLx
                           , const int NLy
                           , const int NLz
                           , const int NHx
                           , const int NHy
                           , const int NHz
                           , int const * restrict modx
                           , int const * restrict mody
                           , int const * restrict modz
                           , double const * restrict A
                           , double const B[restrict NLz][NLy][NLx]
                           , double const C[restrict 12]
                           , double const D[restrict 12]
                           , double complex const E[restrict PNLz][PNLy][PNLx]
                           , double complex F[restrict PNLz][PNLy][PNLx]
)
{
  /* Shadow (halo) totals and the [is, ie) iteration window inside padding. */
  const int NSx = NHx * 2, NSy = NHy * 2, NSz = NHz * 2;
  const int isx = NHx, isy = NHy, isz = NHz;
  const int iex = isx + NLx, iey = isy + NLy, iez = isz + NLz;

  __m512d at   = _mm512_set1_pd(*A);
  /* NOTE(review): HALF appears unused in the loop body (G already folds the
     -0.5 factor below) -- confirm before removing it or its firstprivate. */
  __m512d HALF = _mm512_set1_pd(-0.5);
  /* Sign mask flipping every second double: used to realize i*(ut) on the
     interleaved re/im lanes via shuffle + xor near the bottom of the loop. */
  __m512i INV  = _mm512_set4_epi64(1LL << 63, 0, 1LL << 63, 0);

  /* Pre-fold the -1/2 factor into the Laplacian coefficients. */
  __declspec(align(64)) double G[12];
  for(int n = 0 ; n < 12 ; ++n) G[n] = C[n] * -0.5;

  __m512i nly  = _mm512_set1_epi32(PNLy);
  __m512i nlx  = _mm512_set1_epi32(PNLx);
  /* NOTE(review): nyz appears unused in the loop body -- confirm. */
  __m512i nyz  = _mm512_mask_blend_epi32(0xFF00, _mm512_set1_epi32(PNLy), _mm512_set1_epi32(PNLz));
  /* Lanes 0-7: PNLx (y-stride); lanes 8-15: PNLx*PNLy (z-stride). */
  __m512i nlyx = _mm512_mask_mullo_epi32(nlx, 0xFF00, nlx, nly);

  /* Per-(iy,iz) table of 16 linear offsets of the 8 y- and 8 z-neighbours. */
  __declspec(align(64)) int yz_table[16];

#pragma omp parallel for collapse(2) default(none) \
  private(yz_table) \
  firstprivate(NLx,NLy,NLz,NSx,NSy,NSz) \
  firstprivate(G,D,isx,iex,isy,iey,isz,iez) \
  firstprivate(at,HALF,INV,nly,nlx,nyz,nlyx) \
  shared(B,E,F,modx,mody,modz)
#pragma noprefetch
#pragma novector
  for(int iz = isz ; iz < iez ; ++iz) {
  for(int iy = isy ; iy < iey ; ++iy) {
    __m512i tiz = _mm512_set1_epi32(iz);
    __m512i tiy = _mm512_set1_epi32(iy);
    /* Gather the wrapped z-neighbour indices iz-4..iz+4 and arrange them as
       {z-4..z-1 | z+1..z+4} in the upper half via blend + 128-bit permute. */
    __m512i mzm = _mm512_loadu_prefetch_epi32(modz + (iz - 4 + NLz + NSz));
    __m512i mzp = _mm512_alignr_epi32(mzm, mzm, 1);
    __m512i zmp = _mm512_mask_blend_epi32(0xF0F0, mzm, mzp);
    zmp = _mm512_permute4f128_epi32(zmp, _MM_PERM_BADC);
    /* Same for the y-neighbours, kept in the lower half. */
    __m512i mym = _mm512_loadu_prefetch_epi32(mody + (iy - 4 + NLy + NSy));
    __m512i myp = _mm512_alignr_epi32(mym, mym, 1);
    __m512i ymp = _mm512_mask_blend_epi32(0xF0F0, mym, myp);
    __m512i uyz = _mm512_mask_blend_epi32(0xFF00, ymp, zmp);
    __m512i tyz = _mm512_mask_blend_epi32(0xFF00, tiy, tiz);
    __m512i *yz = (__m512i*) yz_table;
    double const*         b = &B[iz-isz][iy-isy][0];
    double complex const* e = &E[iz    ][iy    ][0];
    double complex *      f = &F[iz    ][iy    ][0];
    /* 4 complex points per iteration (one 512-bit vector = 4 doubles re/im pairs... 2 pairs; two vectors' worth handled by the epi32 offset math). */
    for(int ix = isx ; ix < iex ; ix += 4) {
      __m512i tix = _mm512_set1_epi32(ix);
      /* yz_table[n] = ix - (center - neighbour)*stride: linear offsets of the
         16 y/z neighbours of the current 4-point group. */
      __m512i mm = _mm512_sub_epi32(tyz, uyz);
      *yz = _mm512_sub_epi32(tix, _mm512_mullo_epi32(mm, nlyx));
      __m512d ex = _mm512_load_prefetch_pd(e + ix);
      __m512d tt = _mm512_setzero_pd();  /* G-weighted symmetric accumulator */
      __m512d ut = _mm512_setzero_pd();  /* D-weighted antisymmetric accumulator */
      __m512d m, p, bt, v0, v1, v2, v3, v4;
      /* x-dimension (unit stride): neighbours come from the two adjacent
         vectors, combined with ex by lane-rotations (alignr). */
      {
        __m512i x0, x2;
        x0 = _mm512_load_prefetch_epi64(e + modx[ix - 4 + NLx + NSx]);
        x2 = _mm512_load_prefetch_epi64(e + modx[ix + 4 + NLx + NSx]);
        { /* distance-1 neighbours */
          m = (__m512d) _mm512_alignr_epi32((__m512i) ex, x0, 12);
          p = (__m512d) _mm512_alignr_epi32(x2, (__m512i) ex, 4);
          v4 = _mm512_sub_pd(p, m);
          v3 = _mm512_add_pd(p, m);
          ut = _mm512_fmadd_pd(_mm512_set1_pd(D[0]), v4, ut);
          tt = _mm512_fmadd_pd(_mm512_set1_pd(G[0]), v3, tt);
        }
        { /* distance-2 neighbours */
          m = (__m512d) _mm512_alignr_epi32((__m512i) ex, x0, 8);
          p = (__m512d) _mm512_alignr_epi32(x2, (__m512i) ex, 8);
          v4 = _mm512_sub_pd(p, m);
          v3 = _mm512_add_pd(p, m);
          ut = _mm512_fmadd_pd(_mm512_set1_pd(D[1]), v4, ut);
          tt = _mm512_fmadd_pd(_mm512_set1_pd(G[1]), v3, tt);
        }
        { /* distance-3 neighbours */
          m = (__m512d) _mm512_alignr_epi32((__m512i) ex, x0, 4);
          p = (__m512d) _mm512_alignr_epi32(x2, (__m512i) ex, 12);
          v4 = _mm512_sub_pd(p, m);
          v3 = _mm512_add_pd(p, m);
          ut = _mm512_fmadd_pd(_mm512_set1_pd(D[2]), v4, ut);
          tt = _mm512_fmadd_pd(_mm512_set1_pd(G[2]), v3, tt);
        }
        { /* distance-4 neighbours: whole adjacent vectors */
          m = (__m512d) x0;
          p = (__m512d) x2;
          v4 = _mm512_sub_pd(p, m);
          v3 = _mm512_add_pd(p, m);
          ut = _mm512_fmadd_pd(_mm512_set1_pd(D[3]), v4, ut);
          tt = _mm512_fmadd_pd(_mm512_set1_pd(G[3]), v3, tt);
        }
      }
      /* y-dimension (NLx stride): offsets precomputed in yz_table[0..7]. */
      {
#pragma unroll(4)
        for(int n = 0 ; n < 4 ; ++n) {
          m = _mm512_load_prefetch_pd(e + yz_table[3-n]);
          p = _mm512_load_prefetch_pd(e + yz_table[n+4]);
          v4 = _mm512_sub_pd(p, m);
          v3 = _mm512_add_pd(p, m);
          ut = _mm512_fmadd_pd(_mm512_set1_pd(D[n+4]), v4, ut);
          tt = _mm512_fmadd_pd(_mm512_set1_pd(G[n+4]), v3, tt);
        }
      }
      /* z-dimension (NLy*NLx stride): offsets in yz_table[8..15]. */
      {
#pragma unroll(4)
        for(int n = 0 ; n < 4 ; ++n) {
          m = _mm512_load_prefetch_pd(e + yz_table[11-n]);
          p = _mm512_load_prefetch_pd(e + yz_table[n+12]);
          v4 = _mm512_sub_pd(p, m);
          v3 = _mm512_add_pd(p, m);
          ut = _mm512_fmadd_pd(_mm512_set1_pd(D[n+8]), v4, ut);
          tt = _mm512_fmadd_pd(_mm512_set1_pd(G[n+8]), v3, tt);
        }
      }
      /* Combine: v0 = B*E + (A*E + tt) + i*ut, where i*ut is realized by
         swapping re/im lanes (shuffle BADC) and flipping alternate signs
         (xor with INV); store with no-read / non-temporal semantics. */
      bt = dcast_to_dcmplx(b + ix - isx);
      v2 = _mm512_fmadd_pd(at, ex, tt);
      v4 = (__m512d) _mm512_shuffle_epi32((__m512i) ut, _MM_PERM_BADC);
      v3 = (__m512d) _mm512_xor_si512((__m512i) v4, INV);
      v1 = _mm512_add_pd(v2, v3);
      v0 = _mm512_fmadd_pd(bt, ex, v1);
      _mm512_storenrngo_pd(&f[ix], v0);
    } /* NLx */
  } /* NLy */
  } /* NLz */
}

/*
 * Fortran-callable wrapper: derive padded extents, lattice extents and halo
 * widths from one-origin index ranges, validate the layout assumptions of
 * the hand-vectorized kernel, and dispatch to zstencil_tuned_omp_imp.
 *
 * is_array: one origin
 * ie_array: one origin
 * is      : one origin
 * ie      : one origin
 */
void zstencil_tuned_omp_( int const is_array[restrict 3]
                        , int const ie_array[restrict 3]
                        , int const is[restrict 3]
                        , int const ie[restrict 3]
                        , int const * restrict modx
                        , int const * restrict mody
                        , int const * restrict modz
                        , double complex const * restrict E
                        , double complex       * restrict F
                        , double const * restrict B
                        , double const * restrict A_
                        , double const C[restrict 12]
                        , double const D[restrict 12]
)
{
#define INT_ABS(X) (X) < 0 ? -(X) : (X)
  const int PNLx = ie_array[0] - is_array[0] + 1;
  const int PNLy = ie_array[1] - is_array[1] + 1;
  const int PNLz = ie_array[2] - is_array[2] + 1;
  const int NLx  = ie[0] - is[0] + 1;  // lattice
  const int NLy  = ie[1] - is[1] + 1;
  const int NLz  = ie[2] - is[2] + 1;
  const int NHx  = INT_ABS(is_array[0] - is[0]);  // shadow
  const int NHy  = INT_ABS(is_array[1] - is[1]);
  const int NHz  = INT_ABS(is_array[2] - is[2]);
#undef INT_ABS
  /* Kernel requirements: x extent divisible by the vector group of 4, and
     halos either absent or exactly the 4-point stencil half-width. */
  assert(NLx % 4 == 0);
  assert(NHx == 4 || NHx == 0);
  assert(NHy == 4 || NHy == 0);
  assert(NHz == 4 || NHz == 0);
  zstencil_tuned_omp_imp(PNLx, PNLy, PNLz, NLx, NLy, NLz, NHx, NHy, NHz, modx, mody, modz
      , A_
      , (double const (* restrict)[NLy][NLx])(B)
      , C
      , D
      , (double complex const (* restrict)[PNLy][PNLx])(E)
      , (double complex       (* restrict)[PNLy][PNLx])(F)
  );
}
GB_unop__atan_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__atan_fp64_fp64
// op(A') function:  GB_unop_tran__atan_fp64_fp64

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = atan (aij)

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = atan (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ; \
    Cx [pC] = atan (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ATAN || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise Cx [p] = atan (Ax [p]) for p = 0..anz-1, parallelized with a
// static OpenMP schedule over nthreads threads.  Returns GrB_NO_VALUE when
// this operator/type pair is compiled out (GB_DISABLE).
GrB_Info GB_unop_apply__atan_fp64_fp64
(
    double *Cx,         // Cx and Ax may be aliased
    const double *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // OpenMP thread count
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;
        double z = aij ;
        Cx [p] = atan (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose loop body lives in GB_unop_transpose.c and is specialized
// here via the GB_* macros defined above (phase 2 of the two-phase transpose).
GrB_Info GB_unop_tran__atan_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ROF_TV_core.c
/*
 * This work is part of the Core Imaging Library developed by
 * Visual Analytics and Imaging System Group of the Science Technology
 * Facilities Council, STFC
 *
 * Copyright 2017 Daniil Kazantsev
 * Copyright 2017 Srikanth Nagella, Edoardo Pasca
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ROF_TV_core.h"

#define EPS 1.0e-8
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))

/* sign function: returns +1, 0, or -1 according to the sign of x */
int sign(float x) {
    return (x > 0) - (x < 0);
}

/* C-OMP implementation of ROF-TV denoising/regularization model [1] (2D/3D case)
 *
 *
 * Input Parameters:
 * 1. Noisy image/volume [REQUIRED]
 * 2. lambda - regularization parameter [REQUIRED]
 * 3. tau - marching step for explicit scheme, ~1 is recommended [REQUIRED]
 * 4. Number of iterations, for explicit scheme >= 150 is recommended  [REQUIRED]
 * 5. epsilon: tolerance constant
 *
 * Output:
 * [1] Regularized image/volume
 * [2] Information vector which contains [iteration no., reached tolerance]
 *
 * This function is based on the paper by
 * [1] Rudin, Osher, Fatemi, "Nonlinear Total Variation based noise removal algorithms"
 */

/* Running iterations of TV-ROF function.
 * Explicit gradient-descent scheme: each iteration computes the normalized
 * forward differences D1/D2(/D3) of the current estimate and applies the
 * divergence update in TV_kernel.  When epsil != 0, convergence is checked
 * every 5th iteration via the relative change ||u_k - u_{k-5}|| / ||u_k||,
 * and the loop stops after the tolerance has been met more than 3 times. */
float TV_ROF_CPU_main(float *Input, float *Output, float *infovector, float lambdaPar, int iterationsNumb, float tau, float epsil, int dimX, int dimY, int dimZ)
{
    float *D1=NULL, *D2=NULL, *D3=NULL, *Output_prev=NULL;
    float re, re1;
    re = 0.0f; re1 = 0.0f;
    int count = 0;
    int i;
    long DimTotal,j;
    DimTotal = (long)(dimX*dimY*dimZ);

    /* NOTE(review): calloc results are not checked; a failed allocation
       would crash inside the difference kernels. */
    D1 = calloc(DimTotal, sizeof(float));
    D2 = calloc(DimTotal, sizeof(float));
    D3 = calloc(DimTotal, sizeof(float));

    /* copy into output */
    copyIm(Input, Output, (long)(dimX), (long)(dimY), (long)(dimZ));
    if (epsil != 0.0f) Output_prev = calloc(DimTotal, sizeof(float));

    /* start TV iterations */
    for(i=0; i < iterationsNumb; i++) {
        if ((epsil != 0.0f)  && (i % 5 == 0)) copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), (long)(dimZ));

        /* calculate differences */
        D1_func(Output, D1, (long)(dimX), (long)(dimY), (long)(dimZ));
        D2_func(Output, D2, (long)(dimX), (long)(dimY), (long)(dimZ));
        if (dimZ > 1) D3_func(Output, D3, (long)(dimX), (long)(dimY), (long)(dimZ));
        TV_kernel(D1, D2, D3, Output, Input, lambdaPar, tau, (long)(dimX), (long)(dimY), (long)(dimZ));

        /* check early stopping criteria */
        if ((epsil != 0.0f)  && (i % 5 == 0)) {
            re = 0.0f; re1 = 0.0f;
            for(j=0; j<DimTotal; j++)
            {
                re += powf(Output[j] - Output_prev[j],2);
                re1 += powf(Output[j],2);
            }
            re = sqrtf(re)/sqrtf(re1);
            if (re < epsil)  count++;
            if (count > 3) break;
        }
    }
    free(D1);free(D2); free(D3);
    if (epsil != 0.0f) free(Output_prev);

    /*adding info into info_vector */
    infovector[0] = (float)(i);  /*iterations number (if stopped earlier based on tolerance)*/
    infovector[1] = re;  /* reached tolerance */
    return 0;
}

/* calculate differences 1 (x-direction derivative, normalized by the local
 * TV magnitude; minmod of opposing one-sided differences regularizes the
 * transverse components).  Neumann (symmetric) boundaries throughout. */
float D1_func(float *A, float *D1, long dimX, long dimY, long dimZ)
{
    float NOMx_1, NOMy_1, NOMy_0, NOMz_1, NOMz_0, denom1, denom2,denom3, T1;
    long i,j,k,i1,i2,k1,j1,j2,k2,index;

    if (dimZ > 1) {
#pragma omp parallel for shared (A, D1, dimX, dimY, dimZ) private(index, i, j, k, i1, j1, k1, i2, j2, k2, NOMx_1,NOMy_1,NOMy_0,NOMz_1,NOMz_0,denom1,denom2,denom3,T1)
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                for(k=0; k<dimZ; k++) {
                    index = (dimX*dimY)*k + j*dimX+i;
                    /* symmetric boundary conditions (Neuman) */
                    i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                    i2 = i - 1; if (i2 < 0) i2 = i+1;
                    j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                    j2 = j - 1; if (j2 < 0) j2 = j+1;
                    k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
                    k2 = k - 1; if (k2 < 0) k2 = k+1;

                    /* Forward-backward differences */
                    NOMx_1 = A[(dimX*dimY)*k + j1*dimX + i] - A[index]; /* x+ */
                    NOMy_1 = A[(dimX*dimY)*k + j*dimX + i1] - A[index]; /* y+ */
                    /*NOMx_0 = (A[(i)*dimY + j] - A[(i2)*dimY + j]); */ /* x- */
                    NOMy_0 = A[index] - A[(dimX*dimY)*k + j*dimX + i2]; /* y- */
                    NOMz_1 = A[(dimX*dimY)*k1 + j*dimX + i] - A[index]; /* z+ */
                    NOMz_0 = A[index] - A[(dimX*dimY)*k2 + j*dimX + i]; /* z- */

                    denom1 = NOMx_1*NOMx_1;
                    /* minmod-style combination of forward/backward differences */
                    denom2 = 0.5f*(sign(NOMy_1) + sign(NOMy_0))*(MIN(fabs(NOMy_1),fabs(NOMy_0)));
                    denom2 = denom2*denom2;
                    denom3 = 0.5f*(sign(NOMz_1) + sign(NOMz_0))*(MIN(fabs(NOMz_1),fabs(NOMz_0)));
                    denom3 = denom3*denom3;
                    /* NOTE(review): double-precision sqrt here vs sqrtf in the
                       2D branch and in D2/D3 -- likely an oversight; harmless
                       but inconsistent. */
                    T1 = sqrt(denom1 + denom2 + denom3 + EPS);
                    D1[index] = NOMx_1/T1;
                }}}
    }
    else {
#pragma omp parallel for shared (A, D1, dimX, dimY) private(i, j, i1, j1, i2, j2,NOMx_1,NOMy_1,NOMy_0,denom1,denom2,T1,index)
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                index = j*dimX+i;
                /* symmetric boundary conditions (Neuman) */
                i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                i2 = i - 1; if (i2 < 0) i2 = i+1;
                j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                j2 = j - 1; if (j2 < 0) j2 = j+1;

                /* Forward-backward differences */
                NOMx_1 = A[j1*dimX + i] - A[index]; /* x+ */
                NOMy_1 = A[j*dimX + i1] - A[index]; /* y+ */
                /*NOMx_0 = (A[(i)*dimY + j] - A[(i2)*dimY + j]); */ /* x- */
                NOMy_0 = A[index] - A[(j)*dimX + i2]; /* y- */

                denom1 = NOMx_1*NOMx_1;
                denom2 = 0.5f*(sign(NOMy_1) + sign(NOMy_0))*(MIN(fabs(NOMy_1),fabs(NOMy_0)));
                denom2 = denom2*denom2;
                T1 = sqrtf(denom1 + denom2 + EPS);
                D1[index] = NOMx_1/T1;
            }}
    }
    return *D1;
}

/* calculate differences 2 (y-direction derivative, same normalization
 * scheme as D1_func with the roles of x and y swapped) */
float D2_func(float *A, float *D2, long dimX, long dimY, long dimZ)
{
    float NOMx_1, NOMy_1, NOMx_0, NOMz_1, NOMz_0, denom1, denom2, denom3, T2;
    long i,j,k,i1,i2,k1,j1,j2,k2,index;

    if (dimZ > 1) {
#pragma omp parallel for shared (A, D2, dimX, dimY, dimZ) private(index, i, j, k, i1, j1, k1, i2, j2, k2, NOMx_1, NOMy_1, NOMx_0, NOMz_1, NOMz_0, denom1, denom2, denom3, T2)
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                for(k=0; k<dimZ; k++) {
                    index = (dimX*dimY)*k + j*dimX+i;
                    /* symmetric boundary conditions (Neuman) */
                    i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                    i2 = i - 1; if (i2 < 0) i2 = i+1;
                    j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                    j2 = j - 1; if (j2 < 0) j2 = j+1;
                    k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
                    k2 = k - 1; if (k2 < 0) k2 = k+1;

                    /* Forward-backward differences */
                    NOMx_1 = A[(dimX*dimY)*k + (j1)*dimX + i] - A[index]; /* x+ */
                    NOMy_1 = A[(dimX*dimY)*k + (j)*dimX + i1] - A[index]; /* y+ */
                    NOMx_0 = A[index] - A[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */
                    NOMz_1 = A[(dimX*dimY)*k1 + j*dimX + i] - A[index]; /* z+ */
                    NOMz_0 = A[index] - A[(dimX*dimY)*k2 + (j)*dimX + i]; /* z- */

                    denom1 = NOMy_1*NOMy_1;
                    denom2 = 0.5f*(sign(NOMx_1) + sign(NOMx_0))*(MIN(fabs(NOMx_1),fabs(NOMx_0)));
                    denom2 = denom2*denom2;
                    denom3 = 0.5f*(sign(NOMz_1) + sign(NOMz_0))*(MIN(fabs(NOMz_1),fabs(NOMz_0)));
                    denom3 = denom3*denom3;
                    T2 = sqrtf(denom1 + denom2 + denom3 + EPS);
                    D2[index] = NOMy_1/T2;
                }}}
    }
    else {
#pragma omp parallel for shared (A, D2, dimX, dimY) private(i, j, i1, j1, i2, j2, NOMx_1,NOMy_1,NOMx_0,denom1,denom2,T2,index)
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                index = j*dimX+i;
                /* symmetric boundary conditions (Neuman) */
                i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                i2 = i - 1; if (i2 < 0) i2 = i+1;
                j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                j2 = j - 1; if (j2 < 0) j2 = j+1;

                /* Forward-backward differences */
                NOMx_1 = A[j1*dimX + i] - A[index]; /* x+ */
                NOMy_1 = A[j*dimX + i1] - A[index]; /* y+ */
                NOMx_0 = A[index] - A[j2*dimX + i]; /* x- */
                /*NOMy_0 = A[(i)*dimY + j] - A[(i)*dimY + j2]; */ /* y- */

                denom1 = NOMy_1*NOMy_1;
                denom2 = 0.5f*(sign(NOMx_1) + sign(NOMx_0))*(MIN(fabs(NOMx_1),fabs(NOMx_0)));
                denom2 = denom2*denom2;
                T2 = sqrtf(denom1 + denom2 + EPS);
                D2[index] = NOMy_1/T2;
            }}
    }
    return *D2;
}

/* calculate differences 3 (z-direction derivative, 3D only -- callers
 * invoke this function only when dimZ > 1) */
float D3_func(float *A, float *D3, long dimX, long dimY, long dimZ)
{
    float NOMx_1, NOMy_1, NOMx_0, NOMy_0, NOMz_1, denom1, denom2, denom3, T3;
    long index,i,j,k,i1,i2,k1,j1,j2,k2;

#pragma omp parallel for shared (A, D3, dimX, dimY, dimZ) private(index, i, j, k, i1, j1, k1, i2, j2, k2, NOMx_1, NOMy_1, NOMy_0, NOMx_0, NOMz_1, denom1, denom2, denom3, T3)
    for(j=0; j<dimY; j++) {
        for(i=0; i<dimX; i++) {
            for(k=0; k<dimZ; k++) {
                index = (dimX*dimY)*k + j*dimX+i;
                /* symmetric boundary conditions (Neuman) */
                i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                i2 = i - 1; if (i2 < 0) i2 = i+1;
                j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                j2 = j - 1; if (j2 < 0) j2 = j+1;
                k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
                k2 = k - 1; if (k2 < 0) k2 = k+1;

                /* Forward-backward differences */
                NOMx_1 = A[(dimX*dimY)*k + (j1)*dimX + i] - A[index]; /* x+ */
                NOMy_1 = A[(dimX*dimY)*k + (j)*dimX + i1] - A[index]; /* y+ */
                NOMy_0 = A[index] - A[(dimX*dimY)*k + (j)*dimX + i2]; /* y- */
                NOMx_0 = A[index] - A[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */
                NOMz_1 = A[(dimX*dimY)*k1 + j*dimX + i] - A[index]; /* z+ */
                /*NOMz_0 = A[(dimX*dimY)*k + (i)*dimY + j] - A[(dimX*dimY)*k2 + (i)*dimY + j]; */ /* z- */

                denom1 = NOMz_1*NOMz_1;
                denom2 = 0.5f*(sign(NOMx_1) + sign(NOMx_0))*(MIN(fabs(NOMx_1),fabs(NOMx_0)));
                denom2 = denom2*denom2;
                denom3 = 0.5f*(sign(NOMy_1) + sign(NOMy_0))*(MIN(fabs(NOMy_1),fabs(NOMy_0)));
                denom3 = denom3*denom3;
                T3 = sqrtf(denom1 + denom2 + denom3 + EPS);
                D3[index] = NOMz_1/T3;
            }}}
    return *D3;
}

/* calculate divergence: explicit update
 * B += tau * (lambda * div(D) - (B - A)),
 * where div(D) is the backward-difference divergence of the normalized
 * gradient fields D1/D2(/D3) and (B - A) is the data-fidelity term.
 * B is updated in place (B = current estimate, A = noisy input). */
float TV_kernel(float *D1, float *D2, float *D3, float *B, float *A, float lambda, float tau, long dimX, long dimY, long dimZ)
{
    float dv1, dv2, dv3;
    long index,i,j,k,i1,i2,k1,j1,j2,k2;

    if (dimZ > 1) {
#pragma omp parallel for shared (D1, D2, D3, B, dimX, dimY, dimZ) private(index, i, j, k, i1, j1, k1, i2, j2, k2, dv1,dv2,dv3)
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                for(k=0; k<dimZ; k++) {
                    index = (dimX*dimY)*k + j*dimX+i;
                    /* symmetric boundary conditions (Neuman) */
                    i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                    i2 = i - 1; if (i2 < 0) i2 = i+1;
                    j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                    j2 = j - 1; if (j2 < 0) j2 = j+1;
                    k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
                    k2 = k - 1; if (k2 < 0) k2 = k+1;

                    /*divergence components */
                    dv1 = D1[index] - D1[(dimX*dimY)*k + j2*dimX+i];
                    dv2 = D2[index] - D2[(dimX*dimY)*k + j*dimX+i2];
                    dv3 = D3[index] - D3[(dimX*dimY)*k2 + j*dimX+i];

                    B[index] += tau*(lambda*(dv1 + dv2 + dv3) - (B[index] - A[index]));
                }}}
    }
    else {
#pragma omp parallel for shared (D1, D2, B, dimX, dimY) private(index, i, j, i1, j1, i2, j2,dv1,dv2)
        for(j=0; j<dimY; j++) {
            for(i=0; i<dimX; i++) {
                index = j*dimX+i;
                /* symmetric boundary conditions (Neuman) */
                i1 = i + 1; if (i1 >= dimX) i1 = i-1;
                i2 = i - 1; if (i2 < 0) i2 = i+1;
                j1 = j + 1; if (j1 >= dimY) j1 = j-1;
                j2 = j - 1; if (j2 < 0) j2 = j+1;

                /* divergence components  */
                dv1 = D1[index] - D1[j2*dimX + i];
                dv2 = D2[index] - D2[j*dimX + i2];

                B[index] += tau*(lambda*(dv1 + dv2) - (B[index] - A[index]));
            }}
    }
    return *B;
}
nn_index.h
/*********************************************************************** * Software License Agreement (BSD License) * * Copyright 2008-2009 Marius Muja (mariusm@cs.ubc.ca). All rights reserved. * Copyright 2008-2009 David G. Lowe (lowe@cs.ubc.ca). All rights reserved. * * THE BSD LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*************************************************************************/ #ifndef FLANN_NNINDEX_H #define FLANN_NNINDEX_H #include <vector> #include "flann/general.h" #include "flann/util/matrix.h" #include "flann/util/params.h" #include "flann/util/result_set.h" #include "flann/util/dynamic_bitset.h" #include "flann/util/saving.h" namespace flann { #define KNN_HEAP_THRESHOLD 250 class IndexBase { public: virtual ~IndexBase() {}; virtual size_t veclen() const = 0; virtual size_t size() const = 0; virtual flann_algorithm_t getType() const = 0; virtual int usedMemory() const = 0; virtual IndexParams getParameters() const = 0; virtual void loadIndex(FILE* stream) = 0; virtual void saveIndex(FILE* stream) = 0; }; /** * Nearest-neighbour index base class */ template <typename Distance> class NNIndex : public IndexBase { public: typedef typename Distance::ElementType ElementType; typedef typename Distance::ResultType DistanceType; NNIndex(Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0), removed_(false), removed_count_(0), data_ptr_(NULL) { } NNIndex(const IndexParams& params, Distance d) : distance_(d), last_id_(0), size_(0), size_at_build_(0), veclen_(0), index_params_(params), removed_(false), removed_count_(0), data_ptr_(NULL) { } NNIndex(const NNIndex& other) : distance_(other.distance_), last_id_(other.last_id_), size_(other.size_), size_at_build_(other.size_at_build_), veclen_(other.veclen_), index_params_(other.index_params_), removed_(other.removed_), removed_points_(other.removed_points_), removed_count_(other.removed_count_), ids_(other.ids_), points_(other.points_), data_ptr_(NULL) { if (other.data_ptr_) { data_ptr_ = new ElementType[size_*veclen_]; std::copy(other.data_ptr_, other.data_ptr_+size_*veclen_, data_ptr_); for (size_t i=0;i<size_;++i) { points_[i] = data_ptr_ + i*veclen_; } } } virtual ~NNIndex() { if (data_ptr_) { delete[] data_ptr_; } } virtual NNIndex* clone() const = 0; /** * Builds the index */ virtual void 
buildIndex()
    {
        freeIndex();
        cleanRemovedPoints();

        // building index
        buildIndexImpl();

        size_at_build_ = size_;
    }

    /**
     * Builds the index using the specified dataset.
     * @param dataset the dataset to use
     */
    virtual void buildIndex(const Matrix<ElementType>& dataset)
    {
        setDataset(dataset);
        this->buildIndex();
    }

    /**
     * @brief Incrementally add points to the index.
     * @param points Matrix with points to be added
     * @param rebuild_threshold growth factor at which a full rebuild is
     *        triggered (unused here; this base implementation always throws)
     */
    virtual void addPoints(const Matrix<ElementType>& points, float rebuild_threshold = 2)
    {
        throw FLANNException("Functionality not supported by this index");
    }

    /**
     * Remove point from the index
     * @param id ID of the point to be removed
     */
    virtual void removePoint(size_t id)
    {
        // Lazily switch to "removed" mode on the first removal: give the
        // existing points sequential IDs and allocate the removal bitset.
        if (!removed_) {
            ids_.resize(size_);
            for (size_t i=0;i<size_;++i) {
                ids_[i] = i;
            }
            removed_points_.resize(size_);
            removed_points_.reset();
            last_id_ = size_;
            removed_ = true;
        }

        size_t point_index = id_to_index(id);
        if (point_index!=size_t(-1) && !removed_points_.test(point_index)) {
            removed_points_.set(point_index);
            removed_count_++;
        }
    }

    /**
     * Get point with specific id
     * @param id point ID
     * @return pointer to the point's data, or NULL if the ID is unknown
     */
    virtual ElementType* getPoint(size_t id)
    {
        size_t index = id_to_index(id);
        if (index!=size_t(-1)) {
            return points_[index];
        }
        else {
            return NULL;
        }
    }

    /**
     * @return number of features in this index.
     */
    inline size_t size() const
    {
        return size_ - removed_count_;
    }

    /**
     * @return The dimensionality of the features in this index.
     */
    inline size_t veclen() const
    {
        return veclen_;
    }

    /**
     * Returns the parameters used by the index.
*
     * @return The index parameters
     */
    IndexParams getParameters() const
    {
        return index_params_;
    }

    /**
     * Serializes (when saving) or deserializes (when loading) the index
     * header, metadata and — optionally — the dataset itself.  The field
     * order below defines the on-disk format; do not reorder.
     */
    template<typename Archive>
    void serialize(Archive& ar)
    {
        IndexHeader header;
        if (Archive::is_saving::value) {
            header.data_type = flann_datatype_value<ElementType>::value;
            header.index_type = getType();
            header.rows = size_;
            header.cols = veclen_;
        }
        ar & header;

        // sanity checks
        if (Archive::is_loading::value) {
            if (strcmp(header.signature,FLANN_SIGNATURE_)!=0) {
                throw FLANNException("Invalid index file, wrong signature");
            }

            if (header.data_type != flann_datatype_value<ElementType>::value) {
                throw FLANNException("Datatype of saved index is different than of the one to be created.");
            }

            if (header.index_type != getType()) {
                throw FLANNException("Saved index type is different then the current index type.");
            }
            // TODO: check for distance type
        }

        ar & size_;
        ar & veclen_;
        ar & size_at_build_;

        bool save_dataset;
        if (Archive::is_saving::value) {
            save_dataset = get_param(index_params_,"save_dataset", false);
        }
        ar & save_dataset;

        if (save_dataset) {
            // when loading, (re)allocate an owned buffer for the point data
            if (Archive::is_loading::value) {
                if (data_ptr_) {
                    delete[] data_ptr_;
                }
                data_ptr_ = new ElementType[size_*veclen_];
                points_.resize(size_);
                for (size_t i=0;i<size_;++i) {
                    points_[i] = data_ptr_ + i*veclen_;
                }
            }
            for (size_t i=0;i<size_;++i) {
                ar & serialization::make_binary_object (points_[i], veclen_*sizeof(ElementType));
            }
        }
        else {
            if (points_.size()!=size_) {
                throw FLANNException("Saved index does not contain the dataset and no dataset was provided.");
            }
        }

        ar & last_id_;
        ar & ids_;
        ar & removed_;
        if (removed_) {
            ar & removed_points_;
        }
        ar & removed_count_;
    }

    /**
     * @brief Perform k-nearest neighbor search
     * @param[in] queries The query points for which to find the nearest neighbors
     * @param[out] indices The indices of the nearest neighbors found
     * @param[out] dists Distances to the nearest neighbors found
     * @param[in] knn Number of nearest neighbors to return
     * @param[in] params Search parameters
     */
    virtual int knnSearch(const
Matrix<ElementType>& queries,
                          Matrix<size_t>& indices,
                          Matrix<DistanceType>& dists,
                          size_t knn,
                          const SearchParams& params) const
    {
        assert(queries.cols == veclen());
        assert(indices.rows >= queries.rows);
        assert(dists.rows >= queries.rows);
        assert(indices.cols >= knn);
        assert(dists.cols >= knn);

        // Heap result sets scale better for large knn; small knn uses the
        // simple insertion-based result set.
        bool use_heap;
        if (params.use_heap==FLANN_Undefined) {
            use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false;
        }
        else {
            use_heap = (params.use_heap==FLANN_True)?true:false;
        }

        int count = 0;
        if (use_heap) {
            //#pragma omp parallel num_threads(params.cores)
            {
                KNNResultSet2<DistanceType> resultSet(knn);
                //#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    resultSet.copy(indices[i], dists[i], n, params.sorted);
                    // translate internal point positions back to stable IDs (in place)
                    indices_to_ids(indices[i], indices[i], n);
                    count += n;
                }
            }
        }
        else {
            //#pragma omp parallel num_threads(params.cores)
            {
                KNNSimpleResultSet<DistanceType> resultSet(knn);
                //#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    resultSet.copy(indices[i], dists[i], n, params.sorted);
                    indices_to_ids(indices[i], indices[i], n);
                    count += n;
                }
            }
        }
        return count;
    }

    /**
     * Convenience overload writing into an int index matrix; forwards to the
     * size_t overload through a temporary buffer.
     * @param queries query points
     * @param indices output neighbor indices (int)
     * @param dists output neighbor distances
     * @param knn number of neighbors to return
     * @param params search parameters
     * @return number of neighbors found
     */
    int knnSearch(const Matrix<ElementType>& queries,
                  Matrix<int>& indices,
                  Matrix<DistanceType>& dists,
                  size_t knn,
                  const SearchParams& params) const
    {
        // NOTE(review): the raw new[] below leaks if knnSearch throws before
        // the delete[] — consider an RAII holder; verify against callers.
        flann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols);

        int result = knnSearch(queries, indices_, dists, knn, params);

        for (size_t i=0;i<indices.rows;++i) {
            for (size_t j=0;j<indices.cols;++j) {
                indices[i][j] = indices_[i][j];
            }
        }
        delete[] indices_.ptr();
        return result;
    }

    /**
     * @brief Perform k-nearest neighbor search
     * @param[in] queries The
query points for which to find the nearest neighbors
     * @param[out] indices The indices of the nearest neighbors found
     * @param[out] dists Distances to the nearest neighbors found
     * @param[in] knn Number of nearest neighbors to return
     * @param[in] params Search parameters
     */
    int knnSearch(const Matrix<ElementType>& queries,
                  std::vector< std::vector<size_t> >& indices,
                  std::vector<std::vector<DistanceType> >& dists,
                  size_t knn,
                  const SearchParams& params) const
    {
        assert(queries.cols == veclen());

        // Heap result sets scale better for large knn.
        bool use_heap;
        if (params.use_heap==FLANN_Undefined) {
            use_heap = (knn>KNN_HEAP_THRESHOLD)?true:false;
        }
        else {
            use_heap = (params.use_heap==FLANN_True)?true:false;
        }

        // output vectors are grown (never shrunk) to one row per query
        if (indices.size() < queries.rows ) indices.resize(queries.rows);
        if (dists.size() < queries.rows ) dists.resize(queries.rows);

        int count = 0;
        if (use_heap) {
            //#pragma omp parallel num_threads(params.cores)
            {
                KNNResultSet2<DistanceType> resultSet(knn);
                //#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n>0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                    count += n;
                }
            }
        }
        else {
            //#pragma omp parallel num_threads(params.cores)
            {
                KNNSimpleResultSet<DistanceType> resultSet(knn);
                //#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    size_t n = std::min(resultSet.size(), knn);
                    indices[i].resize(n);
                    dists[i].resize(n);
                    if (n>0) {
                        resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                        indices_to_ids(&indices[i][0], &indices[i][0], n);
                    }
                    count += n;
                }
            }
        }

        return count;
    }

    /**
     * Convenience overload writing into int vectors; forwards to the size_t
     * overload and converts.
     * @param queries query points
     * @param indices output neighbor indices
     * @param dists output neighbor distances
     * @param knn number of neighbors to return
     * @param params search parameters
     * @return number of neighbors found
     */
    int knnSearch(const Matrix<ElementType>& queries,
                  std::vector<
std::vector<int> >& indices,
                  std::vector<std::vector<DistanceType> >& dists,
                  size_t knn,
                  const SearchParams& params) const
    {
        std::vector<std::vector<size_t> > indices_;
        int result = knnSearch(queries, indices_, dists, knn, params);

        indices.resize(indices_.size());
        for (size_t i=0;i<indices_.size();++i) {
            indices[i].assign(indices_[i].begin(), indices_[i].end());
        }
        return result;
    }

    /**
     * @brief Perform radius search
     * @param[in] queries The query points
     * @param[out] indices The indices of the neighbors found within the given radius
     * @param[out] dists The distances to the nearest neighbors found
     * @param[in] radius The radius used for search
     * @param[in] params Search parameters
     * @return Number of neighbors found
     */
    int radiusSearch(const Matrix<ElementType>& queries,
                     Matrix<size_t>& indices,
                     Matrix<DistanceType>& dists,
                     float radius,
                     const SearchParams& params) const
    {
        assert(queries.cols == veclen());

        int count = 0;
        // effective output capacity per query row
        size_t num_neighbors = std::min(indices.cols, dists.cols);
        int max_neighbors = params.max_neighbors;
        if (max_neighbors<0) max_neighbors = num_neighbors;
        else max_neighbors = std::min(max_neighbors,(int)num_neighbors);

        if (max_neighbors==0) {
            // caller only wants the neighbor count, not the neighbors themselves
            //#pragma omp parallel num_threads(params.cores)
            {
                CountRadiusResultSet<DistanceType> resultSet(radius);
                //#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    count += resultSet.size();
                }
            }
        }
        else {
            // explicitly indicated to use unbounded radius result set
            // and we know there'll be enough room for resulting indices and dists
            if (params.max_neighbors<0 && (num_neighbors>=size())) {
                //#pragma omp parallel num_threads(params.cores)
                {
                    RadiusResultSet<DistanceType> resultSet(radius);
                    //#pragma omp for schedule(static) reduction(+:count)
                    for (int i = 0; i < (int)queries.rows; i++) {
                        resultSet.clear();
                        findNeighbors(resultSet, queries[i], params);
                        size_t n = resultSet.size();
                        count += n;
                        if (n>num_neighbors) n
= num_neighbors;
                        resultSet.copy(indices[i], dists[i], n, params.sorted);

                        // mark the next element in the output buffers as unused
                        if (n<indices.cols) indices[i][n] = size_t(-1);
                        if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
                        indices_to_ids(indices[i], indices[i], n);
                    }
                }
            }
            else {
                // number of neighbors limited to max_neighbors
                //#pragma omp parallel num_threads(params.cores)
                {
                    KNNRadiusResultSet<DistanceType> resultSet(radius, max_neighbors);
                    //#pragma omp for schedule(static) reduction(+:count)
                    for (int i = 0; i < (int)queries.rows; i++) {
                        resultSet.clear();
                        findNeighbors(resultSet, queries[i], params);
                        size_t n = resultSet.size();
                        count += n;
                        if ((int)n>max_neighbors) n = max_neighbors;
                        resultSet.copy(indices[i], dists[i], n, params.sorted);

                        // mark the next element in the output buffers as unused
                        if (n<indices.cols) indices[i][n] = size_t(-1);
                        if (n<dists.cols) dists[i][n] = std::numeric_limits<DistanceType>::infinity();
                        indices_to_ids(indices[i], indices[i], n);
                    }
                }
            }
        }
        return count;
    }

    /**
     * Convenience overload writing into an int index matrix; forwards to the
     * size_t overload through a temporary buffer.
     * @param queries query points
     * @param indices output neighbor indices (int)
     * @param dists output neighbor distances
     * @param radius search radius
     * @param params search parameters
     * @return number of neighbors found
     */
    int radiusSearch(const Matrix<ElementType>& queries,
                     Matrix<int>& indices,
                     Matrix<DistanceType>& dists,
                     float radius,
                     const SearchParams& params) const
    {
        // NOTE(review): the raw new[] below leaks if radiusSearch throws
        // before the delete[] — consider an RAII holder; verify with callers.
        flann::Matrix<size_t> indices_(new size_t[indices.rows*indices.cols], indices.rows, indices.cols);

        int result = radiusSearch(queries, indices_, dists, radius, params);

        for (size_t i=0;i<indices.rows;++i) {
            for (size_t j=0;j<indices.cols;++j) {
                indices[i][j] = indices_[i][j];
            }
        }
        delete[] indices_.ptr();
        return result;
    }

    /**
     * @brief Perform radius search
     * @param[in] queries The query points
     * @param[out] indices The indices of the neighbors found within the given radius
     * @param[out] dists The distances to the nearest neighbors found
     * @param[in] radius The radius used for search
     * @param[in] params Search parameters
     * @return Number of neighbors found
     */
    int radiusSearch(const Matrix<ElementType>&
queries,
                     std::vector< std::vector<size_t> >& indices,
                     std::vector<std::vector<DistanceType> >& dists,
                     float radius,
                     const SearchParams& params) const
    {
        assert(queries.cols == veclen());

        int count = 0;

        // just count neighbors
        if (params.max_neighbors==0) {
            //#pragma omp parallel num_threads(params.cores)
            {
                CountRadiusResultSet<DistanceType> resultSet(radius);
                //#pragma omp for schedule(static) reduction(+:count)
                for (int i = 0; i < (int)queries.rows; i++) {
                    resultSet.clear();
                    findNeighbors(resultSet, queries[i], params);
                    count += resultSet.size();
                }
            }
        }
        else {
            // output vectors are grown (never shrunk) to one row per query
            if (indices.size() < queries.rows ) indices.resize(queries.rows);
            if (dists.size() < queries.rows ) dists.resize(queries.rows);

            if (params.max_neighbors<0) {  // search for all neighbors
                //#pragma omp parallel num_threads(params.cores)
                {
                    RadiusResultSet<DistanceType> resultSet(radius);
                    //#pragma omp for schedule(static) reduction(+:count)
                    for (int i = 0; i < (int)queries.rows; i++) {
                        resultSet.clear();
                        findNeighbors(resultSet, queries[i], params);
                        size_t n = resultSet.size();
                        count += n;
                        indices[i].resize(n);
                        dists[i].resize(n);
                        if (n > 0) {
                            resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                            indices_to_ids(&indices[i][0], &indices[i][0], n);
                        }
                    }
                }
            }
            else {
                // number of neighbors limited to max_neighbors
                //#pragma omp parallel num_threads(params.cores)
                {
                    KNNRadiusResultSet<DistanceType> resultSet(radius, params.max_neighbors);
                    //#pragma omp for schedule(static) reduction(+:count)
                    for (int i = 0; i < (int)queries.rows; i++) {
                        resultSet.clear();
                        findNeighbors(resultSet, queries[i], params);
                        size_t n = resultSet.size();
                        count += n;
                        if ((int)n>params.max_neighbors) n = params.max_neighbors;
                        indices[i].resize(n);
                        dists[i].resize(n);
                        if (n > 0) {
                            resultSet.copy(&indices[i][0], &dists[i][0], n, params.sorted);
                            indices_to_ids(&indices[i][0], &indices[i][0], n);
                        }
                    }
                }
            }
        }

        return count;
    }

    /**
     * Convenience overload writing into int vectors; forwards to the size_t
     * overload and converts.
     * @param queries query points
     * @param indices output neighbor indices
     * @param dists output neighbor distances
     * @param radius search radius
     * @param params search parameters
     * @return number of neighbors found
     */
    int radiusSearch(const
Matrix<ElementType>& queries, std::vector< std::vector<int> >& indices, std::vector<std::vector<DistanceType> >& dists, float radius, const SearchParams& params) const { std::vector<std::vector<size_t> > indices_; int result = radiusSearch(queries, indices_, dists, radius, params); indices.resize(indices_.size()); for (size_t i=0;i<indices_.size();++i) { indices[i].assign(indices_[i].begin(), indices_[i].end()); } return result; } virtual void findNeighbors(ResultSet<DistanceType>& result, const ElementType* vec, const SearchParams& searchParams) const = 0; protected: virtual void freeIndex() = 0; virtual void buildIndexImpl() = 0; size_t id_to_index(size_t id) { if (ids_.size()==0) { return id; } size_t point_index = size_t(-1); if (ids_[id]==id) { return id; } else { // binary search size_t start = 0; size_t end = ids_.size(); while (start<end) { size_t mid = (start+end)/2; if (ids_[mid]==id) { point_index = mid; break; } else if (ids_[mid]<id) { start = mid + 1; } else { end = mid; } } } return point_index; } void indices_to_ids(const size_t* in, size_t* out, size_t size) const { if (removed_) { for (size_t i=0;i<size;++i) { out[i] = ids_[in[i]]; } } } void setDataset(const Matrix<ElementType>& dataset) { size_ = dataset.rows; veclen_ = dataset.cols; last_id_ = 0; ids_.clear(); removed_points_.clear(); removed_ = false; removed_count_ = 0; points_.resize(size_); for (size_t i=0;i<size_;++i) { points_[i] = dataset[i]; } } void extendDataset(const Matrix<ElementType>& new_points) { size_t new_size = size_ + new_points.rows; if (removed_) { removed_points_.resize(new_size); ids_.resize(new_size); } points_.resize(new_size); for (size_t i=size_;i<new_size;++i) { points_[i] = new_points[i-size_]; if (removed_) { ids_[i] = last_id_++; removed_points_.reset(i); } } size_ = new_size; } void cleanRemovedPoints() { if (!removed_) return; size_t last_idx = 0; for (size_t i=0;i<size_;++i) { if (!removed_points_.test(i)) { points_[last_idx] = points_[i]; ids_[last_idx] = 
ids_[i];
                removed_points_.reset(last_idx);
                ++last_idx;
            }
        }
        points_.resize(last_idx);
        ids_.resize(last_idx);
        removed_points_.resize(last_idx);
        size_ = last_idx;
        removed_count_ = 0;
    }

    // Member-wise swap; useful for implementing exception-safe assignment.
    void swap(NNIndex& other)
    {
        std::swap(distance_, other.distance_);
        std::swap(last_id_, other.last_id_);
        std::swap(size_, other.size_);
        std::swap(size_at_build_, other.size_at_build_);
        std::swap(veclen_, other.veclen_);
        std::swap(index_params_, other.index_params_);
        std::swap(removed_, other.removed_);
        std::swap(removed_points_, other.removed_points_);
        std::swap(removed_count_, other.removed_count_);
        std::swap(ids_, other.ids_);
        std::swap(points_, other.points_);
        std::swap(data_ptr_, other.data_ptr_);
    }

protected:

    /**
     * The distance functor
     */
    Distance distance_;

    /**
     * Each index point has an associated ID. IDs are assigned sequentially in
     * increasing order. This indicates the ID assigned to the last point added to the
     * index.
     */
    size_t last_id_;

    /**
     * Number of points in the index (and database)
     */
    size_t size_;

    /**
     * Number of features in the dataset when the index was last built.
     */
    size_t size_at_build_;

    /**
     * Size of one point in the index (and database)
     */
    size_t veclen_;

    /**
     * Parameters of the index.
     */
    IndexParams index_params_;

    /**
     * Flag indicating if at least a point was removed from the index
     */
    bool removed_;

    /**
     * Array used to mark points removed from the index
     */
    DynamicBitset removed_points_;

    /**
     * Number of points removed from the index
     */
    size_t removed_count_;

    /**
     * Array of point IDs, returned by nearest-neighbour operations
     */
    std::vector<size_t> ids_;

    /**
     * Point data
     */
    std::vector<ElementType*> points_;

    /**
     * Pointer to dataset memory if allocated by this index, otherwise NULL
     */
    ElementType* data_ptr_;
};


// Pulls the protected NNIndex members into the scope of a derived index
// template (C++ requires explicit using-declarations for dependent bases).
#define USING_BASECLASS_SYMBOLS \
    using NNIndex<Distance>::distance_;\
    using NNIndex<Distance>::size_;\
    using NNIndex<Distance>::size_at_build_;\
    using NNIndex<Distance>::veclen_;\
    using NNIndex<Distance>::index_params_;\
    using NNIndex<Distance>::removed_points_;\
    using NNIndex<Distance>::ids_;\
    using NNIndex<Distance>::removed_;\
    using NNIndex<Distance>::points_;\
    using NNIndex<Distance>::extendDataset;\
    using NNIndex<Distance>::setDataset;\
    using NNIndex<Distance>::cleanRemovedPoints;\
    using NNIndex<Distance>::indices_to_ids;

}

#endif //FLANN_NNINDEX_H
GB_binop__bclr_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bclr_int64) // A.*B function (eWiseMult): GB (_AemultB_01__bclr_int64) // A.*B function (eWiseMult): GB (_AemultB_02__bclr_int64) // A.*B function (eWiseMult): GB (_AemultB_03__bclr_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bclr_int64) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__bclr_int64) // C+=b function (dense accum): GB (_Cdense_accumb__bclr_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bclr_int64) // C=scalar+B GB (_bind1st__bclr_int64) // C=scalar+B' GB (_bind1st_tran__bclr_int64) // C=A+scalar GB (_bind2nd__bclr_int64) // C=A'+scalar GB (_bind2nd_tran__bclr_int64) // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = GB_BITCLR (aij, bij, int64_t, 64) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_BITCLR (x, y, int64_t, 64) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BCLR || GxB_NO_INT64 || GxB_NO_BCLR_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bclr_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bclr_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bclr_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__bclr_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__bclr_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t 
*restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__bclr_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
// NOTE(review): this region begins inside a kernel whose definition starts
// before the visible part of the file; its tail is kept verbatim below.
#undef  GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

// Masked elementwise multiply with the BCLR (bit-clear) operator on int64.
// The actual computation lives in GB_emult_03_template.c; this wrapper only
// binds the type/operator macros for that template.  Returns GrB_NO_VALUE
// when this kernel is compiled out (GB_DISABLE).
GrB_Info GB (_AemultB_03__bclr_int64)
(
    GrB_Matrix C,                       // output matrix
    const GrB_Matrix M,                 // mask, sparse or hypersparse (per banner above)
    const bool Mask_struct,             // if true, use the mask structurally
    const GrB_Matrix A,                 // first input, bitmap or full
    const GrB_Matrix B,                 // second input, bitmap or full
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

// Bitmap-output elementwise multiply; ewise_method selects among the masked /
// complemented-mask / unmasked variants inside GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__bclr_int64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = GB_BITCLR (x, Bx [p]) for every entry present in B,
// with the scalar x bound as the FIRST operand.
GrB_Info GB (_bind1st__bclr_int64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // the bound scalar x
    const GB_void *Bx_input,    // values of B
    const int8_t *restrict Bb,  // bitmap pattern of B, consulted via GBB
    int64_t bnz,                // number of positions to scan
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t x = (*((int64_t *) x_input)) ;
    int64_t *Bx = (int64_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int64_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITCLR (x, bij, int64_t, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Computes Cx [p] = GB_BITCLR (Ax [p], y) for every entry present in A,
// with the scalar y bound as the SECOND operand.
GrB_Info GB (_bind2nd__bclr_int64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,    // values of A
    const GB_void *y_input,     // the bound scalar y
    const int8_t *restrict Ab,  // bitmap pattern of A, consulted via GBB
    int64_t anz,                // number of positions to scan
    int nthreads                // # of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int64_t *Cx = (int64_t *) Cx_output ;
    int64_t *Ax = (int64_t *) Ax_input ;
    int64_t y = (*((int64_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int64_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITCLR (aij, y, int64_t, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c below.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                               \
{                                                       \
    int64_t aij = GBX (Ax, pA, false) ;                 \
    Cx [pC] = GB_BITCLR (x, aij, int64_t, 64) ;         \
}

GrB_Info GB (_bind1st_tran__bclr_int64)
(
    GrB_Matrix C,
    const GB_void *x_input,     // the bound scalar x (first operand)
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t x = (*((const int64_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

    // restore GB_ATYPE (generated code redefines it identically here)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int64_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                               \
{                                                       \
    int64_t aij = GBX (Ax, pA, false) ;                 \
    Cx [pC] = GB_BITCLR (aij, y, int64_t, 64) ;         \
}

GrB_Info GB (_bind2nd_tran__bclr_int64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,     // the bound scalar y (second operand)
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t y = (*((const int64_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

// closes a conditional opened before the visible region (file-level guard)
#endif
layerramsubset.h
/********************************************************************************* * * Inviwo - Interactive Visualization Workshop * * Copyright (c) 2018-2020 Inviwo Foundation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* *********************************************************************************/ #ifndef IVW_LAYERRAMSUBSET_H #define IVW_LAYERRAMSUBSET_H #include <modules/base/basemoduledefine.h> #include <inviwo/core/common/inviwo.h> #include <inviwo/core/datastructures/image/layer.h> #include <inviwo/core/datastructures/image/layerram.h> #include <inviwo/core/datastructures/image/layerramprecision.h> #include <inviwo/core/util/glm.h> #include <algorithm> namespace inviwo { namespace util { /** * \brief extracts a subregion from a layer and returns it as a new layer * * This function extracts a subregion given by offset and extent from the input layer. * If border clamping is enabled, the output region will be clamped to lie completely within the * source layer. Otherwise (default), the areas outside the source layer will be filled with * zeros. * * @param in input layer * @param offset subregion offset in input layer * @param extent extent (width and height) of subregion * @param clampBorderOutsideImage if true, the output region is clamped to the layer boundaries * @return std::shared_ptr<LayerRAM> */ IVW_MODULE_BASE_API std::shared_ptr<LayerRAM> layerSubSet(const Layer* in, ivec2 offset, size2_t extent, bool clampBorderOutsideImage = false); /** * \brief extracts a subregion from a layer and converts it into a new layer * * This function extracts a subregion given by offset and extent from the input layer. The values * will be converted to type T using util::glm_convert_normalized. * If border clamping is enabled, the output region will be clamped to lie completely within the * source layer. Otherwise (default), the areas outside the source layer will be filled with * zeros. 
 *
 * @param in input layer
 * @param offset subregion offset in input layer
 * @param extent extent (width and height) of subregion
 * @param clampBorderOutsideImage if true, the output region is clamped to the layer boundaries
 * @return std::shared_ptr<LayerRAMPrecision<T>>
 */
template <typename T>
std::shared_ptr<LayerRAMPrecision<T>> layerSubSet(const Layer* in, ivec2 offset, size2_t extent,
                                                  bool clampBorderOutsideImage = false);

namespace detail {

// Same-type overload: plain element-wise copy, no value conversion needed.
template <typename T>
void conversionCopy(const T* src, T* dst, size_t len) {
    std::copy(src, src + len, dst);
}

// Cross-type overload: converts each element with util::glm_convert_normalized.
template <typename To, typename From>
void conversionCopy(const From* src, To* dst, size_t len) {
    for (size_t i = 0; i < len; i++) {
        dst[i] = util::glm_convert_normalized<To, From>(src[i]);
    }
}

// Extracts the subregion starting at `offset` with size `extent` from inLayer
// into a newly allocated LayerRAMPrecision<U>, converting T -> U per element.
// If clampBorderOutsideImage is false the output keeps the requested extent
// and out-of-source areas are zero-filled; if true the output shrinks to the
// intersection with the source layer.
template <typename T, typename U = T>
std::shared_ptr<LayerRAMPrecision<U>> extractLayerSubSet(const LayerRAMPrecision<T>* inLayer,
                                                         ivec2 offset, size2_t extent,
                                                         bool clampBorderOutsideImage) {
    // determine parameters
    const ivec2 srcDim(inLayer->getDimensions());
    // adjust the output dimensions to match the intersection of output and input regions
    const ivec2 srcOffset(glm::max(ivec2(0), offset));
    // without clamping, a negative offset shifts where the copied data lands
    // inside the (zero-filled) destination
    const ivec2 dstOffset = clampBorderOutsideImage ? ivec2(0) : (glm::max(ivec2(0), -offset));
    // clamp copy extent to source layer
    // NOTE(review): if the requested region lies entirely outside the source,
    // copyExtent (and, with clamping, dstDim) becomes non-positive — TODO
    // confirm callers guarantee at least partial overlap.
    const ivec2 copyExtent = glm::min(ivec2(extent) - dstOffset, srcDim - srcOffset);
    const ivec2 dstDim = clampBorderOutsideImage ? copyExtent : ivec2(extent);

    // allocate space
    auto newLayer = std::make_shared<LayerRAMPrecision<U>>(dstDim);

    const auto src = inLayer->getDataTyped();
    auto dst = newLayer->getDataTyped();
    if (!clampBorderOutsideImage) {
        // clear entire layer as only parts will be copied
        std::fill(dst, dst + dstDim.x * dstDim.y, U(0));
    }

    // memcpy each row to form sub layer
    // (signed int loop index — presumably for OpenMP 2.0 / MSVC compatibility)
#pragma omp parallel for
    for (int j = 0; j < copyExtent.y; j++) {
        size_t srcPos = (j + srcOffset.y) * srcDim.x + srcOffset.x;
        size_t dstPos = (j + dstOffset.y) * dstDim.x + dstOffset.x;
        conversionCopy(src + srcPos, dst + dstPos, static_cast<size_t>(copyExtent.x));
    }

    return newLayer;
}

}  // namespace detail

}  // namespace util

// Dispatches on the concrete value type of the input layer's RAM
// representation, then extracts and converts via detail::extractLayerSubSet.
template <typename T>
std::shared_ptr<LayerRAMPrecision<T>> util::layerSubSet(const Layer* in, ivec2 offset,
                                                        size2_t extent,
                                                        bool clampBorderOutsideImage) {
    return in->getRepresentation<LayerRAM>()->dispatch<std::shared_ptr<LayerRAMPrecision<T>>>(
        [offset, extent, clampBorderOutsideImage](auto layerpr) {
            using ValueType = util::PrecisionValueType<decltype(layerpr)>;
            return util::detail::extractLayerSubSet<ValueType, T>(layerpr, offset, extent,
                                                                  clampBorderOutsideImage);
        });
}

}  // namespace inviwo

#endif  // IVW_LAYERRAMSUBSET_H
ASTMatchers.h
//===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements matchers to be used together with the MatchFinder to // match AST nodes. // // Matchers are created by generator functions, which can be combined in // a functional in-language DSL to express queries over the C++ AST. // // For example, to match a class with a certain name, one would call: // cxxRecordDecl(hasName("MyClass")) // which returns a matcher that can be used to find all AST nodes that declare // a class named 'MyClass'. // // For more complicated match expressions we're often interested in accessing // multiple parts of the matched AST nodes once a match is found. In that case, // call `.bind("name")` on match expressions that match the nodes you want to // access. // // For example, when we're interested in child classes of a certain class, we // would write: // cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child"))) // When the match is found via the MatchFinder, a user provided callback will // be called with a BoundNodes instance that contains a mapping from the // strings that we provided for the `.bind()` calls to the nodes that were // matched. // In the given example, each time our matcher finds a match we get a callback // where "child" is bound to the RecordDecl node of the matching child // class declaration. // // See ASTMatchersInternal.h for a more in-depth explanation of the // implementation details of the matcher framework. // // See ASTMatchFinder.h for how to use the generated matchers to run over // an AST. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #include "clang/AST/ASTContext.h" #include "clang/AST/ASTTypeTraits.h" #include "clang/AST/Attr.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/LambdaCapture.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/ParentMapContext.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/ASTMatchers/ASTMatchersInternal.h" #include "clang/ASTMatchers/ASTMatchersMacros.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TypeTraits.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Regex.h" #include <cassert> #include <cstddef> #include <iterator> #include <limits> #include <string> #include <utility> #include <vector> namespace clang { namespace ast_matchers { /// Maps string IDs to AST nodes matched by parts of a matcher. /// /// The bound nodes are generated by calling \c bind("id") on the node matchers /// of the nodes we want to access later. 
/// /// The instances of BoundNodes are created by \c MatchFinder when the user's /// callbacks are executed every time a match is found. class BoundNodes { public: /// Returns the AST node bound to \c ID. /// /// Returns NULL if there was no node bound to \c ID or if there is a node but /// it cannot be converted to the specified type. template <typename T> const T *getNodeAs(StringRef ID) const { return MyBoundNodes.getNodeAs<T>(ID); } /// Type of mapping from binding identifiers to bound nodes. This type /// is an associative container with a key type of \c std::string and a value /// type of \c clang::DynTypedNode using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap; /// Retrieve mapping from binding identifiers to bound nodes. const IDToNodeMap &getMap() const { return MyBoundNodes.getMap(); } private: friend class internal::BoundNodesTreeBuilder; /// Create BoundNodes from a pre-filled map of bindings. BoundNodes(internal::BoundNodesMap &MyBoundNodes) : MyBoundNodes(MyBoundNodes) {} internal::BoundNodesMap MyBoundNodes; }; /// Types of matchers for the top-level classes in the AST class /// hierarchy. /// @{ using DeclarationMatcher = internal::Matcher<Decl>; using StatementMatcher = internal::Matcher<Stmt>; using TypeMatcher = internal::Matcher<QualType>; using TypeLocMatcher = internal::Matcher<TypeLoc>; using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>; using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>; using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>; /// @} /// Matches any node. /// /// Useful when another matcher requires a child matcher, but there's no /// additional constraint. This will often be used with an explicit conversion /// to an \c internal::Matcher<> type such as \c TypeMatcher. 
/// /// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g., /// \code /// "int* p" and "void f()" in /// int* p; /// void f(); /// \endcode /// /// Usable as: Any Matcher inline internal::TrueMatcher anything() { return internal::TrueMatcher(); } /// Matches the top declaration context. /// /// Given /// \code /// int X; /// namespace NS { /// int Y; /// } // namespace NS /// \endcode /// decl(hasDeclContext(translationUnitDecl())) /// matches "int X", but not "int Y". extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl> translationUnitDecl; /// Matches typedef declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefDecl() /// matches "typedef int X", but not "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl> typedefDecl; /// Matches typedef name declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefNameDecl() /// matches "typedef int X" and "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl> typedefNameDecl; /// Matches type alias declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typeAliasDecl() /// matches "using Y = int", but not "typedef int X" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl; /// Matches type alias template declarations. /// /// typeAliasTemplateDecl() matches /// \code /// template <typename T> /// using Y = X<T>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl> typeAliasTemplateDecl; /// Matches AST nodes that were expanded within the main-file. 
/// /// Example matches X but not Y /// (matcher = cxxRecordDecl(isExpansionInMainFile()) /// \code /// #include <Y.h> /// class X {}; /// \endcode /// Y.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInMainFile, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); return SourceManager.isInMainFile( SourceManager.getExpansionLoc(Node.getBeginLoc())); } /// Matches AST nodes that were expanded within system-header-files. /// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInSystemHeader()) /// \code /// #include <SystemHeader.h> /// class X {}; /// \endcode /// SystemHeader.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } return SourceManager.isInSystemHeader(ExpansionLoc); } /// Matches AST nodes that were expanded within files whose name is /// partially matching a given regex. 
/// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")) /// \code /// #include "ASTMatcher.h" /// class X {}; /// \endcode /// ASTMatcher.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER_P(isExpansionInFileMatching, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), std::string, RegExp) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } auto FileEntry = SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc)); if (!FileEntry) { return false; } auto Filename = FileEntry->getName(); llvm::Regex RE(RegExp); return RE.match(Filename); } /// Matches statements that are (transitively) expanded from the named macro. /// Does not match if only part of the statement is expanded from that macro or /// if different parts of the the statement are expanded from different /// appearances of the macro. /// /// FIXME: Change to be a polymorphic matcher that works on any syntactic /// node. There's nothing `Stmt`-specific about it. AST_MATCHER_P(Stmt, isExpandedFromMacro, llvm::StringRef, MacroName) { // Verifies that the statement' beginning and ending are both expanded from // the same instance of the given macro. auto& Context = Finder->getASTContext(); llvm::Optional<SourceLocation> B = internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context); if (!B) return false; llvm::Optional<SourceLocation> E = internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context); if (!E) return false; return *B == *E; } /// Matches declarations. 
/// /// Examples matches \c X, \c C, and the friend declaration inside \c C; /// \code /// void X(); /// class C { /// friend X; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<Decl> decl; /// Matches a declaration of a linkage specification. /// /// Given /// \code /// extern "C" {} /// \endcode /// linkageSpecDecl() /// matches "extern "C" {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl> linkageSpecDecl; /// Matches a declaration of anything that could have a name. /// /// Example matches \c X, \c S, the anonymous union type, \c i, and \c U; /// \code /// typedef int X; /// struct S { /// union { /// int i; /// } U; /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl; /// Matches a declaration of label. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelDecl() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl; /// Matches a declaration of a namespace. /// /// Given /// \code /// namespace {} /// namespace test {} /// \endcode /// namespaceDecl() /// matches "namespace {}" and "namespace test {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl> namespaceDecl; /// Matches a declaration of a namespace alias. /// /// Given /// \code /// namespace test {} /// namespace alias = ::test; /// \endcode /// namespaceAliasDecl() /// matches "namespace alias" but not "namespace test" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl> namespaceAliasDecl; /// Matches class, struct, and union declarations. /// /// Example matches \c X, \c Z, \c U, and \c S /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl; /// Matches C++ class declarations. 
/// /// Example matches \c X, \c Z /// \code /// class X; /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl> cxxRecordDecl; /// Matches C++ class template declarations. /// /// Example matches \c Z /// \code /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl> classTemplateDecl; /// Matches C++ class template specializations. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// \endcode /// classTemplateSpecializationDecl() /// matches the specializations \c A<int> and \c A<double> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplateSpecializationDecl> classTemplateSpecializationDecl; /// Matches C++ class template partial specializations. /// /// Given /// \code /// template<class T1, class T2, int I> /// class A {}; /// /// template<class T, int I> /// class A<T, T*, I> {}; /// /// template<> /// class A<int, int, 1> {}; /// \endcode /// classTemplatePartialSpecializationDecl() /// matches the specialization \c A<T,T*,I> but not \c A<int,int,1> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplatePartialSpecializationDecl> classTemplatePartialSpecializationDecl; /// Matches declarator declarations (field, variable, function /// and non-type template parameter declarations). /// /// Given /// \code /// class X { int y; }; /// \endcode /// declaratorDecl() /// matches \c int y. extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl> declaratorDecl; /// Matches parameter variable declarations. /// /// Given /// \code /// void f(int x); /// \endcode /// parmVarDecl() /// matches \c int x. extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl> parmVarDecl; /// Matches C++ access specifier declarations. 
/// /// Given /// \code /// class C { /// public: /// int a; /// }; /// \endcode /// accessSpecDecl() /// matches 'public:' extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl> accessSpecDecl; /// Matches constructor initializers. /// /// Examples matches \c i(42). /// \code /// class C { /// C() : i(42) {} /// int i; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<CXXCtorInitializer> cxxCtorInitializer; /// Matches template arguments. /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgument() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument; /// Matches template name. /// /// Given /// \code /// template <typename T> class X { }; /// X<int> xi; /// \endcode /// templateName() /// matches 'X' in X<int>. extern const internal::VariadicAllOfMatcher<TemplateName> templateName; /// Matches non-type template parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// nonTypeTemplateParmDecl() /// matches 'N', but not 'T'. extern const internal::VariadicDynCastAllOfMatcher<Decl, NonTypeTemplateParmDecl> nonTypeTemplateParmDecl; /// Matches template type parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// templateTypeParmDecl() /// matches 'T', but not 'N'. extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl> templateTypeParmDecl; /// Matches public C++ declarations. /// /// Given /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; /// }; /// \endcode /// fieldDecl(isPublic()) /// matches 'int a;' AST_MATCHER(Decl, isPublic) { return Node.getAccess() == AS_public; } /// Matches protected C++ declarations. 
/// /// Given /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; /// }; /// \endcode /// fieldDecl(isProtected()) /// matches 'int b;' AST_MATCHER(Decl, isProtected) { return Node.getAccess() == AS_protected; } /// Matches private C++ declarations. /// /// Given /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; /// }; /// \endcode /// fieldDecl(isPrivate()) /// matches 'int c;' AST_MATCHER(Decl, isPrivate) { return Node.getAccess() == AS_private; } /// Matches non-static data members that are bit-fields. /// /// Given /// \code /// class C { /// int a : 2; /// int b; /// }; /// \endcode /// fieldDecl(isBitField()) /// matches 'int a;' but not 'int b;'. AST_MATCHER(FieldDecl, isBitField) { return Node.isBitField(); } /// Matches non-static data members that are bit-fields of the specified /// bit width. /// /// Given /// \code /// class C { /// int a : 2; /// int b : 4; /// int c : 2; /// }; /// \endcode /// fieldDecl(hasBitWidth(2)) /// matches 'int a;' and 'int c;' but not 'int b;'. AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) { return Node.isBitField() && Node.getBitWidthValue(Finder->getASTContext()) == Width; } /// Matches non-static data members that have an in-class initializer. /// /// Given /// \code /// class C { /// int a = 2; /// int b = 3; /// int c; /// }; /// \endcode /// fieldDecl(hasInClassInitializer(integerLiteral(equals(2)))) /// matches 'int a;' but not 'int b;'. /// fieldDecl(hasInClassInitializer(anything())) /// matches 'int a;' and 'int b;' but not 'int c;'. AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getInClassInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// Determines whether the function is "main", which is the entry point /// into an executable program. 
AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); }

/// Matches the specialized template of a specialization declaration.
///
/// Given
/// \code
///   template<typename T> class A {}; #1
///   template<> class A<int> {}; #2
/// \endcode
/// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl()))
///   matches '#2' with classTemplateDecl() matching the class template
///   declaration of 'A' at #1.
AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate,
              internal::Matcher<ClassTemplateDecl>, InnerMatcher) {
  const ClassTemplateDecl* Decl = Node.getSpecializedTemplate();
  return (Decl != nullptr &&
          InnerMatcher.matches(*Decl, Finder, Builder));
}

/// Matches a declaration that has been implicitly added
/// by the compiler (eg. implicit default/copy constructors).
AST_MATCHER(Decl, isImplicit) { return Node.isImplicit(); }

/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl that have at least one TemplateArgument matching the given
/// InnerMatcher.
///
/// Given
/// \code
///   template<typename T> class A {};
///   template<> class A<double> {};
///   A<int> a;
///
///   template<typename T> void f() {};
///   void func() { f<int>(); };
/// \endcode
///
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(
///     refersToType(asString("int"))))
///   matches the specialization \c A<int>
///
/// functionDecl(hasAnyTemplateArgument(refersToType(asString("int"))))
///   matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P(
    hasAnyTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    internal::Matcher<TemplateArgument>, InnerMatcher) {
  ArrayRef<TemplateArgument> List =
      internal::getTemplateSpecializationArgs(Node);
  return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder,
                             Builder);
}

/// Causes all nested matchers to be matched with the specified traversal kind.
/// /// Given /// \code /// void foo() /// { /// int i = 3.0; /// } /// \endcode /// The matcher /// \code /// traverse(TK_IgnoreImplicitCastsAndParentheses, /// varDecl(hasInitializer(floatLiteral().bind("init"))) /// ) /// \endcode /// matches the variable declaration with "init" bound to the "3.0". template <typename T> internal::Matcher<T> traverse(TraversalKind TK, const internal::Matcher<T> &InnerMatcher) { return internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>(); } template <typename T> internal::BindableMatcher<T> traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) { return internal::BindableMatcher<T>( internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>()); } template <typename... T> internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>> traverse(TraversalKind TK, const internal::VariadicOperatorMatcher<T...> &InnerMatcher) { return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>( TK, InnerMatcher); } template <template <typename ToArg, typename FromArg> class ArgumentAdapterT, typename T, typename ToTypes> internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>> traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor< ArgumentAdapterT, T, ToTypes> &InnerMatcher) { return internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>(TK, InnerMatcher); } template <template <typename T, typename P1> class MatcherT, typename P1, typename ReturnTypesF> internal::TraversalWrapper< internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>> traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1< MatcherT, P1, ReturnTypesF> 
&InnerMatcher) { return internal::TraversalWrapper< internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>( TK, InnerMatcher); } template <template <typename T, typename P1, typename P2> class MatcherT, typename P1, typename P2, typename ReturnTypesF> internal::TraversalWrapper< internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>> traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2< MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) { return internal::TraversalWrapper< internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>( TK, InnerMatcher); } /// Matches expressions that match InnerMatcher after any implicit AST /// nodes are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// class C {}; /// C a = C(); /// C b; /// C c = b; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr()))) /// \endcode /// would match the declarations for a, b, and c. /// While /// \code /// varDecl(hasInitializer(cxxConstructExpr())) /// \endcode /// only match the declarations for b and c. AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder); } /// Matches expressions that match InnerMatcher after any implicit casts /// are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = 0; /// const int c = a; /// int *d = arr; /// long e = (long) 0l; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringImpCasts(declRefExpr()))) /// \endcode /// would match the declarations for a, b, c, and d, but not e. /// While /// \code /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// \endcode /// only match the declarations for b, c, and d. 
AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after parentheses and /// casts are stripped off. /// /// Implicit and non-C Style casts are also discarded. /// Given /// \code /// int a = 0; /// char b = (0); /// void* c = reinterpret_cast<char*>(0); /// char d = char(0); /// \endcode /// The matcher /// varDecl(hasInitializer(ignoringParenCasts(integerLiteral()))) /// would match the declarations for a, b, c, and d. /// while /// varDecl(hasInitializer(integerLiteral())) /// only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after implicit casts and /// parentheses are stripped off. /// /// Explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = (0); /// const int c = a; /// int *d = (arr); /// long e = ((long) 0l); /// \endcode /// The matchers /// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr()))) /// would match the declarations for a, b, c, and d, but not e. /// while /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// would only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder); } /// Matches types that match InnerMatcher after any parens are stripped. /// /// Given /// \code /// void (*fp)(void); /// \endcode /// The matcher /// \code /// varDecl(hasType(pointerType(pointee(ignoringParens(functionType()))))) /// \endcode /// would match the declaration for fp. 
AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>, InnerMatcher, 0) { return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder); } /// Overload \c ignoringParens for \c Expr. /// /// Given /// \code /// const char* str = ("my-string"); /// \endcode /// The matcher /// \code /// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral()))) /// \endcode /// would match the implicit cast resulting from the assignment. AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>, InnerMatcher, 1) { const Expr *E = Node.IgnoreParens(); return InnerMatcher.matches(*E, Finder, Builder); } /// Matches expressions that are instantiation-dependent even if it is /// neither type- nor value-dependent. /// /// In the following example, the expression sizeof(sizeof(T() + T())) /// is instantiation-dependent (since it involves a template parameter T), /// but is neither type- nor value-dependent, since the type of the inner /// sizeof is known (std::size_t) and therefore the size of the outer /// sizeof is known. /// \code /// template<typename T> /// void f(T x, T y) { sizeof(sizeof(T() + T()); } /// \endcode /// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()) AST_MATCHER(Expr, isInstantiationDependent) { return Node.isInstantiationDependent(); } /// Matches expressions that are type-dependent because the template type /// is not yet instantiated. /// /// For example, the expressions "x" and "x + y" are type-dependent in /// the following code, but "y" is not type-dependent: /// \code /// template<typename T> /// void add(T x, int y) { /// x + y; /// } /// \endcode /// expr(isTypeDependent()) matches x + y AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); } /// Matches expression that are value-dependent because they contain a /// non-type template parameter. /// /// For example, the array bound of "Chars" in the following example is /// value-dependent. 
/// \code /// template<int Size> int f() { return Size; } /// \endcode /// expr(isValueDependent()) matches return Size AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl where the n'th TemplateArgument matches the given InnerMatcher. /// /// Given /// \code /// template<typename T, typename U> class A {}; /// A<bool, int> b; /// A<int, bool> c; /// /// template<typename T> void f() {} /// void func() { f<int>(); }; /// \endcode /// classTemplateSpecializationDecl(hasTemplateArgument( /// 1, refersToType(asString("int")))) /// matches the specialization \c A<bool, int> /// /// functionDecl(hasTemplateArgument(0, refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P2( hasTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); if (List.size() <= N) return false; return InnerMatcher.matches(List[N], Finder, Builder); } /// Matches if the number of template arguments equals \p N. /// /// Given /// \code /// template<typename T> struct C {}; /// C<int> c; /// \endcode /// classTemplateSpecializationDecl(templateArgumentCountIs(1)) /// matches C<int>. AST_POLYMORPHIC_MATCHER_P( templateArgumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType), unsigned, N) { return internal::getTemplateSpecializationArgs(Node).size() == N; } /// Matches a TemplateArgument that refers to a certain type. 
/// /// Given /// \code /// struct X {}; /// template<typename T> struct A {}; /// A<X> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(class(hasName("X"))))) /// matches the specialization \c A<X> AST_MATCHER_P(TemplateArgument, refersToType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Type) return false; return InnerMatcher.matches(Node.getAsType(), Finder, Builder); } /// Matches a TemplateArgument that refers to a certain template. /// /// Given /// \code /// template<template <typename> class S> class X {}; /// template<typename T> class Y {}; /// X<Y> xi; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToTemplate(templateName()))) /// matches the specialization \c X<Y> AST_MATCHER_P(TemplateArgument, refersToTemplate, internal::Matcher<TemplateName>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Template) return false; return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder); } /// Matches a canonical TemplateArgument that refers to a certain /// declaration. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToDeclaration(fieldDecl(hasName("next"))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, refersToDeclaration, internal::Matcher<Decl>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Declaration) return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder); return false; } /// Matches a sugar TemplateArgument that refers to a certain expression. 
/// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// templateSpecializationType(hasAnyTemplateArgument( /// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next")))))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Expression) return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder); return false; } /// Matches a TemplateArgument that is an integral value. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(isIntegral())) /// matches the implicit instantiation of C in C<42> /// with isIntegral() matching 42. AST_MATCHER(TemplateArgument, isIntegral) { return Node.getKind() == TemplateArgument::Integral; } /// Matches a TemplateArgument that referes to an integral type. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(refersToIntegralType(asString("int")))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, refersToIntegralType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Integral) return false; return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder); } /// Matches a TemplateArgument of integral type with a given value. /// /// Note that 'Value' is a string as the template argument's value is /// an arbitrary precision integer. 'Value' must be euqal to the canonical /// representation of that integral value in base 10. 
/// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(equalsIntegralValue("42"))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, equalsIntegralValue, std::string, Value) { if (Node.getKind() != TemplateArgument::Integral) return false; return Node.getAsIntegral().toString(10) == Value; } /// Matches an Objective-C autorelease pool statement. /// /// Given /// \code /// @autoreleasepool { /// int x = 0; /// } /// \endcode /// autoreleasePoolStmt(stmt()) matches the declaration of "x" /// inside the autorelease pool. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt> autoreleasePoolStmt; /// Matches any value declaration. /// /// Example matches A, B, C and F /// \code /// enum X { A, B, C }; /// void F(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl; /// Matches C++ constructor declarations. /// /// Example matches Foo::Foo() and Foo::Foo(int) /// \code /// class Foo { /// public: /// Foo(); /// Foo(int); /// int DoSomething(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl> cxxConstructorDecl; /// Matches explicit C++ destructor declarations. /// /// Example matches Foo::~Foo() /// \code /// class Foo { /// public: /// virtual ~Foo(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl> cxxDestructorDecl; /// Matches enum declarations. /// /// Example matches X /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl; /// Matches enum constants. /// /// Example matches A, B, C /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl> enumConstantDecl; /// Matches tag declarations. 
/// /// Example matches X, Z, U, S, E /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// enum E { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl; /// Matches method declarations. /// /// Example matches y /// \code /// class X { void y(); }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> cxxMethodDecl; /// Matches conversion operator declarations. /// /// Example matches the operator. /// \code /// class X { operator int() const; }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl> cxxConversionDecl; /// Matches user-defined and implicitly generated deduction guide. /// /// Example matches the deduction guide. /// \code /// template<typename T> /// class X { X(int) }; /// X(int) -> X<int>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl> cxxDeductionGuideDecl; /// Matches variable declarations. /// /// Note: this does not match declarations of member variables, which are /// "field" declarations in Clang parlance. /// /// Example matches a /// \code /// int a; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl; /// Matches field declarations. /// /// Given /// \code /// class X { int m; }; /// \endcode /// fieldDecl() /// matches 'm'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl; /// Matches indirect field declarations. /// /// Given /// \code /// struct X { struct { int a; }; }; /// \endcode /// indirectFieldDecl() /// matches 'a'. extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl> indirectFieldDecl; /// Matches function declarations. /// /// Example matches f /// \code /// void f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl> functionDecl; /// Matches C++ function template declarations. 
/// /// Example matches f /// \code /// template<class T> void f(T t) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl> functionTemplateDecl; /// Matches friend declarations. /// /// Given /// \code /// class X { friend void foo(); }; /// \endcode /// friendDecl() /// matches 'friend void foo()'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl; /// Matches statements. /// /// Given /// \code /// { ++a; } /// \endcode /// stmt() /// matches both the compound statement '{ ++a; }' and '++a'. extern const internal::VariadicAllOfMatcher<Stmt> stmt; /// Matches declaration statements. /// /// Given /// \code /// int a; /// \endcode /// declStmt() /// matches 'int a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt; /// Matches member expressions. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// int a; static int b; /// }; /// \endcode /// memberExpr() /// matches this->x, x, y.x, a, this->b extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr; /// Matches unresolved member expressions. /// /// Given /// \code /// struct X { /// template <class T> void f(); /// void g(); /// }; /// template <class T> void h() { X x; x.f<T>(); x.g(); } /// \endcode /// unresolvedMemberExpr() /// matches x.f<T> extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr> unresolvedMemberExpr; /// Matches member expressions where the actual member referenced could not be /// resolved because the base expression or the member name was dependent. /// /// Given /// \code /// template <class T> void f() { T t; t.g(); } /// \endcode /// cxxDependentScopeMemberExpr() /// matches t.g extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDependentScopeMemberExpr> cxxDependentScopeMemberExpr; /// Matches call expressions. 
/// /// Example matches x.y() and y() /// \code /// X x; /// x.y(); /// y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr; /// Matches call expressions which were resolved using ADL. /// /// Example matches y(x) but not y(42) or NS::y(x). /// \code /// namespace NS { /// struct X {}; /// void y(X); /// } /// /// void y(...); /// /// void test() { /// NS::X x; /// y(x); // Matches /// NS::y(x); // Doesn't match /// y(42); // Doesn't match /// using NS::y; /// y(x); // Found by both unqualified lookup and ADL, doesn't match // } /// \endcode AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); } /// Matches lambda expressions. /// /// Example matches [&](){return 5;} /// \code /// [&](){return 5;} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr; /// Matches member call expressions. /// /// Example matches x.y() /// \code /// X x; /// x.y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr> cxxMemberCallExpr; /// Matches ObjectiveC Message invocation expressions. /// /// The innermost message send invokes the "alloc" class method on the /// NSString class, while the outermost message send invokes the /// "initWithString" instance method on the object returned from /// NSString's "alloc". This matcher should match both message sends. /// \code /// [[NSString alloc] initWithString:@"Hello"] /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr> objcMessageExpr; /// Matches Objective-C interface declarations. /// /// Example matches Foo /// \code /// @interface Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl> objcInterfaceDecl; /// Matches Objective-C implementation declarations. 
/// /// Example matches Foo /// \code /// @implementation Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl> objcImplementationDecl; /// Matches Objective-C protocol declarations. /// /// Example matches FooDelegate /// \code /// @protocol FooDelegate /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl> objcProtocolDecl; /// Matches Objective-C category declarations. /// /// Example matches Foo (Additions) /// \code /// @interface Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl> objcCategoryDecl; /// Matches Objective-C category definitions. /// /// Example matches Foo (Additions) /// \code /// @implementation Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl> objcCategoryImplDecl; /// Matches Objective-C method declarations. /// /// Example matches both declaration and definition of -[Foo method] /// \code /// @interface Foo /// - (void)method; /// @end /// /// @implementation Foo /// - (void)method {} /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl> objcMethodDecl; /// Matches block declarations. /// /// Example matches the declaration of the nameless block printing an input /// integer. /// /// \code /// myFunc(^(int p) { /// printf("%d", p); /// }) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl> blockDecl; /// Matches Objective-C instance variable declarations. /// /// Example matches _enabled /// \code /// @implementation Foo { /// BOOL _enabled; /// } /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl> objcIvarDecl; /// Matches Objective-C property declarations. 
/// /// Example matches enabled /// \code /// @interface Foo /// @property BOOL enabled; /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl> objcPropertyDecl; /// Matches Objective-C \@throw statements. /// /// Example matches \@throw /// \code /// @throw obj; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt> objcThrowStmt; /// Matches Objective-C @try statements. /// /// Example matches @try /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt> objcTryStmt; /// Matches Objective-C @catch statements. /// /// Example matches @catch /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt> objcCatchStmt; /// Matches Objective-C @finally statements. /// /// Example matches @finally /// \code /// @try {} /// @finally {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt> objcFinallyStmt; /// Matches expressions that introduce cleanups to be run at the end /// of the sub-expression's evaluation. /// /// Example matches std::string() /// \code /// const std::string str = std::string(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups> exprWithCleanups; /// Matches init list expressions. /// /// Given /// \code /// int a[] = { 1, 2 }; /// struct B { int x, y; }; /// B b = { 5, 6 }; /// \endcode /// initListExpr() /// matches "{ 1, 2 }" and "{ 5, 6 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr> initListExpr; /// Matches the syntactic form of init list expressions /// (if expression have it). AST_MATCHER_P(InitListExpr, hasSyntacticForm, internal::Matcher<Expr>, InnerMatcher) { const Expr *SyntForm = Node.getSyntacticForm(); return (SyntForm != nullptr && InnerMatcher.matches(*SyntForm, Finder, Builder)); } /// Matches C++ initializer list expressions. 
/// /// Given /// \code /// std::vector<int> a({ 1, 2, 3 }); /// std::vector<int> b = { 4, 5 }; /// int c[] = { 6, 7 }; /// std::pair<int, int> d = { 8, 9 }; /// \endcode /// cxxStdInitializerListExpr() /// matches "{ 1, 2, 3 }" and "{ 4, 5 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStdInitializerListExpr> cxxStdInitializerListExpr; /// Matches implicit initializers of init list expressions. /// /// Given /// \code /// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 }; /// \endcode /// implicitValueInitExpr() /// matches "[0].y" (implicitly) extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr> implicitValueInitExpr; /// Matches paren list expressions. /// ParenListExprs don't have a predefined type and are used for late parsing. /// In the final AST, they can be met in template declarations. /// /// Given /// \code /// template<typename T> class X { /// void f() { /// X x(*this); /// int a = 0, b = 1; int i = (a, b); /// } /// }; /// \endcode /// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b) /// has a predefined type and is a ParenExpr, not a ParenListExpr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr> parenListExpr; /// Matches substitutions of non-type template parameters. /// /// Given /// \code /// template <int N> /// struct A { static const int n = N; }; /// struct B : public A<42> {}; /// \endcode /// substNonTypeTemplateParmExpr() /// matches "N" in the right-hand side of "static const int n = N;" extern const internal::VariadicDynCastAllOfMatcher<Stmt, SubstNonTypeTemplateParmExpr> substNonTypeTemplateParmExpr; /// Matches using declarations. /// /// Given /// \code /// namespace X { int x; } /// using X::x; /// \endcode /// usingDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl; /// Matches using namespace declarations. 
/// /// Given /// \code /// namespace X { int x; } /// using namespace X; /// \endcode /// usingDirectiveDecl() /// matches \code using namespace X \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl> usingDirectiveDecl; /// Matches reference to a name that can be looked up during parsing /// but could not be resolved to a specific declaration. /// /// Given /// \code /// template<typename T> /// T foo() { T a; return a; } /// template<typename T> /// void bar() { /// foo<T>(); /// } /// \endcode /// unresolvedLookupExpr() /// matches \code foo<T>() \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr> unresolvedLookupExpr; /// Matches unresolved using value declarations. /// /// Given /// \code /// template<typename X> /// class C : private X { /// using X::x; /// }; /// \endcode /// unresolvedUsingValueDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingValueDecl> unresolvedUsingValueDecl; /// Matches unresolved using value declarations that involve the /// typename. /// /// Given /// \code /// template <typename T> /// struct Base { typedef T Foo; }; /// /// template<typename T> /// struct S : private Base<T> { /// using typename Base<T>::Foo; /// }; /// \endcode /// unresolvedUsingTypenameDecl() /// matches \code using Base<T>::Foo \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingTypenameDecl> unresolvedUsingTypenameDecl; /// Matches a constant expression wrapper. /// /// Example matches the constant in the case statement: /// (matcher = constantExpr()) /// \code /// switch (a) { /// case 37: break; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr> constantExpr; /// Matches parentheses used in expressions. 
/// /// Example matches (foo() + 1) /// \code /// int foo() { return 1; } /// int a = (foo() + 1); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr; /// Matches constructor call expressions (including implicit ones). /// /// Example matches string(ptr, n) and ptr within arguments of f /// (matcher = cxxConstructExpr()) /// \code /// void f(const string &a, const string &b); /// char *ptr; /// int n; /// f(string(ptr, n), ptr); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr> cxxConstructExpr; /// Matches unresolved constructor call expressions. /// /// Example matches T(t) in return statement of f /// (matcher = cxxUnresolvedConstructExpr()) /// \code /// template <typename T> /// void f(const T& t) { return T(t); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXUnresolvedConstructExpr> cxxUnresolvedConstructExpr; /// Matches implicit and explicit this expressions. /// /// Example matches the implicit this expression in "return i". /// (matcher = cxxThisExpr()) /// \code /// struct foo { /// int i; /// int f() { return i; } /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr> cxxThisExpr; /// Matches nodes where temporaries are created. /// /// Example matches FunctionTakesString(GetStringByValue()) /// (matcher = cxxBindTemporaryExpr()) /// \code /// FunctionTakesString(GetStringByValue()); /// FunctionTakesStringByPointer(GetStringPointer()); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr> cxxBindTemporaryExpr; /// Matches nodes where temporaries are materialized. 
/// /// Example: Given /// \code /// struct T {void func();}; /// T f(); /// void g(T); /// \endcode /// materializeTemporaryExpr() matches 'f()' in these statements /// \code /// T u(f()); /// g(f()); /// f().func(); /// \endcode /// but does not match /// \code /// f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, MaterializeTemporaryExpr> materializeTemporaryExpr; /// Matches new expressions. /// /// Given /// \code /// new X; /// \endcode /// cxxNewExpr() /// matches 'new X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr; /// Matches delete expressions. /// /// Given /// \code /// delete X; /// \endcode /// cxxDeleteExpr() /// matches 'delete X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> cxxDeleteExpr; /// Matches noexcept expressions. /// /// Given /// \code /// bool a() noexcept; /// bool b() noexcept(true); /// bool c() noexcept(false); /// bool d() noexcept(noexcept(a())); /// bool e = noexcept(b()) || noexcept(c()); /// \endcode /// cxxNoexceptExpr() /// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`. /// doesn't match the noexcept specifier in the declarations a, b, c or d. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr> cxxNoexceptExpr; /// Matches array subscript expressions. /// /// Given /// \code /// int i = a[1]; /// \endcode /// arraySubscriptExpr() /// matches "a[1]" extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr> arraySubscriptExpr; /// Matches the value of a default argument at the call site. /// /// Example matches the CXXDefaultArgExpr placeholder inserted for the /// default value of the second parameter in the call expression f(42) /// (matcher = cxxDefaultArgExpr()) /// \code /// void f(int x, int y = 0); /// f(42); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr> cxxDefaultArgExpr; /// Matches overloaded operator calls. 
/// /// Note that if an operator isn't overloaded, it won't match. Instead, use /// binaryOperator matcher. /// Currently it does not match operators such as new delete. /// FIXME: figure out why these do not match? /// /// Example matches both operator<<((o << b), c) and operator<<(o, b) /// (matcher = cxxOperatorCallExpr()) /// \code /// ostream &operator<< (ostream &out, int i) { }; /// ostream &o; int b = 1, c = 1; /// o << b << c; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr> cxxOperatorCallExpr; /// Matches expressions. /// /// Example matches x() /// \code /// void f() { x(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr; /// Matches expressions that refer to declarations. /// /// Example matches x in if (x) /// \code /// bool x; /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr; /// Matches a reference to an ObjCIvar. /// /// Example: matches "a" in "init" method: /// \code /// @implementation A { /// NSString *a; /// } /// - (void) init { /// a = @"hello"; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr; /// Matches a reference to a block. /// /// Example: matches "^{}": /// \code /// void f() { ^{}(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr; /// Matches if statements. /// /// Example matches 'if (x) {}' /// \code /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt; /// Matches for statements. /// /// Example matches 'for (;;) {}' /// \code /// for (;;) {} /// int i[] = {1, 2, 3}; for (auto a : i); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt; /// Matches the increment statement of a for loop. 
/// /// Example: /// forStmt(hasIncrement(unaryOperator(hasOperatorName("++")))) /// matches '++x' in /// \code /// for (x; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Increment = Node.getInc(); return (Increment != nullptr && InnerMatcher.matches(*Increment, Finder, Builder)); } /// Matches the initialization statement of a for loop. /// /// Example: /// forStmt(hasLoopInit(declStmt())) /// matches 'int x = 0' in /// \code /// for (int x = 0; x < N; ++x) { } /// \endcode AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Init = Node.getInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches range-based for statements. /// /// cxxForRangeStmt() matches 'for (auto a : i)' /// \code /// int i[] = {1, 2, 3}; for (auto a : i); /// for(int j = 0; j < 5; ++j); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt> cxxForRangeStmt; /// Matches the initialization statement of a for loop. /// /// Example: /// forStmt(hasLoopVariable(anything())) /// matches 'int x' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>, InnerMatcher) { const VarDecl *const Var = Node.getLoopVariable(); return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder)); } /// Matches the range initialization statement of a for loop. /// /// Example: /// forStmt(hasRangeInit(anything())) /// matches 'a' in /// \code /// for (int x : a) { } /// \endcode AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>, InnerMatcher) { const Expr *const Init = Node.getRangeInit(); return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder)); } /// Matches while statements. /// /// Given /// \code /// while (true) {} /// \endcode /// whileStmt() /// matches 'while (true) {}'. 
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt; /// Matches do statements. /// /// Given /// \code /// do {} while (true); /// \endcode /// doStmt() /// matches 'do {} while(true)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt; /// Matches break statements. /// /// Given /// \code /// while (true) { break; } /// \endcode /// breakStmt() /// matches 'break' extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt; /// Matches continue statements. /// /// Given /// \code /// while (true) { continue; } /// \endcode /// continueStmt() /// matches 'continue' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt> continueStmt; /// Matches return statements. /// /// Given /// \code /// return 1; /// \endcode /// returnStmt() /// matches 'return 1' extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt; /// Matches goto statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// gotoStmt() /// matches 'goto FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt; /// Matches label statements. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelStmt() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt; /// Matches address of label statements (GNU extension). /// /// Given /// \code /// FOO: bar(); /// void *ptr = &&FOO; /// goto *bar; /// \endcode /// addrLabelExpr() /// matches '&&FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr> addrLabelExpr; /// Matches switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchStmt() /// matches 'switch(a)'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt; /// Matches case and default statements inside switch statements. 
/// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchCase() /// matches 'case 42:' and 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase; /// Matches case statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// caseStmt() /// matches 'case 42:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt; /// Matches default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// defaultStmt() /// matches 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt> defaultStmt; /// Matches compound statements. /// /// Example matches '{}' and '{{}}' in 'for (;;) {{}}' /// \code /// for (;;) {{}} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt> compoundStmt; /// Matches catch statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxCatchStmt() /// matches 'catch(int i)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt> cxxCatchStmt; /// Matches try statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxTryStmt() /// matches 'try {}' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt; /// Matches throw expressions. /// /// \code /// try { throw 5; } catch(int i) {} /// \endcode /// cxxThrowExpr() /// matches 'throw 5' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr> cxxThrowExpr; /// Matches null statements. /// /// \code /// foo();; /// \endcode /// nullStmt() /// matches the second ';' extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt; /// Matches asm statements. 
/// /// \code /// int i = 100; /// __asm("mov al, 2"); /// \endcode /// asmStmt() /// matches '__asm("mov al, 2")' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt; /// Matches bool literals. /// /// Example matches true /// \code /// true /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr> cxxBoolLiteral; /// Matches string literals (also matches wide string literals). /// /// Example matches "abcd", L"abcd" /// \code /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral> stringLiteral; /// Matches character literals (also matches wchar_t). /// /// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral), /// though. /// /// Example matches 'a', L'a' /// \code /// char ch = 'a'; /// wchar_t chw = L'a'; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral> characterLiteral; /// Matches integer literals of all sizes / encodings, e.g. /// 1, 1L, 0x1 and 1U. /// /// Does not match character-encoded integers such as L'a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral> integerLiteral; /// Matches float literals of all sizes / encodings, e.g. /// 1.0, 1.0f, 1.0L and 1e10. /// /// Does not match implicit conversions such as /// \code /// float a = 10; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral> floatLiteral; /// Matches imaginary literals, which are based on integer and floating /// point literals e.g.: 1i, 1.0i extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral> imaginaryLiteral; /// Matches fixed point literals extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral> fixedPointLiteral; /// Matches user defined literal operator call. 
/// /// Example match: "foo"_suffix extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral> userDefinedLiteral; /// Matches compound (i.e. non-scalar) literals /// /// Example match: {1}, (1, 2) /// \code /// int array[4] = {1}; /// vector int myvec = (vector int)(1, 2); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr> compoundLiteralExpr; /// Matches nullptr literal. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr> cxxNullPtrLiteralExpr; /// Matches GNU __builtin_choose_expr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr> chooseExpr; /// Matches GNU __null expression. extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr> gnuNullExpr; /// Matches atomic builtins. /// Example matches __atomic_load_n(ptr, 1) /// \code /// void foo() { int *ptr; __atomic_load_n(ptr, 1); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr; /// Matches statement expression (GNU extension). /// /// Example match: ({ int X = 4; X; }) /// \code /// int C = ({ int X = 4; X; }); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr; /// Matches binary operator expressions. /// /// Example matches a || b /// \code /// !(a || b) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator> binaryOperator; /// Matches unary operator expressions. /// /// Example matches !a /// \code /// !a || b /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator> unaryOperator; /// Matches conditional operator expressions. /// /// Example matches a ? b : c /// \code /// (a ? b : c) + 42 /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator> conditionalOperator; /// Matches binary conditional operator expressions (GNU extension). 
/// /// Example matches a ?: b /// \code /// (a ?: b) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryConditionalOperator> binaryConditionalOperator; /// Matches opaque value expressions. They are used as helpers /// to reference another expressions and can be met /// in BinaryConditionalOperators, for example. /// /// Example matches 'a' /// \code /// (a ?: c) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr> opaqueValueExpr; /// Matches a C++ static_assert declaration. /// /// Example: /// staticAssertExpr() /// matches /// static_assert(sizeof(S) == sizeof(int)) /// in /// \code /// struct S { /// int x; /// }; /// static_assert(sizeof(S) == sizeof(int)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl> staticAssertDecl; /// Matches a reinterpret_cast expression. /// /// Either the source expression or the destination type can be matched /// using has(), but hasDestinationType() is more specific and can be /// more readable. /// /// Example matches reinterpret_cast<char*>(&p) in /// \code /// void* p = reinterpret_cast<char*>(&p); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr> cxxReinterpretCastExpr; /// Matches a C++ static_cast expression. /// /// \see hasDestinationType /// \see reinterpretCast /// /// Example: /// cxxStaticCastExpr() /// matches /// static_cast<long>(8) /// in /// \code /// long eight(static_cast<long>(8)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr> cxxStaticCastExpr; /// Matches a dynamic_cast expression. /// /// Example: /// cxxDynamicCastExpr() /// matches /// dynamic_cast<D*>(&b); /// in /// \code /// struct B { virtual ~B() {} }; struct D : B {}; /// B b; /// D* p = dynamic_cast<D*>(&b); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr> cxxDynamicCastExpr; /// Matches a const_cast expression. 
/// /// Example: Matches const_cast<int*>(&r) in /// \code /// int n = 42; /// const int &r(n); /// int* p = const_cast<int*>(&r); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr> cxxConstCastExpr; /// Matches a C-style cast expression. /// /// Example: Matches (int) 2.2f in /// \code /// int i = (int) 2.2f; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr> cStyleCastExpr; /// Matches explicit cast expressions. /// /// Matches any cast expression written in user code, whether it be a /// C-style cast, a functional-style cast, or a keyword cast. /// /// Does not match implicit conversions. /// /// Note: the name "explicitCast" is chosen to match Clang's terminology, as /// Clang uses the term "cast" to apply to implicit conversions as well as to /// actual cast expressions. /// /// \see hasDestinationType. /// /// Example: matches all five of the casts in /// \code /// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42))))) /// \endcode /// but does not match the implicit conversion in /// \code /// long ell = 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr> explicitCastExpr; /// Matches the implicit cast nodes of Clang's AST. /// /// This matches many different places, including function call return value /// eliding, as well as any type conversions. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr> implicitCastExpr; /// Matches any cast nodes of Clang's AST. 
/// /// Example: castExpr() matches each of the following: /// \code /// (int) 3; /// const_cast<Expr *>(SubExpr); /// char c = 0; /// \endcode /// but does not match /// \code /// int i = (0); /// int k = 0; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr; /// Matches functional cast expressions /// /// Example: Matches Foo(bar); /// \code /// Foo f = bar; /// Foo g = (Foo) bar; /// Foo h = Foo(bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr> cxxFunctionalCastExpr; /// Matches functional cast expressions having N != 1 arguments /// /// Example: Matches Foo(bar, bar) /// \code /// Foo h = Foo(bar, bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr> cxxTemporaryObjectExpr; /// Matches predefined identifier expressions [C99 6.4.2.2]. /// /// Example: Matches __func__ /// \code /// printf("%s", __func__); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr> predefinedExpr; /// Matches C99 designated initializer expressions [C99 6.7.8]. /// /// Example: Matches { [2].y = 1.0, [0].x = 1.0 } /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr> designatedInitExpr; /// Matches designated initializer expressions that contain /// a specific number of designators. /// /// Example: Given /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }; /// \endcode /// designatorCountIs(2) /// matches '{ [2].y = 1.0, [0].x = 1.0 }', /// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'. AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) { return Node.size() == N; } /// Matches \c QualTypes in the clang AST. extern const internal::VariadicAllOfMatcher<QualType> qualType; /// Matches \c Types in the clang AST. 
extern const internal::VariadicAllOfMatcher<Type> type;

/// Matches \c TypeLocs in the clang AST.
extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc;

/// Matches if any of the given matchers matches.
///
/// Unlike \c anyOf, \c eachOf will generate a match result for each
/// matching submatcher.
///
/// For example, in:
/// \code
///   class A { int a; int b; };
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")),
///                        has(fieldDecl(hasName("b")).bind("v"))))
/// \endcode
/// will generate two results binding "v", the first of which binds
/// the field declaration of \c a, the second the field declaration of
/// \c b.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    eachOf;

/// Matches if any of the given matchers matches.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    anyOf;

/// Matches if all given matchers match.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<
    2, std::numeric_limits<unsigned>::max()>
    allOf;

/// Matches any node regardless of the submatcher.
///
/// However, \c optionally will retain any bindings generated by the submatcher.
/// Useful when additional information which may or may not be present about a
/// main matching node is desired.
///
/// For example, in:
/// \code
///   class Foo {
///     int bar;
///   }
/// \endcode
/// The matcher:
/// \code
///   cxxRecordDecl(
///     optionally(has(
///       fieldDecl(hasName("bar")).bind("var")
///   ))).bind("record")
/// \endcode
/// will produce a result binding for both "record" and "var".
/// The matcher will produce a "record" binding even if there is no data
/// member named "bar" in that class.
///
/// Usable as: Any Matcher
extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally;

/// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL)
///
/// Given
/// \code
///   Foo x = bar;
///   int y = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr()
///   matches \c sizeof(x) and \c alignof(x)
extern const internal::VariadicDynCastAllOfMatcher<Stmt,
                                                   UnaryExprOrTypeTraitExpr>
    unaryExprOrTypeTraitExpr;

/// Matches unary expressions that have a specific type of argument.
///
/// Given
/// \code
///   int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
///   matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  // Delegate to the inner matcher on the operand's type (the type named in
  // sizeof(T)/alignof(T), or the type of the argument expression).
  const QualType ArgumentType = Node.getTypeOfArgument();
  return InnerMatcher.matches(ArgumentType, Finder, Builder);
}

/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
///   int x;
///   int s = sizeof(x) + alignof(x)
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
///   matches \c sizeof(x)
///
/// If the matcher is used from clang-query, UnaryExprOrTypeTrait parameter
/// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  return Node.getKind() == Kind;
}

/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::Matcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  // Accepts both UETT_AlignOf (C++11 alignof) and UETT_PreferredAlignOf
  // (GNU __alignof__).
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)),
            InnerMatcher)));
}

/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::Matcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}

/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
///   namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
  return internal::Matcher<NamedDecl>(
      new internal::HasNameMatcher({std::string(Name)}));
}

/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
///   hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
///   anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>, StringRef,
                                        internal::hasAnyNameFunc>
    hasAnyName;

/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
///   namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_P(NamedDecl, matchesName, std::string, RegExp) {
  assert(!RegExp.empty());
  // Prepend "::" so anchored patterns like "::X" can match the fully
  // qualified name of a top-level declaration.
  std::string FullNameString = "::" + Node.getQualifiedNameAsString();
  llvm::Regex RE(RegExp);
  return RE.match(FullNameString);
}

/// Matches overloaded operator names.
/// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// Given: /// \code /// class A { int operator*(); }; /// const A &operator<<(const A &a, const A &b); /// A a; /// a << a; // <-- This matches /// \endcode /// /// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<"))) matches the /// specified line and /// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*"))) /// matches the declaration of \c A. /// /// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl> inline internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)> hasOverloadedOperatorName(StringRef Name) { return internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>( {std::string(Name)}); } /// Matches overloaded operator names. /// /// Matches overloaded operator names specified in strings without the /// "operator" prefix: e.g. "<<". /// /// hasAnyOverloadesOperatorName("+", "-") /// Is equivalent to /// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-")) extern const internal::VariadicFunction< internal::PolymorphicMatcherWithParam1< internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>, StringRef, internal::hasAnyOverloadedOperatorNameFunc> hasAnyOverloadedOperatorName; /// Matches C++ classes that are directly or indirectly derived from a class /// matching \c Base, or Objective-C classes that directly or indirectly /// subclass a class matching \c Base. /// /// Note that a class is not considered to be derived from itself. 
/// /// Example matches Y, Z, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("NSObject")) /// \code /// @interface NSObject @end /// @interface Bar : NSObject @end /// \endcode /// /// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl> AST_POLYMORPHIC_MATCHER_P( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base) { // Check if the node is a C++ struct/union/class. if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false); // The node must be an Objective-C class. const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder, /*Directly=*/false); } /// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Similar to \c isDerivedFrom(), but also matches classes that directly /// match \c Base. 
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // A class matches if it is \c Base itself or any (transitive) subclass.
  const auto M = anyOf(Base, isDerivedFrom(Base));

  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);

  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}

/// Overloaded method as shortcut for
/// \c isSameOrDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isSameOrDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  if (BaseName.empty())
    return false;

  const auto M = isSameOrDerivedFrom(hasName(BaseName));

  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);

  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}

/// Matches C++ or Objective-C classes that are directly derived from a class
/// matching \c Base.
///
/// Note that a class is not considered to be derived from itself.
///
/// Example matches Y, C (Base == hasName("X"))
/// \code
///   class X;
///   class Y : public X {};  // directly derived
///   class Z : public Y {};  // indirectly derived
///   typedef X A;
///   typedef A B;
///   class C : public B {};  // derived from a typedef of X
/// \endcode
///
/// In the following example, Bar matches isDerivedFrom(hasName("X")):
/// \code
///   class Foo;
///   typedef Foo X;
///   class Bar : public Foo {};  // derived from a type that X is a typedef of
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    internal::Matcher<NamedDecl>, Base, 0) {
  // Check if the node is a C++ struct/union/class.
  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true);

  // The node must be an Objective-C class.
  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder,
                                        /*Directly=*/true);
}

/// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)).
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
    isDirectlyDerivedFrom,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl),
    std::string, BaseName, 1) {
  if (BaseName.empty())
    return false;

  const auto M = isDirectlyDerivedFrom(hasName(BaseName));

  if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node))
    return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder);

  const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node);
  return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder);
}

/// Matches the first method of a class or struct that satisfies \c
/// InnerMatcher.
///
/// Given:
/// \code
///   class A { void func(); };
///   class B { void member(); };
/// \endcode
///
/// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of
/// \c A but not \c B.
AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>,
              InnerMatcher) {
  return matchesFirstInPointerRange(InnerMatcher, Node.method_begin(),
                                    Node.method_end(), Finder, Builder);
}

/// Matches the generated class of lambda expressions.
///
/// Given:
/// \code
///   auto x = []{};
/// \endcode
///
/// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of
/// \c decltype(x)
AST_MATCHER(CXXRecordDecl, isLambda) {
  return Node.isLambda();
}

/// Matches AST nodes that have child AST nodes that match the
/// provided matcher.
///
/// Example matches X, Y
///   (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X")))
/// \code
///   class X {};  // Matches X, because X::X is a class of name X inside X.
/// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// Usable as: Any Matcher /// Note that has is direct matcher, so it also matches things like implicit /// casts and paren casts. If you are matching with expr then you should /// probably consider using ignoringParenImpCasts like: /// has(ignoringParenImpCasts(expr())). extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Z /// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasDescendantMatcher> hasDescendant; /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Y::X, Z::Y, Z::Y::X /// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; /// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X /// // inside Y. /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// As opposed to 'has', 'forEach' will cause a match for each result that /// matches instead of only on the first one. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher> forEach; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. 
/// /// Example matches X, A, A::X, B, B::C, B::C::X /// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; /// class A { class X {}; }; // Matches A, because A::X is a class of name /// // X inside A. /// class B { class C { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for /// each result that matches instead of only on the first one. /// /// Note: Recursively combined ForEachDescendant can cause many matches: /// cxxRecordDecl(forEachDescendant(cxxRecordDecl( /// forEachDescendant(cxxRecordDecl()) /// ))) /// will match 10 times (plus injected class name matches) on: /// \code /// class A { class B { class C { class D { class E {}; }; }; }; }; /// \endcode /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::ForEachDescendantMatcher> forEachDescendant; /// Matches if the node or any descendant matches. /// /// Generates results for each match. /// /// For example, in: /// \code /// class A { class B {}; class C {}; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(hasName("::A"), /// findAll(cxxRecordDecl(isDefinition()).bind("m"))) /// \endcode /// will generate results for \c A, \c B and \c C. /// /// Usable as: Any Matcher template <typename T> internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) { return eachOf(Matcher, forEachDescendant(Matcher)); } /// Matches AST nodes that have a parent that matches the provided /// matcher. /// /// Given /// \code /// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } } /// \endcode /// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }". 
/// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasParentMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasParent; /// Matches AST nodes that have an ancestor that matches the provided /// matcher. /// /// Given /// \code /// void f() { if (true) { int x = 42; } } /// void g() { for (;;) { int x = 43; } } /// \endcode /// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasAncestorMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasAncestor; /// Matches if the provided matcher does not match. /// /// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X")))) /// \code /// class X {}; /// class Y {}; /// \endcode /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> unless; /// Matches a node if the declaration associated with that node /// matches the given matcher. /// /// The associated declaration is: /// - for type nodes, the declaration of the underlying type /// - for CallExpr, the declaration of the callee /// - for MemberExpr, the declaration of the referenced member /// - for CXXConstructExpr, the declaration of the constructor /// - for CXXNewExpr, the declaration of the operator new /// - for ObjCIvarExpr, the declaration of the ivar /// /// For type nodes, hasDeclaration will generally match the declaration of the /// sugared type. Given /// \code /// class X {}; /// typedef X Y; /// Y y; /// \endcode /// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the /// typedefDecl. A common use case is to match the underlying, desugared type. 
/// This can be achieved by using the hasUnqualifiedDesugaredType matcher: /// \code /// varDecl(hasType(hasUnqualifiedDesugaredType( /// recordType(hasDeclaration(decl()))))) /// \endcode /// In this matcher, the decl will match the CXXRecordDecl of class X. /// /// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>, /// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>, /// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>, /// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>, /// Matcher<TagType>, Matcher<TemplateSpecializationType>, /// Matcher<TemplateTypeParmType>, Matcher<TypedefType>, /// Matcher<UnresolvedUsingType> inline internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)> hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) { return internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)>(InnerMatcher); } /// Matches a \c NamedDecl whose underlying declaration matches the given /// matcher. /// /// Given /// \code /// namespace N { template<class T> void f(T t); } /// template <class T> void g() { using N::f; f(T()); } /// \endcode /// \c unresolvedLookupExpr(hasAnyDeclaration( /// namedDecl(hasUnderlyingDecl(hasName("::N::f"))))) /// matches the use of \c f in \c g() . AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>, InnerMatcher) { const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl(); return UnderlyingDecl != nullptr && InnerMatcher.matches(*UnderlyingDecl, Finder, Builder); } /// Matches on the implicit object argument of a member call expression, after /// stripping off any parentheses or implicit casts. 
/// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y {}; /// void z(Y y, X x) { y.m(); (g()).m(); x.m(); } /// \endcode /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y"))))) /// matches `y.m()` and `(g()).m()`. /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m()`. /// cxxMemberCallExpr(on(callExpr())) /// matches `(g()).m()`. /// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument() ->IgnoreParenImpCasts(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches on the receiver of an ObjectiveC Message expression. /// /// Example /// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *"))); /// matches the [webView ...] message invocation. /// \code /// NSString *webViewJavaScript = ... /// UIWebView *webView = ... /// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>, InnerMatcher) { const QualType TypeDecl = Node.getReceiverType(); return InnerMatcher.matches(TypeDecl, Finder, Builder); } /// Returns true when the Objective-C method declaration is a class method. /// /// Example /// matcher = objcMethodDecl(isClassMethod()) /// matches /// \code /// @interface I + (void)foo; @end /// \endcode /// but not /// \code /// @interface I - (void)bar; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isClassMethod) { return Node.isClassMethod(); } /// Returns true when the Objective-C method declaration is an instance method. 
/// /// Example /// matcher = objcMethodDecl(isInstanceMethod()) /// matches /// \code /// @interface I - (void)bar; @end /// \endcode /// but not /// \code /// @interface I + (void)foo; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isInstanceMethod) { return Node.isInstanceMethod(); } /// Returns true when the Objective-C message is sent to a class. /// /// Example /// matcher = objcMessageExpr(isClassMessage()) /// matches /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode /// but not /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isClassMessage) { return Node.isClassMessage(); } /// Returns true when the Objective-C message is sent to an instance. /// /// Example /// matcher = objcMessageExpr(isInstanceMessage()) /// matches /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// but not /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isInstanceMessage) { return Node.isInstanceMessage(); } /// Matches if the Objective-C message is sent to an instance, /// and the inner matcher matches on that instance. /// /// For example the method call in /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// is matched by /// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x")))))) AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>, InnerMatcher) { const Expr *ReceiverNode = Node.getInstanceReceiver(); return (ReceiverNode != nullptr && InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches when BaseName == Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) { Selector Sel = Node.getSelector(); return BaseName.compare(Sel.getAsString()) == 0; } /// Matches when at least one of the supplied string equals to the /// Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:")); /// matches both of the expressions below: /// \code /// [myObj methodA:argA]; /// [myObj methodB:argB]; /// \endcode extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>, StringRef, internal::hasAnySelectorFunc> hasAnySelector; /// Matches ObjC selectors whose name contains /// a substring matched by the given RegExp. /// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, matchesSelector, std::string, RegExp) { assert(!RegExp.empty()); std::string SelectorString = Node.getSelector().getAsString(); llvm::Regex RE(RegExp); return RE.match(SelectorString); } /// Matches when the selector is the empty selector /// /// Matches only when the selector of the objCMessageExpr is NULL. This may /// represent an error condition in the tree! AST_MATCHER(ObjCMessageExpr, hasNullSelector) { return Node.getSelector().isNull(); } /// Matches when the selector is a Unary Selector /// /// matcher = objCMessageExpr(matchesSelector(hasUnarySelector()); /// matches self.bodyView in the code below, but NOT the outer message /// invocation of "loadHTMLString:baseURL:". 
/// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER(ObjCMessageExpr, hasUnarySelector) { return Node.getSelector().isUnarySelector(); } /// Matches when the selector is a keyword selector /// /// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame /// message expression in /// /// \code /// UIWebView *webView = ...; /// CGRect bodyFrame = webView.frame; /// bodyFrame.size.height = self.bodyContentHeight; /// webView.frame = bodyFrame; /// // ^---- matches here /// \endcode AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) { return Node.getSelector().isKeywordSelector(); } /// Matches when the selector has the specified number of arguments /// /// matcher = objCMessageExpr(numSelectorArgs(0)); /// matches self.bodyView in the code below /// /// matcher = objCMessageExpr(numSelectorArgs(2)); /// matches the invocation of "loadHTMLString:baseURL:" but not that /// of self.bodyView /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) { return Node.getSelector().getNumArgs() == N; } /// Matches if the call expression's callee expression matches. /// /// Given /// \code /// class Y { void x() { this->x(); x(); Y y; y.x(); } }; /// void f() { f(); } /// \endcode /// callExpr(callee(expr())) /// matches this->x(), x(), y.x(), f() /// with callee(...) /// matching this->x, x, y.x, f respectively /// /// Note: Callee cannot take the more general internal::Matcher<Expr> /// because this introduces ambiguous overloads with calls to Callee taking a /// internal::Matcher<Decl>, as the matcher hierarchy is purely /// implemented in terms of implicit casts. AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>, InnerMatcher) { const Expr *ExprNode = Node.getCallee(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the call expression's callee's declaration matches the /// given matcher. 
/// /// Example matches y.x() (matcher = callExpr(callee( /// cxxMethodDecl(hasName("x"))))) /// \code /// class Y { public: void x(); }; /// void z() { Y y; y.x(); } /// \endcode AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher, 1) { return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder); } /// Matches if the expression's or declaration's type matches a type /// matcher. /// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and U (matcher = typedefDecl(hasType(asString("int"))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// typedef int U; /// class Y { friend class X; }; /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl, ValueDecl), internal::Matcher<QualType>, InnerMatcher, 0) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return InnerMatcher.matches(QT, Finder, Builder); return false; } /// Overloaded to match the declaration of the expression's or value /// declaration's type. /// /// In case of a value declaration (for example a variable declaration), /// this resolves one layer of indirection. For example, in the value /// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of /// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the /// declaration of x. 
/// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// class Y { friend class X; }; /// \endcode /// /// Usable as: Matcher<Expr>, Matcher<ValueDecl> AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl), internal::Matcher<Decl>, InnerMatcher, 1) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder); return false; } /// Matches if the type location of the declarator decl's type matches /// the inner matcher. /// /// Given /// \code /// int x; /// \endcode /// declaratorDecl(hasTypeLoc(loc(asString("int")))) /// matches int x AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) { if (!Node.getTypeSourceInfo()) // This happens for example for implicit destructors. return false; return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder, Builder); } /// Matches if the matched type is represented by the given string. /// /// Given /// \code /// class Y { public: void x(); }; /// void z() { Y* y; y->x(); } /// \endcode /// cxxMemberCallExpr(on(hasType(asString("class Y *")))) /// matches y->x() AST_MATCHER_P(QualType, asString, std::string, Name) { return Name == Node.getAsString(); } /// Matches if the matched type is a pointer type and the pointee type /// matches the specified matcher. 
/// /// Example matches y->x() /// (matcher = cxxMemberCallExpr(on(hasType(pointsTo /// cxxRecordDecl(hasName("Y"))))))) /// \code /// class Y { public: void x(); }; /// void z() { Y *y; y->x(); } /// \endcode AST_MATCHER_P( QualType, pointsTo, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isAnyPointerType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Overloaded to match the pointee type's declaration. AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>, InnerMatcher, 1) { return pointsTo(qualType(hasDeclaration(InnerMatcher))) .matches(Node, Finder, Builder); } /// Matches if the matched type matches the unqualified desugared /// type of the matched node. /// /// For example, in: /// \code /// class A {}; /// using B = A; /// \endcode /// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches /// both B and A. AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>, InnerMatcher) { return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder, Builder); } /// Matches if the matched type is a reference type and the referenced /// type matches the specified matcher. /// /// Example matches X &x and const X &y /// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X")))))) /// \code /// class X { /// void a(X b) { /// X &x = b; /// const X &y = b; /// } /// }; /// \endcode AST_MATCHER_P(QualType, references, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isReferenceType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Matches QualTypes whose canonical type matches InnerMatcher. /// /// Given: /// \code /// typedef int &int_ref; /// int a; /// int_ref b = a; /// \endcode /// /// \c varDecl(hasType(qualType(referenceType()))))) will not match the /// declaration of b but \c /// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does. 
AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>, InnerMatcher) { if (Node.isNull()) return false; return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder); } /// Overloaded to match the referenced type's declaration. AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>, InnerMatcher, 1) { return references(qualType(hasDeclaration(InnerMatcher))) .matches(Node, Finder, Builder); } /// Matches on the implicit object argument of a member call expression. Unlike /// `on`, matches the argument directly without stripping away anything. /// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y { void g(); }; /// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); } /// \endcode /// cxxMemberCallExpr(onImplicitObjectArgument(hasType( /// cxxRecordDecl(hasName("Y"))))) /// matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`. /// cxxMemberCallExpr(on(callExpr())) /// does not match `(g()).m()`, because the parens are not ignored. /// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the type of the expression's implicit object argument either /// matches the InnerMatcher, or is a pointer to a type that matches the /// InnerMatcher. /// /// Given /// \code /// class Y { public: void m(); }; /// class X : public Y { void g(); }; /// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); } /// \endcode /// cxxMemberCallExpr(thisPointerType(hasDeclaration( /// cxxRecordDecl(hasName("Y"))))) /// matches `y.m()`, `p->m()` and `x.m()`. /// cxxMemberCallExpr(thisPointerType(hasDeclaration( /// cxxRecordDecl(hasName("X"))))) /// matches `x.g()`. 
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType, internal::Matcher<QualType>, InnerMatcher, 0) { return onImplicitObjectArgument( anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)))) .matches(Node, Finder, Builder); } /// Overloaded to match the type's declaration. AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType, internal::Matcher<Decl>, InnerMatcher, 1) { return onImplicitObjectArgument( anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)))) .matches(Node, Finder, Builder); } /// Matches a DeclRefExpr that refers to a declaration that matches the /// specified matcher. /// /// Example matches x in if(x) /// (matcher = declRefExpr(to(varDecl(hasName("x"))))) /// \code /// bool x; /// if (x) {} /// \endcode AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>, InnerMatcher) { const Decl *DeclNode = Node.getDecl(); return (DeclNode != nullptr && InnerMatcher.matches(*DeclNode, Finder, Builder)); } /// Matches a \c DeclRefExpr that refers to a declaration through a /// specific using shadow declaration. /// /// Given /// \code /// namespace a { void f() {} } /// using a::f; /// void g() { /// f(); // Matches this .. /// a::f(); // .. but not this. /// } /// \endcode /// declRefExpr(throughUsingDecl(anything())) /// matches \c f() AST_MATCHER_P(DeclRefExpr, throughUsingDecl, internal::Matcher<UsingShadowDecl>, InnerMatcher) { const NamedDecl *FoundDecl = Node.getFoundDecl(); if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl)) return InnerMatcher.matches(*UsingDecl, Finder, Builder); return false; } /// Matches an \c OverloadExpr if any of the declarations in the set of /// overloads matches the given matcher. 
/// /// Given /// \code /// template <typename T> void foo(T); /// template <typename T> void bar(T); /// template <typename T> void baz(T t) { /// foo(t); /// bar(t); /// } /// \endcode /// unresolvedLookupExpr(hasAnyDeclaration( /// functionTemplateDecl(hasName("foo")))) /// matches \c foo in \c foo(t); but not \c bar in \c bar(t); AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(), Node.decls_end(), Finder, Builder); } /// Matches the Decl of a DeclStmt which has a single declaration. /// /// Given /// \code /// int a, b; /// int c; /// \endcode /// declStmt(hasSingleDecl(anything())) /// matches 'int c;' but not 'int a, b;'. AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) { if (Node.isSingleDecl()) { const Decl *FoundDecl = Node.getSingleDecl(); return InnerMatcher.matches(*FoundDecl, Finder, Builder); } return false; } /// Matches a variable declaration that has an initializer expression /// that matches the given matcher. /// /// Example matches x (matcher = varDecl(hasInitializer(callExpr()))) /// \code /// bool y() { return true; } /// bool x = y(); /// \endcode AST_MATCHER_P( VarDecl, hasInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getAnyInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// \brief Matches a static variable with local scope. /// /// Example matches y (matcher = varDecl(isStaticLocal())) /// \code /// void f() { /// int x; /// static int y; /// } /// static int z; /// \endcode AST_MATCHER(VarDecl, isStaticLocal) { return Node.isStaticLocal(); } /// Matches a variable declaration that has function scope and is a /// non-static local variable. 
/// /// Example matches x (matcher = varDecl(hasLocalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasLocalStorage) { return Node.hasLocalStorage(); } /// Matches a variable declaration that does not have local storage. /// /// Example matches y and z (matcher = varDecl(hasGlobalStorage()) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode AST_MATCHER(VarDecl, hasGlobalStorage) { return Node.hasGlobalStorage(); } /// Matches a variable declaration that has automatic storage duration. /// /// Example matches x, but not y, z, or a. /// (matcher = varDecl(hasAutomaticStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasAutomaticStorageDuration) { return Node.getStorageDuration() == SD_Automatic; } /// Matches a variable declaration that has static storage duration. /// It includes the variable declared at namespace scope and those declared /// with "static" and "extern" storage class specifiers. /// /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// static int b; /// extern int c; /// varDecl(hasStaticStorageDuration()) /// matches the function declaration y, a, b and c. /// \endcode AST_MATCHER(VarDecl, hasStaticStorageDuration) { return Node.getStorageDuration() == SD_Static; } /// Matches a variable declaration that has thread storage duration. /// /// Example matches z, but not x, z, or a. /// (matcher = varDecl(hasThreadStorageDuration()) /// \code /// void f() { /// int x; /// static int y; /// thread_local int z; /// } /// int a; /// \endcode AST_MATCHER(VarDecl, hasThreadStorageDuration) { return Node.getStorageDuration() == SD_Thread; } /// Matches a variable declaration that is an exception variable from /// a C++ catch block, or an Objective-C \@catch statement. 
/// /// Example matches x (matcher = varDecl(isExceptionVariable()) /// \code /// void f(int y) { /// try { /// } catch (int x) { /// } /// } /// \endcode AST_MATCHER(VarDecl, isExceptionVariable) { return Node.isExceptionVariable(); } /// Checks that a call expression or a constructor call expression has /// a specific number of arguments (including absent default arguments). /// /// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2))) /// \code /// void f(int x, int y); /// f(0, 0); /// \endcode AST_POLYMORPHIC_MATCHER_P(argumentCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr, ObjCMessageExpr), unsigned, N) { return Node.getNumArgs() == N; } /// Matches the n'th argument of a call expression or a constructor /// call expression. /// /// Example matches y in x(y) /// (matcher = callExpr(hasArgument(0, declRefExpr()))) /// \code /// void x(int) { int y; x(y); } /// \endcode AST_POLYMORPHIC_MATCHER_P2(hasArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr, ObjCMessageExpr), unsigned, N, internal::Matcher<Expr>, InnerMatcher) { return (N < Node.getNumArgs() && InnerMatcher.matches( *Node.getArg(N)->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches the n'th item of an initializer list expression. /// /// Example matches y. /// (matcher = initListExpr(hasInit(0, expr()))) /// \code /// int x{y}. /// \endcode AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { return N < Node.getNumInits() && InnerMatcher.matches(*Node.getInit(N), Finder, Builder); } /// Matches declaration statements that contain a specific number of /// declarations. /// /// Example: Given /// \code /// int a, b; /// int c; /// int d = 2, e; /// \endcode /// declCountIs(2) /// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'. 
AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) {
  // Count the declarations held by this statement and compare against N.
  return static_cast<ptrdiff_t>(N) ==
         std::distance(Node.decl_begin(), Node.decl_end());
}

/// Matches the n'th declaration of a declaration statement.
///
/// Note that this does not work for global declarations because the AST
/// breaks up multiple-declaration DeclStmt's into multiple single-declaration
/// DeclStmt's.
/// Example: Given non-global declarations
/// \code
///   int a, b = 0;
///   int c;
///   int d = 2, e;
/// \endcode
/// declStmt(containsDeclaration(
///       0, varDecl(hasInitializer(anything()))))
///   matches only 'int d = 2, e;', and
/// declStmt(containsDeclaration(1, varDecl()))
/// \code
///   matches 'int a, b = 0' as well as 'int d = 2, e;'
///   but 'int c;' is not matched.
/// \endcode
AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N,
               internal::Matcher<Decl>, InnerMatcher) {
  const unsigned Available =
      std::distance(Node.decl_begin(), Node.decl_end());
  if (N >= Available)
    return false;
  // Step N declarations forward and run the inner matcher on that one.
  return InnerMatcher.matches(**std::next(Node.decl_begin(), N), Finder,
                              Builder);
}

/// Matches a C++ catch statement that has a catch-all handler.
///
/// Given
/// \code
///   try {
///     // ...
///   } catch (int) {
///     // ...
///   } catch (...) {
///     // ...
///   }
/// \endcode
/// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int).
AST_MATCHER(CXXCatchStmt, isCatchAll) {
  // A catch-all handler carries no exception declaration.
  return !Node.getExceptionDecl();
}

/// Matches a constructor initializer.
/// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl( /// hasAnyConstructorInitializer(anything()) /// ))) /// record matches Foo, hasAnyConstructorInitializer matches foo_(1) AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.init_begin(), Node.init_end(), Finder, Builder); } /// Matches the field declaration of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// forField(hasName("foo_")))))) /// matches Foo /// with forField matching foo_ AST_MATCHER_P(CXXCtorInitializer, forField, internal::Matcher<FieldDecl>, InnerMatcher) { const FieldDecl *NodeAsDecl = Node.getAnyMember(); return (NodeAsDecl != nullptr && InnerMatcher.matches(*NodeAsDecl, Finder, Builder)); } /// Matches the initializer expression of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// withInitializer(integerLiteral(equals(1))))))) /// matches Foo /// with withInitializer matching (1) AST_MATCHER_P(CXXCtorInitializer, withInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr* NodeAsExpr = Node.getInit(); return (NodeAsExpr != nullptr && InnerMatcher.matches(*NodeAsExpr, Finder, Builder)); } /// Matches a constructor initializer if it is explicitly written in /// code (as opposed to implicitly added by the compiler). 
/// /// Given /// \code /// struct Foo { /// Foo() { } /// Foo(int) : foo_("A") { } /// string foo_; /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten())) /// will match Foo(int), but not Foo() AST_MATCHER(CXXCtorInitializer, isWritten) { return Node.isWritten(); } /// Matches a constructor initializer if it is initializing a base, as /// opposed to a member. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer())) /// will match E(), but not match D(int). AST_MATCHER(CXXCtorInitializer, isBaseInitializer) { return Node.isBaseInitializer(); } /// Matches a constructor initializer if it is initializing a member, as /// opposed to a base. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer())) /// will match D(int), but not match E(). AST_MATCHER(CXXCtorInitializer, isMemberInitializer) { return Node.isMemberInitializer(); } /// Matches any argument of a call expression or a constructor call /// expression, or an ObjC-message-send expression. /// /// Given /// \code /// void x(int, int, int) { int y; x(1, y, 42); } /// \endcode /// callExpr(hasAnyArgument(declRefExpr())) /// matches x(1, y, 42) /// with hasAnyArgument(...) 
/// matching y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// void foo(I *i) { [i f:12]; } /// \endcode /// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12)))) /// matches [i f:12] AST_POLYMORPHIC_MATCHER_P(hasAnyArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), internal::Matcher<Expr>, InnerMatcher) { for (const Expr *Arg : Node.arguments()) { BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Arg, Finder, &Result)) { *Builder = std::move(Result); return true; } } return false; } /// Matches any capture of a lambda expression. /// /// Given /// \code /// void foo() { /// int x; /// auto f = [x](){}; /// } /// \endcode /// lambdaExpr(hasAnyCapture(anything())) /// matches [x](){}; AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>, InnerMatcher, 0) { for (const LambdaCapture &Capture : Node.captures()) { if (Capture.capturesVariable()) { BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) { *Builder = std::move(Result); return true; } } } return false; } /// Matches any capture of 'this' in a lambda expression. /// /// Given /// \code /// struct foo { /// void bar() { /// auto f = [this](){}; /// } /// } /// \endcode /// lambdaExpr(hasAnyCapture(cxxThisExpr())) /// matches [this](){}; AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<CXXThisExpr>, InnerMatcher, 1) { return llvm::any_of(Node.captures(), [](const LambdaCapture &LC) { return LC.capturesThis(); }); } /// Matches a constructor call expression which uses list initialization. AST_MATCHER(CXXConstructExpr, isListInitialization) { return Node.isListInitialization(); } /// Matches a constructor call expression which requires /// zero initialization. 
/// /// Given /// \code /// void foo() { /// struct point { double x; double y; }; /// point pt[2] = { { 1.0, 2.0 } }; /// } /// \endcode /// initListExpr(has(cxxConstructExpr(requiresZeroInitialization())) /// will match the implicit array filler for pt[1]. AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) { return Node.requiresZeroInitialization(); } /// Matches the n'th parameter of a function or an ObjC method /// declaration or a block. /// /// Given /// \code /// class X { void f(int x) {} }; /// \endcode /// cxxMethodDecl(hasParameter(0, hasType(varDecl()))) /// matches f(int x) {} /// with hasParameter(...) /// matching int x /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasParameter(0, hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P2(hasParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), unsigned, N, internal::Matcher<ParmVarDecl>, InnerMatcher) { return (N < Node.parameters().size() && InnerMatcher.matches(*Node.parameters()[N], Finder, Builder)); } /// Matches all arguments and their respective ParmVarDecl. /// /// Given /// \code /// void f(int i); /// int y; /// f(y); /// \endcode /// callExpr( /// forEachArgumentWithParam( /// declRefExpr(to(varDecl(hasName("y")))), /// parmVarDecl(hasType(isInteger())) /// )) /// matches f(y); /// with declRefExpr(...) /// matching int y /// and parmVarDecl(...) /// matching int i AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr), internal::Matcher<Expr>, ArgMatcher, internal::Matcher<ParmVarDecl>, ParamMatcher) { BoundNodesTreeBuilder Result; // The first argument of an overloaded member operator is the implicit object // argument of the method which should not be matched against a parameter, so // we skip over it here. 
BoundNodesTreeBuilder Matches; unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl())) .matches(Node, Finder, &Matches) ? 1 : 0; int ParamIndex = 0; bool Matched = false; for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) { BoundNodesTreeBuilder ArgMatches(*Builder); if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder, &ArgMatches)) { BoundNodesTreeBuilder ParamMatches(ArgMatches); if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl( hasParameter(ParamIndex, ParamMatcher)))), callExpr(callee(functionDecl( hasParameter(ParamIndex, ParamMatcher)))))) .matches(Node, Finder, &ParamMatches)) { Result.addMatch(ParamMatches); Matched = true; } } ++ParamIndex; } *Builder = std::move(Result); return Matched; } /// Matches any parameter of a function or an ObjC method declaration or a /// block. /// /// Does not match the 'this' parameter of a method. /// /// Given /// \code /// class X { void f(int x, int y, int z) {} }; /// \endcode /// cxxMethodDecl(hasAnyParameter(hasName("y"))) /// matches f(int x, int y, int z) {} /// with hasAnyParameter(...) /// matching int y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. /// /// For blocks, given /// \code /// b = ^(int y) { printf("%d", y) }; /// \endcode /// /// the matcher blockDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of the block b with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P(hasAnyParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), internal::Matcher<ParmVarDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(), Node.param_end(), Finder, Builder); } /// Matches \c FunctionDecls and \c FunctionProtoTypes that have a /// specific parameter count. 
/// /// Given /// \code /// void f(int i) {} /// void g(int i, int j) {} /// void h(int i, int j); /// void j(int i); /// void k(int x, int y, int z, ...); /// \endcode /// functionDecl(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(3)) /// matches \c k AST_POLYMORPHIC_MATCHER_P(parameterCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType), unsigned, N) { return Node.getNumParams() == N; } /// Matches \c FunctionDecls that have a noreturn attribute. /// /// Given /// \code /// void nope(); /// [[noreturn]] void a(); /// __attribute__((noreturn)) void b(); /// struct c { [[noreturn]] c(); }; /// \endcode /// functionDecl(isNoReturn()) /// matches all of those except /// \code /// void nope(); /// \endcode AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); } /// Matches the return type of a function declaration. /// /// Given: /// \code /// class X { int f() { return 1; } }; /// \endcode /// cxxMethodDecl(returns(asString("int"))) /// matches int f() { return 1; } AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getReturnType(), Finder, Builder); } /// Matches extern "C" function or variable declarations. /// /// Given: /// \code /// extern "C" void f() {} /// extern "C" { void g() {} } /// void h() {} /// extern "C" int x = 1; /// extern "C" int y = 2; /// int z = 3; /// \endcode /// functionDecl(isExternC()) /// matches the declaration of f and g, but not the declaration of h. /// varDecl(isExternC()) /// matches the declaration of x and y, but not the declaration of z. AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.isExternC(); } /// Matches variable/function declarations that have "static" storage /// class specifier ("static" keyword) written in the source. 
/// /// Given: /// \code /// static void f() {} /// static int i = 0; /// extern int j; /// int k; /// \endcode /// functionDecl(isStaticStorageClass()) /// matches the function declaration f. /// varDecl(isStaticStorageClass()) /// matches the variable declaration i. AST_POLYMORPHIC_MATCHER(isStaticStorageClass, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.getStorageClass() == SC_Static; } /// Matches deleted function declarations. /// /// Given: /// \code /// void Func(); /// void DeletedFunc() = delete; /// \endcode /// functionDecl(isDeleted()) /// matches the declaration of DeletedFunc, but not Func. AST_MATCHER(FunctionDecl, isDeleted) { return Node.isDeleted(); } /// Matches defaulted function declarations. /// /// Given: /// \code /// class A { ~A(); }; /// class B { ~B() = default; }; /// \endcode /// functionDecl(isDefaulted()) /// matches the declaration of ~B, but not ~A. AST_MATCHER(FunctionDecl, isDefaulted) { return Node.isDefaulted(); } /// Matches functions that have a dynamic exception specification. /// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() noexcept(true); /// void i() noexcept(false); /// void j() throw(); /// void k() throw(int); /// void l() throw(...); /// \endcode /// functionDecl(hasDynamicExceptionSpec()) and /// functionProtoType(hasDynamicExceptionSpec()) /// match the declarations of j, k, and l, but not f, g, h, or i. AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType)) { if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node)) return FnTy->hasDynamicExceptionSpec(); return false; } /// Matches functions that have a non-throwing exception specification. 
/// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() throw(); /// void i() throw(int); /// void j() noexcept(false); /// \endcode /// functionDecl(isNoThrow()) and functionProtoType(isNoThrow()) /// match the declarations of g, and h, but not f, i or j. AST_POLYMORPHIC_MATCHER(isNoThrow, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType)) { const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node); // If the function does not have a prototype, then it is assumed to be a // throwing function (as it would if the function did not have any exception // specification). if (!FnTy) return false; // Assume the best for any unresolved exception specification. if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType())) return true; return FnTy->isNothrow(); } /// Matches constexpr variable and function declarations, /// and if constexpr. /// /// Given: /// \code /// constexpr int foo = 42; /// constexpr int bar(); /// void baz() { if constexpr(1 > 0) {} } /// \endcode /// varDecl(isConstexpr()) /// matches the declaration of foo. /// functionDecl(isConstexpr()) /// matches the declaration of bar. /// ifStmt(isConstexpr()) /// matches the if statement in baz. AST_POLYMORPHIC_MATCHER(isConstexpr, AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl, FunctionDecl, IfStmt)) { return Node.isConstexpr(); } /// Matches selection statements with initializer. /// /// Given: /// \code /// void foo() { /// if (int i = foobar(); i > 0) {} /// switch (int i = foobar(); i) {} /// for (auto& a = get_range(); auto& x : a) {} /// } /// void bar() { /// if (foobar() > 0) {} /// switch (foobar()) {} /// for (auto& x : get_range()) {} /// } /// \endcode /// ifStmt(hasInitStatement(anything())) /// matches the if statement in foo but not in bar. /// switchStmt(hasInitStatement(anything())) /// matches the switch statement in foo but not in bar. /// cxxForRangeStmt(hasInitStatement(anything())) /// matches the range for statement in foo but not in bar. 
AST_POLYMORPHIC_MATCHER_P(hasInitStatement, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt, CXXForRangeStmt), internal::Matcher<Stmt>, InnerMatcher) { const Stmt *Init = Node.getInit(); return Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder); } /// Matches the condition expression of an if statement, for loop, /// switch statement or conditional operator. /// /// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true)))) /// \code /// if (true) {} /// \endcode AST_POLYMORPHIC_MATCHER_P( hasCondition, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt, SwitchStmt, AbstractConditionalOperator), internal::Matcher<Expr>, InnerMatcher) { const Expr *const Condition = Node.getCond(); return (Condition != nullptr && InnerMatcher.matches(*Condition, Finder, Builder)); } /// Matches the then-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true))))) /// \code /// if (false) true; else false; /// \endcode AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Then = Node.getThen(); return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder)); } /// Matches the else-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true))))) /// \code /// if (false) false; else true; /// \endcode AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Else = Node.getElse(); return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder)); } /// Matches if a node equals a previously bound node. /// /// Matches a node if it equals the node previously bound to \p ID. 
/// /// Given /// \code /// class X { int a; int b; }; /// \endcode /// cxxRecordDecl( /// has(fieldDecl(hasName("a"), hasType(type().bind("t")))), /// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t")))))) /// matches the class \c X, as \c a and \c b have the same type. /// /// Note that when multiple matches are involved via \c forEach* matchers, /// \c equalsBoundNodes acts as a filter. /// For example: /// compoundStmt( /// forEachDescendant(varDecl().bind("d")), /// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d")))))) /// will trigger a match for each combination of variable declaration /// and reference to that variable declaration within a compound statement. AST_POLYMORPHIC_MATCHER_P(equalsBoundNode, AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type, QualType), std::string, ID) { // FIXME: Figure out whether it makes sense to allow this // on any other node types. // For *Loc it probably does not make sense, as those seem // unique. For NestedNameSepcifier it might make sense, as // those also have pointer identity, but I'm not sure whether // they're ever reused. internal::NotEqualsBoundNodePredicate Predicate; Predicate.ID = ID; Predicate.Node = DynTypedNode::create(Node); return Builder->removeBindings(Predicate); } /// Matches the condition variable statement in an if statement. /// /// Given /// \code /// if (A* a = GetAPointer()) {} /// \endcode /// hasConditionVariableStatement(...) /// matches 'A* a = GetAPointer()'. AST_MATCHER_P(IfStmt, hasConditionVariableStatement, internal::Matcher<DeclStmt>, InnerMatcher) { const DeclStmt* const DeclarationStatement = Node.getConditionVariableDeclStmt(); return DeclarationStatement != nullptr && InnerMatcher.matches(*DeclarationStatement, Finder, Builder); } /// Matches the index expression of an array subscript expression. 
/// /// Given /// \code /// int i[5]; /// void f() { i[1] = 42; } /// \endcode /// arraySubscriptExpression(hasIndex(integerLiteral())) /// matches \c i[1] with the \c integerLiteral() matching \c 1 AST_MATCHER_P(ArraySubscriptExpr, hasIndex, internal::Matcher<Expr>, InnerMatcher) { if (const Expr* Expression = Node.getIdx()) return InnerMatcher.matches(*Expression, Finder, Builder); return false; } /// Matches the base expression of an array subscript expression. /// /// Given /// \code /// int i[5]; /// void f() { i[1] = 42; } /// \endcode /// arraySubscriptExpression(hasBase(implicitCastExpr( /// hasSourceExpression(declRefExpr())))) /// matches \c i[1] with the \c declRefExpr() matching \c i AST_MATCHER_P(ArraySubscriptExpr, hasBase, internal::Matcher<Expr>, InnerMatcher) { if (const Expr* Expression = Node.getBase()) return InnerMatcher.matches(*Expression, Finder, Builder); return false; } /// Matches a 'for', 'while', 'do while' statement or a function /// definition that has a given body. /// /// Given /// \code /// for (;;) {} /// \endcode /// hasBody(compoundStmt()) /// matches 'for (;;) {}' /// with compoundStmt() /// matching '{}' AST_POLYMORPHIC_MATCHER_P(hasBody, AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt, WhileStmt, CXXForRangeStmt, FunctionDecl), internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node); return (Statement != nullptr && InnerMatcher.matches(*Statement, Finder, Builder)); } /// Matches compound statements where at least one substatement matches /// a given matcher. Also matches StmtExprs that have CompoundStmt as children. 
/// /// Given /// \code /// { {}; 1+2; } /// \endcode /// hasAnySubstatement(compoundStmt()) /// matches '{ {}; 1+2; }' /// with compoundStmt() /// matching '{}' AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement, AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt, StmtExpr), internal::Matcher<Stmt>, InnerMatcher) { const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node); return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(), CS->body_end(), Finder, Builder); } /// Checks that a compound statement contains a specific number of /// child statements. /// /// Example: Given /// \code /// { for (;;) {} } /// \endcode /// compoundStmt(statementCountIs(0))) /// matches '{}' /// but does not match the outer compound statement. AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) { return Node.size() == N; } /// Matches literals that are equal to the given value of type ValueT. /// /// Given /// \code /// f('\0', false, 3.14, 42); /// \endcode /// characterLiteral(equals(0)) /// matches '\0' /// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0)) /// match false /// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2)) /// match 3.14 /// integerLiteral(equals(42)) /// matches 42 /// /// Note that you cannot directly match a negative numeric literal because the /// minus sign is not part of the literal: It is a unary operator whose operand /// is the positive numeric literal. 
Instead, you must use a unaryOperator() /// matcher to match the minus sign: /// /// unaryOperator(hasOperatorName("-"), /// hasUnaryOperand(integerLiteral(equals(13)))) /// /// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>, /// Matcher<FloatingLiteral>, Matcher<IntegerLiteral> template <typename ValueT> internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT> equals(const ValueT &Value) { return internal::PolymorphicMatcherWithParam1< internal::ValueEqualsMatcher, ValueT>(Value); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), bool, Value, 0) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), unsigned, Value, 1) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, FloatingLiteral, IntegerLiteral), double, Value, 2) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } /// Matches the operator Name of operator expressions (binary or /// unary). /// /// Example matches a || b (matcher = binaryOperator(hasOperatorName("||"))) /// \code /// !(a || b) /// \endcode AST_POLYMORPHIC_MATCHER_P(hasOperatorName, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator), std::string, Name) { return Name == Node.getOpcodeStr(Node.getOpcode()); } /// Matches operator expressions (binary or unary) that have any of the /// specified names. 
/// /// hasAnyOperatorName("+", "-") /// Is equivalent to /// anyOf(hasOperatorName("+"), hasOperatorName("-")) extern const internal::VariadicFunction< internal::PolymorphicMatcherWithParam1< internal::HasAnyOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, UnaryOperator)>, StringRef, internal::hasAnyOperatorNameFunc> hasAnyOperatorName; /// Matches all kinds of assignment operators. /// /// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 = s2 /// (matcher = cxxOperatorCallExpr(isAssignmentOperator())) /// \code /// struct S { S& operator=(const S&); }; /// void x() { S s1, s2; s1 = s2; } /// \endcode AST_POLYMORPHIC_MATCHER(isAssignmentOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr)) { return Node.isAssignmentOp(); } /// Matches comparison operators. /// /// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 < s2 /// (matcher = cxxOperatorCallExpr(isComparisonOperator())) /// \code /// struct S { bool operator<(const S& other); }; /// void x(S s1, S s2) { bool b1 = s1 < s2; } /// \endcode AST_POLYMORPHIC_MATCHER(isComparisonOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr)) { return Node.isComparisonOp(); } /// Matches the left hand side of binary operator expressions. /// /// Example matches a (matcher = binaryOperator(hasLHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasLHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *LeftHandSide = Node.getLHS(); return (LeftHandSide != nullptr && InnerMatcher.matches(*LeftHandSide, Finder, Builder)); } /// Matches the right hand side of binary operator expressions. 
/// /// Example matches b (matcher = binaryOperator(hasRHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasRHS, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *RightHandSide = Node.getRHS(); return (RightHandSide != nullptr && InnerMatcher.matches(*RightHandSide, Finder, Builder)); } /// Matches if either the left hand side or the right hand side of a /// binary operator matches. inline internal::Matcher<BinaryOperator> hasEitherOperand( const internal::Matcher<Expr> &InnerMatcher) { return anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher)); } /// Matches if the operand of a unary operator matches. /// /// Example matches true (matcher = hasUnaryOperand( /// cxxBoolLiteral(equals(true)))) /// \code /// !true /// \endcode AST_MATCHER_P(UnaryOperator, hasUnaryOperand, internal::Matcher<Expr>, InnerMatcher) { const Expr * const Operand = Node.getSubExpr(); return (Operand != nullptr && InnerMatcher.matches(*Operand, Finder, Builder)); } /// Matches if the cast's source expression /// or opaque value's source expression matches the given matcher. /// /// Example 1: matches "a string" /// (matcher = castExpr(hasSourceExpression(cxxConstructExpr()))) /// \code /// class URL { URL(string); }; /// URL url = "a string"; /// \endcode /// /// Example 2: matches 'b' (matcher = /// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr()))) /// \code /// int a = b ?: 1; /// \endcode AST_POLYMORPHIC_MATCHER_P(hasSourceExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr, OpaqueValueExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *const SubExpression = internal::GetSourceExpressionMatcher<NodeType>::get(Node); return (SubExpression != nullptr && InnerMatcher.matches(*SubExpression, Finder, Builder)); } /// Matches casts that has a given cast kind. 
/// /// Example: matches the implicit cast around \c 0 /// (matcher = castExpr(hasCastKind(CK_NullToPointer))) /// \code /// int *p = 0; /// \endcode /// /// If the matcher is use from clang-query, CastKind parameter /// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer"). AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) { return Node.getCastKind() == Kind; } /// Matches casts whose destination type matches a given matcher. /// /// (Note: Clang's AST refers to other conversions as "casts" too, and calls /// actual casts "explicit" casts.) AST_MATCHER_P(ExplicitCastExpr, hasDestinationType, internal::Matcher<QualType>, InnerMatcher) { const QualType NodeType = Node.getTypeAsWritten(); return InnerMatcher.matches(NodeType, Finder, Builder); } /// Matches implicit casts whose destination type matches a given /// matcher. /// /// FIXME: Unit test this matcher AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getType(), Finder, Builder); } /// Matches TagDecl object that are spelled with "struct." /// /// Example matches S, but not C, U or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isStruct) { return Node.isStruct(); } /// Matches TagDecl object that are spelled with "union." /// /// Example matches U, but not C, S or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isUnion) { return Node.isUnion(); } /// Matches TagDecl object that are spelled with "class." /// /// Example matches C, but not S, U or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isClass) { return Node.isClass(); } /// Matches TagDecl object that are spelled with "enum." /// /// Example matches E, but not C, S or U. 
/// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isEnum) { return Node.isEnum(); } /// Matches the true branch expression of a conditional operator. /// /// Example 1 (conditional ternary operator): matches a /// \code /// condition ? a : b /// \endcode /// /// Example 2 (conditional binary operator): matches opaqueValueExpr(condition) /// \code /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getTrueExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches the false branch expression of a conditional operator /// (binary or ternary). /// /// Example matches b /// \code /// condition ? a : b /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getFalseExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches if a declaration has a body attached. /// /// Example matches A, va, fa /// \code /// class A {}; /// class B; // Doesn't match, as it has no body. /// int va; /// extern int vb; // Doesn't match, as it doesn't define the variable. /// void fa() {} /// void fb(); // Doesn't match, as it has no body. /// @interface X /// - (void)ma; // Doesn't match, interface is declaration. /// @end /// @implementation X /// - (void)ma {} /// @end /// \endcode /// /// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>, /// Matcher<ObjCMethodDecl> AST_POLYMORPHIC_MATCHER(isDefinition, AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl, ObjCMethodDecl, FunctionDecl)) { return Node.isThisDeclarationADefinition(); } /// Matches if a function declaration is variadic. /// /// Example matches f, but not g or h. The function i will not match, even when /// compiled in C mode. 
/// \code /// void f(...); /// void g(int); /// template <typename... Ts> void h(Ts...); /// void i(); /// \endcode AST_MATCHER(FunctionDecl, isVariadic) { return Node.isVariadic(); } /// Matches the class declaration that the given method declaration /// belongs to. /// /// FIXME: Generalize this for other kinds of declarations. /// FIXME: What other kind of declarations would we need to generalize /// this to? /// /// Example matches A() in the last line /// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl( /// ofClass(hasName("A")))))) /// \code /// class A { /// public: /// A(); /// }; /// A a = A(); /// \endcode AST_MATCHER_P(CXXMethodDecl, ofClass, internal::Matcher<CXXRecordDecl>, InnerMatcher) { const CXXRecordDecl *Parent = Node.getParent(); return (Parent != nullptr && InnerMatcher.matches(*Parent, Finder, Builder)); } /// Matches each method overridden by the given method. This matcher may /// produce multiple matches. /// /// Given /// \code /// class A { virtual void f(); }; /// class B : public A { void f(); }; /// class C : public B { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note /// that B::f is not overridden by C::f). /// /// The check can produce multiple matches in case of multiple inheritance, e.g. /// \code /// class A1 { virtual void f(); }; /// class A2 { virtual void f(); }; /// class C : public A1, public A2 { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and /// once with "b" binding "A2::f" and "d" binding "C::f". 
AST_MATCHER_P(CXXMethodDecl, forEachOverridden, internal::Matcher<CXXMethodDecl>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *Overridden : Node.overridden_methods()) { BoundNodesTreeBuilder OverriddenBuilder(*Builder); const bool OverriddenMatched = InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder); if (OverriddenMatched) { Matched = true; Result.addMatch(OverriddenBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches if the given method declaration is virtual. /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isVirtual) { return Node.isVirtual(); } /// Matches if the given method declaration has an explicit "virtual". /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// void x(); /// }; /// \endcode /// matches A::x but not B::x AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) { return Node.isVirtualAsWritten(); } /// Matches if the given method or class declaration is final. /// /// Given: /// \code /// class A final {}; /// /// struct B { /// virtual void f(); /// }; /// /// struct C : B { /// void f() final; /// }; /// \endcode /// matches A and C::f, but not B, C, or B::f AST_POLYMORPHIC_MATCHER(isFinal, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, CXXMethodDecl)) { return Node.template hasAttr<FinalAttr>(); } /// Matches if the given method declaration is pure. /// /// Given /// \code /// class A { /// public: /// virtual void x() = 0; /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isPure) { return Node.isPure(); } /// Matches if the given method declaration is const. 
/// /// Given /// \code /// struct A { /// void foo() const; /// void bar(); /// }; /// \endcode /// /// cxxMethodDecl(isConst()) matches A::foo() but not A::bar() AST_MATCHER(CXXMethodDecl, isConst) { return Node.isConst(); } /// Matches if the given method declaration declares a copy assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not /// the second one. AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) { return Node.isCopyAssignmentOperator(); } /// Matches if the given method declaration declares a move assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not /// the first one. AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) { return Node.isMoveAssignmentOperator(); } /// Matches if the given method declaration overrides another method. /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// virtual void x(); /// }; /// \endcode /// matches B::x AST_MATCHER(CXXMethodDecl, isOverride) { return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>(); } /// Matches method declarations that are user-provided. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &) = default; // #2 /// S(S &&) = delete; // #3 /// }; /// \endcode /// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3. AST_MATCHER(CXXMethodDecl, isUserProvided) { return Node.isUserProvided(); } /// Matches member expressions that are called with '->' as opposed /// to '.'. /// /// Member calls on the implicit this pointer match as called with '->'. 
/// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// template <class T> void f() { this->f<T>(); f<T>(); } /// int a; /// static int b; /// }; /// template <class T> /// class Z { /// void x() { this->m; } /// }; /// \endcode /// memberExpr(isArrow()) /// matches this->x, x, y.x, a, this->b /// cxxDependentScopeMemberExpr(isArrow()) /// matches this->m /// unresolvedMemberExpr(isArrow()) /// matches this->f<T>, f<T> AST_POLYMORPHIC_MATCHER( isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr)) { return Node.isArrow(); } /// Matches QualType nodes that are of integer type. /// /// Given /// \code /// void a(int); /// void b(long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isInteger()))) /// matches "a(int)", "b(long)", but not "c(double)". AST_MATCHER(QualType, isInteger) { return Node->isIntegerType(); } /// Matches QualType nodes that are of unsigned integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isUnsignedInteger()))) /// matches "b(unsigned long)", but not "a(int)" and "c(double)". AST_MATCHER(QualType, isUnsignedInteger) { return Node->isUnsignedIntegerType(); } /// Matches QualType nodes that are of signed integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isSignedInteger()))) /// matches "a(int)", but not "b(unsigned long)" and "c(double)". AST_MATCHER(QualType, isSignedInteger) { return Node->isSignedIntegerType(); } /// Matches QualType nodes that are of character type. /// /// Given /// \code /// void a(char); /// void b(wchar_t); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isAnyCharacter()))) /// matches "a(char)", "b(wchar_t)", but not "c(double)". 
AST_MATCHER(QualType, isAnyCharacter) { return Node->isAnyCharacterType(); } /// Matches QualType nodes that are of any pointer type; this includes /// the Objective-C object pointer type, which is different despite being /// syntactically similar. /// /// Given /// \code /// int *i = nullptr; /// /// @interface Foo /// @end /// Foo *f; /// /// int j; /// \endcode /// varDecl(hasType(isAnyPointer())) /// matches "int *i" and "Foo *f", but not "int j". AST_MATCHER(QualType, isAnyPointer) { return Node->isAnyPointerType(); } /// Matches QualType nodes that are const-qualified, i.e., that /// include "top-level" const. /// /// Given /// \code /// void a(int); /// void b(int const); /// void c(const int); /// void d(const int*); /// void e(int const) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isConstQualified()))) /// matches "void b(int const)", "void c(const int)" and /// "void e(int const) {}". It does not match d as there /// is no top-level const on the parameter type "const int *". AST_MATCHER(QualType, isConstQualified) { return Node.isConstQualified(); } /// Matches QualType nodes that are volatile-qualified, i.e., that /// include "top-level" volatile. /// /// Given /// \code /// void a(int); /// void b(int volatile); /// void c(volatile int); /// void d(volatile int*); /// void e(int volatile) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isVolatileQualified()))) /// matches "void b(int volatile)", "void c(volatile int)" and /// "void e(int volatile) {}". It does not match d as there /// is no top-level volatile on the parameter type "volatile int *". AST_MATCHER(QualType, isVolatileQualified) { return Node.isVolatileQualified(); } /// Matches QualType nodes that have local CV-qualifiers attached to /// the node, not hidden within a typedef. 
/// /// Given /// \code /// typedef const int const_int; /// const_int i; /// int *const j; /// int *volatile k; /// int m; /// \endcode /// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k. /// \c i is const-qualified but the qualifier is not local. AST_MATCHER(QualType, hasLocalQualifiers) { return Node.hasLocalQualifiers(); } /// Matches a member expression where the member is matched by a /// given matcher. /// /// Given /// \code /// struct { int first, second; } first, second; /// int i(second.first); /// int j(first.second); /// \endcode /// memberExpr(member(hasName("first"))) /// matches second.first /// but not first.second (because the member name there is "second"). AST_MATCHER_P(MemberExpr, member, internal::Matcher<ValueDecl>, InnerMatcher) { return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder); } /// Matches a member expression where the object expression is matched by a /// given matcher. Implicit object expressions are included; that is, it matches /// use of implicit `this`. /// /// Given /// \code /// struct X { /// int m; /// int f(X x) { x.m; return m; } /// }; /// \endcode /// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m`, but not `m`; however, /// memberExpr(hasObjectExpression(hasType(pointsTo( // cxxRecordDecl(hasName("X")))))) /// matches `m` (aka. `this->m`), but not `x.m`. AST_POLYMORPHIC_MATCHER_P( hasObjectExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr), internal::Matcher<Expr>, InnerMatcher) { if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node)) if (E->isImplicitAccess()) return false; if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node)) if (E->isImplicitAccess()) return false; return InnerMatcher.matches(*Node.getBase(), Finder, Builder); } /// Matches any using shadow declaration. 
/// /// Given /// \code /// namespace X { void b(); } /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasName("b")))) /// matches \code using X::b \endcode AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl, internal::Matcher<UsingShadowDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(), Node.shadow_end(), Finder, Builder); } /// Matches a using shadow declaration where the target declaration is /// matched by the given matcher. /// /// Given /// \code /// namespace X { int a; void b(); } /// using X::a; /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl()))) /// matches \code using X::b \endcode /// but not \code using X::a \endcode AST_MATCHER_P(UsingShadowDecl, hasTargetDecl, internal::Matcher<NamedDecl>, InnerMatcher) { return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder); } /// Matches template instantiations of function, class, or static /// member variable template instantiations. /// /// Given /// \code /// template <typename T> class X {}; class A {}; X<A> x; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; template class X<A>; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; extern template class X<A>; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// matches the template instantiation of X<A>. /// /// But given /// \code /// template <typename T> class X {}; class A {}; /// template <> class X<A> {}; X<A> x; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// does not match, as X<A> is an explicit template specialization. 
/// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isTemplateInstantiation, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDeclaration); } /// Matches declarations that are template instantiations or are inside /// template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { T i; } /// A(0); /// A(0U); /// \endcode /// functionDecl(isInstantiated()) /// matches 'A(int) {...};' and 'A(unsigned) {...}'. AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) { auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))); return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation))); } /// Matches statements inside of a template instantiation. /// /// Given /// \code /// int j; /// template<typename T> void A(T t) { T i; j += 42;} /// A(0); /// A(0U); /// \endcode /// declStmt(isInTemplateInstantiation()) /// matches 'int i;' and 'unsigned i'. /// unless(stmt(isInTemplateInstantiation())) /// will NOT match j += 42; as it's shared between the template definition and /// instantiation. AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) { return stmt( hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))))); } /// Matches explicit template specializations of function, class, or /// static member variable template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { } /// template<> void A(int N) { } /// \endcode /// functionDecl(isExplicitTemplateSpecialization()) /// matches the specialization A<int>(). 
/// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization); } /// Matches \c TypeLocs for which the given inner /// QualType-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc, internal::Matcher<QualType>, InnerMatcher, 0) { return internal::BindableMatcher<TypeLoc>( new internal::TypeLocTypeMatcher(InnerMatcher)); } /// Matches type \c bool. /// /// Given /// \code /// struct S { bool func(); }; /// \endcode /// functionDecl(returns(booleanType())) /// matches "bool func();" AST_MATCHER(Type, booleanType) { return Node.isBooleanType(); } /// Matches type \c void. /// /// Given /// \code /// struct S { void func(); }; /// \endcode /// functionDecl(returns(voidType())) /// matches "void func();" AST_MATCHER(Type, voidType) { return Node.isVoidType(); } template <typename NodeType> using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>; /// Matches builtin Types. /// /// Given /// \code /// struct A {}; /// A a; /// int b; /// float c; /// bool d; /// \endcode /// builtinType() /// matches "int b", "float c" and "bool d" extern const AstTypeMatcher<BuiltinType> builtinType; /// Matches all kinds of arrays. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[4]; /// void f() { int c[a[0]]; } /// \endcode /// arrayType() /// matches "int a[]", "int b[4]" and "int c[a[0]]"; extern const AstTypeMatcher<ArrayType> arrayType; /// Matches C99 complex types. /// /// Given /// \code /// _Complex float f; /// \endcode /// complexType() /// matches "_Complex float f" extern const AstTypeMatcher<ComplexType> complexType; /// Matches any real floating-point type (float, double, long double). 
/// /// Given /// \code /// int i; /// float f; /// \endcode /// realFloatingPointType() /// matches "float f" but not "int i" AST_MATCHER(Type, realFloatingPointType) { return Node.isRealFloatingType(); } /// Matches arrays and C99 complex types that have a specific element /// type. /// /// Given /// \code /// struct A {}; /// A a[7]; /// int b[7]; /// \endcode /// arrayType(hasElementType(builtinType())) /// matches "int b[7]" /// /// Usable as: Matcher<ArrayType>, Matcher<ComplexType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement, AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType, ComplexType)); /// Matches C arrays with a specified constant size. /// /// Given /// \code /// void() { /// int a[2]; /// int b[] = { 2, 3 }; /// int c[b[0]]; /// } /// \endcode /// constantArrayType() /// matches "int a[2]" extern const AstTypeMatcher<ConstantArrayType> constantArrayType; /// Matches nodes that have the specified size. /// /// Given /// \code /// int a[42]; /// int b[2 * 21]; /// int c[41], d[43]; /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// char *w = "a"; /// \endcode /// constantArrayType(hasSize(42)) /// matches "int a[42]" and "int b[2 * 21]" /// stringLiteral(hasSize(4)) /// matches "abcd", L"abcd" AST_POLYMORPHIC_MATCHER_P(hasSize, AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType, StringLiteral), unsigned, N) { return internal::HasSizeMatcher<NodeType>::hasSize(Node, N); } /// Matches C++ arrays whose size is a value-dependent expression. /// /// Given /// \code /// template<typename T, int Size> /// class array { /// T data[Size]; /// }; /// \endcode /// dependentSizedArrayType /// matches "T data[Size]" extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType; /// Matches C arrays with unspecified size. 
/// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[42]; /// void f(int c[]) { int d[a[0]]; }; /// \endcode /// incompleteArrayType() /// matches "int a[]" and "int c[]" extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType; /// Matches C arrays with a specified size that is not an /// integer-constant-expression. /// /// Given /// \code /// void f() { /// int a[] = { 2, 3 } /// int b[42]; /// int c[a[0]]; /// } /// \endcode /// variableArrayType() /// matches "int c[a[0]]" extern const AstTypeMatcher<VariableArrayType> variableArrayType; /// Matches \c VariableArrayType nodes that have a specific size /// expression. /// /// Given /// \code /// void f(int b) { /// int a[b]; /// } /// \endcode /// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to( /// varDecl(hasName("b"))))))) /// matches "int a[b]" AST_MATCHER_P(VariableArrayType, hasSizeExpr, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder); } /// Matches atomic types. /// /// Given /// \code /// _Atomic(int) i; /// \endcode /// atomicType() /// matches "_Atomic(int) i" extern const AstTypeMatcher<AtomicType> atomicType; /// Matches atomic types with a specific value type. /// /// Given /// \code /// _Atomic(int) i; /// _Atomic(float) f; /// \endcode /// atomicType(hasValueType(isInteger())) /// matches "_Atomic(int) i" /// /// Usable as: Matcher<AtomicType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue, AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType)); /// Matches types nodes representing C++11 auto types. /// /// Given: /// \code /// auto n = 4; /// int v[] = { 2, 3 } /// for (auto i : v) { } /// \endcode /// autoType() /// matches "auto n" and "auto i" extern const AstTypeMatcher<AutoType> autoType; /// Matches types nodes representing C++11 decltype(<expr>) types. 
/// /// Given: /// \code /// short i = 1; /// int j = 42; /// decltype(i + j) result = i + j; /// \endcode /// decltypeType() /// matches "decltype(i + j)" extern const AstTypeMatcher<DecltypeType> decltypeType; /// Matches \c AutoType nodes where the deduced type is a specific type. /// /// Note: There is no \c TypeLoc for the deduced type and thus no /// \c getDeducedLoc() matcher. /// /// Given /// \code /// auto a = 1; /// auto b = 2.0; /// \endcode /// autoType(hasDeducedType(isInteger())) /// matches "auto a" /// /// Usable as: Matcher<AutoType> AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType, AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType)); /// Matches \c DecltypeType nodes to find out the underlying type. /// /// Given /// \code /// decltype(1) a = 1; /// decltype(2.0) b = 2.0; /// \endcode /// decltypeType(hasUnderlyingType(isInteger())) /// matches the type of "a" /// /// Usable as: Matcher<DecltypeType> AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType, AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType)); /// Matches \c FunctionType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionType() /// matches "int (*f)(int)" and the type of "g". extern const AstTypeMatcher<FunctionType> functionType; /// Matches \c FunctionProtoType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionProtoType() /// matches "int (*f)(int)" and the type of "g" in C++ mode. /// In C mode, "g" is not matched because it does not contain a prototype. extern const AstTypeMatcher<FunctionProtoType> functionProtoType; /// Matches \c ParenType nodes. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int *array_of_ptrs[4]; /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not /// \c array_of_ptrs. extern const AstTypeMatcher<ParenType> parenType; /// Matches \c ParenType nodes where the inner type is a specific type. 
/// /// Given /// \code /// int (*ptr_to_array)[4]; /// int (*ptr_to_func)(int); /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches /// \c ptr_to_func but not \c ptr_to_array. /// /// Usable as: Matcher<ParenType> AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType, AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType)); /// Matches block pointer types, i.e. types syntactically represented as /// "void (^)(int)". /// /// The \c pointee is always required to be a \c FunctionType. extern const AstTypeMatcher<BlockPointerType> blockPointerType; /// Matches member pointer types. /// Given /// \code /// struct A { int i; } /// A::* ptr = A::i; /// \endcode /// memberPointerType() /// matches "A::* ptr" extern const AstTypeMatcher<MemberPointerType> memberPointerType; /// Matches pointer types, but does not match Objective-C object pointer /// types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int c = 5; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "int *a", but does not match "Foo *f". extern const AstTypeMatcher<PointerType> pointerType; /// Matches an Objective-C object pointer type, which is different from /// a pointer type, despite being syntactically similar. /// /// Given /// \code /// int *a; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "Foo *f", but does not match "int *a". extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType; /// Matches both lvalue and rvalue reference types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f. extern const AstTypeMatcher<ReferenceType> referenceType; /// Matches lvalue reference types. 
/// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is /// matched since the type is deduced as int& by reference collapsing rules. extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType; /// Matches rvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not /// matched as it is deduced to int& by reference collapsing rules. extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType; /// Narrows PointerType (and similar) matchers to those where the /// \c pointee matches a given matcher. /// /// Given /// \code /// int *a; /// int const *b; /// float const *f; /// \endcode /// pointerType(pointee(isConstQualified(), isInteger())) /// matches "int const *b" /// /// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>, /// Matcher<PointerType>, Matcher<ReferenceType> AST_TYPELOC_TRAVERSE_MATCHER_DECL( pointee, getPointee, AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType, PointerType, ReferenceType)); /// Matches typedef types. /// /// Given /// \code /// typedef int X; /// \endcode /// typedefType() /// matches "typedef int X" extern const AstTypeMatcher<TypedefType> typedefType; /// Matches enum types. /// /// Given /// \code /// enum C { Green }; /// enum class S { Red }; /// /// C c; /// S s; /// \endcode // /// \c enumType() matches the type of the variable declarations of both \c c and /// \c s. extern const AstTypeMatcher<EnumType> enumType; /// Matches template specialization types. 
/// /// Given /// \code /// template <typename T> /// class C { }; /// /// template class C<int>; // A /// C<char> var; // B /// \endcode /// /// \c templateSpecializationType() matches the type of the explicit /// instantiation in \c A and the type of the variable declaration in \c B. extern const AstTypeMatcher<TemplateSpecializationType> templateSpecializationType; /// Matches C++17 deduced template specialization types, e.g. deduced class /// template types. /// /// Given /// \code /// template <typename T> /// class C { public: C(T); }; /// /// C c(123); /// \endcode /// \c deducedTemplateSpecializationType() matches the type in the declaration /// of the variable \c c. extern const AstTypeMatcher<DeducedTemplateSpecializationType> deducedTemplateSpecializationType; /// Matches types nodes representing unary type transformations. /// /// Given: /// \code /// typedef __underlying_type(T) type; /// \endcode /// unaryTransformType() /// matches "__underlying_type(T)" extern const AstTypeMatcher<UnaryTransformType> unaryTransformType; /// Matches record types (e.g. structs, classes). /// /// Given /// \code /// class C {}; /// struct S {}; /// /// C c; /// S s; /// \endcode /// /// \c recordType() matches the type of the variable declarations of both \c c /// and \c s. extern const AstTypeMatcher<RecordType> recordType; /// Matches tag types (record and enum types). /// /// Given /// \code /// enum E {}; /// class C {}; /// /// E e; /// C c; /// \endcode /// /// \c tagType() matches the type of the variable declarations of both \c e /// and \c c. extern const AstTypeMatcher<TagType> tagType; /// Matches types specified with an elaborated type keyword or with a /// qualified name. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// class C {}; /// /// class C c; /// N::M::D d; /// \endcode /// /// \c elaboratedType() matches the type of the variable declarations of both /// \c c and \c d. 
extern const AstTypeMatcher<ElaboratedType> elaboratedType; /// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier, /// matches \c InnerMatcher if the qualifier exists. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))) /// matches the type of the variable declaration of \c d. AST_MATCHER_P(ElaboratedType, hasQualifier, internal::Matcher<NestedNameSpecifier>, InnerMatcher) { if (const NestedNameSpecifier *Qualifier = Node.getQualifier()) return InnerMatcher.matches(*Qualifier, Finder, Builder); return false; } /// Matches ElaboratedTypes whose named type matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(namesType(recordType( /// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable /// declaration of \c d. AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getNamedType(), Finder, Builder); } /// Matches types that represent the result of substituting a type for a /// template type parameter. /// /// Given /// \code /// template <typename T> /// void F(T t) { /// int i = 1 + t; /// } /// \endcode /// /// \c substTemplateTypeParmType() matches the type of 't' but not '1' extern const AstTypeMatcher<SubstTemplateTypeParmType> substTemplateTypeParmType; /// Matches template type parameter substitutions that have a replacement /// type that matches the provided matcher. 
/// /// Given /// \code /// template <typename T> /// double F(T t); /// int i; /// double j = F(i); /// \endcode /// /// \c substTemplateTypeParmType(hasReplacementType(type())) matches int AST_TYPE_TRAVERSE_MATCHER( hasReplacementType, getReplacementType, AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType)); /// Matches template type parameter types. /// /// Example matches T, but not int. /// (matcher = templateTypeParmType()) /// \code /// template <typename T> void f(int i); /// \endcode extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType; /// Matches injected class name types. /// /// Example matches S s, but not S<T> s. /// (matcher = parmVarDecl(hasType(injectedClassNameType()))) /// \code /// template <typename T> struct S { /// void f(S s); /// void g(S<T> s); /// }; /// \endcode extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType; /// Matches decayed type /// Example matches i[] in declaration of f. /// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType()))))) /// Example matches i[1]. /// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType()))))) /// \code /// void f(int i[]) { /// i[1] = 0; /// } /// \endcode extern const AstTypeMatcher<DecayedType> decayedType; /// Matches the decayed type, whos decayed type matches \c InnerMatcher AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>, InnerType) { return InnerType.matches(Node.getDecayedType(), Finder, Builder); } /// Matches declarations whose declaration context, interpreted as a /// Decl, matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// \endcode /// /// \c cxxRcordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the /// declaration of \c class \c D. 
AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) {
  const DeclContext *DC = Node.getDeclContext();
  if (!DC) return false;
  return InnerMatcher.matches(*Decl::castFromDeclContext(DC),
                              Finder, Builder);
}

/// Matches nested name specifiers.
///
/// Given
/// \code
///   namespace ns {
///     struct A { static void f(); };
///     void A::f() {}
///     void g() { A::f(); }
///   }
///   ns::A a;
/// \endcode
/// nestedNameSpecifier()
///   matches "ns::" and both occurrences of "A::"
extern const internal::VariadicAllOfMatcher<NestedNameSpecifier>
    nestedNameSpecifier;

/// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc.
extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc>
    nestedNameSpecifierLoc;

/// Matches \c NestedNameSpecifierLocs for which the given inner
/// NestedNameSpecifier-matcher matches.
AST_MATCHER_FUNCTION_P_OVERLOAD(
    internal::BindableMatcher<NestedNameSpecifierLoc>, loc,
    internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) {
  return internal::BindableMatcher<NestedNameSpecifierLoc>(
      new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>(
          InnerMatcher));
}

/// Matches nested name specifiers that specify a type matching the
/// given \c QualType matcher without qualifiers.
///
/// Given
/// \code
///   struct A { struct B { struct C {}; }; };
///   A::B::C c;
/// \endcode
/// nestedNameSpecifier(specifiesType(
///   hasDeclaration(cxxRecordDecl(hasName("A")))
/// ))
///   matches "A::"
AST_MATCHER_P(NestedNameSpecifier, specifiesType,
              internal::Matcher<QualType>, InnerMatcher) {
  if (!Node.getAsType())
    return false;
  return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder);
}

/// Matches nested name specifier locs that specify a type matching the
/// given \c TypeLoc.
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type( /// hasDeclaration(cxxRecordDecl(hasName("A"))))))) /// matches "A::" AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc, internal::Matcher<TypeLoc>, InnerMatcher) { return Node && Node.getNestedNameSpecifier()->getAsType() && InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifier. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 0) { const NestedNameSpecifier *NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(*NextNode, Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifierLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A"))))) /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix, internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher, 1) { NestedNameSpecifierLoc NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(NextNode, Finder, Builder); } /// Matches nested name specifiers that specify a namespace matching the /// given namespace matcher. /// /// Given /// \code /// namespace ns { struct A {}; } /// ns::A a; /// \endcode /// nestedNameSpecifier(specifiesNamespace(hasName("ns"))) /// matches "ns::" AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace, internal::Matcher<NamespaceDecl>, InnerMatcher) { if (!Node.getAsNamespace()) return false; return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder); } /// Overloads for the \c equalsNode matcher. 
/// FIXME: Implement for other node types. /// @{ /// Matches if a node equals another node. /// /// \c Decl has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Stmt has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Type has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) { return &Node == Other; } /// @} /// Matches each case or default statement belonging to the given switch /// statement. This matcher may produce multiple matches. /// /// Given /// \code /// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } } /// \endcode /// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s") /// matches four times, with "c" binding each of "case 1:", "case 2:", /// "case 3:" and "case 4:", and "s" respectively binding "switch (1)", /// "switch (1)", "switch (2)" and "switch (2)". AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>, InnerMatcher) { BoundNodesTreeBuilder Result; // FIXME: getSwitchCaseList() does not necessarily guarantee a stable // iteration order. We should use the more general iterating matchers once // they are capable of expressing this matcher (for example, it should ignore // case statements belonging to nested switch statements). bool Matched = false; for (const SwitchCase *SC = Node.getSwitchCaseList(); SC; SC = SC->getNextSwitchCase()) { BoundNodesTreeBuilder CaseBuilder(*Builder); bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder); if (CaseMatched) { Matched = true; Result.addMatch(CaseBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches each constructor initializer in a constructor definition. 
/// /// Given /// \code /// class A { A() : i(42), j(42) {} int i; int j; }; /// \endcode /// cxxConstructorDecl(forEachConstructorInitializer( /// forField(decl().bind("x")) /// )) /// will trigger two matches, binding for 'i' and 'j' respectively. AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *I : Node.inits()) { BoundNodesTreeBuilder InitBuilder(*Builder); if (InnerMatcher.matches(*I, Finder, &InitBuilder)) { Matched = true; Result.addMatch(InitBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches constructor declarations that are copy constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3. AST_MATCHER(CXXConstructorDecl, isCopyConstructor) { return Node.isCopyConstructor(); } /// Matches constructor declarations that are move constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2. AST_MATCHER(CXXConstructorDecl, isMoveConstructor) { return Node.isMoveConstructor(); } /// Matches constructor declarations that are default constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3. AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) { return Node.isDefaultConstructor(); } /// Matches constructors that delegate to another constructor. 
/// /// Given /// \code /// struct S { /// S(); // #1 /// S(int) {} // #2 /// S(S &&) : S() {} // #3 /// }; /// S::S() : S(0) {} // #4 /// \endcode /// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not /// #1 or #2. AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) { return Node.isDelegatingConstructor(); } /// Matches constructor, conversion function, and deduction guide declarations /// that have an explicit specifier if this explicit specifier is resolved to /// true. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9. /// cxxConversionDecl(isExplicit()) will match #4, but not #3. /// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5. AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXConstructorDecl, CXXConversionDecl, CXXDeductionGuideDecl)) { return Node.isExplicit(); } /// Matches the expression in an explicit specifier if present in the given /// declaration. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2. /// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4. /// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6. 
AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>, InnerMatcher) { ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node); if (!ES.getExpr()) return false; return InnerMatcher.matches(*ES.getExpr(), Finder, Builder); } /// Matches function and namespace declarations that are marked with /// the inline keyword. /// /// Given /// \code /// inline void f(); /// void g(); /// namespace n { /// inline namespace m {} /// } /// \endcode /// functionDecl(isInline()) will match ::f(). /// namespaceDecl(isInline()) will match n::m. AST_POLYMORPHIC_MATCHER(isInline, AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl, FunctionDecl)) { // This is required because the spelling of the function used to determine // whether inline is specified or not differs between the polymorphic types. if (const auto *FD = dyn_cast<FunctionDecl>(&Node)) return FD->isInlineSpecified(); else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node)) return NSD->isInline(); llvm_unreachable("Not a valid polymorphic type"); } /// Matches anonymous namespace declarations. /// /// Given /// \code /// namespace n { /// namespace {} // #1 /// } /// \endcode /// namespaceDecl(isAnonymous()) will match #1 but not ::n. AST_MATCHER(NamespaceDecl, isAnonymous) { return Node.isAnonymousNamespace(); } /// Matches declarations in the namespace `std`, but not in nested namespaces. /// /// Given /// \code /// class vector {}; /// namespace foo { /// class vector {}; /// namespace std { /// class vector {}; /// } /// } /// namespace std { /// inline namespace __1 { /// class vector {}; // #1 /// namespace experimental { /// class vector {}; /// } /// } /// } /// \endcode /// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1. AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); } /// If the given case statement does not use the GNU case range /// extension, matches the constant given in the statement. 
/// /// Given /// \code /// switch (1) { case 1: case 1+1: case 3 ... 4: ; } /// \endcode /// caseStmt(hasCaseConstant(integerLiteral())) /// matches "case 1:" AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>, InnerMatcher) { if (Node.getRHS()) return false; return InnerMatcher.matches(*Node.getLHS(), Finder, Builder); } /// Matches declaration that has a given attribute. /// /// Given /// \code /// __attribute__((device)) void f() { ... } /// \endcode /// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of /// f. If the matcher is used from clang-query, attr::Kind parameter should be /// passed as a quoted string. e.g., hasAttr("attr::CUDADevice"). AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) { for (const auto *Attr : Node.attrs()) { if (Attr->getKind() == AttrKind) return true; } return false; } /// Matches the return value expression of a return statement /// /// Given /// \code /// return a + b; /// \endcode /// hasReturnValue(binaryOperator()) /// matches 'return a + b' /// with binaryOperator() /// matching 'a + b' AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>, InnerMatcher) { if (const auto *RetValue = Node.getRetValue()) return InnerMatcher.matches(*RetValue, Finder, Builder); return false; } /// Matches CUDA kernel call expression. /// /// Example matches, /// \code /// kernel<<<i,j>>>(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr> cudaKernelCallExpr; /// Matches expressions that resolve to a null pointer constant, such as /// GNU's __null, C++11's nullptr, or C's NULL macro. /// /// Given: /// \code /// void *v1 = NULL; /// void *v2 = nullptr; /// void *v3 = __null; // GNU extension /// char *cp = (char *)0; /// int *ip = 0; /// int i = 0; /// \endcode /// expr(nullPointerConstant()) /// matches the initializer for v1, v2, v3, cp, and ip. Does not match the /// initializer for i. 
AST_MATCHER(Expr, nullPointerConstant) {
  return Node.isNullPointerConstant(Finder->getASTContext(),
                                    Expr::NPC_ValueDependentIsNull);
}

/// Matches declaration of the function the statement belongs to
///
/// Given:
/// \code
///   F& operator=(const F& o) {
///     std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
///     return *this;
///   }
/// \endcode
/// returnStmt(forFunction(hasName("operator=")))
///   matches 'return *this'
///   but does not match 'return v > 0'
AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
              InnerMatcher) {
  // Walk upwards through the AST parent map (depth-first, via an explicit
  // stack) until a FunctionDecl or the call operator of a LambdaExpr is
  // found. A lambda terminates its branch of the walk, so a statement inside
  // a lambda body is attributed to the lambda's call operator rather than to
  // the enclosing function.
  const auto &Parents = Finder->getASTContext().getParents(Node);

  llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
  while(!Stack.empty()) {
    const auto &CurNode = Stack.back();
    Stack.pop_back();
    if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
      if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
        return true;
      }
    } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
      if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(),
                              Finder, Builder)) {
        return true;
      }
    } else {
      // Neither a function nor a lambda: keep climbing through all parents.
      for(const auto &Parent: Finder->getASTContext().getParents(CurNode))
        Stack.push_back(Parent);
    }
  }
  return false;
}

/// Matches a declaration that has external formal linkage.
///
/// Example matches only z (matcher = varDecl(hasExternalFormalLinkage()))
/// \code
///   void f() {
///     int x;
///     static int y;
///   }
///   int z;
/// \endcode
///
/// Example matches f() because it has external formal linkage despite being
/// unique to the translation unit as though it has internal linkage
/// (matcher = functionDecl(hasExternalFormalLinkage()))
///
/// \code
///   namespace {
///   void f() {}
///   }
/// \endcode
AST_MATCHER(NamedDecl, hasExternalFormalLinkage) {
  return Node.hasExternalFormalLinkage();
}

/// Matches a declaration that has default arguments.
/// /// Example matches y (matcher = parmVarDecl(hasDefaultArgument())) /// \code /// void x(int val) {} /// void y(int val = 0) {} /// \endcode /// /// Deprecated. Use hasInitializer() instead to be able to /// match on the contents of the default argument. For example: /// /// \code /// void x(int val = 7) {} /// void y(int val = 42) {} /// \endcode /// parmVarDecl(hasInitializer(integerLiteral(equals(42)))) /// matches the parameter of y /// /// A matcher such as /// parmVarDecl(hasInitializer(anything())) /// is equivalent to parmVarDecl(hasDefaultArgument()). AST_MATCHER(ParmVarDecl, hasDefaultArgument) { return Node.hasDefaultArg(); } /// Matches array new expressions. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(isArray()) /// matches the expression 'new MyClass[10]'. AST_MATCHER(CXXNewExpr, isArray) { return Node.isArray(); } /// Matches placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage, 16) MyClass(); /// \endcode /// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16)))) /// matches the expression 'new (Storage, 16) MyClass()'. AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index, internal::Matcher<Expr>, InnerMatcher) { return Node.getNumPlacementArgs() > Index && InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder); } /// Matches any placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage) MyClass(); /// \endcode /// cxxNewExpr(hasAnyPlacementArg(anything())) /// matches the expression 'new (Storage, 16) MyClass()'. AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>, InnerMatcher) { return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) { return InnerMatcher.matches(*Arg, Finder, Builder); }); } /// Matches array new expressions with a given array size. 
/// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(hasArraySize(integerLiteral(equals(10)))) /// matches the expression 'new MyClass[10]'. AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) { return Node.isArray() && *Node.getArraySize() && InnerMatcher.matches(**Node.getArraySize(), Finder, Builder); } /// Matches a class declaration that is defined. /// /// Example matches x (matcher = cxxRecordDecl(hasDefinition())) /// \code /// class x {}; /// class y; /// \endcode AST_MATCHER(CXXRecordDecl, hasDefinition) { return Node.hasDefinition(); } /// Matches C++11 scoped enum declaration. /// /// Example matches Y (matcher = enumDecl(isScoped())) /// \code /// enum X {}; /// enum class Y {}; /// \endcode AST_MATCHER(EnumDecl, isScoped) { return Node.isScoped(); } /// Matches a function declared with a trailing return type. /// /// Example matches Y (matcher = functionDecl(hasTrailingReturn())) /// \code /// int X() {} /// auto Y() -> int {} /// \endcode AST_MATCHER(FunctionDecl, hasTrailingReturn) { if (const auto *F = Node.getType()->getAs<FunctionProtoType>()) return F->hasTrailingReturn(); return false; } /// Matches expressions that match InnerMatcher that are possibly wrapped in an /// elidable constructor and other corresponding bookkeeping nodes. /// /// In C++17, elidable copy constructors are no longer being generated in the /// AST as it is not permitted by the standard. They are, however, part of the /// AST in C++14 and earlier. So, a matcher must abstract over these differences /// to work in all language modes. This matcher skips elidable constructor-call /// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and /// various implicit nodes inside the constructor calls, all of which will not /// appear in the C++17 AST. 
/// /// Given /// /// \code /// struct H {}; /// H G(); /// void f() { /// H D = G(); /// } /// \endcode /// /// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))`` /// matches ``H D = G()`` in C++11 through C++17 (and beyond). AST_MATCHER_P(Expr, ignoringElidableConstructorCall, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { // E tracks the node that we are examining. const Expr *E = &Node; // If present, remove an outer `ExprWithCleanups` corresponding to the // underlying `CXXConstructExpr`. This check won't cover all cases of added // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the // EWC is placed on the outermost node of the expression, which this may not // be), but, it still improves the coverage of this matcher. if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node)) E = CleanupsExpr->getSubExpr(); if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) { if (CtorExpr->isElidable()) { if (const auto *MaterializeTemp = dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) { return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder, Builder); } } } return InnerMatcher.matches(Node, Finder, Builder); } //----------------------------------------------------------------------------// // OpenMP handling. //----------------------------------------------------------------------------// /// Matches any ``#pragma omp`` executable directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective()`` matches ``omp parallel``, /// ``omp parallel default(none)`` and ``omp taskyield``. extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective> ompExecutableDirective; /// Matches standalone OpenMP directives, /// i.e., directives that can't have a structured block. 
/// /// Given /// /// \code /// #pragma omp parallel /// {} /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective(isStandaloneDirective()))`` matches /// ``omp taskyield``. AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) { return Node.isStandaloneDirective(); } /// Matches the structured-block of the OpenMP executable directive /// /// Prerequisite: the executable directive must not be standalone directive. /// If it is, it will never match. /// /// Given /// /// \code /// #pragma omp parallel /// ; /// #pragma omp parallel /// {} /// \endcode /// /// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;`` AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock, internal::Matcher<Stmt>, InnerMatcher) { if (Node.isStandaloneDirective()) return false; // Standalone directives have no structured blocks. return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder); } /// Matches any clause in an OpenMP directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// \endcode /// /// ``ompExecutableDirective(hasAnyClause(anything()))`` matches /// ``omp parallel default(none)``. AST_MATCHER_P(OMPExecutableDirective, hasAnyClause, internal::Matcher<OMPClause>, InnerMatcher) { ArrayRef<OMPClause *> Clauses = Node.clauses(); return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(), Clauses.end(), Finder, Builder); } /// Matches OpenMP ``default`` clause. /// /// Given /// /// \code /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel /// \endcode /// /// ``ompDefaultClause()`` matches ``default(none)`` and ``default(shared)``. extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause> ompDefaultClause; /// Matches if the OpenMP ``default`` clause has ``none`` kind specified. 
/// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// \endcode /// /// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``. AST_MATCHER(OMPDefaultClause, isNoneKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none; } /// Matches if the OpenMP ``default`` clause has ``shared`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// \endcode /// /// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``. AST_MATCHER(OMPDefaultClause, isSharedKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared; } /// Matches if the OpenMP directive is allowed to contain the specified OpenMP /// clause kind. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel for /// #pragma omp for /// \endcode /// /// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches /// ``omp parallel`` and ``omp parallel for``. /// /// If the matcher is use from clang-query, ``OpenMPClauseKind`` parameter /// should be passed as a quoted string. e.g., /// ``isAllowedToContainClauseKind("OMPC_default").`` AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind, OpenMPClauseKind, CKind) { return llvm::omp::isAllowedClauseForDirective( Node.getDirectiveKind(), CKind, Finder->getASTContext().getLangOpts().OpenMP); } //----------------------------------------------------------------------------// // End OpenMP handling. //----------------------------------------------------------------------------// } // namespace ast_matchers } // namespace clang #endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
workshare2.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#define N 50

/*
 * OpenMP worksharing demo: two independent vector operations are handed to
 * different threads via the `sections` construct.  Section 1 computes the
 * elementwise sum c = a + b, section 2 the elementwise product d = a * b.
 * `nowait` lets a thread leave the sections region without waiting for the
 * other section to finish.
 */
int main(int argc, char *argv[])
{
    int idx, thread_count, thread_id;
    float a[N], b[N], c[N], d[N];

    /* Seed the inputs and clear both result vectors. */
    for (idx = 0; idx < N; idx++) {
        a[idx] = idx * 1.5;
        b[idx] = idx + 22.35;
        c[idx] = 0.0;
        d[idx] = 0.0;
    }

#pragma omp parallel shared(a, b, c, d, thread_count) private(idx, thread_id)
    {
        thread_id = omp_get_thread_num();

        /* Only the master thread reports the team size. */
        if (thread_id == 0) {
            thread_count = omp_get_num_threads();
            printf("Number of threads = %d\n", thread_count);
        }
        printf("Thread %d starting...\n", thread_id);

#pragma omp sections nowait
        {
#pragma omp section
            {
                printf("Thread %d doing section 1\n", thread_id);
                for (idx = 0; idx < N; idx++) {
                    c[idx] = a[idx] + b[idx];
                    printf("Thread %d: c[%d]= %f\n", thread_id, idx, c[idx]);
                }
            }

#pragma omp section
            {
                printf("Thread %d doing section 2\n", thread_id);
                for (idx = 0; idx < N; idx++) {
                    d[idx] = a[idx] * b[idx];
                    printf("Thread %d: d[%d]= %f\n", thread_id, idx, d[idx]);
                }
            }
        } /* end of sections */

        printf("Thread %d done.\n", thread_id);
    } /* end of parallel region */

    return 0;
}
coordinate_transformation_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: // // #ifndef KRATOS_COORDINATE_TRANSFORMATION_UTILITIES_H #define KRATOS_COORDINATE_TRANSFORMATION_UTILITIES_H // system includes // external includes #include "boost/numeric/ublas/matrix_proxy.hpp" // kratos includes #include "includes/define.h" #include "includes/node.h" #include "containers/variable.h" #include "geometries/geometry.h" namespace Kratos { ///@addtogroup KratosCore ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// A utility to rotate the local contributions of certain nodes to the system matrix, which is required to apply slip conditions in arbitrary directions. template<class TLocalMatrixType, class TLocalVectorType, class TValueType> class CoordinateTransformationUtils { public: ///@name Type Definitions ///@{ /// Pointer definition of CoordinateTransformationUtils KRATOS_CLASS_POINTER_DEFINITION(CoordinateTransformationUtils); typedef Node<3> NodeType; typedef Geometry< Node<3> > GeometryType; // typedef boost::numeric::ublas::matrix_row<TLocalMatrixType> LocalRowType; // // typedef boost::numeric::ublas::matrix_range<TLocalMatrixType> MatrixBlockType; ///@} ///@name Life Cycle ///@{ /// Constructor. /** @param DomainSize Number of space dimensions (2 or 3) * @param NumRowsPerNode Number of matrix or vector rows associated to each node. Velocity DOFs are assumed to be the first mDomainSize rows in each block of rows. * @param rSelectionFlag All nodes where the flag given by this argument is set to true will be transformed to a rotated coordinate system. 
*/ CoordinateTransformationUtils(const unsigned int DomainSize, const unsigned int NumRowsPerNode, const Kratos::Flags& rSelectionFlag = SLIP): mDomainSize(DomainSize), mBlockSize(NumRowsPerNode), mrFlag(rSelectionFlag) {} /// Destructor. virtual ~CoordinateTransformationUtils() {} ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Calculates rotation operator for given point * * This metod calculates rotation matrix for a given point. Nodal NORMAL variable should be * assigned properly since rotation is calculated based on it. * * @param rRotationMatrix Output rotation matrix * @param rThisPoint Current node */ virtual void CalculateRotationOperatorPure( TLocalMatrixType& rRotationMatrix, const GeometryType::PointType& rThisPoint) const { KRATOS_TRY if (mDomainSize == 2) { BoundedMatrix<double, 2, 2> local_matrix; this->LocalRotationOperatorPure(local_matrix, rThisPoint); if (rRotationMatrix.size1() != 2 || rRotationMatrix.size2() != 2) { rRotationMatrix.resize(2, 2, false); } noalias(rRotationMatrix) = local_matrix; } else if (mDomainSize == 3) { BoundedMatrix<double, 3, 3> local_matrix; this->LocalRotationOperatorPure(local_matrix, rThisPoint); if (rRotationMatrix.size1() != 3 || rRotationMatrix.size2() != 3) { rRotationMatrix.resize(3, 3, false); } noalias(rRotationMatrix) = local_matrix; } else { KRATOS_ERROR << "Unsupported domain size [ mDomainSize = " << mDomainSize << " ].\n"; } KRATOS_CATCH(""); } /** * @brief Calculates rotation nodal matrix shape sensitivities * * This method calculates shape sensitivities of rotation matrix for given node. * Nodal NORMAL(historical data container) and NORMAL_SHAPE_SENSITIVITY(non-historical data contaienr) variables * should be properly initialized. 
* * NORMAL_SHAPE_SENSITIVITY matrix should be properly sized and initialized with proper shape sensitivity values * rows: number_of_nodes contributing to NORMAL * DOMAIN_SIZE, columns: DOMAIN_SIZE * * @param rRotationMatrixShapeDerivative Output shape sensitivities matrix w.r.t. NodeIndex and DerivativeIndex * @param DerivativeNodeIndex NodeIndex for which shape sensitivity matrix is computed * @param DerivativeDirectionIndex Direction index of the node for which shape sensitivity matrix is computed * @param rThisPoint Current node where rotation matrix shape sensitivities are required */ virtual void CalculateRotationOperatorPureShapeSensitivities( TLocalMatrixType& rRotationMatrixShapeDerivative, const std::size_t DerivativeNodeIndex, const std::size_t DerivativeDirectionIndex, const GeometryType::PointType& rThisPoint) const { KRATOS_TRY if (mDomainSize == 2) { BoundedMatrix<double, 2, 2> local_matrix; this->CalculateRotationOperatorPureShapeSensitivities( local_matrix, DerivativeNodeIndex, DerivativeDirectionIndex, rThisPoint); if (rRotationMatrixShapeDerivative.size1() != 2 || rRotationMatrixShapeDerivative.size2() != 2) { rRotationMatrixShapeDerivative.resize(2, 2, false); } noalias(rRotationMatrixShapeDerivative) = local_matrix; } else if (mDomainSize == 3) { BoundedMatrix<double, 3, 3> local_matrix; this->CalculateRotationOperatorPureShapeSensitivities( local_matrix, DerivativeNodeIndex, DerivativeDirectionIndex, rThisPoint); if (rRotationMatrixShapeDerivative.size1() != 3 || rRotationMatrixShapeDerivative.size2() != 3) { rRotationMatrixShapeDerivative.resize(3, 3, false); } noalias(rRotationMatrixShapeDerivative) = local_matrix; } else { KRATOS_ERROR << "Unsupported domain size [ mDomainSize = " << mDomainSize << " ].\n"; } KRATOS_CATCH(""); } /** * @brief Calculate 2d rotation nodal matrix shape sensitivities * * This method calculates shape sensitivities of 2D rotation matrix for given node. 
* Nodal NORMAL(historical data container) and NORMAL_SHAPE_SENSITIVITY(non-historical data contaienr) variables * should be properly initialized. * * NORMAL_SHAPE_SENSITIVITY matrix should be properly sized and initialized with proper shape sensitivity values * rows: (number_of_neighbour_nodes + 1) * 2 * cols: 2 * * @param rOutput Output shape sensitivities matrix w.r.t. NodeIndex and DerivativeIndex * @param DerivativeNodeIndex NodeIndex for which shape sensitivity matrix is computed * @param DerivativeDirectionIndex Direction index of the node for which shape sensitivity matrix is computed * @param rThisPoint Current node where rotation matrix shape sensitivities are required */ virtual void CalculateRotationOperatorPureShapeSensitivities( BoundedMatrix<double, 2, 2>& rOutput, const std::size_t DerivativeNodeIndex, const std::size_t DerivativeDirectionIndex, const GeometryType::PointType& rThisPoint) const { KRATOS_TRY KRATOS_ERROR_IF(!rThisPoint.SolutionStepsDataHas(NORMAL)) << "NORMAL is not found in node at " << rThisPoint.Coordinates() << "."; KRATOS_ERROR_IF(!rThisPoint.Has(NORMAL_SHAPE_DERIVATIVE)) << "NORMAL_SHAPE_DERIVATIVE is not found in node at " << rThisPoint.Coordinates() << "."; const array_1d<double, 3>& r_nodal_normal = rThisPoint.FastGetSolutionStepValue(NORMAL); const double nodal_normal_magnitude = norm_2(r_nodal_normal); KRATOS_ERROR_IF(nodal_normal_magnitude == 0.0) << "NORMAL at node " << rThisPoint.Coordinates() << " is not properly initialized."; const Matrix& r_sensitivity_values = rThisPoint.GetValue(NORMAL_SHAPE_DERIVATIVE); KRATOS_DEBUG_ERROR_IF(r_sensitivity_values.size2() != 2) << "NORMAL_SHAPE_DERIVATIVE is not properly initialized at node " << rThisPoint.Coordinates() << " to calculate 2D rotation operator shape sensitivities. 
[ required number of columns = 2, available number of columns = " << r_sensitivity_values.size2() << " ]."; const std::size_t require_rows = (DerivativeNodeIndex + 1) * 2; KRATOS_DEBUG_ERROR_IF(r_sensitivity_values.size1() < require_rows) << "NORMAL_SHAPE_DERIVATIVE is not properly initialized at node " << rThisPoint.Coordinates() << " to calculate 2D rotation operator shape sensitivities. [ required number of rows >= " << require_rows << ", available number of rows = " << r_sensitivity_values.size1() << " ]."; const Vector& r_nodal_normal_derivatives = row(r_sensitivity_values, DerivativeNodeIndex * 2 + DerivativeDirectionIndex); rOutput(0, 0) = r_nodal_normal_derivatives[0] / nodal_normal_magnitude; rOutput(0, 1) = r_nodal_normal_derivatives[1] / nodal_normal_magnitude; rOutput(1, 0) = -r_nodal_normal_derivatives[1] / nodal_normal_magnitude; rOutput(1, 1) = r_nodal_normal_derivatives[0] / nodal_normal_magnitude; const double nodal_normal_magnitude_derivative = (r_nodal_normal[0] * r_nodal_normal_derivatives[0] + r_nodal_normal[1] * r_nodal_normal_derivatives[1]) / nodal_normal_magnitude; const double coeff = nodal_normal_magnitude_derivative / (std::pow(nodal_normal_magnitude, 2)); rOutput(0, 0) -= r_nodal_normal[0] * coeff; rOutput(0, 1) -= r_nodal_normal[1] * coeff; rOutput(1, 0) -= -r_nodal_normal[1] * coeff; rOutput(1, 1) -= r_nodal_normal[0] * coeff; KRATOS_CATCH(""); } /** * @brief Calculate 3d rotation nodal matrix shape sensitivities * * This method calculates shape sensitivities of 3D rotation matrix for given node. * Nodal NORMAL(historical data container) and NORMAL_SHAPE_SENSITIVITY(non-historical data contaienr) variables * should be properly initialized. * * NORMAL_SHAPE_SENSITIVITY matrix should be properly sized and initialized with proper shape sensitivity values * rows: (number_of_neighbour_nodes + 1) * 3 * cols: 3 * * @param rOutput Output shape sensitivities matrix w.r.t. 
NodeIndex and DerivativeIndex * @param DerivativeNodeIndex NodeIndex for which shape sensitivity matrix is computed * @param DerivativeDirectionIndex Direction index of the node for which shape sensitivity matrix is computed * @param rThisPoint Current node where rotation matrix shape sensitivities are required */ virtual void CalculateRotationOperatorPureShapeSensitivities( BoundedMatrix<double, 3, 3>& rOutput, const std::size_t DerivativeNodeIndex, const std::size_t DerivativeDirectionIndex, const GeometryType::PointType& rThisPoint) const { KRATOS_TRY KRATOS_ERROR_IF(!rThisPoint.SolutionStepsDataHas(NORMAL)) << "NORMAL is not found in node at " << rThisPoint.Coordinates() << "."; KRATOS_ERROR_IF(!rThisPoint.Has(NORMAL_SHAPE_DERIVATIVE)) << "NORMAL_SHAPE_DERIVATIVE is not found in node at " << rThisPoint.Coordinates() << "."; const array_1d<double, 3>& r_nodal_normal = rThisPoint.FastGetSolutionStepValue(NORMAL); const double nodal_normal_magnitude = norm_2(r_nodal_normal); KRATOS_ERROR_IF(nodal_normal_magnitude == 0.0) << "NORMAL at node " << rThisPoint.Coordinates() << " is not properly initialized."; const Matrix& r_sensitivity_values = rThisPoint.GetValue(NORMAL_SHAPE_DERIVATIVE); KRATOS_DEBUG_ERROR_IF(r_sensitivity_values.size2() != 3) << "NORMAL_SHAPE_DERIVATIVE is not properly initialized at node " << rThisPoint.Coordinates() << " to calculate 3D rotation operator shape sensitivities. [ required number of columns = 3, available number of columns = " << r_sensitivity_values.size2() << " ]."; const std::size_t require_rows = (DerivativeNodeIndex + 1) * 3; KRATOS_DEBUG_ERROR_IF(r_sensitivity_values.size1() < require_rows) << "NORMAL_SHAPE_DERIVATIVE is not properly initialized at node " << rThisPoint.Coordinates() << " to calculate 3D rotation operator shape sensitivities. 
[ required number of rows >= " << require_rows << ", available number of rows = " << r_sensitivity_values.size1() << " ]."; const Vector& r_nodal_normal_derivative = row(r_sensitivity_values, DerivativeNodeIndex * 3 + DerivativeDirectionIndex); const double nodal_normal_magnitude_derivative = VectorNormDerivative(nodal_normal_magnitude, r_nodal_normal, r_nodal_normal_derivative); const array_1d<double, 3>& unit_normal = r_nodal_normal / nodal_normal_magnitude; const array_1d<double, 3>& unit_normal_derivative = UnitVectorDerivative(nodal_normal_magnitude, nodal_normal_magnitude_derivative, r_nodal_normal, r_nodal_normal_derivative); rOutput(0, 0) = unit_normal_derivative[0]; rOutput(0, 1) = unit_normal_derivative[1]; rOutput(0, 2) = unit_normal_derivative[2]; array_1d<double, 3> rT1(3, 0.0); rT1[0] = 1.0; double dot = unit_normal[0]; double dot_derivative = unit_normal_derivative[0]; if (std::abs(dot) > 0.99) { rT1[0] = 0.0; rT1[1] = 1.0; dot = unit_normal[1]; dot_derivative = unit_normal_derivative[1]; } // calculate rT1 noalias(rT1) -= unit_normal * dot; const double rT1_norm = norm_2(rT1); const array_1d<double, 3>& unit_rT1 = rT1 / rT1_norm; // calculate rT1 derivative const array_1d<double, 3>& rT1_derivative = (unit_normal_derivative * dot + unit_normal * dot_derivative) * -1.0; // calculate rT1 norm derivative const double rT1_norm_derivative = VectorNormDerivative(rT1_norm, rT1, rT1_derivative); const array_1d<double, 3>& unit_rT1_derivative = UnitVectorDerivative(rT1_norm, rT1_norm_derivative, rT1, rT1_derivative); rOutput(1, 0) = unit_rT1_derivative[0]; rOutput(1, 1) = unit_rT1_derivative[1]; rOutput(1, 2) = unit_rT1_derivative[2]; rOutput(2, 0) = unit_normal_derivative[1] * unit_rT1[2] + unit_normal[1] * unit_rT1_derivative[2] - unit_normal_derivative[2] * unit_rT1[1] - unit_normal[2] * unit_rT1_derivative[1]; rOutput(2, 1) = unit_normal_derivative[2] * unit_rT1[0] + unit_normal[2] * unit_rT1_derivative[0] - unit_normal_derivative[0] * unit_rT1[2] - 
unit_normal[0] * unit_rT1_derivative[2]; rOutput(2, 2) = unit_normal_derivative[0] * unit_rT1[1] + unit_normal[0] * unit_rT1_derivative[1] - unit_normal_derivative[1] * unit_rT1[0] - unit_normal[1] * unit_rT1_derivative[0]; KRATOS_CATCH(""); } /// Rotate the local system contributions so that they are oriented with each node's normal. /** @param rLocalMatrix Local system matrix @param rLocalVector Local RHS vector @param rGeometry A reference to the element's (or condition's) geometry */ virtual void Rotate(TLocalMatrixType& rLocalMatrix, TLocalVectorType& rLocalVector, GeometryType& rGeometry) const { if(mBlockSize != mDomainSize) //Monolithic case { if(mDomainSize == 2) RotateAux<2,3>(rLocalMatrix,rLocalVector,rGeometry); if(mDomainSize == 3) RotateAux<3,4>(rLocalMatrix,rLocalVector,rGeometry); } else //fractional step case { if(mDomainSize == 2) RotateAuxPure<2>(rLocalMatrix,rLocalVector,rGeometry); if(mDomainSize == 3) RotateAuxPure<3>(rLocalMatrix,rLocalVector,rGeometry); } } /// RHS only version of Rotate virtual void Rotate(TLocalVectorType& rLocalVector, GeometryType& rGeometry) const { //const unsigned int LocalSize = rLocalVector.size(); // We expect this to work both with elements (4 nodes) and conditions (3 nodes) unsigned int Index = 0; if (rLocalVector.size() > 0) { if(mBlockSize != mDomainSize) //Monolithic case { for(unsigned int j = 0; j < rGeometry.PointsNumber(); ++j) { if( this->IsSlip(rGeometry[j]) ) { if(mDomainSize == 3) { array_1d<double,4> aux,aux1; BoundedMatrix<double,4,4> rRot; LocalRotationOperator3D<4>(rRot,rGeometry[j]); for(unsigned int k=0; k<4; k++) aux[k] = rLocalVector[j*mBlockSize+k]; noalias(aux1) = prod(rRot,aux); for(unsigned int k=0; k<4; k++) rLocalVector[j*mBlockSize+k] = aux1[k]; } else { array_1d<double,3> aux,aux1; BoundedMatrix<double,3,3> rRot; LocalRotationOperator2D<3>(rRot,rGeometry[j]); for(unsigned int k=0; k<3; k++) { aux[k] = rLocalVector[j*mBlockSize+k]; } noalias(aux1) = prod(rRot,aux); for(unsigned int k=0; 
k<3; k++) rLocalVector[j*mBlockSize+k] = aux1[k]; } } Index += mBlockSize; } } else //fractional step case { for(unsigned int j = 0; j < rGeometry.PointsNumber(); ++j) { if( this->IsSlip(rGeometry[j]) ) { if(mDomainSize == 3) { array_1d<double,3> aux,aux1; BoundedMatrix<double,3,3> rRot; LocalRotationOperatorPure(rRot,rGeometry[j]); for(unsigned int k=0; k<3; k++) aux[k] = rLocalVector[j*mBlockSize+k]; noalias(aux1) = prod(rRot,aux); for(unsigned int k=0; k<3; k++) rLocalVector[j*mBlockSize+k] = aux1[k]; } else { array_1d<double,2> aux,aux1; BoundedMatrix<double,2,2> rRot; LocalRotationOperatorPure(rRot,rGeometry[j]); for(unsigned int k=0; k<2; k++) aux[k] = rLocalVector[j*mBlockSize+k]; noalias(aux1) = prod(rRot,aux); for(unsigned int k=0; k<2; k++) rLocalVector[j*mBlockSize+k] = aux1[k]; } } Index += mBlockSize; } } } } /// Apply slip boundary conditions to the rotated local contributions. /** This function takes the local system contributions rotated so each node's velocities are expressed using a base oriented with its normal and imposes that the normal velocity is equal to the mesh velocity in the normal direction. 
*/ virtual void ApplySlipCondition(TLocalMatrixType& rLocalMatrix, TLocalVectorType& rLocalVector, GeometryType& rGeometry) const { const unsigned int LocalSize = rLocalVector.size(); // We expect this to work both with elements (4 nodes) and conditions (3 nodes) if (LocalSize > 0) { for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode) { if( this->IsSlip(rGeometry[itNode])) { // We fix the first dof (normal velocity) for each rotated block unsigned int j = itNode * mBlockSize; //const double k = rLocalMatrix(j,j)+rLocalMatrix(j,j+1)+rLocalMatrix(j,j+2); // If the mesh is moving, we must impose v_normal = vmesh_normal array_1d<double,3> VMesh = rGeometry[itNode].FastGetSolutionStepValue(MESH_VELOCITY); VMesh -= rGeometry[itNode].FastGetSolutionStepValue(VELOCITY); array_1d<double,3> rN = rGeometry[itNode].FastGetSolutionStepValue(NORMAL); this->Normalize(rN); for( unsigned int i = 0; i < j; ++i)// Skip term (i,i) { rLocalMatrix(i,j) = 0.0; rLocalMatrix(j,i) = 0.0; } for( unsigned int i = j+1; i < LocalSize; ++i) { rLocalMatrix(i,j) = 0.0; rLocalMatrix(j,i) = 0.0; } rLocalVector(j) = inner_prod(rN,VMesh); rLocalMatrix(j,j) = 1.0; } } } } /// RHS only version of ApplySlipCondition virtual void ApplySlipCondition(TLocalVectorType& rLocalVector, GeometryType& rGeometry) const { if (rLocalVector.size() > 0) { for(unsigned int itNode = 0; itNode < rGeometry.PointsNumber(); ++itNode) { if( this->IsSlip(rGeometry[itNode]) ) { // We fix the first dof (normal velocity) for each rotated block unsigned int j = itNode * mBlockSize; // If the mesh is moving, we must impose v_normal = vmesh_normal array_1d<double,3> VMesh = rGeometry[itNode].FastGetSolutionStepValue(MESH_VELOCITY); VMesh -= rGeometry[itNode].FastGetSolutionStepValue(VELOCITY); array_1d<double,3> rN = rGeometry[itNode].FastGetSolutionStepValue(NORMAL); this->Normalize(rN); rLocalVector[j] = inner_prod(rN,VMesh); } } } } /// Transform nodal velocities to the rotated coordinates (aligned with 
each node's normal) virtual void RotateVelocities(ModelPart& rModelPart) const { TLocalVectorType Vel(mDomainSize); TLocalVectorType Tmp(mDomainSize); ModelPart::NodeIterator it_begin = rModelPart.NodesBegin(); #pragma omp parallel for firstprivate(Vel,Tmp) for(int iii=0; iii<static_cast<int>(rModelPart.Nodes().size()); iii++) { ModelPart::NodeIterator itNode = it_begin+iii; if( this->IsSlip(*itNode) ) { //this->RotationOperator<TLocalMatrixType>(Rotation,); if(mDomainSize == 3) { BoundedMatrix<double,3,3> rRot; LocalRotationOperatorPure(rRot,*itNode); array_1d<double,3>& rVelocity = itNode->FastGetSolutionStepValue(VELOCITY); for(unsigned int i = 0; i < 3; i++) Vel[i] = rVelocity[i]; noalias(Tmp) = prod(rRot,Vel); for(unsigned int i = 0; i < 3; i++) rVelocity[i] = Tmp[i]; } else { BoundedMatrix<double,2,2> rRot; LocalRotationOperatorPure(rRot,*itNode); array_1d<double,3>& rVelocity = itNode->FastGetSolutionStepValue(VELOCITY); for(unsigned int i = 0; i < 2; i++) Vel[i] = rVelocity[i]; noalias(Tmp) = prod(rRot,Vel); for(unsigned int i = 0; i < 2; i++) rVelocity[i] = Tmp[i]; } } } } /// Transform nodal velocities from the rotated system to the original one virtual void RecoverVelocities(ModelPart& rModelPart) const { TLocalVectorType Vel(mDomainSize); TLocalVectorType Tmp(mDomainSize); ModelPart::NodeIterator it_begin = rModelPart.NodesBegin(); #pragma omp parallel for firstprivate(Vel,Tmp) for(int iii=0; iii<static_cast<int>(rModelPart.Nodes().size()); iii++) { ModelPart::NodeIterator itNode = it_begin+iii; if( this->IsSlip(*itNode) ) { if(mDomainSize == 3) { BoundedMatrix<double,3,3> rRot; LocalRotationOperatorPure(rRot,*itNode); array_1d<double,3>& rVelocity = itNode->FastGetSolutionStepValue(VELOCITY); for(unsigned int i = 0; i < 3; i++) Vel[i] = rVelocity[i]; noalias(Tmp) = prod(trans(rRot),Vel); for(unsigned int i = 0; i < 3; i++) rVelocity[i] = Tmp[i]; } else { BoundedMatrix<double,2,2> rRot; LocalRotationOperatorPure(rRot,*itNode); array_1d<double,3>& 
rVelocity = itNode->FastGetSolutionStepValue(VELOCITY); for(unsigned int i = 0; i < 2; i++) Vel[i] = rVelocity[i]; noalias(Tmp) = prod(trans(rRot),Vel); for(unsigned int i = 0; i < 2; i++) rVelocity[i] = Tmp[i]; } } } } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. virtual std::string Info() const { std::stringstream buffer; buffer << "CoordinateTransformationUtils"; return buffer.str(); } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const { rOStream << "CoordinateTransformationUtils"; } /// Print object's data. virtual void PrintData(std::ostream& rOStream) const {} ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ template<unsigned int TDim, unsigned int TBlockSize, unsigned int TSkip = 0> void RotateAux(TLocalMatrixType& rLocalMatrix, TLocalVectorType& rLocalVector, GeometryType& rGeometry) const { const unsigned int LocalSize = rLocalVector.size(); unsigned int Index = 0; int rotations_needed = 0; const unsigned int NumBlocks = LocalSize / TBlockSize; DenseVector<bool> NeedRotation( NumBlocks, false); std::vector< BoundedMatrix<double,TBlockSize,TBlockSize> > rRot(NumBlocks); for(unsigned int j = 0; j < NumBlocks; ++j) { if( this->IsSlip(rGeometry[j]) ) { NeedRotation[j] = true; rotations_needed++; if (TDim == 2) LocalRotationOperator2D<TBlockSize,TSkip>(rRot[j],rGeometry[j]); else LocalRotationOperator3D<TBlockSize,TSkip>(rRot[j],rGeometry[j]); } Index += TBlockSize; } if(rotations_needed > 0) { BoundedMatrix<double,TBlockSize,TBlockSize> mat_block, tmp; array_1d<double,TBlockSize> aux, aux1; for(unsigned int i=0; i<NumBlocks; i++) { if(NeedRotation[i] == true) { for(unsigned int j=0; j<NumBlocks; j++) { if(NeedRotation[j] == true) { 
ReadBlockMatrix<TBlockSize>(mat_block, rLocalMatrix, i*TBlockSize, j*TBlockSize); noalias(tmp) = prod(mat_block,trans(rRot[j])); noalias(mat_block) = prod(rRot[i],tmp); WriteBlockMatrix<TBlockSize>(mat_block, rLocalMatrix, i*TBlockSize, j*TBlockSize); } else { ReadBlockMatrix<TBlockSize>(mat_block, rLocalMatrix, i*TBlockSize, j*TBlockSize); noalias(tmp) = prod(rRot[i],mat_block); WriteBlockMatrix<TBlockSize>(tmp, rLocalMatrix, i*TBlockSize, j*TBlockSize); } } for(unsigned int k=0; k<TBlockSize; k++) aux[k] = rLocalVector[i*TBlockSize+k]; noalias(aux1) = prod(rRot[i],aux); for(unsigned int k=0; k<TBlockSize; k++) rLocalVector[i*TBlockSize+k] = aux1[k]; } else { for(unsigned int j=0; j<NumBlocks; j++) { if(NeedRotation[j] == true) { ReadBlockMatrix<TBlockSize>(mat_block, rLocalMatrix, i*TBlockSize, j*TBlockSize); noalias(tmp) = prod(mat_block,trans(rRot[j])); WriteBlockMatrix<TBlockSize>(tmp, rLocalMatrix, i*TBlockSize, j*TBlockSize); } } } } } } //to be used when there is only velocity (no additional pressure or other var block) template<unsigned int TDim> void RotateAuxPure(TLocalMatrixType& rLocalMatrix, TLocalVectorType& rLocalVector, GeometryType& rGeometry) const { const unsigned int LocalSize = rLocalVector.size(); unsigned int Index = 0; int rotations_needed = 0; const unsigned int NumBlocks = LocalSize / mBlockSize; DenseVector<bool> NeedRotation( NumBlocks, false); std::vector< BoundedMatrix<double,TDim,TDim> > rRot(NumBlocks); for(unsigned int j = 0; j < NumBlocks; ++j) { if( this->IsSlip(rGeometry[j]) ) { NeedRotation[j] = true; rotations_needed++; LocalRotationOperatorPure(rRot[j],rGeometry[j]); } Index += mBlockSize; } if(rotations_needed > 0) { BoundedMatrix<double,TDim,TDim> mat_block, tmp; array_1d<double,TDim> aux, aux1; for(unsigned int i=0; i<NumBlocks; i++) { if(NeedRotation[i] == true) { for(unsigned int j=0; j<NumBlocks; j++) { if(NeedRotation[j] == true) { ReadBlockMatrix<TDim>(mat_block, rLocalMatrix, i*mBlockSize, j*mBlockSize); noalias(tmp) 
= prod(mat_block,trans(rRot[j])); noalias(mat_block) = prod(rRot[i],tmp); WriteBlockMatrix<TDim>(mat_block, rLocalMatrix, i*mBlockSize, j*mBlockSize); } else { ReadBlockMatrix<TDim>(mat_block, rLocalMatrix, i*mBlockSize, j*mBlockSize); noalias(tmp) = prod(rRot[i],mat_block); WriteBlockMatrix<TDim>(tmp, rLocalMatrix, i*mBlockSize, j*mBlockSize); } } for(unsigned int k=0; k<TDim; k++) aux[k] = rLocalVector[i*mBlockSize+k]; noalias(aux1) = prod(rRot[i],aux); for(unsigned int k=0; k<TDim; k++) rLocalVector[i*mBlockSize+k] = aux1[k]; } else { for(unsigned int j=0; j<NumBlocks; j++) { if(NeedRotation[j] == true) { ReadBlockMatrix<TDim>(mat_block, rLocalMatrix, i*mBlockSize, j*mBlockSize); noalias(tmp) = prod(mat_block,trans(rRot[j])); WriteBlockMatrix<TDim>(tmp, rLocalMatrix, i*mBlockSize, j*mBlockSize); } } } } } } template<unsigned int TBlockSize, unsigned int TSkip = 0> void LocalRotationOperator2D( BoundedMatrix<double,TBlockSize,TBlockSize>& rRot, GeometryType::PointType& rThisPoint) const { noalias(rRot) = IdentityMatrix(TBlockSize); // Get the normal evaluated at the node const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL); double aux = rNormal[0]*rNormal[0] + rNormal[1]*rNormal[1]; aux = sqrt(aux); rRot(TSkip ,TSkip ) = rNormal[0]/aux; rRot(TSkip ,TSkip+1) = rNormal[1]/aux; rRot(TSkip+1,TSkip ) = -rNormal[1]/aux; rRot(TSkip+1,TSkip+1) = rNormal[0]/aux; } template<unsigned int TBlockSize, unsigned int TSkip = 0> void LocalRotationOperator3D( BoundedMatrix<double,TBlockSize,TBlockSize>& rRot, GeometryType::PointType& rThisPoint) const { noalias(rRot) = IdentityMatrix(TBlockSize); // Get the normal evaluated at the node const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL); double aux = rNormal[0]*rNormal[0] + rNormal[1]*rNormal[1] + rNormal[2]*rNormal[2]; aux = sqrt(aux); rRot(TSkip,TSkip ) = rNormal[0]/aux; rRot(TSkip,TSkip+1) = rNormal[1]/aux; rRot(TSkip,TSkip+2) = rNormal[2]/aux; // Define the new coordinate 
system, where the first vector is aligned with the normal // To choose the remaining two vectors, we project the first component of the cartesian base to the tangent plane array_1d<double,3> rT1; rT1(0) = 1.0; rT1(1) = 0.0; rT1(2) = 0.0; double dot = rRot(TSkip,TSkip);//this->Dot(rN,rT1); // It is possible that the normal is aligned with (1,0,0), resulting in norm(rT1) = 0 // If this is the case, repeat the procedure using (0,1,0) if ( fabs(dot) > 0.99 ) { rT1(0) = 0.0; rT1(1) = 1.0; rT1(2) = 0.0; dot = rRot(TSkip,TSkip+1); //this->Dot(rN,rT1); } // calculate projection and normalize rT1[0] -= dot*rRot(TSkip,TSkip); rT1[1] -= dot*rRot(TSkip,TSkip+1); rT1[2] -= dot*rRot(TSkip,TSkip+2); this->Normalize(rT1); rRot(TSkip+1,TSkip ) = rT1[0]; rRot(TSkip+1,TSkip+1) = rT1[1]; rRot(TSkip+1,TSkip+2) = rT1[2]; // The third base component is choosen as N x T1, which is normalized by construction rRot(TSkip+2,TSkip ) = rRot(TSkip,TSkip+1)*rT1[2] - rRot(TSkip,TSkip+2)*rT1[1]; rRot(TSkip+2,TSkip+1) = rRot(TSkip,TSkip+2)*rT1[0] - rRot(TSkip,TSkip )*rT1[2]; rRot(TSkip+2,TSkip+2) = rRot(TSkip,TSkip )*rT1[1] - rRot(TSkip,TSkip+1)*rT1[0]; } void LocalRotationOperatorPure( BoundedMatrix<double,3,3>& rRot, const GeometryType::PointType& rThisPoint) const { // Get the normal evaluated at the node const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL); double aux = rNormal[0]*rNormal[0] + rNormal[1]*rNormal[1] + rNormal[2]*rNormal[2]; aux = sqrt(aux); rRot(0,0) = rNormal[0]/aux; rRot(0,1) = rNormal[1]/aux; rRot(0,2) = rNormal[2]/aux; // Define the new coordinate system, where the first vector is aligned with the normal // To choose the remaining two vectors, we project the first component of the cartesian base to the tangent plane array_1d<double,3> rT1; rT1(0) = 1.0; rT1(1) = 0.0; rT1(2) = 0.0; double dot = rRot(0,0);//this->Dot(rN,rT1); // It is possible that the normal is aligned with (1,0,0), resulting in norm(rT1) = 0 // If this is the case, repeat the 
procedure using (0,1,0) if ( fabs(dot) > 0.99 ) { rT1(0) = 0.0; rT1(1) = 1.0; rT1(2) = 0.0; dot = rRot(0,1); //this->Dot(rN,rT1); } // calculate projection and normalize rT1[0] -= dot*rRot(0,0); rT1[1] -= dot*rRot(0,1); rT1[2] -= dot*rRot(0,2); this->Normalize(rT1); rRot(1,0) = rT1[0]; rRot(1,1) = rT1[1]; rRot(1,2) = rT1[2]; // The third base component is choosen as N x T1, which is normalized by construction rRot(2,0) = rRot(0,1)*rT1[2] - rRot(0,2)*rT1[1]; rRot(2,1) = rRot(0,2)*rT1[0] - rRot(0,0)*rT1[2]; rRot(2,2) = rRot(0,0)*rT1[1] - rRot(0,1)*rT1[0]; } void LocalRotationOperatorPure( BoundedMatrix<double,2,2>& rRot, const GeometryType::PointType& rThisPoint) const { // Get the normal evaluated at the node const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL); double aux = rNormal[0]*rNormal[0] + rNormal[1]*rNormal[1]; aux = sqrt(aux); rRot(0,0) = rNormal[0]/aux; rRot(0,1) = rNormal[1]/aux; rRot(1,0) = -rNormal[1]/aux; rRot(1,1) = rNormal[0]/aux; } bool IsSlip(const Node<3>& rNode) const { return rNode.Is(mrFlag); } /// Normalize a vector. 
/** * @param rThis the vector * @return Original norm of the input vector */ template< class TVectorType > double Normalize(TVectorType& rThis) const { double Norm = 0; for(typename TVectorType::iterator iComponent = rThis.begin(); iComponent < rThis.end(); ++iComponent) Norm += (*iComponent)*(*iComponent); Norm = sqrt(Norm); for(typename TVectorType::iterator iComponent = rThis.begin(); iComponent < rThis.end(); ++iComponent) *iComponent /= Norm; return Norm; } ///@} ///@name Protected Access ///@{ unsigned int GetDomainSize() const { return mDomainSize; } unsigned int GetBlockSize() const { return mBlockSize; } ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ /// Number of spatial dimensions const unsigned int mDomainSize; /// Number of matrix or vector rows associated to each node. /** @note Velocity Dofs are assumed to be the first mDomainSize rows. */ const unsigned int mBlockSize; const Kratos::Flags& mrFlag; ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ // /// Compute a rotation matrix to transform values from the cartesian base to one oriented with the node's normal // /** // * The normal is read from solution step data NORMAL. Use NormalCalculationUtils::CalculateOnSimplex to // * obtain and store the nodal normal from the normals of the model's conditons. // * @param rRot The rotation matrix (output) // * @param rThisPoint The point used to orient the new coordinate system. 
// * @see NormalCalculationUtils // */ // template<class TMatrixType> // void RotationOperator(TMatrixType& rRot, // GeometryType::PointType& rThisPoint) const // { // typedef boost::numeric::ublas::matrix_row<TMatrixType> ThisRowType; // // Get the normal evaluated at the node // const array_1d<double,3>& rNormal = rThisPoint.FastGetSolutionStepValue(NORMAL); // // if(mDomainSize == 3) // { // // Define the new coordinate system, where the first vector is aligned with the normal // ThisRowType rN(rRot,0); // for( unsigned int i = 0; i < 3; ++i) // rN[i] = rNormal[i]; // this->Normalize(rN); // // // To choose the remaining two vectors, we project the first component of the cartesian base to the tangent plane // ThisRowType rT1(rRot,1); // rT1(0) = 1.0; // rT1(1) = 0.0; // rT1(2) = 0.0; // // double dot = this->Dot(rN,rT1); // // // It is possible that the normal is aligned with (1,0,0), resulting in norm(rT1) = 0 // // If this is the case, repeat the procedure using (0,1,0) // if ( fabs(dot) > 0.99 ) // { // rT1(0) = 0.0; // rT1(1) = 1.0; // rT1(2) = 0.0; // // dot = this->Dot(rN,rT1); // } // // // calculate projection and normalize // rT1 -= dot * rN; // this->Normalize(rT1); // // // The third base component is choosen as N x T1, which is normalized by construction // ThisRowType rT2(rRot,2); // rT2(0) = rN(1)*rT1(2) - rN(2)*rT1(1); // rT2(1) = rN(2)*rT1(0) - rN(0)*rT1(2); // rT2(2) = rN(0)*rT1(1) - rN(1)*rT1(0); // } // else //if(mDomainSize == 2) // { // /* The basis for the new coordinate system is (normal,tangent) // Tangent vector is chosen (-normal_y, normal_x) so that the resulting base // is right-handed. 
// */ // ThisRowType rN(rRot,0); // ThisRowType rT(rRot,1); // // rN[0] = rNormal[0]; // rN[1] = rNormal[1]; // this->Normalize(rN); // rT[0] = -rN[1]; // rT[1] = rN[0]; // } // // } template< class TVectorType > double Dot(const TVectorType& rV1, const TVectorType& rV2) const { double dot = 0.0; for( typename TVectorType::const_iterator iV1 = rV1.begin(),iV2 = rV2.begin(); iV1 != rV1.end(); ++iV1, ++iV2) { dot += (*iV1) * (*iV2); } return dot; } inline double VectorNormDerivative( const double ValueNorm, const array_1d<double, 3>& rValue, const array_1d<double, 3>& rValueDerivative) const { return inner_prod(rValue, rValueDerivative) / ValueNorm; } inline array_1d<double, 3> UnitVectorDerivative( const double VectorNorm, const double VectorNormDerivative, const array_1d<double, 3>& rVector, const array_1d<double, 3>& rVectorDerivative) const { return (rVectorDerivative * VectorNorm - rVector * VectorNormDerivative) / std::pow(VectorNorm, 2); } /// Transform a local contribution from cartesian coordinates to rotated ones // void ApplyRotation(TLocalMatrixType& rMatrix, // const TLocalMatrixType& rRotation) const // { // // compute B = R*A*transpose(R) // const unsigned int LocalSize = rMatrix.size1(); // const unsigned int NumBlocks = LocalSize / mBlockSize; // //TLocalMatrixType Tmp = ZeroMatrix(LocalSize,LocalSize); // /* // for (unsigned int iBlock = 0; iBlock < NumBlocks; iBlock++) // { // for (unsigned int jBlock = 0; jBlock < NumBlocks; jBlock++) // { // for (unsigned int i = iBlock*mBlockSize; i < (iBlock+1)*mBlockSize; i++) // { // for(unsigned int j = jBlock*mBlockSize; j < (jBlock+1)*mBlockSize; j++) // { // double& tij = Tmp(i,j); // for(unsigned int k = iBlock*mBlockSize; k < (iBlock+1)*mBlockSize; k++) // { // for(unsigned int l = jBlock*mBlockSize; l < (jBlock+1)*mBlockSize; l++) // { // tij += rRotation(i,k)*rMatrix(k,l)*rRotation(j,l); // } // } // } // } // } // }*/ // // Matrix Tmp = prod(rMatrix,trans(rRotation)); // noalias(rMatrix) = 
prod(rRotation,Tmp); // // // noalias(rMatrix) = Tmp; // } //auxiliary functions template< unsigned int TBlockSize > void ReadBlockMatrix( BoundedMatrix<double,TBlockSize, TBlockSize>& block, const Matrix& origin, const unsigned int Ibegin, const unsigned int Jbegin) const { for(unsigned int i=0; i<TBlockSize; i++) { for(unsigned int j=0; j<TBlockSize; j++) { block(i,j) = origin(Ibegin+i, Jbegin+j); } } } template< unsigned int TBlockSize > void WriteBlockMatrix( const BoundedMatrix<double,TBlockSize, TBlockSize>& block, Matrix& destination, const unsigned int Ibegin, const unsigned int Jbegin) const { for(unsigned int i=0; i<TBlockSize; i++) { for(unsigned int j=0; j<TBlockSize; j++) { destination(Ibegin+i, Jbegin+j) = block(i,j); } } } ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ /// Assignment operator. CoordinateTransformationUtils& operator=(CoordinateTransformationUtils const& rOther) {} /// Copy constructor. CoordinateTransformationUtils(CoordinateTransformationUtils const& rOther) {} ///@} }; ///@} ///@name Type Definitions ///@{ ///@} ///@name Input and output ///@{ /// input stream function template<class TLocalMatrixType, class TLocalVectorType, class TValueType> inline std::istream& operator >>(std::istream& rIStream, CoordinateTransformationUtils<TLocalMatrixType, TLocalVectorType, TValueType>& rThis) { return rIStream; } /// output stream function template<class TLocalMatrixType, class TLocalVectorType, class TValueType> inline std::ostream& operator <<(std::ostream& rOStream, const CoordinateTransformationUtils<TLocalMatrixType, TLocalVectorType, TValueType>& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; rThis.PrintData(rOStream); return rOStream; } ///@} ///@} addtogroup block } #endif // KRATOS_COORDINATE_TRANSFORMATION_UTILITIES_H
omptough.c
#include <pthread.h> #include <stdlib.h> #include <malloc.h> #include <unistd.h> #include <stdio.h> #include <omp.h> #include "papi_test.h" #define NITER (100000) int main( int argc, char *argv[] ) { int i; int ret; int nthreads; int *evtset; int *ctrcode; nthreads = omp_get_max_threads( ); evtset = ( int * ) malloc( sizeof ( int ) * nthreads ); ctrcode = ( int * ) malloc( sizeof ( int ) * nthreads ); tests_quiet( argc, argv ); /* Set TESTS_QUIET variable */ ret = PAPI_library_init( PAPI_VER_CURRENT ); if ( ret != PAPI_VER_CURRENT && ret > 0 ) { fprintf( stderr, "PAPI library version mismatch '%s'\n", PAPI_strerror( ret ) ); exit( 1 ); } if ( ret < 0 ) { fprintf( stderr, "PAPI initialization error '%s'\n", PAPI_strerror( ret ) ); exit( 1 ); } if ( ( ret = PAPI_thread_init( ( unsigned long ( * )( void ) ) pthread_self ) ) != PAPI_OK ) { fprintf( stderr, "PAPI thread initialization error '%s'\n", PAPI_strerror( ret ) ); exit( 1 ); } for ( i = 0; i < nthreads; i++ ) { evtset[i] = PAPI_NULL; if ( ( ret = PAPI_event_name_to_code( "PAPI_TOT_INS", &ctrcode[i] ) ) != PAPI_OK ) { fprintf( stderr, "PAPI evt-name-to-code error '%s'\n", PAPI_strerror( ret ) ); } } for ( i = 0; i < NITER; i++ ) { #pragma omp parallel { int tid; int pid; tid = omp_get_thread_num( ); pid = pthread_self( ); if ( ( ret = PAPI_register_thread( ) ) != PAPI_OK ) { if ( !TESTS_QUIET ) { fprintf( stderr, "[%5d] Error in register thread (tid=%d pid=%d) '%s'\n", i, tid, pid, PAPI_strerror( ret ) ); test_fail( __FILE__, __LINE__, "omptough", 1 ); } } evtset[tid] = PAPI_NULL; if ( ( ret = PAPI_create_eventset( &( evtset[tid] ) ) ) != PAPI_OK ) { if ( !TESTS_QUIET ) { fprintf( stderr, "[%5d] Error creating eventset (tid=%d pid=%d) '%s'\n", i, tid, pid, PAPI_strerror( ret ) ); test_fail( __FILE__, __LINE__, "omptough", 1 ); } } if ( ( ret = PAPI_destroy_eventset( &( evtset[tid] ) ) ) != PAPI_OK ) { if ( !TESTS_QUIET ) { fprintf( stderr, "[%5d] Error destroying eventset (tid=%d pid=%d) '%s'\n", i, tid, pid, 
PAPI_strerror( ret ) ); evtset[tid] = PAPI_NULL; test_fail( __FILE__, __LINE__, "omptough", 1 ); } } if ( ( ret = PAPI_unregister_thread( ) ) != PAPI_OK ) { if ( !TESTS_QUIET ) { fprintf( stderr, "[%5d] Error in unregister thread (tid=%d pid=%d) ret='%s'\n", i, tid, pid, PAPI_strerror( ret ) ); test_fail( __FILE__, __LINE__, "omptough", 1 ); } } } } test_pass( __FILE__, NULL, 0 ); exit( 1 ); }
openmp_wrapper.h
/*!
 * Copyright (c) 2017 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_OPENMP_WRAPPER_H_
#define LIGHTGBM_OPENMP_WRAPPER_H_
#ifdef _OPENMP

#include <omp.h>

#include <LightGBM/utils/log.h>

#include <exception>
#include <memory>
#include <mutex>
#include <stdexcept>
#include <vector>

// Captures the first exception thrown inside an OpenMP parallel region so it
// can be re-thrown on the calling thread after the region ends (letting an
// exception escape an OpenMP region is undefined behavior).  The destructor
// re-throws automatically, so declaring a helper before the region and letting
// it go out of scope afterwards is sufficient.
class ThreadExceptionHelper {
 public:
  ThreadExceptionHelper() {
    ex_ptr_ = nullptr;
  }

  // Re-throws a captured exception, if any, when the helper leaves scope.
  ~ThreadExceptionHelper() {
    ReThrow();
  }
  void ReThrow() {
    if (ex_ptr_ != nullptr) {
      std::rethrow_exception(ex_ptr_);
    }
  }
  void CaptureException() {
    // only catch first exception.
    if (ex_ptr_ != nullptr) { return; }
    std::unique_lock<std::mutex> guard(lock_);
    // re-check under the lock: another thread may have stored its exception
    // while we were waiting.
    // NOTE(review): the unlocked first read above is a benign-race style
    // fast path -- confirm it is acceptable under the project's TSan policy.
    if (ex_ptr_ != nullptr) { return; }
    ex_ptr_ = std::current_exception();
  }

 private:
  std::exception_ptr ex_ptr_;  // first exception captured, or nullptr
  std::mutex lock_;            // guards writes to ex_ptr_
};

// Usage pattern: OMP_INIT_EX() before the parallel loop, wrap each loop body
// in OMP_LOOP_EX_BEGIN()/OMP_LOOP_EX_END(), then OMP_THROW_EX() after the loop.
#define OMP_INIT_EX() ThreadExceptionHelper omp_except_helper
#define OMP_LOOP_EX_BEGIN() try {
#define OMP_LOOP_EX_END() } \
  catch(std::exception& ex) { Log::Warning(ex.what()); omp_except_helper.CaptureException(); } \
  catch(...) { omp_except_helper.CaptureException(); }
#define OMP_THROW_EX() omp_except_helper.ReThrow()

#else

#ifdef _MSC_VER
#pragma warning(disable: 4068)  // disable unknown pragma warning
#endif

#ifdef __cplusplus
extern "C" {
#endif
  /** Fall here if no OPENMP support, so just
      simulate a single thread running.
      All #pragma omp should be ignored by the compiler **/
  inline void omp_set_num_threads(int) {}
  inline int omp_get_num_threads() {return 1;}
  inline int omp_get_thread_num() {return 0;}
#ifdef __cplusplus
};  // extern "C"
#endif

// Without OpenMP the exception-capture machinery collapses to no-ops.
#define OMP_INIT_EX()
#define OMP_LOOP_EX_BEGIN()
#define OMP_LOOP_EX_END()
#define OMP_THROW_EX()

#endif
#endif /* LIGHTGBM_OPENMP_WRAPPER_H_ */
simd-clones-5.c
/* { dg-do compile { target i?86-*-* x86_64-*-* } } */ /* { dg-options "-fopenmp -w" } */ /* ?? The -w above is to inhibit the following warning for now: a.c:2:6: warning: AVX vector argument without AVX enabled changes the ABI. */ #pragma omp declare simd notinbranch simdlen(4) void foo (int *a) { *a = 555; }
convolution_1x1_pack1to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// 1x1 stride-1 int8 convolution (pack1 input -> pack4 output).
// A 1x1/stride-1 convolution is exactly a matrix multiply, so the spatial
// dimensions are flattened (w*h columns, 1 row) and the work is delegated to
// the im2col-SGEMM kernel.
static void conv1x1s1_sgemm_pack1to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int h = bottom_blob.h;
    const int size = w * h;

    // Reinterpret the input as a (size x 1) matrix; no data is copied.
    Mat bottom_im2col = bottom_blob;
    bottom_im2col.w = size;
    bottom_im2col.h = 1;

    im2col_sgemm_pack1to4_int8_neon(bottom_im2col, top_blob, kernel, opt);
}

// 1x1 stride-2 int8 convolution (pack1 -> pack4): first shrink the input by
// keeping every other element in x and y, then run the stride-1 SGEMM path on
// the shrunken blob.
static void conv1x1s2_sgemm_pack1to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;
    size_t elemsize = bottom_blob.elemsize;
    int elempack = bottom_blob.elempack;
    int outw = top_blob.w;
    int outh = top_blob.h;

    // After consuming 2*outw elements of a row, skip the remainder of that
    // row (w - 2*outw) plus one full row (w) -- i.e. vertical stride 2.
    const int tailstep = w - 2 * outw + w;

    Mat bottom_blob_shrinked;
    bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
    // NOTE(review): create() result is not checked before use -- matches the
    // surrounding file's style, but confirm allocation failure is handled
    // upstream.

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < channels; p++)
    {
        const signed char* r0 = bottom_blob.channel(p);
        signed char* outptr = bottom_blob_shrinked.channel(p);

        for (int i = 0; i < outh; i++)
        {
            // Copy every second element; unrolled by 4, then 2, then scalar.
            int j = 0;
            for (; j + 3 < outw; j += 4)
            {
                outptr[0] = r0[0];
                outptr[1] = r0[2];
                outptr[2] = r0[4];
                outptr[3] = r0[6];

                r0 += 8;
                outptr += 4;
            }
            for (; j + 1 < outw; j += 2)
            {
                outptr[0] = r0[0];
                outptr[1] = r0[2];

                r0 += 4;
                outptr += 2;
            }
            for (; j < outw; j++)
            {
                outptr[0] = r0[0];

                r0 += 2;
                outptr += 1;
            }

            r0 += tailstep;
        }
    }

    conv1x1s1_sgemm_pack1to4_int8_neon(bottom_blob_shrinked, top_blob, kernel, opt);
}
GB_unaryop__lnot_bool_uint32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__lnot_bool_uint32
// op(A') function: GB_tran__lnot_bool_uint32

// C type:   bool
// A type:   uint32_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = !aij

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (logical NOT)
#define GB_OP(z, x) \
    z = !x ;

// casting
#define GB_CASTING(z, x) \
    bool z = (bool) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise Cx [p] = !(bool) Ax [p] over anz entries, parallelized with
// a static OpenMP schedule (each entry is independent).
GrB_Info GB_unop__lnot_bool_uint32
(
    bool *restrict Cx,
    const uint32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose body is shared template code; the macros defined above
// specialize GB_unaryop_transpose.c for this type/operator pair.
GrB_Info GB_tran__lnot_bool_uint32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
nbody.c
//
// Torbert, 3.12.2009
//
// OpenGL Demo, 3-D Example
//
// N-body gravity demo rendered with GLUT.  Physics integration happens inside
// the display callback; bodies that collide are merged (heavier absorbs
// lighter) and the bodies[] array is compacted each frame.
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <GL/glut.h>
#include <time.h>
#include <ctype.h>

#define N 3000//N is the max amount of bodies
#define G .005
///////////////////////////////
double pi=3.1415926;
int w=640,h=480;                   // window size in pixels
int currentindex=0;                // number of live bodies in bodies[]
double rho,phi,theta,up=1.0;       // spherical camera coords + up sign
// the center of the screen
double xc,yc,zc;                   // camera look-at point
double xe,ye,ze;                   // camera eye position (derived in look())
int collision=0;                   // nonzero disables merging on contact
int centered=0;                    // nonzero keeps camera on center of mass
///////////////////////////////
typedef struct {
 double mass;
 double radius;
 double x; double y; double z;     // position
 double xv; double yv; double zv;  // velocity
 double r; double g; double b;     // display color
 int flag;                         // 1 = marked for merging/removal
} Body;
Body bodies[N];
double t=0.0;
int previousx;                     // last mouse position, for drag rotation
int previousy;
double dt=.01;                     // integration time step

// Euclidean distance between two 3-D points.
double dist(double x1, double y1, double z1, double x2, double y2, double z2)
{
 double xpart=x1-x2;
 double ypart=y1-y2;
 double zpart=z1-z2;
 return sqrt( xpart*xpart + ypart*ypart + zpart*zpart);
}

// Render callback.  Also advances the simulation one dt step (physics inside
// the draw path, so simulation speed is tied to the frame rate).
void display(void)
{
 // double t;
 // clear the screen
 glClear(GL_COLOR_BUFFER_BIT);
 glColor3f(0.0,0.0,0.0);
 //draw lines for stuff (axes centered on the look-at point, with key labels)
 glBegin(GL_LINES);
 glVertex3f(0.0f+xc, 0.0f+yc, 0.0f+zc);
 glVertex3f(50.0f+xc, 0.0f+yc, 0.0f+zc);
 glVertex3f(0.0f+xc, 0.0f+yc, 0.0f+zc);
 glVertex3f(0.0f+xc, 10.0f+yc, 0.0f+zc);
 glVertex3f(0.0f+xc, 0.0f+yc, 0.0f+zc);
 glVertex3f(0.0f+xc, 0.0f+yc, 10.0f+zc);
 glEnd();
 glRasterPos3f(50.0f+xc, 0.0f+yc, 0.0f+zc);
 glutBitmapString(GLUT_BITMAP_HELVETICA_18,"x");
 glRasterPos3f(40.0f+xc, -2.0f+yc, 0.0f+zc);
 glutBitmapString(GLUT_BITMAP_TIMES_ROMAN_24,"w");
 glRasterPos3f(10.0f+xc, -2.0f+yc, 0.0f+zc);
 glutBitmapString(GLUT_BITMAP_TIMES_ROMAN_24,"s");
 glRasterPos3f(0.0f+xc, 50.0f+yc, 0.0f+zc);
 glutBitmapString(GLUT_BITMAP_HELVETICA_18,"y");
 glRasterPos3f(-2.0f+xc, 40.0f+yc, 0.0f+zc);
 glutBitmapString(GLUT_BITMAP_TIMES_ROMAN_24,"e");
 glRasterPos3f(-2.0f+xc, 10.0f+yc, 0.0f+zc);
 glutBitmapString(GLUT_BITMAP_TIMES_ROMAN_24,"d");
 glRasterPos3f(0.0f+xc, 0.0f+yc, 50.0f+zc);
 glutBitmapString(GLUT_BITMAP_HELVETICA_18,"z");
 glRasterPos3f(0.0f+xc, -2.0f+yc, 40.0f+zc);
 glutBitmapString(GLUT_BITMAP_TIMES_ROMAN_24,"r");
 glRasterPos3f(0.0f+xc, -2.0f+yc, 10.0f+zc);
 glutBitmapString(GLUT_BITMAP_TIMES_ROMAN_24,"f");
 int i;
 // O(n^2) gravity pass: accumulate acceleration on body i from every other
 // body q, then do a symplectic-Euler update and draw the body.
 for(i=0;i<currentindex;i++)
 {
  int q;
  double fx=0;
  double fy=0;
  double fz=0;
  for(q=0; q<currentindex;q++)
  {
   if(q!=i)
   {
    double d=dist(bodies[i].x,bodies[i].y, bodies[i].z,bodies[q].x, bodies[q].y,bodies[q].z);
    double top=(G*bodies[q].mass);
    double magnitude=top/(d);
    // NOTE(review): magnitude uses G*m/d, not G*m/d^2 -- confirm whether the
    // 1/d falloff (combined with the /d in each component) is intentional.
    fx-=magnitude*((bodies[i].x-bodies[q].x)/d);
    fy-=magnitude*((bodies[i].y-bodies[q].y)/d);
    fz-=magnitude*((bodies[i].z-bodies[q].z)/d);
    // Contact test: mark the lighter body of an overlapping pair for merging.
    if(d< bodies[i].radius*7/8+bodies[q].radius*7/8 && bodies[i].flag==0 && collision==0)
    {
     if(bodies[q].mass<bodies[i].mass)
      bodies[q].flag=1;
     else
      bodies[i].flag=1;
    }
   }
  }
  //get correct direction
  bodies[i].xv+=fx*dt;
  bodies[i].yv+=fy*dt;
  bodies[i].zv+=fz*dt;
  bodies[i].x+=bodies[i].xv*dt;
  bodies[i].y+=bodies[i].yv*dt;
  bodies[i].z+=bodies[i].zv*dt;
  int m=(int)bodies[i].mass;
//  glColor3f((double)(m%10000)/10000.0,(double)(m%1000)/1000.0,(double)(m%100)/100.0);
  glColor3f(bodies[i].r,bodies[i].g,bodies[i].b);
  glMatrixMode(GL_MODELVIEW);
  glPushMatrix();
  glTranslatef(bodies[i].x,bodies[i].y,bodies[i].z);
  glutWireSphere(bodies[i].radius, 10, 10);
  glPopMatrix();
 }
 int flagged=0;
 // Merge pass: fold each flagged body into the unflagged body it touches.
 if( collision==0)
 // NOTE(review): this '#pragma omp for' is orphaned (no enclosing
 // '#pragma omp parallel'), so it runs serially; flagged++ and the writes to
 // bodies[q] would race if it ever were parallelized -- confirm intent.
 #pragma omp for private(i)
 for(i=0;i<currentindex;i++)
 {
  if(bodies[i].flag==1)
  {
   // printf("%d\n", bodies[i].flag);
   flagged++;
   int q;
   for(q=0; q<currentindex;q++)
   {
    if(q!=i)
    {
     double d=dist(bodies[i].x,bodies[i].y, bodies[i].z,bodies[q].x, bodies[q].y,bodies[q].z);
     if(d< bodies[i].radius*7/8+bodies[q].radius*7/8 && bodies[q].flag==0)
     {
      bodies[q].mass+=bodies[i].mass;
      // NOTE(review): 4/3 and 1/10 are integer divisions (1/10 == 0, so the
      // pow() is always 1.0 and radius grows by exactly 1) -- almost
      // certainly meant to be 4.0/3.0 and a fractional exponent; confirm.
      bodies[q].radius+= pow((4/3*pi*bodies[i].radius*bodies[i].radius+4/3*pi*bodies[q].radius*bodies[q].radius)/(4/3),1/10);
      // Momentum transfer (mass-weighted) and color averaging.
      bodies[q].xv+=bodies[i].xv*bodies[i].mass/bodies[q].mass;
      bodies[q].yv+=bodies[i].yv*bodies[i].mass/bodies[q].mass;
      bodies[q].zv+=bodies[i].zv*bodies[i].mass/bodies[q].mass;
      bodies[q].r=(bodies[q].r+bodies[i].r)/2;
      bodies[q].g=(bodies[q].g+bodies[i].g)/2;
      bodies[q].b=(bodies[q].b+bodies[i].b)/2;
      bodies[i].r=(bodies[q].r+bodies[i].r)/2;
      bodies[i].g=(bodies[q].g+bodies[i].g)/2;
      bodies[i].b=(bodies[q].b+bodies[i].b)/2;
     }
    }
   }
  }
 }
 int count=0;
 // Compaction pass: keep only unflagged bodies, preserving order.
 if( collision==0)
 for(i=0; i<currentindex; i++)
 {
  if(bodies[i].flag==0){
   bodies[count].mass =bodies[i].mass;
   bodies[count].radius =bodies[i].radius;
   bodies[count].x =bodies[i].x;
   bodies[count].y =bodies[i].y;
   bodies[count].z =bodies[i].z;
   bodies[count].xv =bodies[i].xv;
   bodies[count].yv =bodies[i].yv;
   bodies[count].zv =bodies[i].zv;
   bodies[count].r =bodies[i].r;
   bodies[count].g =bodies[i].g;
   bodies[count].b =bodies[i].b;
   bodies[count].flag=0;
   count++;
  }
 }
 currentindex-=flagged;
// glutWireSphere(.5, 20, 16);
 t+=0.001;
// printf("%f\n",t);
 glutSwapBuffers();
}

// Recompute the eye position from spherical coordinates (rho, theta, phi)
// around the look-at point (xc,yc,zc) and load the view matrix.
void look()
{
 xe=xc+rho*sin(theta)*sin(phi); // y
 ye=yc+rho*cos(theta); // z
 ze=zc+rho*sin(theta)*cos(phi); // x
 glMatrixMode(GL_MODELVIEW);
 glLoadIdentity();
 gluLookAt(xe,ye,ze, xc,yc,zc, 0.0,up,0.0);
}

// Idle callback: fix up theta/up at the poles, optionally re-center the
// camera on the center of mass, then request a redraw.
void idle(void)
{
 if(up>0.0)
 {
  if(theta+0.001>pi)
  {
   up*=-1.0;
   theta=-pi;
  }
  if(theta-0.001<0)
  {
   up*=-1.0;
  }
 }
 else{
  if(theta+0.001>0.0)
   up*=-1.0;
  if(theta-0.001<-pi)
  {
   theta=pi;
  }
 }
 if(centered==1)
 {
  // Mass-weighted average position of all live bodies.
  double topx=0;
  double topy=0;
  double topz=0;
  double total=0;
  int in;
  for(in=0;in<currentindex;in++)
  {
   double x=bodies[in].x;
   double y=bodies[in].y;
   double z=bodies[in].z;
   double m=bodies[in].mass;
   topx+=x*m;
   topy+=y*m;
   topz+=z*m;
   total+=m;
  }
  xc=topx/total;
  yc=topy/total;
  zc=topz/total;
 }
 look();
 glutPostRedisplay();
}

// Record the drag start position on left-button press.
void mouse(int button,int state,int xscr,int yscr)
{
 if(button==0 && state==0)
 {
  previousx=xscr;
  previousy=yscr;
 }
}

// Drag rotates the camera: vertical motion changes theta, horizontal phi.
void motion(int xscr,int yscr)
{
 int deltax=xscr-previousx;
 int deltay=yscr-previousy;
 previousx=xscr;
 previousy=yscr;
 theta += (double)deltay/100.0;
 phi += (double)deltax/100.0;
}

// Scroll wheel zooms by changing the camera radius (clamped above 0.1).
void mouse_wheel(int wheel,int direction,int xscr,int yscr)
{
 // printf("%d\n", direction);
 if(direction==-1)
  rho+=2;
 else
  rho-=2;
 if(rho<0.1)
  rho=0.1;
}

int move_val = 10;  // pan step per keypress, in world units

// Keyboard: w/s, e/d, r/f pan the look-at point along x, y, z; space nudges
// it toward the eye; 'c' toggles center-of-mass tracking; 'q' quits.
void keyfunc(unsigned char key,int xscr,int yscr)
{
 if(key=='q')
 {
  exit(0);
 }
 if(key=='w') { xc+=move_val; }
 if(key=='s') { xc-=move_val; }
 if(key=='e') { yc+=move_val; }
 if(key=='d') { yc-=move_val; }
 if(key=='r') { zc+=move_val; }
 if(key=='f') { zc-=move_val; }
 if(key==' ')
 {
  xc-=xe/5;
  yc-=ye/5;
  zc-=ze/5;
 }
 if(key=='c')
 {
  centered=(!centered);
 }
}

// Window-resize callback: keep the perspective projection's aspect ratio
// in sync with the window.
void reshape(int wscr,int hscr)
{
 GLfloat aspect_ratio;
 w=wscr;
 h=hscr;
 aspect_ratio=(GLfloat)w/(GLfloat)h;
 glViewport(0,0,(GLsizei)w,(GLsizei)h);
 glMatrixMode(GL_PROJECTION);
 glLoadIdentity();
 gluPerspective(60.0,aspect_ratio,0.1,100000.0);
 look();
}

// Append a body to bodies[]; silently ignored once the array is full.
// (m mass, r radius, *pos position, i/j/k velocity, rc/gc/bc color)
void makeBody(double m, double r, double xpos, double ypos, double zpos,double i, double j, double k,double rc, double gc, double bc)
{
 if(currentindex==N)
 {
  return;
 }
 int index=currentindex;
 bodies[index].mass=m;
 bodies[index].radius=r;
 bodies[index].x=xpos;
 bodies[index].y=ypos;
 bodies[index].z=zpos;
 bodies[index].xv=i;
 bodies[index].yv=j;
 bodies[index].zv=k;
 bodies[index].r=rc;
 bodies[index].g=gc;
 bodies[index].b=bc;
 bodies[index].flag=0;
 currentindex++;
}

// Fill the array with N-1 random bodies plus one heavier body whose velocity
// cancels the total momentum (mx,my,mz) of the others.
void randombody()
{
 int index;
 double mx=0;
 double my=0;
 double mz=0;
 for(index=0;index<N;index++)
 {
  if(index<N-1)
  {
   double m=1;
   m=rand()%5000;
   double r=m/1000;
   double xpos=(rand()%100)-50;
//   double xpos=0;
   double ypos=(rand()%100)-50;
   double zpos=(rand()%100)-50;
//   double zpos=0;
   double i=(rand()%20)-10;
   double j=(rand()%20)-10;
   double k=(rand()%20)-10;
//   double i=0;
//   double j=0;
//   double k=0;
   mx+=m*i;
   my+=m*j;
   mz+=m*k;
   double rc=(double)(rand()%256)/256.0;
   double g=(double)(rand()%256)/256.0;
   double b=(double)(rand()%256)/256.0;
   makeBody(m,r,xpos,ypos,zpos,i,j,k,rc,g,b);
  }
  else
  {
   double m=1;
   m=rand()%500+500;
   double r=m/1000;
   double xpos=(rand()%50)-25;
//   double xpos=0;
   double ypos=(rand()%50)-25;
   double zpos=(rand()%50)-25;
//   double zpos=0;
   double i=mx/m;
   double j=my/m;
   double k=mz/m;
   double rc=(double)(rand()%256)/256.0;
   double g=(double)(rand()%256)/256.0;
   double b=(double)(rand()%256)/256.0;
   makeBody(m,r,xpos,ypos,zpos,i,j,k,rc,g,b);
  }
 }
}

// Add one random body in the z=0 plane.
void singlebody()
{
 int factor=2000;
 double m=rand()%factor;
 double r=m/factor;
 double xpos=(rand()%500)-250;
// double xpos=0;
 double ypos=(rand()%500)-250;
// double zpos=(rand()%100)-50;
 double zpos=0;
 double i=(rand()%40)-20;
 double j=(rand()%40)-20;
 double k=0;
// double k=(rand()%20)-10;
 double rc=(double)(rand()%256)/256.0;
 double g=(double)(rand()%256)/256.0;
 double b=(double)(rand()%256)/256.0;
 makeBody(m,r,xpos,ypos,zpos,i,j,k,rc,g,b);
}

// Add one small random "asteroid" in the z=0 plane.
void asteroid()
{
 double m=rand()%50;
 double r=m/10;
 double xpos=(double)(rand()%400)-200;
// double xpos=0;
 double ypos=(rand()%400)-200;
// double zpos=(rand()%100)-50;
 double zpos=0;
 double i=(rand()%50)-25;
 double j=(rand()%50)-25;
 double k=0;
// double k=(rand()%20)-10;
 double rc=(double)(rand()%256)/256.0;
 double g=(double)(rand()%256)/256.0;
 double b=(double)(rand()%256)/256.0;
 makeBody(m,r,xpos,ypos,zpos,i,j,k,rc,g,b);
}

// Build a central "sun" at (x,y,z) with 'planets' bodies evenly spaced on a
// ring of radius r, each given a tangential orbital velocity ov.
void ringsystem(double x, double y,double z, double xv, double yv, double zv, double r, int planets)
{
 double sunmass=100000;
 double planetmass=100;
 makeBody(sunmass,40, x, y, z, xv, yv, zv,1,222.0/255.0,0);
 int e;
 double ov=10*sqrt((sunmass*sunmass*G)/((planetmass+sunmass)*r));
 printf("%f %f\n",ov, sunmass*G);
 double angle=2*pi/planets;
 for(e=1;e<=planets;e++)
 {
  makeBody(planetmass ,5 ,x+cos(angle*e)*r, y+sin(angle*e)*r, z, xv+cos(angle*e - pi/2)*ov, yv+sin(angle*e - pi/2)*ov, zv,0,0,1);
 }
}

// Entry point: seed the RNG, create two ring systems, set up the camera and
// GLUT callbacks, and enter the main loop.
int main(int argc,char* argv[])
{
 int x;  // NOTE(review): unused
 srand(time(NULL));
 ringsystem(0,0, 0,0,0,0,100,1);
 ringsystem(0,300, 0,0,0,0,100,1);
 rho=1000.1;
 phi=0.0;
 theta=pi/2.0;
 xc=yc=zc=0.0;
 glutInit(&argc,argv);
 glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
 glutInitWindowSize(w,h);
 glutInitWindowPosition(100,50);
 glutCreateWindow("OpenGL Demo");
 glClearColor(1.0,1.0,1.0,0.0);
 glShadeModel(GL_SMOOTH);
 glutDisplayFunc(display);
 glutIdleFunc(idle);
 glutMouseFunc(mouse);
 glutMotionFunc(motion);
 glutMouseWheelFunc(mouse_wheel);
 glutKeyboardFunc(keyfunc);
 glutReshapeFunc(reshape);
 glutMainLoop();
 return 0;
}
element_data_sensitivity_builder.h
//    |  /           |
//    ' /   __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:
//

#if !defined(KRATOS_ELEMENT_SENSITIVITY_BUILDER_H_INCLUDED)
#define KRATOS_ELEMENT_SENSITIVITY_BUILDER_H_INCLUDED

// System includes

// External includes

// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "utilities/openmp_utils.h"
#include "response_functions/adjoint_response_function.h"

namespace Kratos
{
///@name Kratos Classes
///@{

/// Accumulates adjoint-based response sensitivities on the elements of a
/// model part, storing each element's contribution in its own non-historical
/// variable (via Element::GetValue).
class ElementSensitivityBuilder
{
public:
    ///@name Type Definitions
    ///@{
    typedef Geometry<Node<3>> GeometryType;
    ///@}
    ///@name Life Cycle
    ///@{
    /// Keeps references to the model part and the response function;
    /// neither is owned (the model part must outlive this object).
    ElementSensitivityBuilder(ModelPart& rModelPart, AdjointResponseFunction::Pointer pResponseFunction)
        : mrModelPart(rModelPart), mpResponseFunction(pResponseFunction)
    {
    }
    ///@}
    ///@name Operations
    ///@{
    /// Dispatches on the runtime variable name: supports scalar (double) and
    /// array_1d<double,3> design variables; any other name raises an error.
    /// @param rVariable     name of a registered Kratos variable
    /// @param ScalingFactor multiplier applied to every sensitivity value
    void BuildElementSensitivities(std::string const& rVariable, double ScalingFactor = 1.0)
    {
        KRATOS_TRY;
        if (KratosComponents<Variable<double>>::Has(rVariable) == true)
        {
            const Variable<double>& r_variable =
                KratosComponents<Variable<double>>::Get(rVariable);
            BuildElementSensitivities(r_variable, ScalingFactor);
        }
        else if (KratosComponents<Variable<array_1d<double, 3>>>::Has(rVariable) == true)
        {
            const Variable<array_1d<double, 3>>& r_variable =
                KratosComponents<Variable<array_1d<double, 3>>>::Get(rVariable);
            BuildElementSensitivities(r_variable, ScalingFactor);
        }
        else
            KRATOS_ERROR << "Unsupported variable: " << rVariable << "." << std::endl;
        KRATOS_CATCH("");
    }
    ///@}

private:
    ///@name Private Operations
    ///@{
    /// For every element computes
    ///   ScalingFactor * (sensitivity_matrix^T-product with the adjoint
    ///   vector + partial sensitivity)
    /// and adds it onto the element's stored value. Elements are processed
    /// in parallel; each OpenMP thread owns one slot of the scratch vectors
    /// (indexed by ThisThread), so no locking is needed.
    template <class TDataType>
    void BuildElementSensitivities(Variable<TDataType> const& rVariable, double ScalingFactor)
    {
        KRATOS_TRY;
        auto& r_elements = mrModelPart.Elements();
        const auto& r_process_info = mrModelPart.GetProcessInfo();
        const int num_threads = OpenMPUtils::GetNumThreads();
        // Per-thread scratch space to avoid repeated allocation in the loop.
        std::vector<Vector> local_sensitivity(num_threads);
        std::vector<Vector> partial_sensitivity(num_threads);
        std::vector<Vector> adjoint_vector(num_threads);
        std::vector<Matrix> sensitivity_matrix(num_threads);
#pragma omp parallel
        {
            ModelPart::ElementIterator elements_begin;
            ModelPart::ElementIterator elements_end;
            OpenMPUtils::PartitionedIterators(r_elements, elements_begin, elements_end);
            int k = OpenMPUtils::ThisThread();
            for (auto it = elements_begin; it != elements_end; ++it)
            {
                it->CalculateSensitivityMatrix(rVariable, sensitivity_matrix[k],
                                               r_process_info);
                mpResponseFunction->CalculatePartialSensitivity(
                    *it, rVariable, sensitivity_matrix[k],
                    partial_sensitivity[k], r_process_info);
                it->GetValuesVector(adjoint_vector[k]);
                if (local_sensitivity[k].size() != sensitivity_matrix[k].size1())
                    local_sensitivity[k].resize(sensitivity_matrix[k].size1(), false);
                noalias(local_sensitivity[k]) =
                    ScalingFactor * (prod(sensitivity_matrix[k], adjoint_vector[k]) +
                                     partial_sensitivity[k]);
                AssembleElementSensitivityContribution(
                    rVariable, local_sensitivity[k], *it);
            }
        }
        KRATOS_CATCH("");
    }
    ///@}

private:
    ///@name Member Variables
    ///@{
    ModelPart& mrModelPart;
    AdjointResponseFunction::Pointer mpResponseFunction;
    ///@}
    ///@name Private Operations
    ///@{
    /// Scalar overload: the sensitivity vector must hold exactly one entry,
    /// which is accumulated into the element's double-valued variable.
    void AssembleElementSensitivityContribution(Variable<double> const& rVariable,
                                                Vector const& rSensitivityVector,
                                                Element& rElement)
    {
        KRATOS_DEBUG_ERROR_IF(rSensitivityVector.size() != 1)
            << "rSensitivityVector.size() = " << rSensitivityVector.size()
            << std::endl;
        rElement.GetValue(rVariable) += rSensitivityVector[0];
    }

    /// Vector overload: accumulates one component per working-space
    /// dimension of the element's geometry (2 or 3); the vector length must
    /// match that dimension.
    void AssembleElementSensitivityContribution(Variable<array_1d<double, 3>> const& rVariable,
                                                Vector const& rSensitivityVector,
                                                Element& rElement)
    {
        array_1d<double, 3>& r_sensitivity = rElement.GetValue(rVariable);
        const auto ws_dim = rElement.GetGeometry().WorkingSpaceDimension();
        KRATOS_DEBUG_ERROR_IF(rSensitivityVector.size() != ws_dim)
            << "rSensitivityVector.size() = " << rSensitivityVector.size()
            << std::endl;
        for (unsigned d = 0; d < ws_dim; ++d)
            r_sensitivity[d] += rSensitivityVector[d];
    }
    ///@}
};

///@} // Kratos Classes

} /* namespace Kratos.*/

#endif /* KRATOS_ELEMENT_SENSITIVITY_BUILDER_H_INCLUDED defined */
bcnn_mat.c
/*
 * Copyright (c) 2016-present Jean-Noel Braun.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/* bcnn_mat.c: elementary float32 vector/matrix kernels. Scalar reference
 * implementations are compiled by default; SSE/AVX fast paths are selected
 * at build time via BCNN_USE_AVX. */

#include "bcnn_mat.h"

#include <math.h>

#include <bh/bh_log.h>
#include <bh/bh_macros.h>
#include <bh/bh_mem.h>

#include "bcnn/bcnn.h"

#include <bh/bh_timer.h>

#if (defined(__aarch64__))
#include "openblas/openblas_sgemm.h"
#endif

/* Set all n entries of x to the constant a. Always returns 0. */
int bcnn_fill_f32(int n, float a, float *x) {
    int i;

    for (i = 0; i < n; ++i) {
        x[i] = a;
    }
    return 0;
}

/* Copy n floats from x to y (memcpy: regions must not overlap). Returns 0. */
int bcnn_copy_f32(int n, float *x, float *y) {
    memcpy(y, x, n * sizeof(float));
    return 0;
}

/* y[i] += a * x[i] for i in [0, n). The AVX path handles 16 floats per
 * iteration and uses aligned loads/stores when both x and y are 32-byte
 * aligned; a scalar loop handles the n % 16 remainder. Returns 0. */
int bcnn_axpy(int n, float a, float *x, float *y) {
#ifndef BCNN_USE_AVX
    int i;
    for (i = 0; i < n; ++i) y[i] += a * x[i];
#else
    int i, nd, nm;
    __m256 sum0;
    __m256 sum1;
    __m256 reg0, reg1, reg2, reg3;
    __m256 areg = _mm256_set1_ps(a);
    __m256 prod;
    int data_is_aligned = bh_is_aligned32(x) & bh_is_aligned32(y);
    nd = n / 16 * 16; /* bulk part, multiple of 16 */
    nm = n % 16;      /* scalar remainder */
    if (data_is_aligned) {
        for (i = 0; i < nd; i += 16) {
            reg0 = _mm256_load_ps(x + 0);
            reg1 = _mm256_load_ps(x + 8);
            reg2 = _mm256_load_ps(y + 0);
            reg3 = _mm256_load_ps(y + 8);
            prod = _mm256_mul_ps(reg0, areg);
            sum0 = _mm256_add_ps(prod, reg2);
            prod = _mm256_mul_ps(reg1, areg);
            sum1 = _mm256_add_ps(prod, reg3);
            _mm256_store_ps(y + 0, sum0);
            _mm256_store_ps(y + 8, sum1);
            x += 16;
            y += 16;
        }
    } else {
        for (i = 0; i < nd; i += 16) {
            reg0 = _mm256_loadu_ps(x + 0);
            reg1 = _mm256_loadu_ps(x + 8);
            reg2 = _mm256_loadu_ps(y + 0);
            reg3 = _mm256_loadu_ps(y + 8);
            prod = _mm256_mul_ps(reg0, areg);
            sum0 = _mm256_add_ps(prod, reg2);
            prod = _mm256_mul_ps(reg1, areg);
            sum1 = _mm256_add_ps(prod, reg3);
            _mm256_storeu_ps(y + 0, sum0);
            _mm256_storeu_ps(y + 8, sum1);
            x += 16;
            y += 16;
        }
    }
    /* x and y now point at the remainder section. */
    for (i = 0; i < nm; ++i) y[i] += a * x[i];
#endif
    return 0;
}

/* y[i] = a * x[i] + b * y[i]. Same vectorization scheme as bcnn_axpy.
 * (Body continues past this chunk boundary.) */
int bcnn_axpby(int n, float a, float *x, float b, float *y) {
#ifndef BCNN_USE_AVX
    int i;
    for (i = 0; i < n; ++i) y[i] = a * x[i] + b * y[i];
#else
    int i, nd, nm;
    __m256 sum0;
    __m256 sum1;
    __m256 reg0, reg1, reg2, reg3;
    __m256 areg = _mm256_set1_ps(a);
    __m256 breg = _mm256_set1_ps(b);
    __m256 prod0, prod1;
    int data_is_aligned = bh_is_aligned32(x) & bh_is_aligned32(y);
    nd = n / 16 * 16;
    nm = n
% 16; if (data_is_aligned) { for (i = 0; i < nd; i += 16) { reg0 = _mm256_load_ps(x + 0); reg1 = _mm256_load_ps(x + 8); reg2 = _mm256_load_ps(y + 0); reg3 = _mm256_load_ps(y + 8); prod0 = _mm256_mul_ps(reg0, areg); prod1 = _mm256_mul_ps(reg2, breg); sum0 = _mm256_add_ps(prod0, prod1); prod0 = _mm256_mul_ps(reg1, areg); prod1 = _mm256_mul_ps(reg3, breg); sum1 = _mm256_add_ps(prod0, prod1); _mm256_store_ps(y + 0, sum0); _mm256_store_ps(y + 8, sum1); x += 16; y += 16; } } else { for (i = 0; i < nd; i += 16) { reg0 = _mm256_loadu_ps(x + 0); reg1 = _mm256_loadu_ps(x + 8); reg2 = _mm256_loadu_ps(y + 0); reg3 = _mm256_loadu_ps(y + 8); prod0 = _mm256_mul_ps(reg0, areg); prod1 = _mm256_mul_ps(reg2, breg); sum0 = _mm256_add_ps(prod0, prod1); prod0 = _mm256_mul_ps(reg1, areg); prod1 = _mm256_mul_ps(reg3, breg); sum1 = _mm256_add_ps(prod0, prod1); _mm256_storeu_ps(y + 0, sum0); _mm256_storeu_ps(y + 8, sum1); x += 16; y += 16; } } for (i = 0; i < nm; ++i) y[i] = a * x[i] + b * y[i]; #endif return 0; } void bcnn_axpy_strided(int num_batches, float a, float *x, float *y, int stride[2], int x_dim[3], int y_dim[3], int min_dim[3]) { for (int n = 0; n < num_batches; ++n) { for (int k = 0; k < min_dim[0]; ++k) { for (int j = 0; j < min_dim[1]; ++j) { for (int i = 0; i < min_dim[2]; ++i) { int dst_ind = i * stride[0] + y_dim[2] * (j * stride[0] + y_dim[1] * (y_dim[0] * n + k)); int src1_ind = i * stride[1] + x_dim[2] * (j * stride[1] + x_dim[1] * (x_dim[0] * n + k)); y[dst_ind] += a * x[src1_ind]; } } } } } int bcnn_pow(int n, float *x, float a, float *y) { int i; for (i = 0; i < n; ++i) { y[i] = powf(x[i], a); } return 0; } int bcnn_vadd(int n, float *a, float *b, float *y) { #ifndef BCNN_USE_AVX int i; for (i = 0; i < n; ++i) { y[i] = a[i] + b[i]; } #else int i, nd, nm; __m128 r0, r1, r2, r3; nd = n / 8 * 8; nm = n % 8; for (i = 0; i < nd; i += 8) { r0 = _mm_loadu_ps(a); r1 = _mm_loadu_ps(a + 4); r2 = _mm_loadu_ps(b); r3 = _mm_loadu_ps(b + 4); r0 = _mm_add_ps(r0, r2); r1 = 
_mm_add_ps(r1, r3); _mm_storeu_ps(y, r0); _mm_storeu_ps(y + 4, r1); a += 8; b += 8; y += 8; } for (i = 0; i < nm; ++i) { y[i] = a[i] + b[i]; } #endif return 0; } int bcnn_vsub(int n, float *a, float *b, float *y) { #ifndef BCNN_USE_AVX int i; for (i = 0; i < n; ++i) { y[i] = a[i] - b[i]; } #else int i, nd, nm; __m128 r0, r1, r2, r3; nd = n / 8 * 8; nm = n % 8; for (i = 0; i < nd; i += 8) { r0 = _mm_loadu_ps(a); r1 = _mm_loadu_ps(a + 4); r2 = _mm_loadu_ps(b); r3 = _mm_loadu_ps(b + 4); r0 = _mm_sub_ps(r0, r2); r1 = _mm_sub_ps(r1, r3); _mm_storeu_ps(y, r0); _mm_storeu_ps(y + 4, r1); a += 8; b += 8; y += 8; } for (i = 0; i < nm; ++i) { y[i] = a[i] - b[i]; } #endif return 0; } int bcnn_vmul(int n, float *a, float *b, float *y) { #ifndef BCNN_USE_AVX int i; for (i = 0; i < n; ++i) { y[i] = a[i] * b[i]; } #else int i, nd, nm; __m128 r0, r1, r2, r3; nd = n / 8 * 8; nm = n % 8; for (i = 0; i < nd; i += 8) { r0 = _mm_loadu_ps(a); r1 = _mm_loadu_ps(a + 4); r2 = _mm_loadu_ps(b); r3 = _mm_loadu_ps(b + 4); r0 = _mm_mul_ps(r0, r2); r1 = _mm_mul_ps(r1, r3); _mm_storeu_ps(y, r0); _mm_storeu_ps(y + 4, r1); a += 8; b += 8; y += 8; } for (i = 0; i < nm; ++i) { y[i] = a[i] * b[i]; } #endif return 0; } int bcnn_vdiv(int n, float *a, float *b, float *y) { #ifndef BCNN_USE_AVX int i; for (i = 0; i < n; ++i) { if (bh_abs(b[i]) > 0.00001f) y[i] = a[i] / b[i]; else y[i] = 0.0f; } #else int i, nd, nm; __m128 r0, r1, r2, r3; nd = n / 8 * 8; nm = n % 8; for (i = 0; i < nd; i += 8) { r0 = _mm_loadu_ps(a); r1 = _mm_loadu_ps(a + 4); r2 = _mm_loadu_ps(b); r3 = _mm_loadu_ps(b + 4); r0 = _mm_div_ps(r0, r2); r1 = _mm_div_ps(r1, r3); _mm_storeu_ps(y, r0); _mm_storeu_ps(y + 4, r1); a += 8; b += 8; y += 8; } for (i = 0; i < nm; ++i) { if (bh_abs(b[i]) > 0.00001f) y[i] = a[i] / b[i]; else y[i] = 0.0f; } #endif return 0; } int bcnn_scal(int n, float a, float *x) { #ifndef BCNN_USE_AVX int i; if (a == 0.0f) { memset(x, 0, n * sizeof(float)); } else if (a != 1.0f) { for (i = 0; i < n; ++i) x[i] *= a; } #else 
int i, nd, nm; __m128 reg0, reg1; __m128 areg = _mm_set1_ps(a); __m128 prod; int data_is_aligned = bh_is_aligned32(x); if (a == 0.0f) { memset(x, 0, n * sizeof(float)); } else if (a != 1.0f) { nd = n / 8 * 8; nm = n % 8; if (data_is_aligned) { for (i = 0; i < nd; i += 8) { reg0 = _mm_load_ps(x + 0); reg1 = _mm_load_ps(x + 4); prod = _mm_mul_ps(reg0, areg); _mm_store_ps(x + 0, prod); prod = _mm_mul_ps(reg1, areg); _mm_store_ps(x + 4, prod); x += 8; } } else { for (i = 0; i < nd; i += 8) { reg0 = _mm_loadu_ps(x + 0); reg1 = _mm_loadu_ps(x + 4); prod = _mm_mul_ps(reg0, areg); _mm_storeu_ps(x + 0, prod); prod = _mm_mul_ps(reg1, areg); _mm_storeu_ps(x + 4, prod); x += 8; } } for (i = 0; i < nm; ++i) x[i] *= a; } #endif return 0; } int bcnn_add_scalar(int n, float a, float *x) { #ifndef BCNN_USE_AVX int i; for (i = 0; i < n; ++i) { x[i] += a; } #else int i, nd, nm; __m128 reg0, reg1; __m128 areg = _mm_set1_ps(a); __m128 prod; int data_is_aligned = bh_is_aligned32(x); if (a == 0.0f) { return 0; } else if (a != 1.0f) { nd = n / 8 * 8; nm = n % 8; if (data_is_aligned) { for (i = 0; i < nd; i += 8) { reg0 = _mm_load_ps(x + 0); reg1 = _mm_load_ps(x + 4); prod = _mm_add_ps(reg0, areg); _mm_store_ps(x + 0, prod); prod = _mm_add_ps(reg1, areg); _mm_store_ps(x + 4, prod); x += 8; } } else { for (i = 0; i < nd; i += 8) { reg0 = _mm_loadu_ps(x + 0); reg1 = _mm_loadu_ps(x + 4); prod = _mm_add_ps(reg0, areg); _mm_storeu_ps(x + 0, prod); prod = _mm_add_ps(reg1, areg); _mm_storeu_ps(x + 4, prod); x += 8; } } for (i = 0; i < nm; ++i) x[i] += a; } #endif return 0; } float bcnn_dot(int n, float *x, float *y) { #ifndef BCNN_USE_AVX int i; float dot = 0; for (i = 0; i < n; ++i) dot += x[i] * y[i]; return dot; #else int i, nd, nm; float sum = 0; float sum_res[4]; __m128 sum_r = _mm_setzero_ps(); __m128 r0, r1, r2, r3; nd = n / 8 * 8; nm = n % 8; for (i = 0; i < nd; i += 8) { r0 = _mm_loadu_ps(x); r1 = _mm_loadu_ps(y); r2 = _mm_loadu_ps(x + 4); r3 = _mm_loadu_ps(y + 4); r0 = _mm_mul_ps(r0, 
/* (continuation of the bcnn_dot SSE accumulation loop) */
r1);
        r2 = _mm_mul_ps(r2, r3);
        sum_r = _mm_add_ps(sum_r, r0);
        sum_r = _mm_add_ps(sum_r, r2);
        x += 8;
        y += 8;
    }
    /* Horizontal reduction of the 4 partial sums, then scalar remainder. */
    _mm_storeu_ps(sum_res, sum_r);
    sum += sum_res[0] + sum_res[1] + sum_res[2] + sum_res[3];
    for (i = 0; i < nm; ++i) sum += x[i] * y[i];
    return sum;
#endif
}

/* Sum of the n entries of x, written to *sum. Returns 0. */
int bcnn_vsum(int n, float *x, float *sum) {
#ifndef BCNN_USE_AVX
    int i;
    float s = 0.0f;
    for (i = 0; i < n; ++i) s += x[i];
    *(sum) = s;
#else
    int i, nd, nm;
    float s = 0.0f;
    float sum_res[4];
    __m128 sum_r = _mm_setzero_ps();
    __m128 r0, r1;
    nd = n / 8 * 8;
    nm = n % 8;
    for (i = 0; i < nd; i += 8) {
        r0 = _mm_loadu_ps(x);
        r1 = _mm_loadu_ps(x + 4);
        sum_r = _mm_add_ps(sum_r, r0);
        sum_r = _mm_add_ps(sum_r, r1);
        x += 8;
    }
    _mm_storeu_ps(sum_res, sum_r);
    s += sum_res[0] + sum_res[1] + sum_res[2] + sum_res[3];
    for (i = 0; i < nm; ++i) s += x[i];
    *(sum) = s;
#endif
    return 0;
}

/* Matrix-vector product: y = alpha * op(a) * x + beta * y, where a is a
 * dense row-major m-by-n matrix and op is the identity (trans_a == 0) or
 * the transpose. The SSE path vectorizes each row's dot product in groups
 * of 4 with a scalar tail. Returns 0. */
int bcnn_gemv(int trans_a, int m, int n, float alpha, float *a, float *x,
              float beta, float *y) {
    int i, j;
#ifdef BCNN_USE_AVX
    int nd, md;
    __m128 apart, mula, mul0, areg, xreg, yreg;
    float sum[4] = {0};
#endif
    if (!trans_a) {
        /* Pre-scale y by beta once. */
        if (beta != 1.0f) {
            for (i = 0; i < m; ++i) {
                y[i] *= beta;
            }
        }
#ifndef BCNN_USE_AVX
        for (i = 0; i < m; ++i) {
            for (j = 0; j < n; ++j) {
                y[i] += alpha * a[i * n + j] * x[j];
            }
        }
#else
        nd = n / 4 * 4;
        apart = _mm_set1_ps(alpha);
        for (i = 0; i < m; ++i) {
            memset(sum, 0, 4 * sizeof(float));
            yreg = _mm_setzero_ps();
            for (j = 0; j < nd; j += 4) {
                areg = _mm_loadu_ps(&a[i * n + j]);
                xreg = _mm_loadu_ps(&x[j]);
                mula = _mm_mul_ps(apart, areg);
                mul0 = _mm_mul_ps(xreg, mula);
                yreg = _mm_add_ps(yreg, mul0);
            }
            _mm_storeu_ps(sum, yreg);
            y[i] += sum[0] + sum[1] + sum[2] + sum[3];
            /* j carries over from the vector loop into the scalar tail. */
            for (; j < n; ++j) y[i] += alpha * a[i * n + j] * x[j];
        }
#endif
    } else {
        if (beta != 1.0f) {
            for (i = 0; i < n; ++i) {
                y[i] *= beta;
            }
        }
#ifndef BCNN_USE_AVX
        for (i = 0; i < n; ++i) {
            for (j = 0; j < m; ++j) {
                y[i] += alpha * a[i * m + j] * x[j];
            }
        }
#else
        md = m / 4 * 4;
        apart = _mm_set1_ps(alpha);
        for (i = 0; i < n; ++i) {
            memset(sum, 0, 4 * sizeof(float));
            yreg = _mm_setzero_ps();
            for (j = 0; j < md; j += 4) {
                areg = _mm_loadu_ps(&a[i * m + j]);
                xreg = _mm_loadu_ps(&x[j]);
                mula = _mm_mul_ps(apart, areg);
                mul0 = _mm_mul_ps(xreg, mula);
                yreg = _mm_add_ps(yreg, mul0);
            }
            _mm_storeu_ps(sum, yreg);
            y[i] += sum[0] + sum[1] + sum[2] + sum[3];
            for (; j < m; ++j) y[i] += alpha * a[i * m + j] * x[j];
        }
#endif
    }
    return 0;
}

/* Squared L2 distance sum((x[i] - y[i])^2). The SSE path uses aligned
 * loads when both pointers are 32-byte aligned. */
float bcnn_l2_distance(float *x, float *y, int n) {
    float dist = 0.0f;
    int i;
#ifdef BCNN_USE_AVX
    int data_is_aligned = bh_is_aligned32(x) & bh_is_aligned32(y);
    __m128 vx0, vy0, vx1, vy1, vdiff0, vdiff1;
    __m128 vdist = _mm_set1_ps(0.0f);
    float dist4f[4] = {0.0f};
    int nd, nm;
    nd = n / 8 * 8;
    nm = n % 8;
    if (data_is_aligned) {
        for (i = 0; i < nd; i += 8) {
            vx0 = _mm_load_ps(x);
            vy0 = _mm_load_ps(y);
            vx1 = _mm_load_ps(x + 4);
            vy1 = _mm_load_ps(y + 4);
            vdiff0 = _mm_sub_ps(vx0, vy0);
            vdiff0 = _mm_mul_ps(vdiff0, vdiff0);
            vdiff1 = _mm_sub_ps(vx1, vy1);
            vdiff1 = _mm_mul_ps(vdiff1, vdiff1);
            vdist = _mm_add_ps(vdist, vdiff0);
            vdist = _mm_add_ps(vdist, vdiff1);
            x += 8;
            y += 8;
        }
        _mm_store_ps(dist4f, vdist);
    } else {
        for (i = 0; i < nd; i += 8) {
            vx0 = _mm_loadu_ps(x);
            vy0 = _mm_loadu_ps(y);
            vx1 = _mm_loadu_ps(x + 4);
            vy1 = _mm_loadu_ps(y + 4);
            vdiff0 = _mm_sub_ps(vx0, vy0);
            vdiff0 = _mm_mul_ps(vdiff0, vdiff0);
            vdiff1 = _mm_sub_ps(vx1, vy1);
            vdiff1 = _mm_mul_ps(vdiff1, vdiff1);
            vdist = _mm_add_ps(vdist, vdiff0);
            vdist = _mm_add_ps(vdist, vdiff1);
            x += 8;
            y += 8;
        }
        _mm_storeu_ps(dist4f, vdist);
    }
    dist += dist4f[0] + dist4f[1] + dist4f[2] + dist4f[3];
    for (i = 0; i < nm; ++i) dist += (x[i] - y[i]) * (x[i] - y[i]);
#else
    for (i = 0; i < n; ++i) dist += (x[i] - y[i]) * (x[i] - y[i]);
#endif
    return dist;
}

/* Squared difference against a scalar: sum((x[i] - a)^2). */
float bcnn_sqrdiff_vs(float *x, float a, int n) {
    float dist = 0.0f;
    int i;
#ifndef BCNN_USE_AVX
    for (i = 0; i < n; ++i) dist += (x[i] - a) * (x[i] - a);
#else
    int data_is_aligned = bh_is_aligned32(x);
    __m128 vx0, vx1, vdiff0, vdiff1;
    __m128 vdist = _mm_set1_ps(0.0f);
    __m128 areg = _mm_set1_ps(a);
    float dist4f[4] = {0.0f};
    int nd, nm;
    nd = n / 8 * 8;
    nm = n % 8;
    if (data_is_aligned) {
        for (i = 0; i < nd; i += 8) {
            vx0 = _mm_load_ps(x);
            vx1 = _mm_load_ps(x + 4);
            vdiff0 = _mm_sub_ps(vx0, areg);
            vdiff0 = _mm_mul_ps(vdiff0, vdiff0);
            vdiff1 = _mm_sub_ps(vx1, areg);
            vdiff1 = _mm_mul_ps(vdiff1, vdiff1);
            vdist = _mm_add_ps(vdist, vdiff0);
            vdist = _mm_add_ps(vdist, vdiff1);
            x += 8;
        }
        _mm_store_ps(dist4f, vdist);
    } else {
        for (i = 0; i < nd; i += 8) {
            vx0 = _mm_loadu_ps(x);
            vx1 = _mm_loadu_ps(x + 4);
            vdiff0 = _mm_sub_ps(vx0, areg);
            vdiff0 = _mm_mul_ps(vdiff0, vdiff0);
            vdiff1 = _mm_sub_ps(vx1, areg);
            vdiff1 = _mm_mul_ps(vdiff1, vdiff1);
            vdist = _mm_add_ps(vdist, vdiff0);
            vdist = _mm_add_ps(vdist, vdiff1);
            x += 8;
        }
        _mm_storeu_ps(dist4f, vdist);
    }
    dist += dist4f[0] + dist4f[1] + dist4f[2] + dist4f[3];
    for (i = 0; i < nm; ++i) dist += (x[i] - a) * (x[i] - a);
#endif
    return dist;
}

/* Shifted dot product: sum((x[i] - a) * (y[i] - b)) — i.e. the covariance
 * accumulator for means a and b. */
float bcnn_shiftdot(int n, float *x, float a, float *y, float b) {
#ifndef BCNN_USE_AVX
    int i;
    float dot = 0;
    for (i = 0; i < n; ++i) dot += (x[i] - a) * (y[i] - b);
    return dot;
#else
    int i, nd, nm;
    float sum = 0;
    float sum_res[4];
    __m128 sum_r = _mm_setzero_ps();
    __m128 r0, r1, r2, r3;
    __m128 areg = _mm_set1_ps(a);
    __m128 breg = _mm_set1_ps(b);
    nd = n / 8 * 8;
    nm = n % 8;
    for (i = 0; i < nd; i += 8) {
        r0 = _mm_loadu_ps(x);
        r1 = _mm_loadu_ps(y);
        r2 = _mm_loadu_ps(x + 4);
        r3 = _mm_loadu_ps(y + 4);
        r0 = _mm_sub_ps(r0, areg);
        r1 = _mm_sub_ps(r1, breg);
        r2 = _mm_sub_ps(r2, areg);
        r3 = _mm_sub_ps(r3, breg);
        r0 = _mm_mul_ps(r0, r1);
        r2 = _mm_mul_ps(r2, r3);
        sum_r = _mm_add_ps(sum_r, r0);
        sum_r = _mm_add_ps(sum_r, r2);
        x += 8;
        y += 8;
    }
    _mm_storeu_ps(sum_res, sum_r);
    sum += sum_res[0] + sum_res[1] + sum_res[2] + sum_res[3];
    for (i = 0; i < nm; ++i) sum += (x[i] - a) * (y[i] - b);
    return sum;
#endif
}

/* Variance normalization: y[i] *= c / (a[i] * sqrt(a[i]) + 1e-5),
 * i.e. divide by a^(3/2) with an epsilon to avoid division by zero. */
int bcnn_varnorm(int n, float *a, float c, float *y) {
#ifndef BCNN_USE_AVX
    int i;
    for (i = 0; i < n; ++i) {
        y[i] *= c / (a[i] * sqrtf(a[i]) + 0.00001f);
    }
#else
    int i, nd, nm;
    __m128 r0, r1, reg0, reg1;
    __m128 creg = _mm_set1_ps(c);
    __m128 epsreg = _mm_set1_ps(0.00001f);
    nd = n / 8 * 8;
    nm = n % 8;
    for (i = 0; i < nd; i += 8) {
        reg0 = _mm_loadu_ps(y);
        reg1 = _mm_loadu_ps(y + 4);
        r0 = _mm_loadu_ps(a);
        r1 = _mm_loadu_ps(a + 4);
        r0 = _mm_mul_ps(
            reg0,
            _mm_div_ps(creg, _mm_add_ps(_mm_mul_ps(r0, _mm_sqrt_ps(r0)),
                                        epsreg)));
        r1 = _mm_mul_ps(
            reg1,
            _mm_div_ps(creg, _mm_add_ps(_mm_mul_ps(r1, _mm_sqrt_ps(r1)),
                                        epsreg)));
        _mm_storeu_ps(y, r0);
        _mm_storeu_ps(y + 4, r1);
        a += 8;
        y += 8;
    }
    for (i = 0; i < nm; ++i) {
        y[i] *= c / (a[i] * sqrtf(a[i]) + 0.00001f);
    }
#endif
    return 0;
}

/* var[i] = var[i] * a - m[i]^2 (E[x^2]*a minus squared mean). Returns 0. */
int bcnn_varmean(int n, float *m, float a, float *var) {
#ifndef BCNN_USE_AVX
    int i;
    for (i = 0; i < n; ++i) {
        var[i] = var[i] * a - m[i] * m[i];
    }
#else
    int i, nd, nm;
    __m128 r0, r1, reg0, reg1;
    __m128 areg = _mm_set1_ps(a);
    nd = n / 8 * 8;
    nm = n % 8;
    for (i = 0; i < nd; i += 8) {
        reg0 = _mm_loadu_ps(var);
        reg1 = _mm_loadu_ps(var + 4);
        r0 = _mm_loadu_ps(m);
        r1 = _mm_loadu_ps(m + 4);
        r0 = _mm_sub_ps(_mm_mul_ps(reg0, areg), _mm_mul_ps(r0, r0));
        r1 = _mm_sub_ps(_mm_mul_ps(reg1, areg), _mm_mul_ps(r1, r1));
        _mm_storeu_ps(var, r0);
        _mm_storeu_ps(var + 4, r1);
        m += 8;
        var += 8;
    }
    for (i = 0; i < nm; ++i) {
        var[i] = var[i] * a - m[i] * m[i];
    }
#endif
    return 0;
}

/* Add bias[i] to each channel i of an NCHW tensor; channels are processed
 * in parallel with OpenMP, one bcnn_add_scalar call per channel plane. */
void bcnn_add_bias(float *output, float *bias, int batch_size,
                   int num_channels, int spatial_size, int num_threads) {
    for (int b = 0; b < batch_size; ++b) {
#pragma omp parallel for num_threads(num_threads)
        for (int i = 0; i < num_channels; ++i) {
            bcnn_add_scalar(spatial_size, bias[i], output + i * spatial_size);
        }
        output += num_channels * spatial_size;
    }
}

/* Multiply each channel i of an NCHW tensor by scales[i] (OpenMP over
 * channels, bcnn_scal per channel plane). */
void bcnn_scales(float *output, float *scales, int batch_size,
                 int num_channels, int spatial_size, int num_threads) {
    for (int b = 0; b < batch_size; ++b) {
#pragma omp parallel for num_threads(num_threads)
        for (int i = 0; i < num_channels; ++i) {
            bcnn_scal(spatial_size, scales[i], output + i * spatial_size);
        }
        output += num_channels * spatial_size;
    }
}

/* Batch-norm scale gradient: per feature f, accumulate
 * sum over batch and spatial positions of delta * x_norm.
 * (Signature continues past this chunk boundary.) */
void bcnn_grad_scales(float *x_norm, float *delta, int
batch, int n, int size, float *scale_updates) {
    int i, b, f;
    for (f = 0; f < n; ++f) {
        float sum = 0;
        for (b = 0; b < batch; ++b) {
            for (i = 0; i < size; ++i) {
                int index = i + size * (f + n * b);
                sum += delta[index] * x_norm[index];
            }
        }
        scale_updates[f] += sum;
    }
}

/* Bias gradient: grad_bias[i] accumulates the sum of grad_data over each
 * channel plane, across the whole batch. */
void bcnn_grad_bias(float *grad_bias, float *grad_data, int batch_size,
                    int num_channels, int spatial_size) {
    int i, j, b;
    float *p = NULL;
    for (b = 0; b < batch_size; ++b) {
        for (i = 0; i < num_channels; ++i) {
            p = grad_data + spatial_size * (i + b * num_channels);
            for (j = 0; j < spatial_size; ++j) {
                grad_bias[i] += p[j];
            }
        }
    }
}

/* Branchless range check: true iff 0 <= a < b. The unsigned cast folds the
 * negative case into a single comparison (negative a wraps to a huge value). */
static inline int is_a_positive_and_inferior_to_b(int a, int b) {
    return (unsigned int)a < (unsigned int)b;
}

/* Classic Caffe-style im2col: unroll (channels, height, width) image patches
 * of size kernel_size x kernel_size into columns of data_col, with zero
 * padding `pad` and step `stride`. data_col must hold
 * channels * kernel_size^2 * output_h * output_w floats. */
void bcnn_im2col(const float *data_im, const int channels, const int height,
                 const int width, const int kernel_size, const int pad,
                 const int stride, float *data_col) {
    int channel, kernel_row, kernel_col, output_rows, output_cols, input_col,
        input_row, output_col;
    const int output_h = (height + 2 * pad - kernel_size) / stride + 1;
    const int output_w = (width + 2 * pad - kernel_size) / stride + 1;
    const int channel_size = height * width;
    for (channel = channels; channel--; data_im += channel_size) {
        for (kernel_row = 0; kernel_row < kernel_size; kernel_row++) {
            for (kernel_col = 0; kernel_col < kernel_size; kernel_col++) {
                input_row = -pad + kernel_row;
                for (output_rows = output_h; output_rows; output_rows--) {
                    if (!is_a_positive_and_inferior_to_b(input_row, height)) {
                        /* whole row falls in the padding: emit zeros */
                        for (output_cols = output_w; output_cols;
                             output_cols--) {
                            *(data_col++) = 0;
                        }
                    } else {
                        input_col = -pad + kernel_col;
                        for (output_col = output_w; output_col; output_col--) {
                            if (is_a_positive_and_inferior_to_b(input_col,
                                                                width)) {
                                *(data_col++) =
                                    data_im[input_row * width + input_col];
                            } else {
                                *(data_col++) = 0;
                            }
                            input_col += stride;
                        }
                    }
                    input_row += stride;
                }
            }
        }
    }
}

/* Multithreaded im2col specialization for stride == 1: each output row is a
 * contiguous run of the source row, so the interior is a single memcpy with
 * memset-ed left/right/top/bottom padding bands. Parallel over columns of
 * the unrolled matrix. */
static void bcnn_im2col_mt_st1(const float *data_im, const int channels,
                               const int height, const int width,
                               const int kernel_size, const int pad,
                               float *data_col, int num_threads) {
    int height_col = (height + 2 * pad - kernel_size) + 1;  /* stride == 1 */
    int width_col = (width + 2 * pad - kernel_size) + 1;
    int channels_col = channels * kernel_size * kernel_size;
#pragma omp parallel for num_threads(num_threads)
    for (int c = 0; c < channels_col; ++c) {
        int w_offset = c % kernel_size;
        int h_offset = (c / kernel_size) % kernel_size;
        int c_im = c / kernel_size / kernel_size;
        const int hc0 = h_offset - pad;
        const int wc0 = w_offset - pad;
        /* widths of the zero / copied / zero bands of each output row */
        int wleft = bh_max(0, pad - w_offset);
        int wmid = bh_min(width_col, width + pad - w_offset) - wleft;
        int wright = bh_max(0, width_col - (width + pad - w_offset));
        for (int h = 0; h < pad - h_offset; ++h) {
            const int row_offset = (c * height_col + h) * width_col;
            memset(data_col + row_offset, 0, width_col * sizeof(float));
        }
        for (int h = bh_max(0, pad - h_offset);
             h < bh_min(height_col, height + pad - h_offset); ++h) {
            int h_pad = h + hc0;
            const int row_offset = (c * height_col + h) * width_col;
            const int srow_offset = (c_im * height + h_pad) * width;
            memset(data_col + row_offset, 0, wleft * sizeof(float));
            memcpy(data_col + row_offset + wleft,
                   data_im + srow_offset + wleft + wc0, wmid * sizeof(float));
            memset(data_col + row_offset + wleft + wmid, 0,
                   wright * sizeof(float));
        }
        for (int h = height + pad - h_offset; h < height_col; ++h) {
            const int row_offset = (c * height_col + h) * width_col;
            memset(data_col + row_offset, 0, width_col * sizeof(float));
        }
    }
}

/* Multithreaded im2col: dispatches to the memcpy-based stride-1 fast path,
 * otherwise runs the generic element-wise unrolling in parallel over the
 * columns of the unrolled matrix. */
void bcnn_im2col_mt(const float *data_im, const int channels, const int height,
                    const int width, const int kernel_size, const int pad,
                    const int stride, float *data_col, int num_threads) {
    int height_col = (height + 2 * pad - kernel_size) / stride + 1;
    int width_col = (width + 2 * pad - kernel_size) / stride + 1;
    int channels_col = channels * kernel_size * kernel_size;
    if (stride == 1) {
        bcnn_im2col_mt_st1(data_im, channels, height, width, kernel_size, pad,
                           data_col, num_threads);
    } else {
#pragma omp parallel for num_threads(num_threads)
        for (int c = 0; c < channels_col; ++c) {
            int w_offset = c % kernel_size;
            int h_offset = (c / kernel_size) % kernel_size;
            int c_im = c / kernel_size / kernel_size;
            const int hc0 = h_offset - pad;
            const int wc0 = w_offset - pad;
            for (int h = 0; h < height_col; ++h) {
                int h_pad = h * stride + hc0;
                const int row_offset = (c * height_col + h) * width_col;
                const int srow_offset = (c_im * height + h_pad) * width;
                for (int w = 0; w < width_col; ++w) {
                    int w_pad = w * stride + wc0;
                    /* unsigned-compare range check, see
                     * is_a_positive_and_inferior_to_b() */
                    if ((((unsigned)h_pad) < ((unsigned)height)) &&
                        (((unsigned)w_pad) < ((unsigned)width)))
                        data_col[row_offset + w] =
                            data_im[srow_offset + w_pad];
                    else {
                        data_col[row_offset + w] = 0.;
                    }
                }
            }
        }
    }
}

/* Inverse of bcnn_im2col: scatter-add the columns of data_col back into the
 * (zero-initialized) image data_im; padding positions are skipped. */
void bcnn_col2im(const float *data_col, const int channels, const int height,
                 const int width, const int kernel, const int pad,
                 const int stride, float *data_im) {
    int channel, kernel_row, kernel_col, output_rows, input_col, input_row,
        output_col;
    const int output_h = (height + 2 * pad - kernel) / stride + 1;
    const int output_w = (width + 2 * pad - kernel) / stride + 1;
    const int channel_size = height * width;
    bcnn_fill_f32(height * width * channels, 0.0f, data_im);
    for (channel = channels; channel--; data_im += channel_size) {
        for (kernel_row = 0; kernel_row < kernel; kernel_row++) {
            for (kernel_col = 0; kernel_col < kernel; kernel_col++) {
                input_row = -pad + kernel_row;
                for (output_rows = output_h; output_rows; output_rows--) {
                    if (!is_a_positive_and_inferior_to_b(input_row, height)) {
                        data_col += output_w; /* skip a padded row */
                    } else {
                        input_col = -pad + kernel_col;
                        for (output_col = output_w; output_col; output_col--) {
                            if (is_a_positive_and_inferior_to_b(input_col,
                                                                width)) {
                                data_im[input_row * width + input_col] +=
                                    *data_col;
                            }
                            data_col++;
                            input_col += stride;
                        }
                    }
                    input_row += stride;
                }
            }
        }
    }
}

/* Kernels for NC4HW4 layouts: channels are packed in groups of 4, so each
 * "plane" element is a 4-float vector; bias/alpha/slope are indexed per
 * 4-channel group. */

/* dst[z][p][0..3] += bias[z][0..3] for every plane p of every 4-channel
 * group z. src/alpha/slope are unused in this variant (kept for a uniform
 * kernel signature). */
void bcnn_add_bias_nc4hw4(float *dst, const float *src, const float *bias,
                          const float *alpha, const float *slope,
                          size_t num_planes, size_t num_biases) {
#if defined(BCNN_USE_AVX)
    __m128 mv = _mm_set1_ps(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        __m128 biasv = _mm_load_ps(bias + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            __m128 dstv = _mm_add_ps(_mm_load_ps(dst_z + 4 * p), biasv);
            _mm_store_ps(dst_z + 4 * p, dstv);
        }
    }
#elif defined(BCNN_USE_NEON)
    float32x4_t mv = vdupq_n_f32(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        float32x4_t biasv = vld1q_f32(bias + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float32x4_t dstv = vaddq_f32(vld1q_f32(dst_z + 4 * p), biasv);
            vst1q_f32(dst_z + 4 * p, dstv);
        }
    }
#else
    for (int z = 0; z < num_biases; ++z) {
        float *dst_z = dst + num_planes * 4 * z;
        const float *bias_z = bias + 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float *dst_x = dst_z + 4 * p;
            for (int i = 0; i < 4; ++i) {
                dst_x[i] += bias_z[i];
            }
        }
    }
#endif
}

/* Fused bias + ReLU for NC4HW4: dst = max(dst + bias, 0).
 * (Body continues past this chunk boundary.) */
void bcnn_add_bias_with_relu_nc4hw4(float *dst, const float *src,
                                    const float *bias, const float *alpha,
                                    const float *slope, size_t num_planes,
                                    size_t num_biases) {
#if defined(BCNN_USE_AVX)
    __m128 mv = _mm_set1_ps(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        __m128 biasv = _mm_load_ps(bias + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            __m128 dstv = _mm_add_ps(_mm_load_ps(dst_z + 4 * p), biasv);
            dstv = _mm_max_ps(dstv, mv);
            _mm_store_ps(dst_z + 4 * p, dstv);
        }
    }
#elif defined(BCNN_USE_NEON)
    float32x4_t mv = vdupq_n_f32(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        float32x4_t biasv = vld1q_f32(bias + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float32x4_t dstv = vaddq_f32(vld1q_f32(dst_z + 4 * p), biasv);
            dstv = vmaxq_f32(dstv, mv);
            vst1q_f32(dst_z + 4 * p, dstv);
        }
    }
#else
    for (int z = 0; z < num_biases; ++z) {
        float *dst_z = dst + num_planes * 4 * z;
        const float *bias_z = bias + 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float *dst_x = dst_z + 4 * p;
            for (int i = 0; i < 4; ++i) {
                dst_x[i]
+= bias_z[i];
                if (dst_x[i] < 0) {
                    dst_x[i] = 0;
                }
            }
        }
    }
#endif
}

/* Fused bias + leaky ReLU for NC4HW4:
 * dst = dst + bias, then negative values are scaled by the fixed slope 0.1. */
void bcnn_add_bias_with_lrelu_nc4hw4(float *dst, const float *src,
                                     const float *bias, const float *alpha,
                                     const float *slope, size_t num_planes,
                                     size_t num_biases) {
#if defined(BCNN_USE_AVX)
    __m128 zerov = _mm_set1_ps(0.0f);
    __m128 slopenegv = _mm_set1_ps(0.1f);
    for (int z = 0; z < num_biases; ++z) {
        __m128 biasv = _mm_load_ps(bias + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            __m128 dstv = _mm_add_ps(_mm_load_ps(dst_z + 4 * p), biasv);
            /* leaky ReLU as max(x,0) + 0.1 * min(x,0) */
            __m128 dstv_pos = _mm_max_ps(dstv, zerov);
            __m128 dstv_neg = _mm_mul_ps(slopenegv, _mm_min_ps(dstv, zerov));
            dstv = _mm_add_ps(dstv_pos, dstv_neg);
            _mm_store_ps(dst_z + 4 * p, dstv);
        }
    }
#elif defined(BCNN_USE_NEON)
    float32x4_t zerov = vdupq_n_f32(0.0f);
    float32x4_t slopenegv = vdupq_n_f32(0.1f);
    for (int z = 0; z < num_biases; ++z) {
        float32x4_t biasv = vld1q_f32(bias + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float32x4_t dstv = vaddq_f32(vld1q_f32(dst_z + 4 * p), biasv);
            float32x4_t dstv_pos = vmaxq_f32(dstv, zerov);
            float32x4_t dstv_neg = vmulq_f32(slopenegv, vminq_f32(dstv, zerov));
            dstv = vaddq_f32(dstv_pos, dstv_neg);
            vst1q_f32(dst_z + 4 * p, dstv);
        }
    }
#else
    for (int z = 0; z < num_biases; ++z) {
        float *dst_z = dst + num_planes * 4 * z;
        const float *bias_z = bias + 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float *dst_x = dst_z + 4 * p;
            for (int i = 0; i < 4; ++i) {
                dst_x[i] += bias_z[i];
                dst_x[i] = (dst_x[i] > 0 ? dst_x[i] : 0.1f * dst_x[i]);
            }
        }
    }
#endif
}

/* Fused bias + PReLU for NC4HW4: like the leaky variant, but the negative
 * slope is read per 4-channel group from `slope`. */
void bcnn_add_bias_with_prelu_nc4hw4(float *dst, const float *src,
                                     const float *bias, const float *alpha,
                                     const float *slope, size_t num_planes,
                                     size_t num_biases) {
#if defined(BCNN_USE_AVX)
    __m128 zerov = _mm_set1_ps(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        __m128 biasv = _mm_load_ps(bias + 4 * z);
        __m128 slopev = _mm_load_ps(slope + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            __m128 dstv = _mm_add_ps(_mm_load_ps(dst_z + 4 * p), biasv);
            __m128 dstv_pos = _mm_max_ps(dstv, zerov);
            __m128 dstv_neg = _mm_mul_ps(slopev, _mm_min_ps(dstv, zerov));
            dstv = _mm_add_ps(dstv_pos, dstv_neg);
            _mm_store_ps(dst_z + 4 * p, dstv);
        }
    }
#elif defined(BCNN_USE_NEON)
    float32x4_t zerov = vdupq_n_f32(0.0f);
    float32x4_t slopenegv = vdupq_n_f32(0.1f); /* NOTE(review): unused */
    for (int z = 0; z < num_biases; ++z) {
        float32x4_t biasv = vld1q_f32(bias + 4 * z);
        float32x4_t slopev = vld1q_f32(slope + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float32x4_t dstv = vaddq_f32(vld1q_f32(dst_z + 4 * p), biasv);
            float32x4_t dstv_pos = vmaxq_f32(dstv, zerov);
            float32x4_t dstv_neg = vmulq_f32(slopev, vminq_f32(dstv, zerov));
            dstv = vaddq_f32(dstv_pos, dstv_neg);
            vst1q_f32(dst_z + 4 * p, dstv);
        }
    }
#else
    for (int z = 0; z < num_biases; ++z) {
        float *dst_z = dst + num_planes * 4 * z;
        const float *bias_z = bias + 4 * z;
        const float *slope_z = slope + 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float *dst_x = dst_z + 4 * p;
            for (int i = 0; i < 4; ++i) {
                dst_x[i] += bias_z[i];
                dst_x[i] = (dst_x[i] > 0 ? dst_x[i] : slope_z[i] * dst_x[i]);
            }
        }
    }
#endif
}

/* Fused scale + bias for NC4HW4: dst = src * alpha + bias, with alpha and
 * bias taken per 4-channel group. */
void bcnn_scale_and_add_bias_nc4hw4(float *dst, const float *src,
                                    const float *bias, const float *alpha,
                                    const float *slope, size_t num_planes,
                                    size_t num_biases) {
#if defined(BCNN_USE_AVX)
    __m128 zerov = _mm_set1_ps(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        __m128 biasv = _mm_load_ps(bias + 4 * z);
        __m128 alphav = _mm_load_ps(alpha + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        const float *src_z = src + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            __m128 dstv = _mm_add_ps(
                _mm_mul_ps(_mm_load_ps(src_z + 4 * p), alphav), biasv);
            _mm_store_ps(dst_z + 4 * p, dstv);
        }
    }
#elif defined(BCNN_USE_NEON)
    float32x4_t zerov = vdupq_n_f32(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        float32x4_t biasv = vld1q_f32(bias + 4 * z);
        float32x4_t alphav = vld1q_f32(alpha + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        const float *src_z = src + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float32x4_t dstv =
                vaddq_f32(vmulq_f32(vld1q_f32(src_z + 4 * p), alphav), biasv);
            vst1q_f32(dst_z + 4 * p, dstv);
        }
    }
#else
    for (int z = 0; z < num_biases; ++z) {
        float *dst_z = dst + num_planes * 4 * z;
        const float *src_z = src + num_planes * 4 * z;
        const float *bias_z = bias + 4 * z;
        const float *alpha_z = alpha + 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float *dst_x = dst_z + 4 * p;
            const float *src_x = src_z + 4 * p;
            for (int i = 0; i < 4; ++i) {
                dst_x[i] = src_x[i] * alpha_z[i] + bias_z[i];
            }
        }
    }
#endif
}

/* Fused scale + bias + ReLU for NC4HW4: dst = max(src * alpha + bias, 0).
 * (Body continues past the end of this chunk.) */
void bcnn_scale_and_add_bias_with_relu_nc4hw4(
    float *dst, const float *src, const float *bias, const float *alpha,
    const float *slope, size_t num_planes, size_t num_biases) {
#if defined(BCNN_USE_AVX)
    __m128 zerov = _mm_set1_ps(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        __m128 biasv = _mm_load_ps(bias + 4 * z);
        __m128 alphav = _mm_load_ps(alpha + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        const float *src_z = src + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            __m128 dstv = _mm_add_ps(
                _mm_mul_ps(_mm_load_ps(src_z + 4 * p), alphav), biasv);
            dstv = _mm_max_ps(dstv, zerov);
            _mm_store_ps(dst_z + 4 * p, dstv);
        }
    }
#elif defined(BCNN_USE_NEON)
    float32x4_t zerov = vdupq_n_f32(0.0f);
    for (int z = 0; z < num_biases; ++z) {
        float32x4_t biasv = vld1q_f32(bias + 4 * z);
        float32x4_t alphav = vld1q_f32(alpha + 4 * z);
        float *dst_z = dst + num_planes * 4 * z;
        const float *src_z = src + num_planes * 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float32x4_t dstv =
                vaddq_f32(vmulq_f32(vld1q_f32(src_z + 4 * p), alphav), biasv);
            dstv = vmaxq_f32(dstv, zerov);
            vst1q_f32(dst_z + 4 * p, dstv);
        }
    }
#else
    for (int z = 0; z < num_biases; ++z) {
        float *dst_z = dst + num_planes * 4 * z;
        const float *src_z = src + num_planes * 4 * z;
        const float *bias_z = bias + 4 * z;
        const float *alpha_z = alpha + 4 * z;
        for (int p = 0; p < num_planes; ++p) {
            float *dst_x = dst_z + 4 * p;
            const float *src_x = src_z + 4 * p;
            for (int i = 0; i < 4; ++i) {
                dst_x[i] = src_x[i] * alpha_z[i] + bias_z[i];
                dst_x[i] = (dst_x[i] > 0 ?
dst_x[i] : 0.f); } } } #endif } void bcnn_scale_and_add_bias_with_lrelu_nc4hw4( float *dst, const float *src, const float *bias, const float *alpha, const float *slope, size_t num_planes, size_t num_biases) { #if defined(BCNN_USE_AVX) __m128 zerov = _mm_set1_ps(0.0f); __m128 slopenegv = _mm_set1_ps(0.1f); for (int z = 0; z < num_biases; ++z) { __m128 biasv = _mm_load_ps(bias + 4 * z); __m128 alphav = _mm_load_ps(alpha + 4 * z); float *dst_z = dst + num_planes * 4 * z; const float *src_z = src + num_planes * 4 * z; for (int p = 0; p < num_planes; ++p) { __m128 dstv = _mm_add_ps( _mm_mul_ps(_mm_load_ps(src_z + 4 * p), alphav), biasv); __m128 dstv_pos = _mm_max_ps(dstv, zerov); __m128 dstv_neg = _mm_mul_ps(slopenegv, _mm_min_ps(dstv, zerov)); dstv = _mm_add_ps(dstv_pos, dstv_neg); _mm_store_ps(dst_z + 4 * p, dstv); } } #elif defined(BCNN_USE_NEON) float32x4_t zerov = vdupq_n_f32(0.0f); float32x4_t slopenegv = vdupq_n_f32(0.1f); for (int z = 0; z < num_biases; ++z) { float32x4_t biasv = vld1q_f32(bias + 4 * z); float32x4_t alphav = vld1q_f32(alpha + 4 * z); float *dst_z = dst + num_planes * 4 * z; const float *src_z = src + num_planes * 4 * z; for (int p = 0; p < num_planes; ++p) { float32x4_t dstv = vaddq_f32(vmulq_f32(vld1q_f32(src_z + 4 * p), alphav), biasv); float32x4_t dstv_pos = vmaxq_f32(dstv, zerov); float32x4_t dstv_neg = vmulq_f32(slopenegv, vminq_f32(dstv, zerov)); dstv = vaddq_f32(dstv_pos, dstv_neg); vst1q_f32(dst_z + 4 * p, dstv); } } #else for (int z = 0; z < num_biases; ++z) { float *dst_z = dst + num_planes * 4 * z; const float *src_z = src + num_planes * 4 * z; const float *bias_z = bias + 4 * z; const float *alpha_z = alpha + 4 * z; for (int p = 0; p < num_planes; ++p) { float *dst_x = dst_z + 4 * p; const float *src_x = src_z + 4 * p; for (int i = 0; i < 4; ++i) { dst_x[i] = src_x[i] * alpha_z[i] + bias_z[i]; dst_x[i] = (dst_x[i] > 0 ? 
dst_x[i] : 0.1f * dst_x[i]); } } } #endif } void bcnn_scale_and_add_bias_with_prelu_nc4hw4( float *dst, const float *src, const float *bias, const float *alpha, const float *slope, size_t num_planes, size_t num_biases) { #if defined(BCNN_USE_AVX) __m128 zerov = _mm_set1_ps(0.0f); for (int z = 0; z < num_biases; ++z) { __m128 biasv = _mm_load_ps(bias + 4 * z); __m128 alphav = _mm_load_ps(alpha + 4 * z); __m128 slopev = _mm_load_ps(slope + 4 * z); float *dst_z = dst + num_planes * 4 * z; const float *src_z = src + num_planes * 4 * z; for (int p = 0; p < num_planes; ++p) { __m128 dstv = _mm_add_ps( _mm_mul_ps(_mm_load_ps(src_z + 4 * p), alphav), biasv); __m128 dstv_pos = _mm_max_ps(dstv, zerov); __m128 dstv_neg = _mm_mul_ps(slopev, _mm_min_ps(dstv, zerov)); dstv = _mm_add_ps(dstv_pos, dstv_neg); _mm_store_ps(dst_z + 4 * p, dstv); } } #elif defined(BCNN_USE_NEON) float32x4_t zerov = vdupq_n_f32(0.0f); for (int z = 0; z < num_biases; ++z) { float32x4_t biasv = vld1q_f32(bias + 4 * z); float32x4_t alphav = vld1q_f32(alpha + 4 * z); float32x4_t slopev = vld1q_f32(slope + 4 * z); float *dst_z = dst + num_planes * 4 * z; const float *src_z = src + num_planes * 4 * z; for (int p = 0; p < num_planes; ++p) { float32x4_t dstv = vaddq_f32(vmulq_f32(vld1q_f32(src_z + 4 * p), alphav), biasv); float32x4_t dstv_pos = vmaxq_f32(dstv, zerov); float32x4_t dstv_neg = vmulq_f32(slopev, vminq_f32(dstv, zerov)); dstv = vaddq_f32(dstv_pos, dstv_neg); vst1q_f32(dst_z + 4 * p, dstv); } } #else for (int z = 0; z < num_biases; ++z) { float *dst_z = dst + num_planes * 4 * z; const float *src_z = src + num_planes * 4 * z; const float *bias_z = bias + 4 * z; const float *alpha_z = alpha + 4 * z; const float *slope_z = slope + 4 * z; for (int p = 0; p < num_planes; ++p) { float *dst_x = dst_z + 4 * p; const float *src_x = src_z + 4 * p; for (int i = 0; i < 4; ++i) { dst_x[i] = src_x[i] * alpha_z[i] + bias_z[i]; dst_x[i] = (dst_x[i] > 0 ? 
dst_x[i] : slope_z[i] * dst_x[i]); } } } #endif } /* Look-up Table for the post convolution functions */ bcnn_post_conv_nc4hw4_func bcnn_post_conv_nc4hw4_lut[8] = { bcnn_add_bias_nc4hw4, bcnn_add_bias_with_relu_nc4hw4, bcnn_add_bias_with_lrelu_nc4hw4, bcnn_add_bias_with_prelu_nc4hw4, bcnn_scale_and_add_bias_nc4hw4, bcnn_scale_and_add_bias_with_relu_nc4hw4, bcnn_scale_and_add_bias_with_lrelu_nc4hw4, bcnn_scale_and_add_bias_with_prelu_nc4hw4}; void bcnn_nchw_to_nc4hw4(float *dst, const float *src, size_t area, size_t depth, int batch_size) { int z, x; int cur = 0; memset(dst, 0, batch_size * area * bh_div_up(depth, 4) * 4 * sizeof(float)); for (int b = 0; b < batch_size; ++b) { float *dst_batch = dst + b * area * bh_div_up(depth, 4) * 4; for (z = 0; z < depth; ++z) { int plane = z / 4; float *dst_plane = dst_batch + plane * area * 4; int offset = z % 4; for (x = 0; x < area; ++x) { dst_plane[4 * x + offset] = src[cur++]; } } } } void bcnn_nc4hw4_to_nchw(float *dst, const float *src, size_t area, size_t depth, int batch_size) { int x; int z; int cur = 0; for (int b = 0; b < batch_size; ++b) { const float *src_batch = src + b * area * bh_div_up(depth, 4) * 4; for (z = 0; z < depth; ++z) { int plane = z / 4; const float *src_plane = src_batch + plane * area * 4; int offset = z % 4; for (x = 0; x < area; ++x) { dst[cur++] = src_plane[4 * x + offset]; } } } } void bcnn_conv3x3_convert_src(const float *src, float *dst, size_t step) { float *_x = (float *)src; float *_y = dst; bv_float4 m00 = bv_float4_sub(bv_float4_load(_x + 4 * 0), bv_float4_load(_x + 4 * 8)); bv_float4 m01 = bv_float4_sub(bv_float4_load(_x + 4 * 1), bv_float4_load(_x + 4 * 9)); bv_float4 m02 = bv_float4_sub(bv_float4_load(_x + 4 * 2), bv_float4_load(_x + 4 * 10)); bv_float4 m03 = bv_float4_sub(bv_float4_load(_x + 4 * 3), bv_float4_load(_x + 4 * 11)); bv_float4 m10 = bv_float4_add(bv_float4_load(_x + 4 * 4), bv_float4_load(_x + 4 * 8)); bv_float4 m11 = bv_float4_add(bv_float4_load(_x + 4 * 5), 
bv_float4_load(_x + 4 * 9)); bv_float4 m12 = bv_float4_add(bv_float4_load(_x + 4 * 6), bv_float4_load(_x + 4 * 10)); bv_float4 m13 = bv_float4_add(bv_float4_load(_x + 4 * 7), bv_float4_load(_x + 4 * 11)); bv_float4 m20 = bv_float4_sub(bv_float4_load(_x + 4 * 8), bv_float4_load(_x + 4 * 4)); bv_float4 m21 = bv_float4_sub(bv_float4_load(_x + 4 * 9), bv_float4_load(_x + 4 * 5)); bv_float4 m22 = bv_float4_sub(bv_float4_load(_x + 4 * 10), bv_float4_load(_x + 4 * 6)); bv_float4 m23 = bv_float4_sub(bv_float4_load(_x + 4 * 11), bv_float4_load(_x + 4 * 7)); bv_float4 m30 = bv_float4_sub(bv_float4_load(_x + 4 * 12), bv_float4_load(_x + 4 * 4)); bv_float4 m31 = bv_float4_sub(bv_float4_load(_x + 4 * 13), bv_float4_load(_x + 4 * 5)); bv_float4 m32 = bv_float4_sub(bv_float4_load(_x + 4 * 14), bv_float4_load(_x + 4 * 6)); bv_float4 m33 = bv_float4_sub(bv_float4_load(_x + 4 * 15), bv_float4_load(_x + 4 * 7)); bv_float4_store(bv_float4_sub(m00, m02), _y + step * 0); bv_float4_store(bv_float4_add(m01, m02), _y + step * 1); bv_float4_store(bv_float4_sub(m02, m01), _y + step * 2); bv_float4_store(bv_float4_sub(m03, m01), _y + step * 3); bv_float4_store(bv_float4_sub(m10, m12), _y + step * 4); bv_float4_store(bv_float4_add(m11, m12), _y + step * 5); bv_float4_store(bv_float4_sub(m12, m11), _y + step * 6); bv_float4_store(bv_float4_sub(m13, m11), _y + step * 7); bv_float4_store(bv_float4_sub(m20, m22), _y + step * 8); bv_float4_store(bv_float4_add(m21, m22), _y + step * 9); bv_float4_store(bv_float4_sub(m22, m21), _y + step * 10); bv_float4_store(bv_float4_sub(m23, m21), _y + step * 11); bv_float4_store(bv_float4_sub(m30, m32), _y + step * 12); bv_float4_store(bv_float4_add(m31, m32), _y + step * 13); bv_float4_store(bv_float4_sub(m32, m31), _y + step * 14); bv_float4_store(bv_float4_sub(m33, m31), _y + step * 15); } void bcnn_conv3x3_convert_dst(const float *src_z, float *dst_block, size_t step) { float *yy = dst_block; float *x = (float *)src_z; bv_float4 m00 = 
bv_float4_add(bv_float4_add(bv_float4_load(x + step * 0), bv_float4_load(x + step * 4)), bv_float4_load(x + step * 8)); bv_float4 m01 = bv_float4_add(bv_float4_add(bv_float4_load(x + step * 1), bv_float4_load(x + step * 5)), bv_float4_load(x + step * 9)); bv_float4 m02 = bv_float4_add(bv_float4_add(bv_float4_load(x + step * 2), bv_float4_load(x + step * 6)), bv_float4_load(x + step * 10)); bv_float4 m03 = bv_float4_add(bv_float4_add(bv_float4_load(x + step * 3), bv_float4_load(x + step * 7)), bv_float4_load(x + step * 11)); bv_float4 m10 = bv_float4_add(bv_float4_sub(bv_float4_load(x + step * 4), bv_float4_load(x + step * 8)), bv_float4_load(x + step * 12)); bv_float4 m11 = bv_float4_add(bv_float4_sub(bv_float4_load(x + step * 5), bv_float4_load(x + step * 9)), bv_float4_load(x + step * 13)); bv_float4 m12 = bv_float4_add(bv_float4_sub(bv_float4_load(x + step * 6), bv_float4_load(x + step * 10)), bv_float4_load(x + step * 14)); bv_float4 m13 = bv_float4_add(bv_float4_sub(bv_float4_load(x + step * 7), bv_float4_load(x + step * 11)), bv_float4_load(x + step * 15)); bv_float4_store(bv_float4_add(bv_float4_add(m00, m01), m02), yy + 4 * 0); bv_float4_store(bv_float4_add(bv_float4_sub(m01, m02), m03), yy + 4 * 1); bv_float4_store(bv_float4_add(bv_float4_add(m10, m11), m12), yy + 4 * 2); bv_float4_store(bv_float4_add(bv_float4_sub(m11, m12), m13), yy + 4 * 3); } void bcnn_conv3x3_convert_weights(const float *src_weights, float *dst_weights, int src_channels, int dst_channels) { float weight[CONV3x3_BLOCK_UNIT * CONV3x3_BLOCK_UNIT]; int srcDepthD4 = bh_div_up(src_channels, 4); int dstDepthD4 = bh_div_up(dst_channels, 4); for (int dz = 0; dz < dst_channels; ++dz) { int dz_4 = dz / CONV3x3_BLOCK_UNIT; int mx = dz % CONV3x3_BLOCK_UNIT; float *dst_dz = dst_weights + dz_4 * srcDepthD4 * 16; for (int sz = 0; sz < src_channels; ++sz) { int sz_4 = sz / CONV3x3_BLOCK_UNIT; int my = sz % CONV3x3_BLOCK_UNIT; float *dst_sz = dst_dz + sz_4 * CONV3x3_BLOCK_UNIT * CONV3x3_BLOCK_UNIT; 
float *src = (float *)src_weights + 9 * (sz + dz * src_channels); float *dst = weight; float *k = (float *)src; float m00 = k[0]; float m01 = k[1]; float m02 = k[2]; float m10 = 0.500000 * k[0] + 0.500000 * k[3] + 0.500000 * k[6]; float m11 = 0.500000 * k[1] + 0.500000 * k[4] + 0.500000 * k[7]; float m12 = 0.500000 * k[2] + 0.500000 * k[5] + 0.500000 * k[8]; float m20 = 0.500000 * k[0] + -0.500000 * k[3] + 0.500000 * k[6]; float m21 = 0.500000 * k[1] + -0.500000 * k[4] + 0.500000 * k[7]; float m22 = 0.500000 * k[2] + -0.500000 * k[5] + 0.500000 * k[8]; float m30 = 0 + k[6]; float m31 = 0 + k[7]; float m32 = 0 + k[8]; k = dst; k[0] = m00; k[1] = 0.500000 * m00 + 0.500000 * m01 + 0.500000 * m02; k[2] = 0.500000 * m00 + -0.500000 * m01 + 0.500000 * m02; k[3] = 0 + m02; k[4] = m10; k[5] = 0.500000 * m10 + 0.500000 * m11 + 0.500000 * m12; k[6] = 0.500000 * m10 + -0.500000 * m11 + 0.500000 * m12; k[7] = 0 + m12; k[8] = m20; k[9] = 0.500000 * m20 + 0.500000 * m21 + 0.500000 * m22; k[10] = 0.500000 * m20 + -0.500000 * m21 + 0.500000 * m22; k[11] = 0 + m22; k[12] = m30; k[13] = 0.500000 * m30 + 0.500000 * m31 + 0.500000 * m32; k[14] = 0.500000 * m30 + -0.500000 * m31 + 0.500000 * m32; k[15] = 0 + m32; for (int ki = 0; ki < CONV3x3_BLOCK_UNIT * CONV3x3_BLOCK_UNIT; ++ki) { float *dst_i = dst_sz + ki * srcDepthD4 * dstDepthD4 * 16; dst_i[4 * my + mx] = weight[ki]; } } } } //#if defined(BCNN_USE_AVX) static void bcnn_gemm_kernel4x4(float *dst, const float *src, const float *weight, size_t src_depth_quad, size_t dst_step, size_t dst_depth_quad, size_t width, size_t weight_depth_offset) { #if defined(BCNN_USE_AVX) int src_depth_step = 4 * width; int wC4 = width / 4; int w4End = wC4 * 4; for (int dz = 0; dz < dst_depth_quad; ++dz) { float *dst_z = dst + dz * dst_step; const float *weight_dz = weight + dz * (src_depth_quad * 16 + weight_depth_offset); for (int dx = 0; dx < wC4; ++dx) { float *dst_x = dst_z + dx * 4 * 4; __m128 dst0 = _mm_set1_ps(0.0f); __m128 dst1 = 
_mm_set1_ps(0.0f); __m128 dst2 = _mm_set1_ps(0.0f); __m128 dst3 = _mm_set1_ps(0.0f); const float *src_dx = src + 4 * dx * 4; for (int sz = 0; sz < src_depth_quad; ++sz) { const float *src_z = src_dx + sz * src_depth_step; const float *weight_z = weight_dz + sz * 16; __m128 w0 = _mm_loadu_ps(weight_z + 4 * 0); __m128 w1 = _mm_loadu_ps(weight_z + 4 * 1); __m128 w2 = _mm_loadu_ps(weight_z + 4 * 2); __m128 w3 = _mm_loadu_ps(weight_z + 4 * 3); #define COMPUTE(v) \ { \ __m128 srcValue = _mm_loadu_ps(src_z + 4 * v); \ __m128 s0 = _mm_set1_ps(srcValue[0]); \ __m128 s1 = _mm_set1_ps(srcValue[1]); \ __m128 s2 = _mm_set1_ps(srcValue[2]); \ __m128 s3 = _mm_set1_ps(srcValue[3]); \ __m128 sw0 = _mm_mul_ps(s0, w0); \ __m128 sw1 = _mm_mul_ps(s1, w1); \ __m128 sw2 = _mm_mul_ps(s2, w2); \ __m128 sw3 = _mm_mul_ps(s3, w3); \ dst##v = _mm_add_ps(dst##v, sw0); \ dst##v = _mm_add_ps(dst##v, sw1); \ dst##v = _mm_add_ps(dst##v, sw2); \ dst##v = _mm_add_ps(dst##v, sw3); \ } COMPUTE(0); COMPUTE(1); COMPUTE(2); COMPUTE(3); } _mm_store_ps(dst_x + 4 * 0, dst0); _mm_store_ps(dst_x + 4 * 1, dst1); _mm_store_ps(dst_x + 4 * 2, dst2); _mm_store_ps(dst_x + 4 * 3, dst3); } for (int dx = w4End; dx < width; ++dx) { float *dst_x = dst_z + dx * 4; __m128 dstValue = _mm_set1_ps(0.0f); const float *src_dx = src + 4 * dx; for (int sz = 0; sz < src_depth_quad; ++sz) { const float *src_z = src_dx + sz * src_depth_step; const float *weight_z = weight_dz + sz * 16; __m128 w0 = _mm_loadu_ps(weight_z + 4 * 0); __m128 w1 = _mm_loadu_ps(weight_z + 4 * 1); __m128 w2 = _mm_loadu_ps(weight_z + 4 * 2); __m128 w3 = _mm_loadu_ps(weight_z + 4 * 3); __m128 srcValue = _mm_loadu_ps(src_z); __m128 s0 = _mm_set1_ps(srcValue[0]); __m128 s1 = _mm_set1_ps(srcValue[1]); __m128 s2 = _mm_set1_ps(srcValue[2]); __m128 s3 = _mm_set1_ps(srcValue[3]); __m128 sw0 = _mm_mul_ps(s0, w0); __m128 sw1 = _mm_mul_ps(s1, w1); __m128 sw2 = _mm_mul_ps(s2, w2); __m128 sw3 = _mm_mul_ps(s3, w3); dstValue = _mm_add_ps(dstValue, sw0); dstValue = 
_mm_add_ps(dstValue, sw1); dstValue = _mm_add_ps(dstValue, sw2); dstValue = _mm_add_ps(dstValue, sw3); } _mm_store_ps(dst_x, dstValue); } } #elif defined(BCNN_USE_NEON) #if defined(__aarch64__) int src_z_step = 4 * width; int weight_z_step = 16 * src_depth_quad + weight_depth_offset; int x13 = src_depth_quad; int w8 = width / 8; int w8tail = (w8 * 8) / 4; int w4 = width / 4; int w4tail = w4 * 4; for (int dz = 0; dz < dst_depth_quad; ++dz) { float *dst_z = dst + dz * dst_step; const float *weight_dz = weight + dz * weight_z_step; for (int dx = 0; dx < w8; ++dx) { const float *src_dx = src + dx * 32; float *dst_x = dst_z + dx * 32; float32x4_t dst0 = vdupq_n_f32(0.0f); float32x4_t dst1 = vdupq_n_f32(0.0f); float32x4_t dst2 = vdupq_n_f32(0.0f); float32x4_t dst3 = vdupq_n_f32(0.0f); float32x4_t dst4 = vdupq_n_f32(0.0f); float32x4_t dst5 = vdupq_n_f32(0.0f); float32x4_t dst6 = vdupq_n_f32(0.0f); float32x4_t dst7 = vdupq_n_f32(0.0f); float32x4_t w0 = vld1q_f32(weight_dz + 4 * 0); float32x4_t w1 = vld1q_f32(weight_dz + 4 * 1); float32x4_t w2 = vld1q_f32(weight_dz + 4 * 2); float32x4_t w3 = vld1q_f32(weight_dz + 4 * 3); // dst0 / dst1 float32x4_t v0 = vld1q_f32(src_dx); dst0 = vmulq_n_f32(w0, v0[0]); float32x4_t v1 = vld1q_f32(src_dx + 4); dst0 = vfmaq_laneq_f32(dst0, w1, v0, 1); dst1 = vmulq_n_f32(w0, v1[0]); dst0 = vfmaq_laneq_f32(dst0, w2, v0, 2); dst1 = vfmaq_laneq_f32(dst1, w1, v1, 1); dst0 = vfmaq_laneq_f32(dst0, w3, v0, 3); dst1 = vfmaq_laneq_f32(dst1, w2, v1, 2); dst1 = vfmaq_laneq_f32(dst1, w3, v1, 3); // dst2 / dst3 v0 = vld1q_f32(src_dx + 8); dst2 = vmulq_n_f32(w0, v0[0]); v1 = vld1q_f32(src_dx + 12); dst2 = vfmaq_laneq_f32(dst2, w1, v0, 1); dst3 = vmulq_n_f32(w0, v1[0]); dst2 = vfmaq_laneq_f32(dst2, w2, v0, 2); dst3 = vfmaq_laneq_f32(dst3, w1, v1, 1); dst2 = vfmaq_laneq_f32(dst2, w3, v0, 3); dst3 = vfmaq_laneq_f32(dst3, w2, v1, 2); dst3 = vfmaq_laneq_f32(dst3, w3, v1, 3); // dst4 / dst5 v0 = vld1q_f32(src_dx + 16); dst4 = vmulq_n_f32(w0, v0[0]); v1 = 
vld1q_f32(src_dx + 20); dst4 = vfmaq_laneq_f32(dst4, w1, v0, 1); dst5 = vmulq_n_f32(w0, v1[0]); dst4 = vfmaq_laneq_f32(dst4, w2, v0, 2); dst5 = vfmaq_laneq_f32(dst5, w1, v1, 1); dst4 = vfmaq_laneq_f32(dst4, w3, v0, 3); dst5 = vfmaq_laneq_f32(dst5, w2, v1, 2); dst5 = vfmaq_laneq_f32(dst5, w3, v1, 3); // dst6 / dst7 v0 = vld1q_f32(src_dx + 24); dst6 = vmulq_n_f32(w0, v0[0]); v1 = vld1q_f32(src_dx + 28); dst6 = vfmaq_laneq_f32(dst6, w1, v0, 1); dst7 = vmulq_n_f32(w0, v1[0]); dst6 = vfmaq_laneq_f32(dst6, w2, v0, 2); dst7 = vfmaq_laneq_f32(dst7, w1, v1, 1); dst6 = vfmaq_laneq_f32(dst6, w3, v0, 3); dst7 = vfmaq_laneq_f32(dst7, w2, v1, 2); dst7 = vfmaq_laneq_f32(dst7, w3, v1, 3); for (int sz = 1; sz < src_depth_quad; ++sz) { const float *src_z = src_dx + sz * src_z_step; const float *weight_z = weight_dz + sz * 16; float32x4_t w0 = vld1q_f32(weight_z + 4 * 0); float32x4_t w1 = vld1q_f32(weight_z + 4 * 1); float32x4_t w2 = vld1q_f32(weight_z + 4 * 2); float32x4_t w3 = vld1q_f32(weight_z + 4 * 3); // dst0 / dst1 float32x4_t v0 = vld1q_f32(src_z); dst0 = vfmaq_laneq_f32(dst0, w0, v0, 0); float32x4_t v1 = vld1q_f32(src_z + 4); dst0 = vfmaq_laneq_f32(dst0, w1, v0, 1); dst1 = vfmaq_laneq_f32(dst1, w0, v1, 0); dst0 = vfmaq_laneq_f32(dst0, w2, v0, 2); dst1 = vfmaq_laneq_f32(dst1, w1, v1, 1); dst0 = vfmaq_laneq_f32(dst0, w3, v0, 3); dst1 = vfmaq_laneq_f32(dst1, w2, v1, 2); dst1 = vfmaq_laneq_f32(dst1, w3, v1, 3); // dst2 / dst3 v0 = vld1q_f32(src_z + 8); dst2 = vfmaq_laneq_f32(dst2, w0, v0, 0); v1 = vld1q_f32(src_z + 12); dst2 = vfmaq_laneq_f32(dst2, w1, v0, 1); dst3 = vfmaq_laneq_f32(dst3, w0, v1, 0); dst2 = vfmaq_laneq_f32(dst2, w2, v0, 2); dst3 = vfmaq_laneq_f32(dst3, w1, v1, 1); dst2 = vfmaq_laneq_f32(dst2, w3, v0, 3); dst3 = vfmaq_laneq_f32(dst3, w2, v1, 2); dst3 = vfmaq_laneq_f32(dst3, w3, v1, 3); // dst4 / dst5 v0 = vld1q_f32(src_z + 16); dst4 = vfmaq_laneq_f32(dst4, w0, v0, 0); v1 = vld1q_f32(src_z + 20); dst4 = vfmaq_laneq_f32(dst4, w1, v0, 1); dst5 = 
vfmaq_laneq_f32(dst5, w0, v1, 0); dst4 = vfmaq_laneq_f32(dst4, w2, v0, 2); dst5 = vfmaq_laneq_f32(dst5, w1, v1, 1); dst4 = vfmaq_laneq_f32(dst4, w3, v0, 3); dst5 = vfmaq_laneq_f32(dst5, w2, v1, 2); dst5 = vfmaq_laneq_f32(dst5, w3, v1, 3); // dst6 / dst7 v0 = vld1q_f32(src_z + 24); dst6 = vfmaq_laneq_f32(dst6, w0, v0, 0); v1 = vld1q_f32(src_z + 28); dst6 = vfmaq_laneq_f32(dst6, w1, v0, 1); dst7 = vfmaq_laneq_f32(dst7, w0, v1, 0); dst6 = vfmaq_laneq_f32(dst6, w2, v0, 2); dst7 = vfmaq_laneq_f32(dst7, w1, v1, 1); dst6 = vfmaq_laneq_f32(dst6, w3, v0, 3); dst7 = vfmaq_laneq_f32(dst7, w2, v1, 2); dst7 = vfmaq_laneq_f32(dst7, w3, v1, 3); } vst1q_f32(dst_x + 4 * 0, dst0); vst1q_f32(dst_x + 4 * 1, dst1); vst1q_f32(dst_x + 4 * 2, dst2); vst1q_f32(dst_x + 4 * 3, dst3); vst1q_f32(dst_x + 4 * 4, dst4); vst1q_f32(dst_x + 4 * 5, dst5); vst1q_f32(dst_x + 4 * 6, dst6); vst1q_f32(dst_x + 4 * 7, dst7); } for (int dx = w8tail; dx < w4; ++dx) { const float *src_dx = src + dx * 16; float *dst_x = dst_z + dx * 16; float32x4_t dst0 = vdupq_n_f32(0.0f); float32x4_t dst1 = vdupq_n_f32(0.0f); float32x4_t dst2 = vdupq_n_f32(0.0f); float32x4_t dst3 = vdupq_n_f32(0.0f); float32x4_t w0 = vld1q_f32(weight_dz + 4 * 0); float32x4_t w1 = vld1q_f32(weight_dz + 4 * 1); float32x4_t w2 = vld1q_f32(weight_dz + 4 * 2); float32x4_t w3 = vld1q_f32(weight_dz + 4 * 3); // start // dst0 / dst1 float32x4_t v0 = vld1q_f32(src_dx); dst0 = vmulq_n_f32(w0, v0[0]); float32x4_t v1 = vld1q_f32(src_dx + 4); dst0 = vfmaq_laneq_f32(dst0, w1, v0, 1); dst1 = vmulq_n_f32(w0, v1[0]); dst0 = vfmaq_laneq_f32(dst0, w2, v0, 2); dst1 = vfmaq_laneq_f32(dst1, w1, v1, 1); dst0 = vfmaq_laneq_f32(dst0, w3, v0, 3); dst1 = vfmaq_laneq_f32(dst1, w2, v1, 2); dst1 = vfmaq_laneq_f32(dst1, w3, v1, 3); // dst2 / dst3 v0 = vld1q_f32(src_dx + 8); dst2 = vmulq_n_f32(w0, v0[0]); v1 = vld1q_f32(src_dx + 12); dst2 = vfmaq_laneq_f32(dst2, w1, v0, 1); dst3 = vmulq_n_f32(w0, v1[0]); dst2 = vfmaq_laneq_f32(dst2, w2, v0, 2); dst3 = vfmaq_laneq_f32(dst3, 
w1, v1, 1); dst2 = vfmaq_laneq_f32(dst2, w3, v0, 3); dst3 = vfmaq_laneq_f32(dst3, w2, v1, 2); dst3 = vfmaq_laneq_f32(dst3, w3, v1, 3); for (int sz = 1; sz < src_depth_quad; ++sz) { const float *src_z = src_dx + sz * src_z_step; const float *weight_z = weight_dz + sz * 16; float32x4_t w0 = vld1q_f32(weight_z + 4 * 0); float32x4_t w1 = vld1q_f32(weight_z + 4 * 1); float32x4_t w2 = vld1q_f32(weight_z + 4 * 2); float32x4_t w3 = vld1q_f32(weight_z + 4 * 3); // dst0 / dst1 float32x4_t v0 = vld1q_f32(src_z); dst0 = vfmaq_laneq_f32(dst0, w0, v0, 0); float32x4_t v1 = vld1q_f32(src_z + 4); dst0 = vfmaq_laneq_f32(dst0, w1, v0, 1); dst1 = vfmaq_laneq_f32(dst1, w0, v1, 0); dst0 = vfmaq_laneq_f32(dst0, w2, v0, 2); dst1 = vfmaq_laneq_f32(dst1, w1, v1, 1); dst0 = vfmaq_laneq_f32(dst0, w3, v0, 3); dst1 = vfmaq_laneq_f32(dst1, w2, v1, 2); dst1 = vfmaq_laneq_f32(dst1, w3, v1, 3); // dst2 / dst3 v0 = vld1q_f32(src_z + 8); dst2 = vfmaq_laneq_f32(dst2, w0, v0, 0); v1 = vld1q_f32(src_z + 12); dst2 = vfmaq_laneq_f32(dst2, w1, v0, 1); dst3 = vfmaq_laneq_f32(dst3, w0, v1, 0); dst2 = vfmaq_laneq_f32(dst2, w2, v0, 2); dst3 = vfmaq_laneq_f32(dst3, w1, v1, 1); dst2 = vfmaq_laneq_f32(dst2, w3, v0, 3); dst3 = vfmaq_laneq_f32(dst3, w2, v1, 2); dst3 = vfmaq_laneq_f32(dst3, w3, v1, 3); } vst1q_f32(dst_x + 4 * 0, dst0); vst1q_f32(dst_x + 4 * 1, dst1); vst1q_f32(dst_x + 4 * 2, dst2); vst1q_f32(dst_x + 4 * 3, dst3); } for (int dx = w4tail; dx < width; ++dx) { float *dst_x = dst_z + dx * 4; const float *src_dx = src + dx * 4; float32x4_t dst0 = vdupq_n_f32(0.0f); float32x4_t dst1 = vdupq_n_f32(0.0f); float32x4_t w0 = vld1q_f32(weight_dz + 4 * 0); float32x4_t w1 = vld1q_f32(weight_dz + 4 * 1); float32x4_t w2 = vld1q_f32(weight_dz + 4 * 2); float32x4_t w3 = vld1q_f32(weight_dz + 4 * 3); float32x4_t v0 = vld1q_f32(src_dx); dst0 = vmulq_n_f32(w0, v0[0]); dst1 = vmulq_n_f32(w1, v0[1]); for (int sz = 1; sz < src_depth_quad; ++sz) { dst0 = vfmaq_laneq_f32(dst0, w2, v0, 2); dst1 = vfmaq_laneq_f32(dst1, w3, v0, 
3); const float *src_z = src_dx + sz * src_z_step; const float *weight_z = weight_dz + sz * 16; w0 = vld1q_f32(weight_z + 4 * 0); w1 = vld1q_f32(weight_z + 4 * 1); w2 = vld1q_f32(weight_z + 4 * 2); w3 = vld1q_f32(weight_z + 4 * 3); v0 = vld1q_f32(src_z); dst0 = vfmaq_laneq_f32(dst0, w0, v0, 0); dst1 = vfmaq_laneq_f32(dst1, w1, v0, 1); } dst0 = vfmaq_laneq_f32(dst0, w2, v0, 2); dst1 = vfmaq_laneq_f32(dst1, w3, v0, 3); dst0 = vaddq_f32(dst0, dst1); vst1q_f32(dst_x, dst0); } } #else // TODO int src_depth_step = 4 * width; int wC4 = width / 4; int w4End = wC4 * 4; for (int dz = 0; dz < dst_depth_quad; ++dz) { float *dst_z = dst + dz * dst_step; float *weight_dz = weight + dz * (src_depth_quad * 16 + weight_depth_offset); for (int dx = 0; dx < wC4; ++dx) { float *dst_x = dst_z + dx * 4 * 4; float32x4_t dst0 = vdupq_n_f32(0.0f); float32x4_t dst1 = vdupq_n_f32(0.0f); float32x4_t dst2 = vdupq_n_f32(0.0f); float32x4_t dst3 = vdupq_n_f32(0.0f); const float *src_dx = src + 4 * dx * 4; for (int sz = 0; sz < src_depth_quad; ++sz) { const float *src_z = src_dx + sz * src_depth_step; const float *weight_z = weight_dz + sz * 16; float32x4_t w0 = vld1q_f32(weight_z + 4 * 0); float32x4_t w1 = vld1q_f32(weight_z + 4 * 1); float32x4_t w2 = vld1q_f32(weight_z + 4 * 2); float32x4_t w3 = vld1q_f32(weight_z + 4 * 3); #define COMPUTE(v) \ { \ float32x4_t srcValue = vld1q_f32(src_z + 4 * v); \ float32x4_t s0 = vdupq_n_f32(srcValue[0]); \ float32x4_t s1 = vdupq_n_f32(srcValue[1]); \ float32x4_t s2 = vdupq_n_f32(srcValue[2]); \ float32x4_t s3 = vdupq_n_f32(srcValue[3]); \ float32x4_t sw0 = vmulq_f32(s0, w0); \ float32x4_t sw1 = vmulq_f32(s1, w1); \ float32x4_t sw2 = vmulq_f32(s2, w2); \ float32x4_t sw3 = vmulq_f32(s3, w3); \ dst##v = vaddq_f32(dst##v, sw0); \ dst##v = vaddq_f32(dst##v, sw1); \ dst##v = vaddq_f32(dst##v, sw2); \ dst##v = vaddq_f32(dst##v, sw3); \ } COMPUTE(0); COMPUTE(1); COMPUTE(2); COMPUTE(3); } vst1q_f32(dst_x + 4 * 0, dst0); vst1q_f32(dst_x + 4 * 1, dst1); vst1q_f32(dst_x 
+ 4 * 2, dst2); vst1q_f32(dst_x + 4 * 3, dst3); } for (int dx = w4End; dx < width; ++dx) { float *dst_x = dst_z + dx * 4; float32x4_t dstValue = vdupq_n_f32(0.0f); const float *src_dx = src + 4 * dx; for (int sz = 0; sz < src_depth_quad; ++sz) { const float *src_z = src_dx + sz * src_depth_step; const float *weight_z = weight_dz + sz * 16; float32x4_t w0 = vld1q_f32(weight_z + 4 * 0); float32x4_t w1 = vld1q_f32(weight_z + 4 * 1); float32x4_t w2 = vld1q_f32(weight_z + 4 * 2); float32x4_t w3 = vld1q_f32(weight_z + 4 * 3); float32x4_t srcValue = vld1q_f32(src_z); float32x4_t s0 = vdupq_n_f32(srcValue[0]); float32x4_t s1 = vdupq_n_f32(srcValue[1]); float32x4_t s2 = vdupq_n_f32(srcValue[2]); float32x4_t s3 = vdupq_n_f32(srcValue[3]); float32x4_t sw0 = vmulq_f32(s0, w0); float32x4_t sw1 = vmulq_f32(s1, w1); float32x4_t sw2 = vmulq_f32(s2, w2); float32x4_t sw3 = vmulq_f32(s3, w3); dstValue = vaddq_f32(dstValue, sw0); dstValue = vaddq_f32(dstValue, sw1); dstValue = vaddq_f32(dstValue, sw2); dstValue = vaddq_f32(dstValue, sw3); } vst1q_f32(dst_x, dstValue); } } #endif // __aarch64__ #else int dx, sz, fx, fy, dz; size_t src_depth_step = 4 * width; for (dz = 0; dz < dst_depth_quad; ++dz) { float *dst_z = dst + dz * dst_step; float *weight_dz = (float *)weight + dz * (src_depth_quad * 16 + weight_depth_offset); for (dx = 0; dx < width; ++dx) { float *dst_x = dst_z + dx * 4; dst_x[0] = 0.0f; dst_x[1] = 0.0f; dst_x[2] = 0.0f; dst_x[3] = 0.0f; const float *src_dx = src + 4 * dx; for (sz = 0; sz < src_depth_quad; ++sz) { const float *src_z = src_dx + sz * src_depth_step; const float *weight_z = weight_dz + sz * 16; for (int i = 0; i < 4; ++i) { for (int j = 0; j < 4; ++j) { dst_x[j] += src_z[i] * weight_z[4 * i + j]; } } } } } #endif } static void bcnn_gemm_kernel4x4_tiled(float *dst_batch, const float *src, const float *weight, size_t src_depth_quad, size_t dst_step, size_t dst_depth_quad, size_t weight_depth_offset) { bcnn_gemm_kernel4x4(dst_batch, src, weight, src_depth_quad, 
dst_step, dst_depth_quad, CONV_TILED, weight_depth_offset); } //#endif void bcnn_conv3x3s1_kernel(float *src, int src_w, int src_h, int src_c, float *dst, int dst_w, int dst_h, int dst_c, int batch_size, int pad, float *weights, float *scales, float *biases, float *slopes, float *workspace, int workspace_sz, int post_func, int num_threads) { int src_c4 = bh_div_up(src_c, 4); int dst_c4 = bh_div_up(dst_c, 4); int dst_w2 = bh_div_up(dst_w, 2); int dst_h2 = bh_div_up(dst_h, 2); int workspace_thread_stride = workspace_sz / num_threads; bcnn_post_conv_nc4hw4_func post_function = bcnn_post_conv_nc4hw4_lut[post_func]; for (int b = 0; b < batch_size; ++b) { float *src_batch = src + src_w * src_h * src_c4 * 4 * b; float *dst_batch = dst + dst_w * dst_h * dst_c4 * 4 * b; int dst_area4 = dst_h2 * dst_w2; int num_tiles = bh_div_up(dst_area4, CONV_TILED); num_threads = bh_min(num_threads, num_tiles); float *weight = weights; float *bias = biases; #pragma omp parallel for num_threads(num_threads) for (int thread_id = 0; thread_id < num_threads; thread_id++) { float *src_thread = workspace + thread_id * workspace_thread_stride; /*fprintf(stderr, "num_threads %d stride %d\n", num_threads, workspace_thread_stride);*/ for (int tid = (int)thread_id; tid < num_tiles; tid += num_threads) { int x_tile = (int)tid * CONV_TILED; int xr = dst_area4 - x_tile; int xc = xr > CONV_TILED ? 
CONV_TILED : xr; float *dst_block = src_thread + xc * CONV3x3_SRC_BLOCK * (src_c4 + dst_c4); float *dst_thread = src_thread + xc * CONV3x3_SRC_BLOCK * src_c4; // bh_timer t = {0}; // bh_timer_start(&t); for (int xi = 0; xi < xc; ++xi) { int index = x_tile + xi; float *dst_xi = src_thread + 4 * xi; int w_idx = index % dst_w2; int h_idx = index / dst_w2; int src_x = w_idx * 2 - pad; int src_y = h_idx * 2 - pad; int sy = bh_max(0, src_y) - src_y; int ey = bh_min(src_y + 4, src_h) - src_y; int sx = bh_max(0, src_x) - src_x; int ex = bh_min(src_x + 4, src_w) - src_x; float *src_start = src_batch + (src_x + src_y * src_w) * 4; for (int z = 0; z < src_c4; ++z) { memset(dst_block, 0, CONV3x3_SRC_BLOCK * sizeof(float)); float *dst_start = dst_xi + z * 4 * xc; float *src_z = src_start + z * 4 * src_w * src_h; if (ex > sx) { // Extract One Block for (int yy = sy; yy < ey; ++yy) { float *dst_yy = dst_block + yy * 16; float *src_yy = src_z + 4 * src_w * yy; memcpy(dst_yy + 4 * sx, src_yy + sx * 4, 4 * (ex - sx) * sizeof(float)); } } // Transform bcnn_conv3x3_convert_src(dst_block, dst_start, 4 * xc * src_c4); } } // bh_timer_stop(&t); // fprintf(stderr, "conv3x3 src %f\n", bh_timer_get_msec(&t)); // bh_timer_start(&t); if (xc == CONV_TILED) { for (int i = 0; i < CONV3x3_BLOCK_UNIT * CONV3x3_BLOCK_UNIT; ++i) { bcnn_gemm_kernel4x4_tiled( dst_thread + i * dst_c4 * 4 * xc, src_thread + i * src_c4 * 4 * xc, weight + i * 16 * src_c4 * dst_c4, src_c4, xc * 4, dst_c4, 0); } } else { for (int i = 0; i < CONV3x3_BLOCK_UNIT * CONV3x3_BLOCK_UNIT; ++i) { bcnn_gemm_kernel4x4(dst_thread + (i * dst_c4) * xc * 4, src_thread + i * src_c4 * 4 * xc, weight + (i * dst_c4) * src_c4 * 16, src_c4, xc * 4, dst_c4, xc, 0); } } // bh_timer_stop(&t); // fprintf(stderr, "conv3x3 gemm %f\n", bh_timer_get_msec(&t)); // dst for (int xi = 0; xi < xc; ++xi) { int index = x_tile + xi; float *src_xi = dst_thread + 4 * xi; int w_idx = index % dst_w2; int h_idx = index / dst_w2; int dst_x = w_idx * 2; int dst_y = 
h_idx * 2; float *dst_batch_xi = dst_batch + 4 * (dst_x + dst_y * dst_w); for (int z = 0; z < dst_c4; ++z) { float *src_z = src_xi + z * xc * 4; float *dst_z = dst_batch_xi + z * dst_w * dst_h * 4; bcnn_conv3x3_convert_dst(src_z, dst_block, dst_c4 * 4 * xc); // bias addition and relu float *bias_z = bias + 4 * z; float *scales_z = scales + 4 * z; float *slopes_z = slopes + 4 * z; post_function(dst_block, dst_block, bias_z, scales_z, slopes_z, 4, 1); bv_float4_store(bv_float4_load(dst_block), dst_z); if (w_idx * 2 + 1 < dst_w) { bv_float4_store(bv_float4_load(dst_block + 4), dst_z + 4); } if (h_idx * 2 + 1 < dst_h) { bv_float4_store(bv_float4_load(dst_block + 8), dst_z + dst_w * 4); if (w_idx * 2 + 1 < dst_w) { bv_float4_store(bv_float4_load(dst_block + 12), dst_z + dst_w * 4 + 4); } } } } // bh_timer_stop(&t); // fprintf(stderr, "conv3x3 dst %f\n", bh_timer_get_msec(&t)); } } } return; } // General Matrix-Matrix multiplication // ldb n // _________ // | | // | B | k // | | // ________|______ | // | | | // m| | | m // | A | C | // |_______|_______| // lda k ldc n // // This implementation follows the Blis micro-kernel algorithm // Reference: BLIS: A Framework for Rapidly Instantiating BLAS Functionality static int equal(float a, float b) { const float EPSILON = 1e-5; if (fabsf(a - b) < EPSILON) { return 1; } return 0; } static void sgemm_nn_pack_MRxk8(int k, const float *A, int inc_row_A, int inc_col_A, float *buffer, int mr) { int j, a2 = inc_row_A, a3 = 2 * inc_row_A, a4 = 3 * inc_row_A; int a5 = 4 * inc_row_A; int a6 = 5 * inc_row_A; int a7 = 6 * inc_row_A; int a8 = 7 * inc_row_A; for (j = 0; j < k; ++j) { buffer[0] = A[0]; buffer[1] = A[a2]; buffer[2] = A[a3]; buffer[3] = A[a4]; buffer[4] = A[a5]; buffer[5] = A[a6]; buffer[6] = A[a7]; buffer[7] = A[a8]; A += 1; buffer += mr; } } static void sgemm_nn_pack_MRxk4(int k, const float *A, int inc_row_A, int inc_col_A, float *buffer, int mr) { int j, a2 = inc_row_A, a3 = 2 * inc_row_A, a4 = 3 * inc_row_A; for (j = 0; j 
< k; ++j) {
        /* Copy one column of the 4-row strip, then advance to the next. */
        buffer[0] = A[0];
        buffer[1] = A[a2];
        buffer[2] = A[a3];
        buffer[3] = A[a4];
        A += 1;
        buffer += mr;
    }
}

/* Pack an mc-by-kc panel of A into 'buffer' as contiguous mr-row micro-panels
 * so the GEMM micro-kernel can stream it with unit stride.  Full micro-panels
 * are packed in parallel; a trailing partial panel (mc % mr rows) is
 * zero-padded up to mr rows.  NOTE(review): inc_col_A is forwarded but the
 * MRxk helpers assume column stride 1 (A += 1) — presumably row-major input;
 * confirm against callers. */
static void sgemm_nn_pack_A(int mc, int kc, const float *A, int inc_row_A,
                            int inc_col_A, float *buffer, int mr,
                            int num_threads) {
    int mp = mc / mr;          /* number of full mr-row micro-panels */
    int _mr = mc % mr;         /* rows in the trailing partial panel */
    int tmp1 = kc * mr;        /* floats per packed micro-panel */
    int tmp2 = mr * inc_row_A; /* source stride between micro-panels */
#pragma omp parallel for num_threads(num_threads)
    for (int i = 0; i < mp; ++i) {
#ifdef BCNN_USE_NEON
#if (defined(__aarch64__))
        /* aarch64 build uses MR == 8 */
        sgemm_nn_pack_MRxk8(kc, A + tmp2 * i, inc_row_A, inc_col_A,
                            buffer + tmp1 * i, mr);
#else
        /* 32-bit NEON build uses MR == 4 */
        sgemm_nn_pack_MRxk4(kc, A + tmp2 * i, inc_row_A, inc_col_A,
                            buffer + tmp1 * i, mr);
#endif  // __aarch64__
#else
        sgemm_nn_pack_MRxk8(kc, A + tmp2 * i, inc_row_A, inc_col_A,
                            buffer + tmp1 * i, mr);
#endif
    }
    A += (tmp2 * mp);
    buffer += (tmp1 * mp);
    if (_mr > 0) {
        /* Trailing panel: copy the _mr live rows, zero-fill rows _mr..mr-1. */
        for (int j = 0; j < kc; ++j) {
            for (int i = 0; i < _mr; ++i) {
                buffer[i] = A[i * inc_row_A];
            }
            for (int i = _mr; i < mr; ++i) {
                buffer[i] = 0.0;
            }
            A += 1;
            buffer += mr;
        }
    }
}

/* Generic (strided, non-threaded) packing of A into mr-row micro-panels,
 * used by the transpose-capable sgemm path; out-of-range rows are zeroed. */
static void sgemm_pack_A(int mc, int kc, const float *A, int inc_row_A,
                         int inc_col_A, float *p, int mr) {
    int j, l, i0, i, nu;
    int mp = (mc + mr - 1) / mr;
    for (j = 0; j < kc; ++j) {
        for (l = 0; l < mp; ++l) {
            for (i0 = 0; i0 < mr; ++i0) {
                i = l * mr + i0;
                /* Destination index inside micro-panel l, column j. */
                nu = l * mr * kc + j * mr + i0;
                p[nu] = (i < mc) ? A[i * inc_row_A + j * inc_col_A] : 0;
            }
        }
    }
}

/* Generic packing of a kc-by-nc panel of B into nr-column micro-panels,
 * zero-padding columns beyond nc. */
static void sgemm_pack_B(int kc, int nc, const float *B, int inc_row_B,
                         int inc_col_B, float *p, int nr) {
    int i, l, j0;
    const int np = (nc + nr - 1) / nr;
    for (l = 0; l < np; ++l) {
        for (i = 0; i < kc; ++i) {
            for (j0 = 0; j0 < nr; ++j0) {
                int j = l * nr + j0;
                int nu = l * nr * kc + i * nr + j0;
                p[nu] = (j < nc) ?
B[i * inc_row_B + j * inc_col_B] : 0; } } } } static void sgemm_nn_pack_kxNR(int k, const float *B, int inc_row_B, int inc_col_B, float *buffer, int nr) { int i, j; for (i = 0; i < k; ++i) { for (j = 0; j < nr; ++j) { buffer[j] = B[j]; } B += inc_row_B; buffer += nr; } } static void sgemm_nn_pack_B(int kc, int nc, const float *B, int inc_row_B, int inc_col_B, float *buffer, int nr, int num_threads) { int np = nc / nr; int _nr = nc % nr; int tmp1 = kc * nr; #pragma omp parallel for num_threads(num_threads) for (int j = 0; j < np; ++j) { sgemm_nn_pack_kxNR(kc, B + nr * j, inc_row_B, inc_col_B, buffer + tmp1 * j, nr); } B += (nr * np); buffer += (tmp1 * np); if (_nr > 0) { for (int i = 0; i < kc; ++i) { for (int j = 0; j < _nr; ++j) { buffer[j] = B[j]; } for (int j = _nr; j < nr; ++j) { buffer[j] = 0.0; } buffer += nr; B += inc_row_B; } } } static void sgemm_ukernel(int kc, float alpha, const float *A, const float *B, float beta, float *C, int inc_row_C, int inc_col_C, int mr, int nr, float *AB0) { float AB[MR * NR] __attribute__((aligned(32))); #if (defined(BCNN_USE_AVX)) __m256 abv0 = _mm256_setzero_ps(); __m256 abv1 = _mm256_setzero_ps(); __m256 abv2 = _mm256_setzero_ps(); __m256 abv3 = _mm256_setzero_ps(); __m256 abv4 = _mm256_setzero_ps(); __m256 abv5 = _mm256_setzero_ps(); __m256 abv6 = _mm256_setzero_ps(); __m256 abv7 = _mm256_setzero_ps(); __m256 av; for (int l = 0; l < kc; ++l) { av = _mm256_load_ps(A); abv0 = _mm256_add_ps(abv0, _mm256_mul_ps(_mm256_broadcast_ss(B), av)); abv1 = _mm256_add_ps(abv1, _mm256_mul_ps(_mm256_broadcast_ss(B + 1), av)); abv2 = _mm256_add_ps(abv2, _mm256_mul_ps(_mm256_broadcast_ss(B + 2), av)); abv3 = _mm256_add_ps(abv3, _mm256_mul_ps(_mm256_broadcast_ss(B + 3), av)); abv4 = _mm256_add_ps(abv4, _mm256_mul_ps(_mm256_broadcast_ss(B + 4), av)); abv5 = _mm256_add_ps(abv5, _mm256_mul_ps(_mm256_broadcast_ss(B + 5), av)); abv6 = _mm256_add_ps(abv6, _mm256_mul_ps(_mm256_broadcast_ss(B + 6), av)); abv7 = _mm256_add_ps(abv7, 
_mm256_mul_ps(_mm256_broadcast_ss(B + 7), av)); A += mr; B += nr; } _mm256_store_ps(AB + 0, abv0); _mm256_store_ps(AB + 8, abv1); _mm256_store_ps(AB + 16, abv2); _mm256_store_ps(AB + 24, abv3); _mm256_store_ps(AB + 32, abv4); _mm256_store_ps(AB + 40, abv5); _mm256_store_ps(AB + 48, abv6); _mm256_store_ps(AB + 56, abv7); #elif (defined(BCNN_USE_NEON)) #if (defined(__aarch64__)) float32x4_t av0, av1, bv0, bv1; float32x4_t abv0, abv1, abv2, abv3, abv4, abv5, abv6, abv7, abv8, abv9, abv10, abv11, abv12, abv13, abv14, abv15; abv0 = vdupq_n_f32(0.0f); abv1 = vdupq_n_f32(0.0f); abv2 = vdupq_n_f32(0.0f); abv3 = vdupq_n_f32(0.0f); abv4 = vdupq_n_f32(0.0f); abv5 = vdupq_n_f32(0.0f); abv6 = vdupq_n_f32(0.0f); abv7 = vdupq_n_f32(0.0f); abv8 = vdupq_n_f32(0.0f); abv9 = vdupq_n_f32(0.0f); abv10 = vdupq_n_f32(0.0f); abv11 = vdupq_n_f32(0.0f); abv12 = vdupq_n_f32(0.0f); abv13 = vdupq_n_f32(0.0f); abv14 = vdupq_n_f32(0.0f); abv15 = vdupq_n_f32(0.0f); for (int p = 0; p < kc; ++p) { av0 = vld1q_f32(A); av1 = vld1q_f32(A + 4); bv0 = vld1q_f32(B); bv1 = vld1q_f32(B + 4); abv0 = vfmaq_laneq_f32(abv0, av0, bv0, 0); abv1 = vfmaq_laneq_f32(abv1, av1, bv0, 0); abv2 = vfmaq_laneq_f32(abv2, av0, bv0, 1); abv3 = vfmaq_laneq_f32(abv3, av1, bv0, 1); abv4 = vfmaq_laneq_f32(abv4, av0, bv0, 2); abv5 = vfmaq_laneq_f32(abv5, av1, bv0, 2); abv6 = vfmaq_laneq_f32(abv6, av0, bv0, 3); abv7 = vfmaq_laneq_f32(abv7, av1, bv0, 3); abv8 = vfmaq_laneq_f32(abv8, av0, bv1, 0); abv9 = vfmaq_laneq_f32(abv9, av1, bv1, 0); abv10 = vfmaq_laneq_f32(abv10, av0, bv1, 1); abv11 = vfmaq_laneq_f32(abv11, av1, bv1, 1); abv12 = vfmaq_laneq_f32(abv12, av0, bv1, 2); abv13 = vfmaq_laneq_f32(abv13, av1, bv1, 2); abv14 = vfmaq_laneq_f32(abv14, av0, bv1, 3); abv15 = vfmaq_laneq_f32(abv15, av1, bv1, 3); B += nr; A += mr; } vst1q_f32(AB, abv0); vst1q_f32(AB + 4, abv1); vst1q_f32(AB + 8, abv2); vst1q_f32(AB + 12, abv3); vst1q_f32(AB + 16, abv4); vst1q_f32(AB + 20, abv5); vst1q_f32(AB + 24, abv6); vst1q_f32(AB + 28, abv7); 
vst1q_f32(AB + 32, abv8); vst1q_f32(AB + 36, abv9); vst1q_f32(AB + 40, abv10); vst1q_f32(AB + 44, abv11); vst1q_f32(AB + 48, abv12); vst1q_f32(AB + 52, abv13); vst1q_f32(AB + 56, abv14); vst1q_f32(AB + 60, abv15); #else float32x4_t abv0 = vdupq_n_f32(0.0f); float32x4_t abv1 = vdupq_n_f32(0.0f); float32x4_t abv2 = vdupq_n_f32(0.0f); float32x4_t abv3 = vdupq_n_f32(0.0f); float32x4_t av; float32x4_t bv; float32x2_t bv01; float32x2_t bv23; for (int p = 0; p < kc; ++p) { av = vld1q_f32(A); bv = vld1q_f32(B); bv01 = vget_low_f32(bv); abv0 = vmlaq_lane_f32(abv0, av, bv01, 0); abv1 = vmlaq_lane_f32(abv1, av, bv01, 1); bv23 = vget_high_f32(bv); abv2 = vmlaq_lane_f32(abv2, av, bv23, 0); abv3 = vmlaq_lane_f32(abv3, av, bv23, 1); A += nr; B += nr; } vst1q_f32(AB + 0, abv0); vst1q_f32(AB + 4, abv1); vst1q_f32(AB + 8, abv2); vst1q_f32(AB + 12, abv3); #endif // __aarch64__ #else for (int i = 0; i < nr * nr; ++i) { AB[i] = 0.0f; } for (int l = 0; l < kc; ++l) { for (int j = 0; j < nr; ++j) { for (int i = 0; i < mr; ++i) { AB[i + j * mr] += A[i] * B[j]; } } A += mr; B += nr; } #endif if (equal(beta, 0.0)) { for (int j = 0; j < nr; ++j) { for (int i = 0; i < mr; ++i) { C[i * inc_row_C + j * inc_col_C] = 0.0; } } } else if (!equal(beta, 1.0)) { for (int j = 0; j < nr; ++j) { for (int i = 0; i < mr; ++i) { C[i * inc_row_C + j * inc_col_C] *= beta; } } } if (!equal(alpha, 1.0)) { for (int j = 0; j < nr; ++j) { for (int i = 0; i < mr; ++i) { C[i * inc_row_C + j * inc_col_C] += alpha * AB[i + j * mr]; } } } else { for (int j = 0; j < nr; ++j) { for (int i = 0; i < mr; ++i) { C[i * inc_row_C + j * inc_col_C] += AB[i + j * mr]; } } } } static void sgemm_axpy(int m, int n, float alpha, const float *X, int incRowX, int incColX, float *Y, int incRowY, int incColY) { int i, j; if (!equal(alpha, 1.0)) { for (j = 0; j < n; ++j) { for (i = 0; i < m; ++i) { Y[i * incRowY + j] += alpha * X[i + j * incColX]; } } } else { for (j = 0; j < n; ++j) { for (i = 0; i < m; ++i) { Y[i * incRowY + j] += X[i + 
j * incColX]; } } } } static void sgemm_scal(int m, int n, float alpha, float *X, int incRowX, int incColX) { int i, j; if (!equal(alpha, 0.0)) { for (i = 0; i < m; ++i) { for (j = 0; j < n; ++j) { X[i * incRowX + j] *= alpha; } } } else { for (i = 0; i < m; ++i) { for (j = 0; j < n; ++j) { X[i * incRowX + j] = 0.0; } } } } static void sgemm_mkernel(int mc, int nc, int kc, float alpha, float beta, float *C, int inc_row_C, int inc_col_C, float *buffer_A, float *buffer_B, float *buffer_AB, float *buffer_C, int mr, int nr, int num_threads) { int mp = (mc + mr - 1) / mr; int np = (nc + nr - 1) / nr; int _mr = mc % mr; int _nr = nc % nr; #pragma omp parallel for num_threads(num_threads) for (int j = 0; j < np; ++j) { int nrj = (j != np - 1 || _nr == 0) ? nr : _nr; #pragma omp parallel for num_threads(num_threads) for (int i = 0; i < mp; ++i) { int mri = (i != mp - 1 || _mr == 0) ? mr : _mr; if (mri == mr && nrj == nr) { sgemm_ukernel(kc, alpha, &buffer_A[i * kc * mr], &buffer_B[j * kc * nr], beta, &C[i * mr * inc_row_C + j * nr], inc_row_C, inc_col_C, mr, nr, buffer_AB); } else { float buf_c[MR * NR]; sgemm_ukernel(kc, alpha, &buffer_A[i * kc * mr], &buffer_B[j * kc * nr], 0.0, buf_c, 1, mr, mr, nr, buffer_AB); sgemm_scal(mri, nrj, beta, &C[i * mr * inc_row_C + j * nr], inc_row_C, inc_col_C); sgemm_axpy(mri, nrj, 1.0, buf_c, 1, mr, &C[i * mr * inc_row_C + j * nr], inc_row_C, inc_col_C); } } } } static void sgemm_nn(bcnn_gemm_context *ctx, int m, int n, int k, float alpha, const float *A, int inc_row_A, int inc_col_A, const float *B, int inc_row_B, int inc_col_B, float beta, float *C, int inc_row_C, int inc_col_C, int num_threads) { int mb = (m + MC - 1) / MC; int nb = (n + NC - 1) / NC; int kb = (k + KC - 1) / KC; int _mc = m % MC; int _nc = n % NC; int _kc = k % KC; if (equal(alpha, 0.0) || k == 0) { sgemm_scal(m, n, beta, C, inc_row_C, inc_col_C); return; } for (int j = 0; j < nb; ++j) { int nc = (j != nb - 1 || _nc == 0) ? 
NC : _nc; for (int l = 0; l < kb; ++l) { int kc = (l != kb - 1 || _kc == 0) ? KC : _kc; float _beta = (l == 0) ? beta : 1.0f; sgemm_nn_pack_B(kc, nc, &B[l * KC * inc_row_B + j * NC], inc_row_B, inc_col_B, ctx->buffer_b, NR, num_threads); for (int i = 0; i < mb; ++i) { int mc = (i != mb - 1 || _mc == 0) ? MC : _mc; sgemm_nn_pack_A(mc, kc, &A[i * MC * inc_row_A + l * KC], inc_row_A, inc_col_A, ctx->buffer_a, MR, num_threads); sgemm_mkernel( mc, nc, kc, alpha, _beta, &C[i * MC * inc_row_C + j * NC], inc_row_C, inc_col_C, ctx->buffer_a, ctx->buffer_b, ctx->buffer_ab, ctx->buffer_c, MR, NR, num_threads); } } } } static void sgemm(bcnn_gemm_context *ctx, int m, int n, int k, float alpha, const float *A, int inc_row_A, int inc_col_A, const float *B, int inc_row_B, int inc_col_B, float beta, float *C, int inc_row_C, int inc_col_C, int num_threads) { int mb = (m + MC - 1) / MC; int nb = (n + NC - 1) / NC; int kb = (k + KC - 1) / KC; int _mc = m % MC; int _nc = n % NC; int _kc = k % KC; if (equal(alpha, 0.0) || k == 0) { sgemm_scal(m, n, beta, C, inc_row_C, inc_col_C); return; } for (int j = 0; j < nb; ++j) { int nc = (j != nb - 1 || _nc == 0) ? NC : _nc; for (int l = 0; l < kb; ++l) { int kc = (l != kb - 1 || _kc == 0) ? KC : _kc; float _beta = (l == 0) ? beta : 1.0f; sgemm_pack_B(kc, nc, &B[l * KC * inc_row_B + j * NC], inc_row_B, inc_col_B, ctx->buffer_b, NR); for (int i = 0; i < mb; ++i) { int mc = (i != mb - 1 || _mc == 0) ? 
MC : _mc; sgemm_pack_A(mc, kc, &A[i * MC * inc_row_A + l * KC], inc_row_A, inc_col_A, ctx->buffer_a, MR); sgemm_mkernel( mc, nc, kc, alpha, _beta, &C[i * MC * inc_row_C + j * NC], inc_row_C, inc_col_C, ctx->buffer_a, ctx->buffer_b, ctx->buffer_ab, ctx->buffer_c, MR, NR, num_threads); } } } } int bcnn_gemm(bcnn_gemm_context *ctx, int trans_a, int trans_b, int m, int n, int k, float alpha, float *A, int lda, float *B, int ldb, float beta, float *C, int ldc, int num_threads) { #if (defined(__aarch64__)) // Switch A and B as OpenBlas is column major openblas_sgemm(ctx, trans_b, trans_a, n, m, k, alpha, B, ldb, A, lda, beta, C, ldc); #else int inc_row_A = (!trans_a) ? lda : 1; int inc_col_A = (!trans_a) ? 1 : lda; int inc_row_B = (!trans_b) ? ldb : 1; int inc_col_B = (!trans_b) ? 1 : ldb; if (!trans_a && !trans_b) { sgemm_nn(ctx, m, n, k, alpha, A, inc_row_A, inc_col_A, B, inc_row_B, inc_col_B, beta, C, ldc, 1, num_threads); } else { sgemm(ctx, m, n, k, alpha, A, inc_row_A, inc_col_A, B, inc_row_B, inc_col_B, beta, C, ldc, 1, num_threads); } #endif return 0; }
fc_kernel_int8_arm.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #include <stdint.h> #include <stdlib.h> #include <math.h> #include <arm_neon.h> #include "fc_kernel_int8_arm.h" void gemv_1x8_int8(int32_t *biases, const float *scales, int8_t *inp, int8_t *kernel, long kernel_size, int8_t *output) { int8x8_t input; int8x16_t weight_0_1, weight_2_3, weight_4_5, weight_6_7; int16x8_t weight0_16, weight1_16, weight2_16, weight3_16; int16x8_t weight4_16, weight5_16, weight6_16, weight7_16; int32x4_t res = {0, 0, 0, 0}; int32x4_t res1 = {0, 0, 0, 0}; int8_t *input_ptr = inp; int8_t *weight_ptr = kernel; int remainw = (kernel_size >> 3) << 3; for (int i = 0; i < remainw; i = i + 8) { input = vld1_s8(input_ptr); weight_0_1 = vld1q_s8(weight_ptr); weight_2_3 = vld1q_s8(weight_ptr + 16); weight_4_5 = vld1q_s8(weight_ptr + 32); weight_6_7 = vld1q_s8(weight_ptr + 48); weight0_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 0)), vget_low_s8(weight_0_1)); weight1_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 1)), vget_high_s8(weight_0_1)); weight2_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 2)), vget_low_s8(weight_2_3)); weight3_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 3)), vget_high_s8(weight_2_3)); weight4_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 4)), 
vget_low_s8(weight_4_5)); weight5_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 5)), vget_high_s8(weight_4_5)); weight6_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 6)), vget_low_s8(weight_6_7)); weight7_16 = vmull_s8(vdup_n_s8(vget_lane_s8(input, 7)), vget_high_s8(weight_6_7)); res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight0_16), vget_low_s16(weight1_16))); res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight2_16), vget_low_s16(weight3_16))); res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight4_16), vget_low_s16(weight5_16))); res = vaddq_s32(res, vaddl_s16(vget_low_s16(weight6_16), vget_low_s16(weight7_16))); res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight0_16), vget_high_s16(weight1_16))); res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight2_16), vget_high_s16(weight3_16))); res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight4_16), vget_high_s16(weight5_16))); res1 = vaddq_s32(res1, vaddl_s16(vget_high_s16(weight6_16), vget_high_s16(weight7_16))); input_ptr += 8; weight_ptr += 64; } for (int i = remainw; i < kernel_size; ++i) { weight0_16 = vmull_s8(vdup_n_s8(input_ptr[0]), vld1_s8(weight_ptr)); res = vaddq_s32(vmovl_s16(vget_low_s16(weight0_16)), res); res1 = vaddq_s32(vmovl_s16(vget_high_s16(weight0_16)), res1); input_ptr += 1; weight_ptr += 8; } if (biases) { int32x4_t bias = vld1q_s32(biases); int32x4_t bias1 = vld1q_s32(biases + 4); res = vaddq_s32(res,bias); res1 = vaddq_s32(res1,bias1); } float32x4_t res_f = vcvtq_f32_s32(res); float32x4_t res1_f = vcvtq_f32_s32(res1); float32x4_t scale = vld1q_f32(scales); float32x4_t scale_1 = vld1q_f32(scales + 4); res_f = vmulq_f32(res_f, scale); res1_f = vmulq_f32(res1_f, scale_1); res_f = vaddq_f32(res_f,vdupq_n_f32(0.5f)); res1_f = vaddq_f32(res1_f,vdupq_n_f32(0.5f)); res = vcvtq_s32_f32(res_f); res1 = vcvtq_s32_f32(res1_f); int16x4_t res_16 = vmovn_s32(res); int16x4_t res1_16 = vmovn_s32(res1); int8x8_t result = vmovn_s16(vcombine_s16(res_16, res1_16)); int8x8_t _m127 = vdup_n_s8(127); int8x8_t _m_127 = 
vdup_n_s8(-127); result = vmax_s8(_m_127, result); result = vmin_s8(_m127, result); vst1_s8(output, result); } void gemv_1x2_int8(const int32_t *biases, const float *scales, int8_t *inp, int8_t *kernel, long kernel_size, int8_t *output) { int8_t *input_ptr = inp; int8_t *weight_ptr = kernel; int remainw = (kernel_size << 3) >> 3; int8x8x2_t weight; int8x8_t input; int16x8_t out_16_0, out_16_1, out_32_0, out_32_1; int32_t sum0 = 0, sum1 = 0; for (int i = 0; i < remainw; i = i + 8) { weight = vld2_s8(weight_ptr); input = vld1_s8(input_ptr); out_16_0 = vmull_s8(weight.val[0], input); out_16_1 = vmull_s8(weight.val[1], input); out_32_0 = vpaddlq_s16(out_16_0); out_32_1 = vpaddlq_s16(out_16_1); sum0 += vgetq_lane_s32(out_32_0, 0) + vgetq_lane_s32(out_32_0, 1) + vgetq_lane_s32(out_32_0, 2) + vgetq_lane_s32(out_32_0, 3); sum1 += vgetq_lane_s32(out_32_1, 0) + vgetq_lane_s32(out_32_1, 1) + vgetq_lane_s32(out_32_1, 2) + vgetq_lane_s32(out_32_1, 3); weight_ptr += 16; input_ptr += 8; } for (int i = remainw; i < kernel_size; ++i) { sum0 += weight_ptr[0] * input_ptr[0]; sum1 += weight_ptr[1] * input_ptr[0]; input_ptr++; weight_ptr += 2; } if (biases) { sum0 += biases[0]; sum1 += biases[1]; } int data_i32_0 = round(sum0 * scales[0]); if (data_i32_0 > 127) data_i32_0 = 127; else if (data_i32_0 < -127) data_i32_0 = -127; int data_i32_1 = round(sum1 * scales[1]); if (data_i32_1 > 127) data_i32_1 = 127; else if (data_i32_0 < -127) data_i32_1 = -127; output[0] = data_i32_0; output[1] = data_i32_1; } // start and end channel must be 8 aligned void gemv1x8(const int8_t *input, const int8_t *output, int8_t *weight_interleaved, const int32_t *biases, const float *scales, int kernel_size, int start_channel, int end_channel, int num_thread, int cpu_affinity) { int ch = 0; int8_t *cur_kernel, *cur_result; int32_t *cur_biases; const float *cur_scales; // #pragma omp parallel for num_threads(num_thread) for (ch = start_channel; ch < end_channel; ch += 8) { cur_kernel = (int8_t *) 
(weight_interleaved + kernel_size * ch); cur_result = (int8_t *) (output + ch); cur_biases = biases ? (int32_t *) (biases + ch) : NULL; cur_scales = scales + ch; gemv_1x8_int8(cur_biases, cur_scales, (int8_t *) input, cur_kernel, kernel_size, cur_result); } } // start channel must be 2 aligned void gemv1x2(const int8_t *input, int8_t *output, int8_t *weight_interleaved, const int32_t *biases, const float *scales, int kernel_size,int start_channel,int end_channel,int num_thread,int cpu_affinity) { int32_t sum; int ch = 0; int8_t *cur_kernel; int32_t *cur_biases; int8_t *cur_result; const float* cur_scales; for (ch = start_channel; ch < (end_channel & -2); ch += 2) { cur_kernel = (int8_t *) (weight_interleaved + kernel_size * ch); cur_result = (int8_t *) (output + ch); cur_biases = biases ? (int32_t *) (biases + ch) : NULL; cur_scales = scales + ch; gemv_1x2_int8(cur_biases, cur_scales, (int8_t*) input, cur_kernel, kernel_size, cur_result); } if (end_channel & 0x1) { cur_kernel = (int8_t *) (weight_interleaved + kernel_size * ch); cur_result = (int8_t *) (output + ch); sum = biases ? 
*(biases + ch) : 0; for (int j = 0; j < kernel_size; j++) sum = sum + input[j] * cur_kernel[j]; int data_i32_0 = round(sum * cur_scales[0]); if (data_i32_0 > 127) data_i32_0 = 127; else if (data_i32_0 < -127) data_i32_0 = -127; *cur_result = data_i32_0; } } static void interleave_kernel(const int8_t *kernel, int8_t *kernel_interleaved, int out_chan, int kernel_size) { int i, j, k; int8_t *cur_kernel[8]; int8_t *cur_kernel_interleaved; // interleave 8 kernel for (i = 0; i < (out_chan & -8); i += 8) { for (j = 0; j < 8; j++) cur_kernel[j] = (int8_t *) kernel + kernel_size * (i + j); cur_kernel_interleaved = (int8_t *) kernel_interleaved + kernel_size * i; for (k = 0; k < kernel_size; k++) for (j = 0; j < 8; j++) cur_kernel_interleaved[8 * k + j] = *(cur_kernel[j] + k); } // interleave 2 kernel for (; i < (out_chan & -2); i += 2) { for (j = 0; j < 2; j++) cur_kernel[j] = (int8_t *) kernel + kernel_size * (i + j); cur_kernel_interleaved = (int8_t *) kernel_interleaved + kernel_size * i; for (k = 0; k < kernel_size; k++) for (j = 0; j < 2; j++) cur_kernel_interleaved[2 * k + j] = *(cur_kernel[j] + k); } // copy last kernel if (out_chan & 0x1) { cur_kernel[0] = (int8_t *) kernel + kernel_size * i; cur_kernel_interleaved = (int8_t *) kernel_interleaved + kernel_size * i; for (k = 0; k < kernel_size; k++) cur_kernel_interleaved[k] = *(cur_kernel[0] + k); } return; } int int8_fc_kernel_prerun(struct ir_tensor *input_tensor, \ struct ir_tensor *filter_tensor, \ struct ir_tensor *output_tensor, \ struct fc_priv_info *priv_info, \ struct fc_param *param) { int num_output = param->num_output; int kernel_size = filter_tensor->dims[1]; int kernel_align = ((kernel_size + 1) & -2); if (!priv_info->interleave_buffer) { int mem_size = num_output * kernel_align; void *mem = sys_malloc(mem_size); priv_info->interleave_buffer = mem; priv_info->interleave_buffer_size = mem_size; } if (!priv_info->input_buffer) { int mem_size = kernel_align; void *mem = sys_malloc(mem_size); 
priv_info->input_buffer = mem; priv_info->input_buffer_size = mem_size; } int8_t *filter_data = (int8_t *) filter_tensor->data; interleave_kernel(filter_data, (int8_t *) priv_info->interleave_buffer, num_output, kernel_size); return 0; } int int8_fc_kernel_run(struct ir_tensor *input_tensor, \ struct ir_tensor *filter_tensor, \ struct ir_tensor *bias_tensor, \ struct ir_tensor *output_tensor, \ struct fc_priv_info *priv_info, \ struct fc_param *param, \ int num_thread, int cpu_affinity) { int out_num = param->num_output; int kernel_size = filter_tensor->dims[1]; int8_t *input = (int8_t *) input_tensor->data; int8_t *output = (int8_t *) output_tensor->data; int8_t *weight = (int8_t *) priv_info->interleave_buffer; int32_t *biases = NULL; if (bias_tensor) biases = (int32_t *) bias_tensor->data; float input_scale = input_tensor->scale; float output_scale = output_tensor->scale; float *weight_scales = filter_tensor->scale_list; float *requant_scales = (float *) malloc(out_num * sizeof(float)); for (int i = 0; i < out_num; i++) requant_scales[i] = (input_scale * weight_scales[i]) / output_scale; int out_num_8 = out_num & ~7; for (int i = 0; i < input_tensor->dims[0]; i++) { int8_t *cur_input = input + i * kernel_size; int8_t *cur_output = output + i * out_num; gemv1x8(cur_input, cur_output, weight, biases, requant_scales, kernel_size, 0, out_num_8, num_thread, cpu_affinity); if (out_num & 0x7) gemv1x2(cur_input, cur_output, weight, biases, requant_scales, kernel_size, out_num_8,out_num,num_thread, cpu_affinity); } return 0; }
GB_unop__cosh_fc64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__cosh_fc64_fc64 // op(A') function: GB_unop_tran__cosh_fc64_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = ccosh (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = ccosh (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = ccosh (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_COSH || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__cosh_fc64_fc64 ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = ccosh (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = ccosh (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__cosh_fc64_fc64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
utils.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. #pragma once #include <fcntl.h> #include <algorithm> #include <cassert> #include <cstdlib> #include <cstring> #include <fstream> #include <iostream> #include <string> #include <memory> #include <random> #include <set> #ifdef __APPLE__ #else #include <malloc.h> #endif #ifdef _WINDOWS #include <Windows.h> typedef HANDLE FileHandle; #else #include <unistd.h> typedef int FileHandle; #endif #include "logger.h" #include "cached_io.h" #include "common_includes.h" #include "windows_customizations.h" #ifdef EXEC_ENV_OLS #include "content_buf.h" #include "memory_mapped_files.h" #endif // taken from // https://github.com/Microsoft/BLAS-on-flash/blob/master/include/utils.h // round up X to the nearest multiple of Y #define ROUND_UP(X, Y) \ ((((uint64_t)(X) / (Y)) + ((uint64_t)(X) % (Y) != 0)) * (Y)) #define DIV_ROUND_UP(X, Y) (((uint64_t)(X) / (Y)) + ((uint64_t)(X) % (Y) != 0)) // round down X to the nearest multiple of Y #define ROUND_DOWN(X, Y) (((uint64_t)(X) / (Y)) * (Y)) // alignment tests #define IS_ALIGNED(X, Y) ((uint64_t)(X) % (uint64_t)(Y) == 0) #define IS_512_ALIGNED(X) IS_ALIGNED(X, 512) #define IS_4096_ALIGNED(X) IS_ALIGNED(X, 4096) typedef uint64_t _u64; typedef int64_t _s64; typedef uint32_t _u32; typedef int32_t _s32; typedef uint16_t _u16; typedef int16_t _s16; typedef uint8_t _u8; typedef int8_t _s8; namespace diskann { static const size_t MAX_SIZE_OF_STREAMBUF = 2LL * 1024 * 1024 * 1024; enum Metric { L2 = 0, INNER_PRODUCT = 1, FAST_L2 = 2, PQ = 3 }; inline void alloc_aligned(void** ptr, size_t size, size_t align) { *ptr = nullptr; assert(IS_ALIGNED(size, align)); #ifndef _WINDOWS *ptr = ::aligned_alloc(align, size); #else *ptr = ::_aligned_malloc(size, align); // note the swapped arguments! #endif assert(*ptr != nullptr); } inline void aligned_free(void* ptr) { // Gopal. 
Must have a check here if the pointer was actually allocated by // _alloc_aligned if (ptr == nullptr) { return; } #ifndef _WINDOWS free(ptr); #else ::_aligned_free(ptr); #endif } inline void GenRandom(std::mt19937& rng, unsigned* addr, unsigned size, unsigned N) { for (unsigned i = 0; i < size; ++i) { addr[i] = rng() % (N - size); } std::sort(addr, addr + size); for (unsigned i = 1; i < size; ++i) { if (addr[i] <= addr[i - 1]) { addr[i] = addr[i - 1] + 1; } } unsigned off = rng() % N; for (unsigned i = 0; i < size; ++i) { addr[i] = (addr[i] + off) % N; } } // get_bin_metadata functions START inline void get_bin_metadata_impl(std::basic_istream<char>& reader, size_t& nrows, size_t& ncols) { int nrows_32, ncols_32; reader.read((char*) &nrows_32, sizeof(int)); reader.read((char*) &ncols_32, sizeof(int)); nrows = nrows_32; ncols = ncols_32; } #ifdef EXEC_ENV_OLS inline void get_bin_metadata(MemoryMappedFiles& files, const std::string& bin_file, size_t& nrows, size_t& ncols) { diskann::cout << "Getting metadata for file: " << bin_file << std::endl; auto fc = files.getContent(bin_file); auto cb = ContentBuf((char*) fc._content, fc._size); std::basic_istream<char> reader(&cb); get_bin_metadata_impl(reader, nrows, ncols); } #endif inline void get_bin_metadata(const std::string& bin_file, size_t& nrows, size_t& ncols) { std::ifstream reader(bin_file.c_str(), std::ios::binary); get_bin_metadata_impl(reader, nrows, ncols); } // get_bin_metadata functions END template<typename T> inline std::string getValues(T* data, size_t num) { std::stringstream stream; stream << "["; for (size_t i = 0; i < num; i++) { stream << std::to_string(data[i]) << ","; } stream << "]" << std::endl; return stream.str(); } // load_bin functions START template<typename T> inline void load_bin_impl(std::basic_istream<char>& reader, size_t actual_file_size, T*& data, size_t& npts, size_t& dim) { int npts_i32, dim_i32; reader.read((char*) &npts_i32, sizeof(int)); reader.read((char*) &dim_i32, 
sizeof(int)); npts = (unsigned) npts_i32; dim = (unsigned) dim_i32; diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim << "..." << std::endl; size_t expected_actual_file_size = npts * dim * sizeof(T) + 2 * sizeof(uint32_t); if (actual_file_size != expected_actual_file_size) { std::stringstream stream; stream << "Error. File size mismatch. Actual size is " << actual_file_size << " while expected size is " << expected_actual_file_size << " npts = " << npts << " dim = " << dim << " size of <T>= " << sizeof(T) << std::endl; diskann::cout << stream.str(); throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__, __LINE__); } data = new T[npts * dim]; reader.read((char*) data, npts * dim * sizeof(T)); // diskann::cout << "Last bytes: " // << getValues<T>(data + (npts - 2) * dim, dim); // diskann::cout << "Finished reading bin file." << std::endl; } #ifdef EXEC_ENV_OLS template<typename T> inline void load_bin(MemoryMappedFiles& files, const std::string& bin_file, T*& data, size_t& npts, size_t& dim) { diskann::cout << "Reading bin file " << bin_file.c_str() << " ..." << std::endl; auto fc = files.getContent(bin_file); uint32_t t_npts, t_dim; uint32_t* contentAsIntPtr = (uint32_t*) (fc._content); t_npts = *(contentAsIntPtr); t_dim = *(contentAsIntPtr + 1); npts = t_npts; dim = t_dim; auto actual_file_size = npts * dim * sizeof(T) + 2 * sizeof(uint32_t); if (actual_file_size != fc._size) { std::stringstream stream; stream << "Error. File size mismatch. Actual size is " << fc._size << " while expected size is " << actual_file_size << " npts = " << npts << " dim = " << dim << " size of <T>= " << sizeof(T) << std::endl; diskann::cout << stream.str(); throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__, __LINE__); } data = (T*) ((char*) fc._content + 2 * sizeof(uint32_t)); // No need to copy! 
} #endif template<typename T> inline void load_bin(const std::string& bin_file, T*& data, size_t& npts, size_t& dim) { // OLS //_u64 read_blk_size = 64 * 1024 * 1024; // cached_ifstream reader(bin_file, read_blk_size); // size_t actual_file_size = reader.get_file_size(); // END OLS diskann::cout << "Reading bin file " << bin_file.c_str() << " ..." << std::endl; std::ifstream reader(bin_file, std::ios::binary | std::ios::ate); uint64_t fsize = reader.tellg(); reader.seekg(0); load_bin_impl<T>(reader, fsize, data, npts, dim); } // load_bin functions END inline void load_truthset(const std::string& bin_file, uint32_t*& ids, float*& dists, size_t& npts, size_t& dim) { _u64 read_blk_size = 64 * 1024 * 1024; cached_ifstream reader(bin_file, read_blk_size); diskann::cout << "Reading truthset file " << bin_file.c_str() << " ..." << std::endl; size_t actual_file_size = reader.get_file_size(); int npts_i32, dim_i32; reader.read((char*) &npts_i32, sizeof(int)); reader.read((char*) &dim_i32, sizeof(int)); npts = (unsigned) npts_i32; dim = (unsigned) dim_i32; diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim << "..." << std::endl; int truthset_type = -1; // 1 means truthset has ids and distances, 2 means // only ids, -1 is error size_t expected_file_size_with_dists = 2 * npts * dim * sizeof(uint32_t) + 2 * sizeof(uint32_t); if (actual_file_size == expected_file_size_with_dists) truthset_type = 1; size_t expected_file_size_just_ids = npts * dim * sizeof(uint32_t) + 2 * sizeof(uint32_t); if (actual_file_size == expected_file_size_just_ids) truthset_type = 2; if (truthset_type == -1) { std::stringstream stream; stream << "Error. File size mismatch. 
File should have bin format, with " "npts followed by ngt followed by npts*ngt ids and optionally " "followed by npts*ngt distance values; actual size: " << actual_file_size << ", expected: " << expected_file_size_with_dists << " or " << expected_file_size_just_ids; diskann::cout << stream.str(); throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__, __LINE__); } ids = new uint32_t[npts * dim]; reader.read((char*) ids, npts * dim * sizeof(uint32_t)); if (truthset_type == 1) { dists = new float[npts * dim]; reader.read((char*) dists, npts * dim * sizeof(float)); } } #ifdef EXEC_ENV_OLS template<typename T> inline void load_bin(MemoryMappedFiles& files, const std::string& bin_file, std::unique_ptr<T[]>& data, size_t& npts, size_t& dim) { T* ptr; load_bin<T>(files, bin_file, ptr, npts, dim); data.reset(ptr); } #endif template<typename T> inline void load_bin(const std::string& bin_file, std::unique_ptr<T[]>& data, size_t& npts, size_t& dim) { T* ptr; load_bin<T>(bin_file, ptr, npts, dim); data.reset(ptr); } template<typename T> inline void save_bin(const std::string& filename, T* data, size_t npts, size_t ndims) { std::ofstream writer(filename, std::ios::binary | std::ios::out); diskann::cout << "Writing bin: " << filename.c_str() << std::endl; int npts_i32 = (int) npts, ndims_i32 = (int) ndims; writer.write((char*) &npts_i32, sizeof(int)); writer.write((char*) &ndims_i32, sizeof(int)); diskann::cout << "bin: #pts = " << npts << ", #dims = " << ndims << ", size = " << npts * ndims * sizeof(T) + 2 * sizeof(int) << "B" << std::endl; // data = new T[npts_u64 * ndims_u64]; writer.write((char*) data, npts * ndims * sizeof(T)); writer.close(); diskann::cout << "Finished writing bin." 
<< std::endl; } // load_aligned_bin functions START template<typename T> inline void load_aligned_bin_impl(std::basic_istream<char>& reader, size_t actual_file_size, T*& data, size_t& npts, size_t& dim, size_t& rounded_dim) { int npts_i32, dim_i32; reader.read((char*) &npts_i32, sizeof(int)); reader.read((char*) &dim_i32, sizeof(int)); npts = (unsigned) npts_i32; dim = (unsigned) dim_i32; size_t expected_actual_file_size = npts * dim * sizeof(T) + 2 * sizeof(uint32_t); if (actual_file_size != expected_actual_file_size) { std::stringstream stream; stream << "Error. File size mismatch. Actual size is " << actual_file_size << " while expected size is " << expected_actual_file_size << " npts = " << npts << " dim = " << dim << " size of <T>= " << sizeof(T) << std::endl; diskann::cout << stream.str() << std::endl; throw diskann::ANNException(stream.str(), -1, __FUNCSIG__, __FILE__, __LINE__); } rounded_dim = ROUND_UP(dim, 8); diskann::cout << "Metadata: #pts = " << npts << ", #dims = " << dim << ", aligned_dim = " << rounded_dim << "..." << std::flush; size_t allocSize = npts * rounded_dim * sizeof(T); diskann::cout << "allocating aligned memory, " << allocSize << " bytes..." << std::flush; alloc_aligned(((void**) &data), allocSize, 8 * sizeof(T)); diskann::cout << "done. Copying data..." << std::flush; for (size_t i = 0; i < npts; i++) { reader.read((char*) (data + i * rounded_dim), dim * sizeof(T)); memset(data + i * rounded_dim + dim, 0, (rounded_dim - dim) * sizeof(T)); } diskann::cout << " done." << std::endl; } #ifdef EXEC_ENV_OLS template<typename T> inline void load_aligned_bin(MemoryMappedFiles& files, const std::string& bin_file, T*& data, size_t& npts, size_t& dim, size_t& rounded_dim) { diskann::cout << "Reading bin file " << bin_file << " ..." 
<< std::flush; FileContent fc = files.getContent(bin_file); ContentBuf buf((char*) fc._content, fc._size); std::basic_istream<char> reader(&buf); size_t actual_file_size = fc._size; load_aligned_bin_impl(reader, actual_file_size, data, npts, dim, rounded_dim); } #endif template<typename T> inline void load_aligned_bin(const std::string& bin_file, T*& data, size_t& npts, size_t& dim, size_t& rounded_dim) { diskann::cout << "Reading bin file " << bin_file << " ..." << std::flush; // START OLS //_u64 read_blk_size = 64 * 1024 * 1024; // cached_ifstream reader(bin_file, read_blk_size); // size_t actual_file_size = reader.get_file_size(); // END OLS std::ifstream reader(bin_file, std::ios::binary | std::ios::ate); uint64_t fsize = reader.tellg(); reader.seekg(0); load_aligned_bin_impl(reader, fsize, data, npts, dim, rounded_dim); } template<typename InType, typename OutType> void convert_types(const InType* srcmat, OutType* destmat, size_t npts, size_t dim) { #pragma omp parallel for schedule(static, 65536) for (int64_t i = 0; i < (_s64) npts; i++) { for (uint64_t j = 0; j < dim; j++) { destmat[i * dim + j] = (OutType) srcmat[i * dim + j]; } } } // plain saves data as npts X ndims array into filename template<typename T> void save_Tvecs(const char* filename, T* data, size_t npts, size_t ndims) { std::string fname(filename); // create cached ofstream with 64MB cache cached_ofstream writer(fname, 64 * 1048576); unsigned dims_u32 = (unsigned) ndims; // start writing for (uint64_t i = 0; i < npts; i++) { // write dims in u32 writer.write((char*) &dims_u32, sizeof(unsigned)); // get cur point in data T* cur_pt = data + i * ndims; writer.write((char*) cur_pt, ndims * sizeof(T)); } } // NOTE :: good efficiency when total_vec_size is integral multiple of 64 inline void prefetch_vector(const char* vec, size_t vecsize) { size_t max_prefetch_size = (vecsize / 64) * 64; for (size_t d = 0; d < max_prefetch_size; d += 64) _mm_prefetch((const char*) vec + d, _MM_HINT_T0); } // NOTE :: 
good efficiency when total_vec_size is integral multiple of 64 inline void prefetch_vector_l2(const char* vec, size_t vecsize) { size_t max_prefetch_size = (vecsize / 64) * 64; for (size_t d = 0; d < max_prefetch_size; d += 64) _mm_prefetch((const char*) vec + d, _MM_HINT_T1); } }; // namespace diskann struct PivotContainer { PivotContainer() = default; PivotContainer(size_t pivo_id, float pivo_dist) : piv_id{pivo_id}, piv_dist{pivo_dist} { } bool operator<(const PivotContainer& p) const { return p.piv_dist < piv_dist; } bool operator>(const PivotContainer& p) const { return p.piv_dist > piv_dist; } size_t piv_id; float piv_dist; }; inline bool file_exists(const std::string& name) { struct stat buffer; auto val = stat(name.c_str(), &buffer); diskann::cout << " Stat(" << name.c_str() << ") returned: " << val << std::endl; return (val == 0); } inline _u64 get_file_size(const std::string& fname) { std::ifstream reader(fname, std::ios::binary | std::ios::ate); if (!reader.fail() && reader.is_open()) { _u64 end_pos = reader.tellg(); diskann::cout << " Tellg: " << reader.tellg() << " as u64: " << end_pos << std::endl; reader.close(); return end_pos; } else { diskann::cout << "Could not open file: " << fname << std::endl; return 0; } } inline bool validate_file_size(const std::string& name) { std::ifstream in(std::string(name), std::ios::binary); in.seekg(0, in.end); size_t actual_file_size = in.tellg(); in.seekg(0, in.beg); size_t expected_file_size; in.read((char*) &expected_file_size, sizeof(uint64_t)); if (actual_file_size != expected_file_size) { diskann::cout << "Error loading" << name << ". Expected " "size (metadata): " << expected_file_size << ", actual file size : " << actual_file_size << ". Exitting." 
<< std::endl; in.close(); return false; } in.close(); return true; } #ifdef _WINDOWS #include <intrin.h> #include <Psapi.h> inline void printProcessMemory(const char* message) { PROCESS_MEMORY_COUNTERS counters; HANDLE h = GetCurrentProcess(); GetProcessMemoryInfo(h, &counters, sizeof(counters)); diskann::cout << message << " [Peaking Working Set size: " << counters.PeakWorkingSetSize * 1.0 / (1024 * 1024 * 1024) << "GB Working set size: " << counters.WorkingSetSize * 1.0 / (1024 * 1024 * 1024) << "GB Private bytes " << counters.PagefileUsage * 1.0 / (1024 * 1024 * 1024) << "GB]" << std::endl; } #else // need to check and change this inline bool avx2Supported() { return true; } inline void printProcessMemory(const char* message) { diskann::cout << message << std::endl; } #endif extern bool AvxSupportedCPU; extern bool Avx2SupportedCPU;
pr68960.c
/* PR middle-end/68960 */
/* { dg-do run } */

int
main ()
{
  /* 256-byte over-aligned automatic array; the test verifies that OpenMP
     privatization clauses preserve this alignment in each thread's copy.  */
  int temp[257] __attribute__ ((aligned (256))) = { 0 };
  #pragma omp parallel private (temp) num_threads (2)
  {
    int *p = &temp[0];
    /* Hide the pointer from the optimizer so the check below is real.  */
    asm volatile ("" : "+g" (p));
    /* Abort if the private copy lost its 256-byte alignment.  */
    if (((__UINTPTR_TYPE__) p) & 255)
      __builtin_abort ();
  }
  /* Same check for a firstprivate copy inside an explicit task.  */
  #pragma omp parallel num_threads (2)
  #pragma omp single
  #pragma omp task firstprivate (temp)
  {
    int *p = &temp[0];
    asm volatile ("" : "+g" (p));
    if (((__UINTPTR_TYPE__) p) & 255)
      __builtin_abort ();
  }
  return 0;
}
GB_binop__iseq_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__iseq_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__iseq_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__iseq_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__iseq_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_uint64) // A*D function (colscale): GB (_AxD__iseq_uint64) // D*A function (rowscale): GB (_DxB__iseq_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__iseq_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__iseq_uint64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_uint64) // C=scalar+B GB (_bind1st__iseq_uint64) // C=scalar+B' GB (_bind1st_tran__iseq_uint64) // C=A+scalar GB (_bind2nd__iseq_uint64) // C=A'+scalar GB (_bind2nd_tran__iseq_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 
0 // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x == y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISEQ || GxB_NO_UINT64 || GxB_NO_ISEQ_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__iseq_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__iseq_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__iseq_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__iseq_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t 
*restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__iseq_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__iseq_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__iseq_uint64) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__iseq_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__iseq_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__iseq_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__iseq_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__iseq_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB (_bind1st_tran__iseq_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB (_bind2nd_tran__iseq_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
matrix.c
/******************************************************************************
 * INCLUDES
 *****************************************************************************/

#include "base.h"
#include "matrix.h"
#include "util.h"
#include "timer.h"
#include "splatt_lapack.h"

#include <math.h>



/******************************************************************************
 * PRIVATE FUNCTIONS
 *****************************************************************************/


/**
* @brief Form the Gram matrix from A^T * A.
*
* @param[out] neq_matrix The matrix to fill.
* @param aTa The individual Gram matrices.
* @param mode Which mode we are computing for.
* @param nmodes How many total modes.
* @param reg Regularization parameter (to add to the diagonal).
*/
static void p_form_gram(
    matrix_t * neq_matrix,
    matrix_t * * aTa,
    idx_t const mode,
    idx_t const nmodes,
    val_t const reg)
{
  /* nfactors */
  splatt_blas_int N = aTa[0]->J;

  /* form upper-triangual normal equations */
  val_t * const restrict neqs = neq_matrix->vals;

  #pragma omp parallel
  {
    /* First initialize with 1s, then set the diagonal.  The diagonal must be
     * written AFTER the row fill: the previous code set `1. + reg` first and
     * then overwrote it with 1. in the inner loop (j == i), silently dropping
     * the regularization term.  */
    #pragma omp for schedule(static, 1)
    for(splatt_blas_int i=0; i < N; ++i) {
      for(splatt_blas_int j=0; j < N; ++j) {
        neqs[j+(i*N)] = 1.;
      }
      neqs[i+(i*N)] = 1. + reg;
    }

    /* now Hadamard product all (A^T * A) matrices */
    for(idx_t m=0; m < nmodes; ++m) {
      if(m == mode) {
        continue;
      }

      val_t const * const restrict mat = aTa[m]->vals;
      #pragma omp for schedule(static, 1)
      for(splatt_blas_int i=0; i < N; ++i) {
        /*
         * `mat` is symmetric but stored upper right triangular, so be careful
         * to only access that.
         */
        /* copy upper triangle */
        for(splatt_blas_int j=i; j < N; ++j) {
          neqs[j+(i*N)] *= mat[j+(i*N)];
        }
      }
    } /* foreach mode */

    #pragma omp barrier

    /* now copy lower triangular */
    #pragma omp for schedule(static, 1)
    for(splatt_blas_int i=0; i < N; ++i) {
      for(splatt_blas_int j=0; j < i; ++j) {
        neqs[j+(i*N)] = neqs[i+(j*N)];
      }
    }
  } /* omp parallel */
}


/**
* @brief Normalize the columns of A by their 2-norms; the norms are written
*        to `lambda`.  With MPI enabled the norms are globally reduced.
*
* @param A The matrix to normalize (row-major, I x J).
* @param[out] lambda Receives the J column norms.
* @param rinfo MPI rank information (used only when SPLATT_USE_MPI).
* @param thds Per-thread scratch space; scratch[0] holds partial sums.
*/
static void p_mat_2norm(
  matrix_t * const A,
  val_t * const restrict lambda,
  rank_info * const rinfo,
  thd_info * const thds)
{
  idx_t const I = A->I;
  idx_t const J = A->J;
  val_t * const restrict vals = A->vals;

  #pragma omp parallel
  {
    int const tid = splatt_omp_get_thread_num();
    val_t * const mylambda = (val_t *) thds[tid].scratch[0];
    for(idx_t j=0; j < J; ++j) {
      mylambda[j] = 0;
    }

    /* accumulate per-thread partial sums of squares per column */
    #pragma omp for schedule(static)
    for(idx_t i=0; i < I; ++i) {
      for(idx_t j=0; j < J; ++j) {
        mylambda[j] += vals[j + (i*J)] * vals[j + (i*J)];
      }
    }

    /* do reduction on partial sums */
    thd_reduce(thds, 0, J, REDUCE_SUM);

    #pragma omp master
    {
#ifdef SPLATT_USE_MPI
      /* now do an MPI reduction to get the global lambda */
      timer_start(&timers[TIMER_MPI_NORM]);
      timer_start(&timers[TIMER_MPI_COMM]);
      MPI_Allreduce(mylambda, lambda, J, SPLATT_MPI_VAL, MPI_SUM,
          rinfo->comm_3d);
      timer_stop(&timers[TIMER_MPI_COMM]);
      timer_stop(&timers[TIMER_MPI_NORM]);
#else
      memcpy(lambda, mylambda, J * sizeof(val_t));
#endif
    }
    #pragma omp barrier

    #pragma omp for schedule(static)
    for(idx_t j=0; j < J; ++j) {
      lambda[j] = sqrt(lambda[j]);
    }

    /* do the normalization */
    #pragma omp for schedule(static)
    for(idx_t i=0; i < I; ++i) {
      for(idx_t j=0; j < J; ++j) {
        vals[j+(i*J)] /= lambda[j];
      }
    }
  } /* end omp parallel */
}


/**
* @brief Normalize the columns of A by their max entry (clamped to >= 1);
*        the scaling factors are written to `lambda`.
*
* @param A The matrix to normalize (row-major, I x J).
* @param[out] lambda Receives the J scaling factors.
* @param rinfo MPI rank information (used only when SPLATT_USE_MPI).
* @param thds Per-thread scratch space; scratch[0] holds partial maxes.
*/
static void p_mat_maxnorm(
  matrix_t * const A,
  val_t * const restrict lambda,
  rank_info * const rinfo,
  thd_info * const thds)
{
  idx_t const I = A->I;
  idx_t const J = A->J;
  val_t * const restrict vals = A->vals;

  #pragma omp parallel
  {
    int const tid = splatt_omp_get_thread_num();
    val_t * const mylambda = (val_t *) thds[tid].scratch[0];
    for(idx_t j=0; j < J; ++j) {
      mylambda[j] = 0;
    }
#pragma omp for schedule(static) for(idx_t i=0; i < I; ++i) { for(idx_t j=0; j < J; ++j) { mylambda[j] = SS_MAX(mylambda[j], vals[j+(i*J)]); } } /* do reduction on partial maxes */ thd_reduce(thds, 0, J, REDUCE_MAX); #pragma omp master { #ifdef SPLATT_USE_MPI /* now do an MPI reduction to get the global lambda */ timer_start(&timers[TIMER_MPI_NORM]); timer_start(&timers[TIMER_MPI_COMM]); MPI_Allreduce(mylambda, lambda, J, SPLATT_MPI_VAL, MPI_MAX, rinfo->comm_3d); timer_stop(&timers[TIMER_MPI_COMM]); timer_stop(&timers[TIMER_MPI_NORM]); #else memcpy(lambda, mylambda, J * sizeof(val_t)); #endif } #pragma omp barrier #pragma omp for schedule(static) for(idx_t j=0; j < J; ++j) { lambda[j] = SS_MAX(lambda[j], 1.); } /* do the normalization */ #pragma omp for schedule(static) for(idx_t i=0; i < I; ++i) { for(idx_t j=0; j < J; ++j) { vals[j+(i*J)] /= lambda[j]; } } } /* end omp parallel */ } /** * @brief Solve the system LX = B. * * @param L The lower triangular matrix of coefficients. * @param B The right-hand side which is overwritten with X. */ static void p_mat_forwardsolve( matrix_t const * const L, matrix_t * const B) { /* check dimensions */ idx_t const N = L->I; val_t const * const restrict lv = L->vals; val_t * const restrict bv = B->vals; /* first row of X is easy */ for(idx_t j=0; j < N; ++j) { bv[j] /= lv[0]; } /* now do forward substitution */ for(idx_t i=1; i < N; ++i) { /* X(i,f) = B(i,f) - \sum_{j=0}^{i-1} L(i,j)X(i,j) */ for(idx_t j=0; j < i; ++j) { for(idx_t f=0; f < N; ++f) { bv[f+(i*N)] -= lv[j+(i*N)] * bv[f+(j*N)]; } } for(idx_t f=0; f < N; ++f) { bv[f+(i*N)] /= lv[i+(i*N)]; } } } /** * @brief Solve the system UX = B. * * @param U The upper triangular matrix of coefficients. * @param B The right-hand side which is overwritten with X. 
*/
static void p_mat_backwardsolve(
  matrix_t const * const U,
  matrix_t * const B)
{
  /* check dimensions */
  idx_t const N = U->I;

  val_t const * const restrict rv = U->vals;
  val_t * const restrict bv = B->vals;

  /* last row of X is easy */
  for(idx_t f=0; f < N; ++f) {
    idx_t const i = N-1;
    bv[f+(i*N)] /= rv[i+(i*N)];
  }

  /* now do backward substitution */
  for(idx_t row=2; row <= N; ++row) {
    /* operate with (N - row) to make unsigned comparisons easy */
    idx_t const i = N - row;

    /* X(i,f) = B(i,f) - \sum_{j=0}^{i-1} R(i,j)X(i,j) */
    for(idx_t j=i+1; j < N; ++j) {
      for(idx_t f=0; f < N; ++f) {
        bv[f+(i*N)] -= rv[j+(i*N)] * bv[f+(j*N)];
      }
    }
    for(idx_t f=0; f < N; ++f) {
      bv[f+(i*N)] /= rv[i+(i*N)];
    }
  }
}



/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/

/* Invert the symmetric positive-definite matrix A in place: factor A = L L^T
 * with mat_cholesky, solve L*Y = I forward, transpose L into U = L^T, then
 * solve U*A = Y backward. */
void mat_syminv(
  matrix_t * const A)
{
  /* check dimensions */
  assert(A->I == A->J);

  idx_t const N = A->I;
  matrix_t * L = mat_alloc(N, N);

  /* do a Cholesky factorization on A */
  mat_cholesky(A, L);

  /* setup identity matrix */
  memset(A->vals, 0, N*N*sizeof(val_t));
  for(idx_t n=0; n < N; ++n) {
    A->vals[n+(n*N)] = 1.;
  }

  /* Solve L*Y = I */
  p_mat_forwardsolve(L, A);

  /* transpose L in place: copy the lower triangle into the upper and zero
   * the lower, turning L into U = L^T */
  for(idx_t i=0; i < N; ++i) {
    for(idx_t j=i+1; j < N; ++j) {
      L->vals[j+(i*N)] = L->vals[i+(j*N)];
      L->vals[i+(j*N)] = 0.;
    }
  }

  /* Solve U*A = Y */
  p_mat_backwardsolve(L, A);

  mat_free(L);
}


/* Dense Cholesky factorization A = L L^T (row-major). L is zeroed and then
 * filled in its lower triangle. Requires A symmetric positive-definite
 * (sqrt of a negative diagonal otherwise yields NaN). */
void mat_cholesky(
  matrix_t const * const A,
  matrix_t * const L)
{
  /* check dimensions */
  assert(A->I == A->J);
  assert(A->I == L->J);
  assert(L->I == L->J);

  idx_t const N = A->I;
  val_t const * const restrict av = A->vals;
  val_t * const restrict lv = L->vals;

  memset(lv, 0, N*N*sizeof(val_t));
  for (idx_t i = 0; i < N; ++i) {
    for (idx_t j = 0; j <= i; ++j) {
      val_t inner = 0;
      for (idx_t k = 0; k < j; ++k) {
        inner += lv[k+(i*N)] * lv[k+(j*N)];
      }

      if(i == j) {
        lv[j+(i*N)] = sqrt(av[i+(i*N)] - inner);
      } else {
        lv[j+(i*N)] = 1.0 / lv[j+(j*N)] * (av[j+(i*N)]
- inner);
      }
    }
  }
}


/* Compute ret = Hadamard product over `nmults` Gram matrices A_m^T A_m,
 * for matrices mats[(start+mode) % nmats], using `buf` as F x F scratch.
 * Only the upper triangle is computed; it is mirrored to the lower triangle
 * at the end. */
void mat_aTa_hada(
  matrix_t ** mats,
  idx_t const start,
  idx_t const nmults,
  idx_t const nmats,
  matrix_t * const buf,
  matrix_t * const ret)
{
  idx_t const F = mats[0]->J;

  /* check matrix dimensions */
  assert(ret->I == ret->J);
  assert(ret->I == F);
  assert(buf->I == F);
  assert(buf->J == F);
  assert(ret->vals != NULL);
  assert(mats[0]->rowmajor);
  assert(ret->rowmajor);

  val_t * const restrict rv = ret->vals;
  val_t * const restrict bufv = buf->vals;

  /* initialize the (upper triangle of the) running Hadamard product to 1 */
  for(idx_t i=0; i < F; ++i) {
    for(idx_t j=i; j < F; ++j) {
      rv[j+(i*F)] = 1.;
    }
  }

  for(idx_t mode=0; mode < nmults; ++mode) {
    idx_t const m = (start+mode) % nmats;

    idx_t const I = mats[m]->I;
    val_t const * const Av = mats[m]->vals;
    memset(bufv, 0, F * F * sizeof(val_t));

    /* compute upper triangular matrix */
    for(idx_t i=0; i < I; ++i) {
      for(idx_t mi=0; mi < F; ++mi) {
        for(idx_t mj=mi; mj < F; ++mj) {
          bufv[mj + (mi*F)] += Av[mi + (i*F)] * Av[mj + (i*F)];
        }
      }
    }

    /* hadamard product */
    for(idx_t mi=0; mi < F; ++mi) {
      for(idx_t mj=mi; mj < F; ++mj) {
        rv[mj + (mi*F)] *= bufv[mj + (mi*F)];
      }
    }
  }

  /* copy to lower triangular matrix */
  for(idx_t i=1; i < F; ++i) {
    for(idx_t j=0; j < i; ++j) {
      rv[j + (i*F)] = rv[i + (j*F)];
    }
  }
}


/* NOTE(review): the entire body of mat_aTa is commented out, leaving a
 * no-op stub -- ret is never written. Confirm whether the BLAS syrk path
 * below should be re-enabled or the function removed. */
void mat_aTa(
  matrix_t const * const A,
  matrix_t * const ret,
  rank_info * const rinfo,
  thd_info * const thds,
  idx_t const nthreads)
{
//  timer_start(&timers[TIMER_ATA]);
//  /* check matrix dimensions */
//  assert(ret->I == ret->J);
//  assert(ret->I == A->J);
//  assert(ret->vals != NULL);
//  assert(A->rowmajor);
//  assert(ret->rowmajor);
//
//  idx_t const I = A->I;
//  idx_t const F = A->J;
//  val_t const * const restrict Av = A->vals;
//
//  char uplo = 'L';
//  char trans = 'N'; /* actually do A * A' due to row-major ordering */
//  splatt_blas_int N = (splatt_blas_int) F;
//  splatt_blas_int K = (splatt_blas_int) I;
//  splatt_blas_int lda = N;
//  splatt_blas_int ldc = N;
//  val_t alpha = 1.;
//  val_t beta = 0.;
//
//  SPLATT_BLAS(syrk)(&uplo, &trans, &N, &K, &alpha, A->vals, &lda, &beta, ret->vals,
//      &ldc);
//
//#ifdef SPLATT_USE_MPI
//  timer_start(&timers[TIMER_MPI_ATA]);
//  timer_start(&timers[TIMER_MPI_COMM]);
//  MPI_Allreduce(MPI_IN_PLACE, ret->vals, F * F, SPLATT_MPI_VAL, MPI_SUM,
//      rinfo->comm_3d);
//  timer_stop(&timers[TIMER_MPI_COMM]);
//  timer_stop(&timers[TIMER_MPI_ATA]);
//#endif
//
//  timer_stop(&timers[TIMER_ATA]);
}


/* C = A * B, dense row-major, tiled (16x16) over the j and k dimensions for
 * cache reuse. NOTE(review): results are accumulated into C with `+=`, so
 * the caller must zero C->vals beforehand -- confirm that all call sites do. */
void mat_matmul(
  matrix_t const * const A,
  matrix_t const * const B,
  matrix_t * const C)
{
  timer_start(&timers[TIMER_MATMUL]);
  /* check dimensions */
  assert(A->J == B->I);
  /* C only needs enough room; its I/J are overwritten below */
  assert(C->I * C->J <= A->I * B->J);

  /* set dimensions */
  C->I = A->I;
  C->J = B->J;

  val_t const * const restrict av = A->vals;
  val_t const * const restrict bv = B->vals;
  val_t * const restrict cv = C->vals;

  idx_t const M  = A->I;
  idx_t const N  = B->J;
  idx_t const Na = A->J;

  /* tiled matrix multiplication */
  idx_t const TILE = 16;
  #pragma omp parallel for schedule(static)
  for(idx_t i=0; i < M; ++i) {
    for(idx_t jt=0; jt < N; jt += TILE) {
      for(idx_t kt=0; kt < Na; kt += TILE) {
        idx_t const JSTOP = SS_MIN(jt+TILE, N);
        for(idx_t j=jt; j < JSTOP; ++j) {
          val_t accum = 0;
          idx_t const KSTOP = SS_MIN(kt+TILE, Na);
          for(idx_t k=kt; k < KSTOP; ++k) {
            accum += av[k + (i*Na)] * bv[j + (k*N)];
          }
          cv[j + (i*N)] += accum;
        }
      }
    }
  }

  timer_stop(&timers[TIMER_MATMUL]);
}


/* Normalize the columns of A with the requested norm (2-norm or max-norm);
 * the per-column scaling factors are returned in lambda. Aborts on an
 * unsupported norm. */
void mat_normalize(
  matrix_t * const A,
  val_t * const restrict lambda,
  splatt_mat_norm const which,
  rank_info * const rinfo,
  thd_info * const thds,
  idx_t const nthreads)
{
  timer_start(&timers[TIMER_MATNORM]);

  splatt_omp_set_num_threads(nthreads);

  switch(which) {
  case MAT_NORM_2:
    p_mat_2norm(A, lambda, rinfo, thds);
    break;
  case MAT_NORM_MAX:
    p_mat_maxnorm(A, lambda, rinfo, thds);
    break;
  default:
    fprintf(stderr, "SPLATT: mat_normalize supports 2 and MAX only.\n");
    abort();
  }
  timer_stop(&timers[TIMER_MATNORM]);
}


/* NOTE(review): the body of mat_solve_normals is commented out, leaving a
 * no-op stub -- rhs is never solved. Confirm whether the Cholesky/GELSS
 * path below should be re-enabled. */
void mat_solve_normals(
  idx_t const  mode,
  idx_t const nmodes,
  matrix_t * * aTa,
  matrix_t * rhs,
  val_t const reg)
{
//  timer_start(&timers[TIMER_INV]);
//
//  /* nfactors */
//  splatt_blas_int N = aTa[0]->J;
//
//
//  p_form_gram(aTa[MAX_NMODES], aTa, mode, nmodes, reg);
//
//  splatt_blas_int info;
//  char uplo = 'L';
//  splatt_blas_int lda = N;
//  splatt_blas_int ldb = N;
//  splatt_blas_int order = N;
//  splatt_blas_int nrhs = (splatt_blas_int) rhs->I;
//
//  val_t * const neqs = aTa[MAX_NMODES]->vals;
//
//  /* Cholesky factorization */
//  bool is_spd = true;
//  SPLATT_BLAS(potrf)(&uplo, &order, neqs, &lda, &info);
//  if(info) {
//    fprintf(stderr, "SPLATT: Gram matrix is not SPD. Trying `GELSS`.\n");
//    is_spd = false;
//  }
//
//  /* Continue with Cholesky */
//  if(is_spd) {
//    /* Solve against rhs */
//    SPLATT_BLAS(potrs)(&uplo, &order, &nrhs, neqs, &lda, rhs->vals, &ldb, &info);
//    if(info) {
//      fprintf(stderr, "SPLATT: DPOTRS returned %d\n", info);
//    }
//  } else {
//    /* restore gram matrix */
//    p_form_gram(aTa[MAX_NMODES], aTa, mode, nmodes, reg);
//
//    splatt_blas_int effective_rank;
//    val_t * conditions = malloc(N * sizeof(*conditions));
//
//    /* query worksize */
//    splatt_blas_int lwork = -1;
//
//    val_t rcond = -1.0f;
//
//    val_t work_query;
//    SPLATT_BLAS(gelss)(&N, &N, &nrhs,
//        neqs, &lda,
//        rhs->vals, &ldb,
//        conditions, &rcond, &effective_rank,
//        &work_query, &lwork, &info);
//    lwork = (splatt_blas_int) work_query;
//
//    /* setup workspace */
//    val_t * work = malloc(lwork * sizeof(*work));
//
//    /* Use an SVD solver */
//    SPLATT_BLAS(gelss)(&N, &N, &nrhs,
//        neqs, &lda,
//        rhs->vals, &ldb,
//        conditions, &rcond, &effective_rank,
//        work, &lwork, &info);
//    if(info) {
//      printf("SPLATT: DGELSS returned %d\n", info);
//    }
//    printf("SPLATT: DGELSS effective rank: %d\n", effective_rank);
//
//    free(conditions);
//    free(work);
//  }
//
//  timer_stop(&timers[TIMER_INV]);
}


/* Form the Hadamard product of the Gram matrices of every mode except
 * `mode` in aTa[MAX_NMODES], then invert it in place with mat_syminv. */
void calc_gram_inv(
  idx_t const mode,
  idx_t const nmodes,
  matrix_t ** aTa)
{
  timer_start(&timers[TIMER_INV]);

  idx_t const rank = aTa[0]->J;

  val_t * const restrict av = aTa[MAX_NMODES]->vals;

  /* ata[MAX_NMODES] = hada(aTa[0], aTa[1], ...)
*/
  for(idx_t x=0; x < rank*rank; ++x) {
    av[x] = 1.;
  }
  /* multiply in every mode's Gram matrix except `mode` itself */
  for(idx_t m=1; m < nmodes; ++m) {
    idx_t const madjust = (mode + m) % nmodes;
    val_t const * const vals = aTa[madjust]->vals;
    for(idx_t x=0; x < rank*rank; ++x) {
      av[x] *= vals[x];
    }
  }

  /* M2 = M2^-1 */
  mat_syminv(aTa[MAX_NMODES]);
  timer_stop(&timers[TIMER_INV]);
}


/* Allocate an uninitialized nrows x ncols dense matrix, row-major by
 * default. Caller frees with mat_free(). */
matrix_t * mat_alloc(
  idx_t const nrows,
  idx_t const ncols)
{
  matrix_t * mat = (matrix_t *) malloc(sizeof(matrix_t));
  mat->I = nrows;
  mat->J = ncols;
  mat->vals = (val_t *) malloc(nrows * ncols * sizeof(val_t));
  mat->rowmajor = 1;
  return mat;
}


/* Allocate a dense matrix filled with random values via fill_rand(). */
matrix_t * mat_rand(
  idx_t const nrows,
  idx_t const ncols)
{
  matrix_t * mat = mat_alloc(nrows, ncols);
  val_t * const vals = mat->vals;

  fill_rand(vals, nrows * ncols);

  return mat;
}


/* Free a matrix allocated with mat_alloc(). */
void mat_free(
  matrix_t * mat)
{
  free(mat->vals);
  free(mat);
}


/* Return a newly-allocated row-major copy of the column-major matrix. */
matrix_t * mat_mkrow(
  matrix_t const * const mat)
{
  assert(mat->rowmajor == 0);

  idx_t const I = mat->I;
  idx_t const J = mat->J;

  matrix_t * row = mat_alloc(I, J);
  val_t       * const restrict rowv = row->vals;
  val_t const * const restrict colv = mat->vals;

  for(idx_t i=0; i < I; ++i) {
    for(idx_t j=0; j < J; ++j) {
      rowv[j + (i*J)] = colv[i + (j*I)];
    }
  }

  return row;
}


/* Return a newly-allocated column-major copy of the row-major matrix. */
matrix_t * mat_mkcol(
  matrix_t const * const mat)
{
  assert(mat->rowmajor == 1);
  idx_t const I = mat->I;
  idx_t const J = mat->J;

  matrix_t * col = mat_alloc(I, J);
  val_t       * const restrict colv = col->vals;
  val_t const * const restrict rowv = mat->vals;

  for(idx_t i=0; i < I; ++i) {
    for(idx_t j=0; j < J; ++j) {
      colv[i + (j*I)] = rowv[j + (i*J)];
    }
  }

  col->rowmajor = 0;

  return col;
}


/* Allocate a CSR sparse matrix with room for `nnz` nonzeros; row pointers,
 * column indices, and values are left uninitialized. */
spmatrix_t * spmat_alloc(
  idx_t const nrows,
  idx_t const ncols,
  idx_t const nnz)
{
  spmatrix_t * mat = (spmatrix_t*) malloc(sizeof(spmatrix_t));
  mat->I = nrows;
  mat->J = ncols;
  mat->nnz = nnz;
  mat->rowptr = (idx_t*) malloc((nrows+1) * sizeof(idx_t));
  mat->colind = (idx_t*) malloc(nnz * sizeof(idx_t));
  mat->vals   = (val_t*) malloc(nnz * sizeof(val_t));
  return mat;
}


/* Free a sparse matrix allocated with spmat_alloc(). */
void spmat_free(
  spmatrix_t * mat)
{
  free(mat->rowptr);
  free(mat->colind);
  free(mat->vals);
  free(mat);
}
/* ---- concatenated file boundary: wave_energy.c ---- */
/*********************************************************************************/ /* */ /* Animation of wave equation in a planar domain */ /* */ /* N. Berglund, december 2012, may 2021 */ /* */ /* UPDATE 24/04: distinction between damping and "elasticity" parameters */ /* UPDATE 27/04: new billiard shapes, bug in color scheme fixed */ /* UPDATE 28/04: code made more efficient, with help of Marco Mancini */ /* */ /* Feel free to reuse, but if doing so it would be nice to drop a */ /* line to nils.berglund@univ-orleans.fr - Thanks! */ /* */ /* compile with */ /* gcc -o wave_billiard wave_billiard.c */ /* -L/usr/X11R6/lib -ltiff -lm -lGL -lGLU -lX11 -lXmu -lglut -O3 -fopenmp */ /* */ /* OMP acceleration may be more effective after executing */ /* export OMP_NUM_THREADS=2 in the shell before running the program */ /* */ /* To make a video, set MOVIE to 1 and create subfolder tif_wave */ /* It may be possible to increase parameter PAUSE */ /* */ /* create movie using */ /* ffmpeg -i wave.%05d.tif -vcodec libx264 wave.mp4 */ /* */ /*********************************************************************************/ /*********************************************************************************/ /* */ /* NB: The algorithm used to simulate the wave equation is highly paralellizable */ /* One could make it much faster by using a GPU */ /* */ /*********************************************************************************/ #include <math.h> #include <string.h> #include <GL/glut.h> #include <GL/glu.h> #include <unistd.h> #include <sys/types.h> #include <tiffio.h> /* Sam Leffler's libtiff library. 
*/ #include <omp.h> #define MOVIE 0 /* set to 1 to generate movie */ #define WINWIDTH 1280 /* window width */ #define WINHEIGHT 720 /* window height */ #define NX 1280 /* number of grid points on x axis */ #define NY 720 /* number of grid points on y axis */ // #define XMIN -1.777777778 // #define XMAX 1.777777778 /* x interval */ // #define YMIN -1.0 // #define YMAX 1.0 /* y interval for 9/16 aspect ratio */ #define XMIN -2.0 #define XMAX 2.0 /* x interval */ #define YMIN -1.125 #define YMAX 1.125 /* y interval for 9/16 aspect ratio */ #define JULIA_SCALE 1.0 /* scaling for Julia sets */ /* Choice of the billiard table */ #define B_DOMAIN 20 /* choice of domain shape, see list in global_pdes.c */ #define B_DOMAIN_B 20 /* choice of domain shape, see list in global_pdes.c */ #define CIRCLE_PATTERN 2 /* pattern of circles, see list in global_pdes.c */ #define CIRCLE_PATTERN_B 11 /* pattern of circles, see list in global_pdes.c */ #define P_PERCOL 0.25 /* probability of having a circle in C_RAND_PERCOL arrangement */ #define NPOISSON 300 /* number of points for Poisson C_RAND_POISSON arrangement */ #define RANDOM_POLY_ANGLE 1 /* set to 1 to randomize angle of polygons */ #define LAMBDA 0.75 /* parameter controlling the dimensions of domain */ #define MU 0.03 /* parameter controlling the dimensions of domain */ #define MUB 0.03 /* parameter controlling the dimensions of domain */ #define NPOLY 3 /* number of sides of polygon */ #define APOLY 1.0 /* angle by which to turn polygon, in units of Pi/2 */ #define MDEPTH 4 /* depth of computation of Menger gasket */ #define MRATIO 3 /* ratio defining Menger gasket */ #define MANDELLEVEL 1000 /* iteration level for Mandelbrot set */ #define MANDELLIMIT 10.0 /* limit value for approximation of Mandelbrot set */ #define FOCI 1 /* set to 1 to draw focal points of ellipse */ #define NGRIDX 15 /* number of grid point for grid of disks */ #define NGRIDY 20 /* number of grid point for grid of disks */ #define X_SHOOTER -0.2 #define 
Y_SHOOTER -0.6 #define X_TARGET 0.4 #define Y_TARGET 0.7 /* shooter and target positions in laser fight */ #define ISO_XSHIFT_LEFT -1.65 #define ISO_XSHIFT_RIGHT 0.4 #define ISO_YSHIFT_LEFT -0.05 #define ISO_YSHIFT_RIGHT -0.05 #define ISO_SCALE 0.85 /* coordinates for isospectral billiards */ /* You can add more billiard tables by adapting the functions */ /* xy_in_billiard and draw_billiard below */ /* Physical parameters of wave equation */ #define TWOSPEEDS 0 /* set to 1 to replace hardcore boundary by medium with different speed */ #define OSCILLATE_LEFT 0 /* set to 1 to add oscilating boundary condition on the left */ #define OSCILLATE_TOPBOT 0 /* set to 1 to enforce a planar wave on top and bottom boundary */ #define OMEGA 0.0 /* frequency of periodic excitation */ #define AMPLITUDE 0.025 /* amplitude of periodic excitation */ #define COURANT 0.02 /* Courant number */ #define COURANTB 0.004 /* Courant number in medium B */ #define GAMMA 0.0 /* damping factor in wave equation */ #define GAMMAB 1.0e-8 /* damping factor in wave equation */ #define GAMMA_SIDES 1.0e-4 /* damping factor on boundary */ #define GAMMA_TOPBOT 1.0e-6 /* damping factor on boundary */ #define KAPPA 0.0 /* "elasticity" term enforcing oscillations */ #define KAPPA_SIDES 5.0e-4 /* "elasticity" term on absorbing boundary */ #define KAPPA_TOPBOT 0.0 /* "elasticity" term on absorbing boundary */ /* The Courant number is given by c*DT/DX, where DT is the time step and DX the lattice spacing */ /* The physical damping coefficient is given by GAMMA/(DT)^2 */ /* Increasing COURANT speeds up the simulation, but decreases accuracy */ /* For similar wave forms, COURANT^2*GAMMA should be kept constant */ /* Boundary conditions, see list in global_pdes.c */ // #define B_COND 2 #define B_COND 3 /* Parameters for length and speed of simulation */ #define NSTEPS 3750 /* number of frames of movie */ #define NVID 25 /* number of iterations between images displayed on screen */ #define NSEG 100 /* number of 
segments of boundary */ #define INITIAL_TIME 200 /* time after which to start saving frames */ #define COMPUTE_ENERGIES 1 /* set to 1 to compute and print energies */ #define BOUNDARY_WIDTH 2 /* width of billiard boundary */ #define PAUSE 1000 /* number of frames after which to pause */ #define PSLEEP 1 /* sleep time during pause */ #define SLEEP1 1 /* initial sleeping time */ #define SLEEP2 1 /* final sleeping time */ #define END_FRAMES 100 /* number of still frames at end of movie */ /* Parameters of initial condition */ #define INITIAL_AMP 0.2 /* amplitude of initial condition */ #define INITIAL_VARIANCE 0.002 /* variance of initial condition */ #define INITIAL_WAVELENGTH 0.1 /* wavelength of initial condition */ /* Plot type, see list in global_pdes.c */ #define PLOT 1 /* Color schemes */ #define COLOR_PALETTE 14 /* Color palette, see list in global_pdes.c */ #define BLACK 1 /* background */ #define COLOR_SCHEME 3 /* choice of color scheme, see list in global_pdes.c */ #define SCALE 0 /* set to 1 to adjust color scheme to variance of field */ #define SLOPE 10.0 /* sensitivity of color on wave amplitude */ #define ATTENUATION 0.0 /* exponential attenuation coefficient of contrast with time */ #define E_SCALE 500.0 /* scaling factor for energy representation */ #define COLORHUE 260 /* initial hue of water color for scheme C_LUM */ #define COLORDRIFT 0.0 /* how much the color hue drifts during the whole simulation */ #define LUMMEAN 0.5 /* amplitude of luminosity variation for scheme C_LUM */ #define LUMAMP 0.3 /* amplitude of luminosity variation for scheme C_LUM */ #define HUEMEAN 220.0 /* mean value of hue for color scheme C_HUE */ #define HUEAMP -220.0 /* amplitude of variation of hue for color scheme C_HUE */ #define DRAW_COLOR_SCHEME 0 /* set to 1 to plot the color scheme */ #define COLORBAR_RANGE 4.0 /* scale of color scheme bar */ #define COLORBAR_RANGE_B 12.0 /* scale of color scheme bar for 2nd part */ #define ROTATE_COLOR_SCHEME 0 /* set to 1 to draw 
color scheme horizontally */

/* For debugging purposes only */
#define FLOOR 0         /* set to 1 to limit wave amplitude to VMAX */
#define VMAX 5.0        /* max value of wave amplitude */

#include "global_pdes.c"        /* constants and global variables */
#include "sub_wave.c"           /* common functions for wave_billiard, heat and schrodinger */
#include "wave_common.c"        /* common functions for wave_billiard, wave_comparison, etc */
#include "sub_wave_comp.c"      /* some functions specific to wave_comparison */

double courant2, courantb2;  /* Courant parameters squared */

/* Sum the energy of column i over the lower half of the grid (the wave is
 * simulated on j in [0, NY/2)). */
double compute_energy_x(int i, double *phi[NX], double *psi[NX], short int *xy_in[NX])
/* compute energy in column i */
{
    double energy = 0.0;
    int j;

    for (j=0; j<NY/2; j++)
        energy += compute_energy(phi, psi, xy_in, i, j);

    return(energy);
}

/* Map an energy value to a y screen coordinate on a logarithmic scale.
 * NOTE(review): `first` is never cleared, so ymid/yscale are recomputed on
 * every call -- harmless since they are constants, but confirm intent. */
double logscale_y(double energy)
{
    static double ymid, yscale;
    static int first = 1;

    if (first)
    {
        ymid = 0.5*(YMIN + YMAX);
        yscale = (YMAX - YMIN)*0.5/2.25;
    }
    return(ymid + yscale*(1.0 + 0.2*log(energy)));
//     return(ymid + 0.5*(1.0 + 0.2*log(energy)));
}

/* Draw the wave field in the lower half of the window and, in the upper
 * half, the column energies on both a log scale (blue) and a linear scale
 * (red), with axis lines and labels. */
void draw_wave_energy(double *phi[NX], double *psi[NX], short int *xy_in[NX], double scale, int time)
/* draw the field */
{
    int i, j, iplus, iminus, jplus, jminus;
    double rgb[3], xy[2], x, y, x1, y1, x2, y2, velocity, energy, gradientx2, gradienty2, pos[2], escale;
    double energies[NX], ymid;
    static double dtinverse = ((double)NX)/(COURANT*(XMAX-XMIN)), dx = (XMAX-XMIN)/((double)NX);
    char message[50];

    ymid = 0.5*(YMIN + YMAX);

    glBegin(GL_QUADS);

//    printf("dtinverse = %.5lg\n", dtinverse);

    for (i=0; i<NX; i++)
        for (j=0; j<NY/2; j++)
        {
            if (((TWOSPEEDS)&&(xy_in[i][j] != 2))||(xy_in[i][j] == 1))
            {
                switch (PLOT) {
                    case (P_AMPLITUDE):
                    {
                        /* make wave luminosity larger inside obstacles */
                        if (!(xy_in[i][j])) color_scheme_lum(COLOR_SCHEME, phi[i][j], scale, time, 0.7, rgb);
                        else color_scheme(COLOR_SCHEME, phi[i][j], scale, time, rgb);
                        break;
                    }
                    case (P_ENERGY):
                    {
                        energy = compute_energy(phi, psi, xy_in, i, j);
                        /* adjust energy to color palette */
                        if (COLOR_PALETTE >= COL_TURBO) color_scheme_asym(COLOR_SCHEME, energy, scale, time, rgb);
                        else color_scheme(COLOR_SCHEME, energy, scale, time, rgb);
                        break;
                    }
                    case (P_MIXED):
                    {
                        /* NOTE(review): j < NY/2 in this loop, so the first
                         * branch is dead code -- confirm whether NY/4 was
                         * intended */
                        if (j > NY/2) color_scheme(COLOR_SCHEME, phi[i][j], scale, time, rgb);
                        else color_scheme(COLOR_SCHEME, compute_energy(phi, psi, xy_in, i, j), scale, time, rgb);
                        break;
                    }
                }
                glColor3f(rgb[0], rgb[1], rgb[2]);

                glVertex2i(i, j);
                glVertex2i(i+1, j);
                glVertex2i(i+1, j+1);
                glVertex2i(i, j+1);
            }
        }
    glEnd ();

    /* compute and plot energies */
    for (i=0; i<NX; i++) energies[i] = compute_energy_x(i, phi, psi, xy_in);

    /* black out the upper half of the window for the energy plot */
    glColor3f(0.0, 0.0, 0.0);
    glBegin(GL_QUADS);
    glVertex2i(0, NY/2);
    glVertex2i(NX, NY/2);
    glVertex2i(NX, NY);
    glVertex2i(0, NY);
    glEnd();

    /* log coordinate lines (decades in white, intermediate in grey) */
    glLineWidth(1);
    glColor3f(1.0, 1.0, 1.0);
    for (i=-2; i<3; i++)
    {
        energy = pow(10.0, (double)i);
        y = logscale_y(energy);
        glBegin(GL_LINE_STRIP);
        x = XMIN;
        xy_to_pos(x, y, pos);
        glVertex2d(pos[0], pos[1]);
        x = XMAX;
        xy_to_pos(x, y, pos);
        glVertex2d(pos[0], pos[1]);
        glEnd();
    }
    glColor3f(0.5, 0.5, 0.5);
    for (i=-2; i<3; i++)
    {
        for (j=2; j<10; j++)
        {
            energy = (double)j*pow(10.0, (double)i);
            y = logscale_y(energy);
            glBegin(GL_LINE_STRIP);
            x = XMIN;
            xy_to_pos(x, y, pos);
            glVertex2d(pos[0], pos[1]);
            x = XMAX;
            xy_to_pos(x, y, pos);
            glVertex2d(pos[0], pos[1]);
            glEnd();
        }
    }

    /* legend */
    erase_area_hsl(XMAX - 0.4, YMAX - 0.1, 0.35, 0.07, 0.0, 1.0, 0.0);
    erase_area_hsl(XMAX - 0.4, YMAX - 0.2, 0.35, 0.07, 0.0, 1.0, 0.0);
    sprintf(message, "Energy (log scale)");
    glColor3f(0.0, 0.5, 1.0);
    xy_to_pos(XMAX - 0.7, YMAX - 0.13, pos);
    write_text(pos[0], pos[1], message);
    sprintf(message, "Energy (linear scale)");
    glColor3f(1.0, 0.0, 0.0);
    xy_to_pos(XMAX - 0.7, YMAX - 0.23, pos);
    write_text(pos[0], pos[1], message);

    /* log of energy */
    glLineWidth(3);
    glColor3f(0.0, 0.5, 1.0);
    glBegin(GL_LINE_STRIP);
    for (i=0; i<NX; i++)
    {
        x = XMIN + ((double)i)*(XMAX-XMIN)/((double)NX);
        y = logscale_y(energies[i]);
        if (y < ymid) y = ymid;   /* clip the curve to the upper half */
        xy_to_pos(x, y, pos);
        glVertex2d(pos[0], pos[1]);
    }
    glEnd();

    /* y axis labels */
    for (i=-2; i<3; i++)
    {
        y = logscale_y(pow(10.0, (double)i));
        erase_area_hsl(XMIN + 0.06, y + 0.025, 0.12, 0.02, 0.0, 1.0, 0.0);
        sprintf(message, "%d dB", (i-2)*10);
        xy_to_pos(XMIN + 0.02, y + 0.01, pos);
        glColor3f(0.7, 0.7, 0.7);
        write_text_fixedwidth(pos[0], pos[1], message);
    }

    /* energy on a linear scale */
    glColor3f(1.0, 0.0, 0.0);
    escale = 0.01;
    glBegin(GL_LINE_STRIP);
    for (i=0; i<NX; i++)
    {
        x = XMIN + ((double)i)*(XMAX-XMIN)/((double)NX);
        y = ymid + escale*energies[i];
        xy_to_pos(x, y, pos);
        glVertex2d(pos[0], pos[1]);
    }
    glEnd();

    /* draw horizontal mid line */
    glColor3f(1.0, 1.0, 1.0);
    glBegin(GL_LINE_STRIP);
    xy_to_pos(XMIN, 0.5*(YMIN+YMAX), pos);
    glVertex2d(pos[0], pos[1]);
    xy_to_pos(XMAX, 0.5*(YMIN+YMAX), pos);
    glVertex2d(pos[0], pos[1]);
    glEnd();
}

/*********************/
/* animation part    */
/*********************/

/* Older single-loop time step, superseded by evolve_wave_half below but kept
 * for reference. */
void evolve_wave_half_old(double *phi_in[NX], double *psi_in[NX], double *phi_out[NX], double *psi_out[NX],
                          short int *xy_in[NX])
/* time step of field evolution */
/* phi is value of field at time t, psi at time t-1 */
{
    int i, j, iplus, iminus, jplus, jminus, jmid = NY/2;
    double delta, x, y, c, cc, gamma;
    static long time = 0;

    time++;

    #pragma omp parallel for private(i,j,iplus,iminus,jplus,jminus,delta,x,y,c,cc,gamma)
    for (i=0; i<NX; i++){
        for (j=0; j<NY/2; j++){
            /* select wave speed and damping of the medium at (i,j) */
            if (xy_in[i][j])
            {
                c = COURANT;
                cc = courant2;
                gamma = GAMMA;
            }
            else if (TWOSPEEDS)
            {
                c = COURANTB;
                cc = courantb2;
                gamma = GAMMAB;
            }

            if (((TWOSPEEDS)&&(xy_in[i][j] != 2))||(xy_in[i][j] == 1)){
                /* discretized Laplacian for various boundary conditions */
                if ((B_COND == BC_DIRICHLET)||(B_COND == BC_ABSORBING)||(B_COND == BC_ABS_REFLECT))
                {
                    iplus = (i+1);   if (iplus == NX) iplus = NX-1;
                    iminus = (i-1);  if (iminus == -1) iminus = 0;
                    jplus = (j+1);   if (jplus == jmid) jplus = jmid-1;
                    jminus = (j-1);  if (jminus == -1) jminus = 0;
                }
                else if (B_COND == BC_PERIODIC)
                {
                    iplus = (i+1) % NX;
                    iminus = (i-1) % NX;
                    if (iminus < 0) iminus += NX;
                    jplus = (j+1) % jmid;
                    jminus = (j-1) % jmid;
                    if
(jminus < 0) jminus += jmid;
                }
                else if (B_COND == BC_VPER_HABS)
                {
                    iplus = (i+1);   if (iplus == NX) iplus = NX-1;
                    iminus = (i-1);  if (iminus == -1) iminus = 0;
                    jplus = (j+1);   if (jplus >= jmid) jplus -= jmid;
                    jminus = (j-1);  if (jminus < 0) jminus += jmid;
                }

                /* imposing linear wave on top and bottom by making Laplacian 1d */
                /* NOTE(review): j < NY/2 here, so the j == NY-1 test can
                 * never fire -- presumably jmid-1 was intended; confirm */
                if (OSCILLATE_TOPBOT)
                {
                    if (j == NY-1) jminus = NY-1;
                    else if (j == 0) jplus = 0;
                }

                delta = phi_in[iplus][j] + phi_in[iminus][j] + phi_in[i][jplus] + phi_in[i][jminus] - 4.0*phi_in[i][j];

                x = phi_in[i][j];
                y = psi_in[i][j];

                /* evolve phi */
                if ((B_COND == BC_PERIODIC)||(B_COND == BC_DIRICHLET))
                    phi_out[i][j] = -y + 2*x + cc*delta - KAPPA*x - gamma*(x-y);
                else if ((B_COND == BC_ABSORBING)||(B_COND == BC_ABS_REFLECT))
                {
                    if ((i>0)&&(i<NX-1)&&(j>0)&&(j<NY-1))
                        phi_out[i][j] = -y + 2*x + cc*delta - KAPPA*x - gamma*(x-y);
                    /* upper border */
                    else if (j==NY-1)
                        phi_out[i][j] = x - c*(x - phi_in[i][NY-2]) - KAPPA_TOPBOT*x - GAMMA_TOPBOT*(x-y);
                    /* lower border */
                    else if (j==0)
                        phi_out[i][j] = x - c*(x - phi_in[i][1]) - KAPPA_TOPBOT*x - GAMMA_TOPBOT*(x-y);
                    /* right border (separate `if`: corners take the side rule) */
                    if (i==NX-1)
                        phi_out[i][j] = x - c*(x - phi_in[NX-2][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y);
                    /* left border */
                    else if (i==0)
                        phi_out[i][j] = x - c*(x - phi_in[1][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y);
                }
                else if (B_COND == BC_VPER_HABS)
                {
                    if ((i>0)&&(i<NX-1))
                        phi_out[i][j] = -y + 2*x + cc*delta - KAPPA*x - gamma*(x-y);
                    /* right border */
                    else if (i==NX-1)
                        phi_out[i][j] = x - c*(x - phi_in[NX-2][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y);
                    /* left border */
                    else if (i==0)
                        phi_out[i][j] = x - c*(x - phi_in[1][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y);
                }

                /* add oscillating boundary condition on the left */
                if ((i == 0)&&(OSCILLATE_LEFT)) phi_out[i][j] = AMPLITUDE*cos((double)time*OMEGA);

                psi_out[i][j] = x;

                if (FLOOR)
                {
                    if (phi_out[i][j] > VMAX) phi_out[i][j] = VMAX;
                    if (phi_out[i][j] < -VMAX) phi_out[i][j] = -VMAX;
                    if (psi_out[i][j] > VMAX) psi_out[i][j] = VMAX;
                    if (psi_out[i][j] < -VMAX) psi_out[i][j] = -VMAX;
                }
            }
        }
    }
//    printf("phi(0,0) = %.3lg, psi(0,0) = %.3lg\n", phi[NX/2][NY/2], psi[NX/2][NY/2]);
}

/* Half time step of the wave equation on the lower half-grid (j < NY/2).
 * The bulk is updated first, then each of the four borders according to
 * B_COND. Per-cell wave speed/damping tables are built once on first call. */
void evolve_wave_half(double *phi_in[NX], double *psi_in[NX], double *phi_out[NX], double *psi_out[NX],
                      short int *xy_in[NX])
/* time step of field evolution */
/* phi is value of field at time t, psi at time t-1 */
{
    int i, j, iplus, iminus, jplus, jminus, jmid = NY/2;
    double delta, x, y, c, cc, gamma;
    static long time = 0;
    static double tc[NX][NY/2], tcc[NX][NY/2], tgamma[NX][NY/2];
    static short int first = 1;

    time++;

    /* initialize tables with wave speeds and dissipation */
    if (first)
    {
        for (i=0; i<NX; i++){
            for (j=0; j<jmid; j++){
                if (xy_in[i][j])
                {
                    tc[i][j] = COURANT;
                    tcc[i][j] = courant2;
                    tgamma[i][j] = GAMMA;
                }
                else if (TWOSPEEDS)
                {
                    tc[i][j] = COURANTB;
                    tcc[i][j] = courantb2;
                    tgamma[i][j] = GAMMAB;
                }
            }
        }
        first = 0;
    }

    #pragma omp parallel for private(i,j,iplus,iminus,jplus,jminus,delta,x,y,c,cc,gamma)
    /* evolution in the bulk */
    for (i=1; i<NX-1; i++){
        for (j=1; j<jmid-1; j++){
            if ((TWOSPEEDS)||(xy_in[i][j] != 0)){
                x = phi_in[i][j];
                y = psi_in[i][j];

                /* discretized Laplacian */
                delta = phi_in[i+1][j] + phi_in[i-1][j] + phi_in[i][j+1] + phi_in[i][j-1] - 4.0*x;

                /* evolve phi */
                phi_out[i][j] = -y + 2*x + tcc[i][j]*delta - KAPPA*x - tgamma[i][j]*(x-y);
                psi_out[i][j] = x;
            }
        }
    }

    /* left boundary */
    if (OSCILLATE_LEFT) for (j=1; j<jmid; j++) phi_out[0][j] = AMPLITUDE*cos((double)time*OMEGA);
    else for (j=1; j<jmid-1; j++){
        if ((TWOSPEEDS)||(xy_in[0][j] != 0)){
            x = phi_in[0][j];
            y = psi_in[0][j];

            switch (B_COND) {
                case (BC_DIRICHLET):
                {
                    delta = phi_in[1][j] + phi_in[0][j+1] + phi_in[0][j-1] - 3.0*x;
                    phi_out[0][j] = -y + 2*x + tcc[0][j]*delta - KAPPA*x - tgamma[0][j]*(x-y);
                    break;
                }
                case (BC_PERIODIC):
                {
                    delta = phi_in[1][j] + phi_in[NX-1][j] + phi_in[0][j+1] + phi_in[0][j-1] - 4.0*x;
                    phi_out[0][j] = -y + 2*x + tcc[0][j]*delta - KAPPA*x - tgamma[0][j]*(x-y);
                    break;
                }
                case (BC_ABSORBING):
                {
                    delta = phi_in[1][j] + phi_in[0][j+1] + phi_in[0][j-1] - 3.0*x;
phi_out[0][j] = x - tc[0][j]*(x - phi_in[1][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y); break; } case (BC_VPER_HABS): { delta = phi_in[1][j] + phi_in[0][j+1] + phi_in[0][j-1] - 3.0*x; phi_out[0][j] = x - tc[0][j]*(x - phi_in[1][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y); break; } } psi_out[0][j] = x; } } /* right boundary */ for (j=1; j<jmid-1; j++){ if ((TWOSPEEDS)||(xy_in[NX-1][j] != 0)){ x = phi_in[NX-1][j]; y = psi_in[NX-1][j]; switch (B_COND) { case (BC_DIRICHLET): { delta = phi_in[NX-2][j] + phi_in[NX-1][j+1] + phi_in[NX-1][j-1] - 3.0*x; phi_out[NX-1][j] = -y + 2*x + tcc[NX-1][j]*delta - KAPPA*x - tgamma[NX-1][j]*(x-y); break; } case (BC_PERIODIC): { delta = phi_in[NX-2][j] + phi_in[0][j] + phi_in[NX-1][j+1] + phi_in[NX-1][j-1] - 4.0*x; phi_out[NX-1][j] = -y + 2*x + tcc[NX-1][j]*delta - KAPPA*x - tgamma[NX-1][j]*(x-y); break; } case (BC_ABSORBING): { delta = phi_in[NX-2][j] + phi_in[NX-1][j+1] + phi_in[NX-1][j-1] - 3.0*x; phi_out[NX-1][j] = x - tc[NX-1][j]*(x - phi_in[NX-2][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y); break; } case (BC_VPER_HABS): { delta = phi_in[NX-2][j] + phi_in[NX-1][j+1] + phi_in[NX-1][j-1] - 3.0*x; phi_out[NX-1][j] = x - tc[NX-1][j]*(x - phi_in[NX-2][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y); break; } } psi_out[NX-1][j] = x; } } /* top boundary */ for (i=0; i<NX; i++){ if ((TWOSPEEDS)||(xy_in[i][jmid-1] != 0)){ x = phi_in[i][jmid-1]; y = psi_in[i][jmid-1]; switch (B_COND) { case (BC_DIRICHLET): { iplus = i+1; if (iplus == NX) iplus = NX-1; iminus = i-1; if (iminus == -1) iminus = 0; delta = phi_in[iplus][jmid-1] + phi_in[iminus][jmid-1] + phi_in[i][jmid-2] - 3.0*x; phi_out[i][jmid-1] = -y + 2*x + tcc[i][jmid-1]*delta - KAPPA*x - tgamma[i][jmid-1]*(x-y); break; } case (BC_PERIODIC): { iplus = (i+1) % NX; iminus = (i-1) % NX; if (iminus < 0) iminus += NX; delta = phi_in[iplus][jmid-1] + phi_in[iminus][jmid-1] + phi_in[i][jmid-2] + phi_in[i][0] - 4.0*x; phi_out[i][jmid-1] = -y + 2*x + tcc[i][jmid-1]*delta - KAPPA*x - tgamma[i][jmid-1]*(x-y); break; } 
case (BC_ABSORBING): { iplus = (i+1); if (iplus == NX) iplus = NX-1; iminus = (i-1); if (iminus == -1) iminus = 0; delta = phi_in[iplus][jmid-1] + phi_in[iminus][jmid-1] + phi_in[i][jmid-2] - 3.0*x; phi_out[i][jmid-1] = x - tc[i][jmid-1]*(x - phi_in[i][jmid-2]) - KAPPA_TOPBOT*x - GAMMA_TOPBOT*(x-y); break; } case (BC_VPER_HABS): { iplus = (i+1); if (iplus == NX) iplus = NX-1; iminus = (i-1); if (iminus == -1) iminus = 0; delta = phi_in[iplus][jmid-1] + phi_in[iminus][jmid-1] + phi_in[i][jmid-2] + phi_in[i][0] - 4.0*x; if (i==0) phi_out[0][jmid-1] = x - tc[0][jmid-1]*(x - phi_in[1][jmid-1]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y); else phi_out[i][jmid-1] = -y + 2*x + tcc[i][jmid-1]*delta - KAPPA*x - tgamma[i][jmid-1]*(x-y); // delta = phi_in[1][j] + phi_in[0][j+1] + phi_in[0][j-1] - 3.0*x; // phi_out[0][j] = x - tc[0][j]*(x - phi_in[1][j]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y); break; } } psi_out[i][jmid-1] = x; } } /* bottom boundary */ for (i=0; i<NX; i++){ if ((TWOSPEEDS)||(xy_in[i][0] != 0)){ x = phi_in[i][0]; y = psi_in[i][0]; switch (B_COND) { case (BC_DIRICHLET): { iplus = i+1; if (iplus == NX) iplus = NX-1; iminus = i-1; if (iminus == -1) iminus = 0; delta = phi_in[iplus][0] + phi_in[iminus][0] + phi_in[i][1] - 3.0*x; phi_out[i][0] = -y + 2*x + tcc[i][0]*delta - KAPPA*x - tgamma[i][0]*(x-y); break; } case (BC_PERIODIC): { iplus = (i+1) % NX; iminus = (i-1) % NX; if (iminus < 0) iminus += NX; delta = phi_in[iplus][0] + phi_in[iminus][0] + phi_in[i][1] + phi_in[i][jmid-1] - 4.0*x; phi_out[i][0] = -y + 2*x + tcc[i][0]*delta - KAPPA*x - tgamma[i][0]*(x-y); break; } case (BC_ABSORBING): { iplus = (i+1); if (iplus == NX) iplus = NX-1; iminus = (i-1); if (iminus == -1) iminus = 0; delta = phi_in[iplus][0] + phi_in[iminus][0] + phi_in[i][1] - 3.0*x; phi_out[i][0] = x - tc[i][0]*(x - phi_in[i][1]) - KAPPA_TOPBOT*x - GAMMA_TOPBOT*(x-y); break; } case (BC_VPER_HABS): { iplus = (i+1); if (iplus == NX) iplus = NX-1; iminus = (i-1); if (iminus == -1) iminus = 0; delta = 
phi_in[iplus][0] + phi_in[iminus][0] + phi_in[i][1] + phi_in[i][jmid-1] - 4.0*x;
                        /* first column keeps the horizontally-absorbing update; elsewhere use the standard wave scheme */
                        if (i==0) phi_out[0][0] = x - tc[0][0]*(x - phi_in[1][0]) - KAPPA_SIDES*x - GAMMA_SIDES*(x-y);
                        else phi_out[i][0] = -y + 2*x + tcc[i][0]*delta - KAPPA*x - tgamma[i][0]*(x-y);
                        break;
                    }
                }
                /* psi stores the field value of the previous time step */
                psi_out[i][0] = x;
            }
        }

        /* add oscillating boundary condition on the left corners */
        /* NOTE(review): this block sits AFTER the bottom-boundary for-loop, so i == NX
           here and (i == 0) can never be true -- the oscillating condition is dead code.
           It most likely belongs inside the loop, or should test a fixed index 0
           directly; confirm the intended behavior before changing it. */
        if ((i == 0)&&(OSCILLATE_LEFT))
        {
            phi_out[i][0] = AMPLITUDE*cos((double)time*OMEGA);
            phi_out[i][jmid-1] = AMPLITUDE*cos((double)time*OMEGA);
        }

        /* for debugging purposes/if there is a risk of blow-up */
        /* clamp both fields to [-VMAX, VMAX] inside the domain */
        if (FLOOR) for (i=0; i<NX; i++){
            for (j=0; j<jmid; j++){
                if (xy_in[i][j] != 0)
                {
                    if (phi_out[i][j] > VMAX) phi_out[i][j] = VMAX;
                    if (phi_out[i][j] < -VMAX) phi_out[i][j] = -VMAX;
                    if (psi_out[i][j] > VMAX) psi_out[i][j] = VMAX;
                    if (psi_out[i][j] < -VMAX) psi_out[i][j] = -VMAX;
                }
            }
        }
//     printf("phi(0,0) = %.3lg, psi(0,0) = %.3lg\n", phi[NX/2][NY/2], psi[NX/2][NY/2]);
}

void evolve_wave(double *phi[NX], double *psi[NX], double *phi_tmp[NX], double *psi_tmp[NX],
                 short int *xy_in[NX])
/* time step of field evolution */
/* phi is value of field at time t, psi at time t-1 */
{
    /* one full time step = two half steps, ping-ponging between (phi,psi)
       and the (phi_tmp,psi_tmp) buffers so no extra copy is needed */
//     evolve_wave_half_old(phi, psi, phi_tmp, psi_tmp, xy_in);
//     evolve_wave_half_old(phi_tmp, psi_tmp, phi, psi, xy_in);
    evolve_wave_half(phi, psi, phi_tmp, psi_tmp, xy_in);
    evolve_wave_half(phi_tmp, psi_tmp, phi, psi, xy_in);
}

void animation()
{
    double time, scale, energies[6], top_energy, bottom_energy;
    double *phi[NX], *psi[NX], *phi_tmp[NX], *psi_tmp[NX];
    short int *xy_in[NX];
    int i, j, s;

    /* Since NX and NY are big, it seemed wiser to use some memory allocation here */
    for (i=0; i<NX; i++)
    {
        phi[i] = (double *)malloc(NY*sizeof(double));
        psi[i] = (double *)malloc(NY*sizeof(double));
        phi_tmp[i] = (double *)malloc(NY*sizeof(double));
        psi_tmp[i] = (double *)malloc(NY*sizeof(double));
        xy_in[i] = (short int *)malloc(NY*sizeof(short int));
    }

    /* initialise positions and radii of circles */
    printf("initializing circle configuration\n");
    if ((B_DOMAIN ==
D_CIRCLES)||(B_DOMAIN_B == D_CIRCLES)) init_circle_config_energy(circles); else if (B_DOMAIN == D_POLYGONS) init_polygon_config(polygons); courant2 = COURANT*COURANT; courantb2 = COURANTB*COURANTB; /* initialize wave with a drop at one point, zero elsewhere */ // init_wave_flat_comp(phi, psi, xy_in); int_planar_wave_comp(XMIN + 0.015, 0.0, phi, psi, xy_in); // int_planar_wave_comp(XMIN + 0.5, 0.0, phi, psi, xy_in); printf("initializing wave\n"); // int_planar_wave_comp(XMIN + 0.1, 0.0, phi, psi, xy_in); // int_planar_wave_comp(XMIN + 1.0, 0.0, phi, psi, xy_in); // init_wave(-1.5, 0.0, phi, psi, xy_in); // init_wave(0.0, 0.0, phi, psi, xy_in); /* add a drop at another point */ // add_drop_to_wave(1.0, 0.7, 0.0, phi, psi); // add_drop_to_wave(1.0, -0.7, 0.0, phi, psi); // add_drop_to_wave(1.0, 0.0, -0.7, phi, psi); blank(); glColor3f(0.0, 0.0, 0.0); printf("drawing wave\n"); draw_wave_energy(phi, psi, xy_in, 1.0, 0); printf("drawing billiard\n"); draw_billiard_half(B_DOMAIN, CIRCLE_PATTERN, 0); glutSwapBuffers(); sleep(SLEEP1); for (i=0; i<=INITIAL_TIME + NSTEPS; i++) { //printf("%d\n",i); /* compute the variance of the field to adjust color scheme */ /* the color depends on the field divided by sqrt(1 + variance) */ if (SCALE) { scale = sqrt(1.0 + compute_variance(phi,psi, xy_in)); // printf("Scaling factor: %5lg\n", scale); } else scale = 1.0; draw_wave_energy(phi, psi, xy_in, scale, i); draw_billiard_half(B_DOMAIN, CIRCLE_PATTERN, 0); for (j=0; j<NVID; j++) { evolve_wave(phi, psi, phi_tmp, psi_tmp, xy_in); // if (i % 10 == 9) oscillate_linear_wave(0.2*scale, 0.15*(double)(i*NVID + j), -1.5, YMIN, -1.5, YMAX, phi, psi); } glutSwapBuffers(); if (MOVIE) { if (i >= INITIAL_TIME) save_frame(); else printf("Initial phase time %i of %i\n", i, INITIAL_TIME); /* it seems that saving too many files too fast can cause trouble with the file system */ /* so this is to make a pause from time to time - parameter PAUSE may need adjusting */ if (i % PAUSE == PAUSE - 1) { 
printf("Making a short pause\n");
                sleep(PSLEEP);
                /* s receives system()'s status; presumably assigned only to
                   silence warn_unused_result -- the value is never checked */
                s = system("mv wave*.tif tif_wave/");
            }
        }
    }

    /* flush the remaining frames and move them to the output directory */
    if (MOVIE)
    {
        for (i=0; i<END_FRAMES; i++) save_frame();
        s = system("mv wave*.tif tif_wave/");
    }

    /* release the per-column buffers allocated at the top of animation() */
    for (i=0; i<NX; i++)
    {
        free(phi[i]);
        free(psi[i]);
        free(phi_tmp[i]);
        free(psi_tmp[i]);
        free(xy_in[i]);
    }
}

void display(void)
{
    glPushMatrix();

    /* clear both halves of the double buffer before animating */
    blank();
    glutSwapBuffers();
    blank();
    glutSwapBuffers();

    animation();
    sleep(SLEEP2);

    glPopMatrix();

    glutDestroyWindow(glutGetWindow());
}

int main(int argc, char** argv)
{
    /* standard GLUT setup: double-buffered RGB window, then hand control
       to the GLUT main loop with display() as the draw callback */
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH);
    glutInitWindowSize(WINWIDTH,WINHEIGHT);
    glutCreateWindow("Wave equation in a planar domain");

    init();
    glutDisplayFunc(display);
    glutMainLoop();

    return 0;
}
GB_bitmap_assign_IxJ_template.c
//------------------------------------------------------------------------------
// GB_bitmap_assign_IxJ_template: iterate over all of C(I,J)
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Iterate over all positions in the IxJ Cartesian product.  This is all
// entries C(i,j) where i is in the list I and j is in the list J.  This
// traversal occurs whether or not C(i,j) is an entry present in C.

// The C matrix is accessed at C(I,J).  The A matrix is size |I|-by-|J|.
// For bitmap assignment, C(I,J)=A is being computed.  For bitmap extraction,
// C=A(I,J) so the roles of A and C are swapped (see GB_bitmap_subref.c).

{

    //--------------------------------------------------------------------------
    // create the tasks to iterate over IxJ
    //--------------------------------------------------------------------------

    // GB_subassign_IxJ_slice partitions the IxJ product into coarse tasks
    // (whole vectors) and fine tasks (a slice of one vector).
    int ntasks = 0, nthreads ;
    GB_task_struct *TaskList = NULL ; size_t TaskList_size = 0 ;
    GB_OK (GB_subassign_IxJ_slice (&TaskList, &TaskList_size, &ntasks, &nthreads,
        /* I, */ nI, /* Ikind, Icolon, J, */ nJ, /* Jkind, Jcolon, */
        Context)) ;

    //--------------------------------------------------------------------------
    // iterate over all IxJ
    //--------------------------------------------------------------------------

    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:cnvals)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        int64_t kfirst = TaskList [taskid].kfirst ;
        int64_t klast  = TaskList [taskid].klast ;
        // NOTE(review): task_cnvals is expected to be updated inside the
        // GB_IXJ_WORK macro defined by the including file -- confirm there.
        int64_t task_cnvals = 0 ;
        bool fine_task = (klast == -1) ;
        int64_t iA_start = 0, iA_end = nI ;
        if (fine_task)
        {
            // a fine task operates on a slice of a single vector
            klast = kfirst ;
            iA_start = TaskList [taskid].pA ;
            iA_end = TaskList [taskid].pA_end ;
        }

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t jA = kfirst ; jA <= klast ; jA++)
        {

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            int64_t jC = GB_ijlist (J, jA, Jkind, Jcolon) ;
            int64_t pC0 = jC * vlen ;       // first entry in C(:,jC)
            int64_t pA0 = jA * nI ;         // first entry in A(:,jA)

            //------------------------------------------------------------------
            // operate on C (I(iA_start,iA_end-1),jC)
            //------------------------------------------------------------------

            for (int64_t iA = iA_start ; iA < iA_end ; iA++)
            {
                int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                int64_t pC = iC + pC0 ;
                int64_t pA = iA + pA0 ;
                // operate on C(iC,jC) at pC (if C is bitmap or full)
                // and A(iA,jA) or M(iA,jA) at pA, if A and/or M are
                // bitmap or full.  M(iA,jA) is accessed only for the
                // subassign method when M is bitmap or full.
                GB_IXJ_WORK (pC, pA) ;
            }
        }
        // fold this task's entry-count delta into the reduction variable
        cnvals += task_cnvals ;
    }

    //--------------------------------------------------------------------------
    // free workspace
    //--------------------------------------------------------------------------

    GB_FREE_WERK (&TaskList, TaskList_size) ;
}
GB_unop__identity_uint64_uint16.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint64_uint16)
// op(A') function:  GB (_unop_tran__identity_uint64_uint16)

// C type:   uint64_t
// A type:   uint16_t
// cast:     uint64_t cij = (uint64_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    uint64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint64_t z = (uint64_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    uint16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint64_t z = (uint64_t) aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_uint64_uint16)
(
    uint64_t *Cx,               // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense case: apply to every entry in parallel
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions where the bitmap says no entry is present
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            uint64_t z = (uint64_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint64_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template supplies the loop bodies via the
    // GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
Searching.202002250815.buckets_equal_width.h
// // Created by Zhen Peng on 02/25/2020. // #ifndef BATCH_SEARCHING_SEARCHING_H #define BATCH_SEARCHING_SEARCHING_H #include <vector> #include <boost/dynamic_bitset.hpp> #include <iostream> #include <fstream> #include <unordered_map> #include <immintrin.h> #include <cstring> #include <unordered_set> #include <set> #include <algorithm> #include <omp.h> #include "../../include/definitions.h" //#include "../include/efanna2e/neighbor.h" #include "../../include/utils.h" #include "../../include/Candidate.h" #include "../../include/parallelization.h" //#include "../include/Buckets.h" namespace PANNS { class Searching { //private: public: idi num_v_ = 0; edgei num_e_ = 0; idi num_queries_ = 0; int dimension_ = 0; // uint64_t dimension_ = 0; idi width_ = 0; // NSG largest degree idi ep_ = 0; // Start point // std::vector<dataf> data_load_; // std::vector<dataf> queries_load_; // std::vector< std::vector<dataf> > data_load_; // std::vector< std::vector<dataf> > queries_load_; // std::vector<distf> norms_; dataf *data_load_ = nullptr; dataf *queries_load_ = nullptr; // dataf *norms_; // std::vector< std::vector<idi> > nsg_graph_; // idi *nsg_graph_indices_; // idi *nsg_graph_out_edges_; // std::vector< std::vector<idi> > edge_list_; char *opt_nsg_graph_ = nullptr; uint64_t data_bytes_; uint64_t neighbor_bytes_; uint64_t vertex_bytes_; // For multithreads int num_threads_ = 1; dataf compute_norm( const dataf *data) const; // idi vertex_id); // const std::vector<PANNS::dataf> &data); // size_t loc_start, // idi dimension) dataf compute_distance_with_norm( const dataf *v_data, const dataf *q_data, // idi vertex_id, // idi query_id, // const std::vector<dataf> &d_data, // const std::vector<dataf> &q_data, // PANNS::idi d_start, // PANNS::idi q_start, dataf vertex_norm) const; // idi dimension) static idi insert_into_queue( std::vector<Candidate> &c_queue, idi c_queue_top, const Candidate &cand); static idi add_into_queue( std::vector<PANNS::Candidate> &queue, idi &queue_top, 
const idi queue_size, const PANNS::Candidate &cand); // idi insert_into_queue_nsg( // std::vector< Candidate > &c_queue, // idi c_queue_top, // Candidate cand); // template<typename T> static void insert_one_element_at( // const T &cand, // T *queue_base, const Candidate &cand, std::vector<Candidate> &queue_base, const idi insert_index, const idi queue_start, const idi queue_size); static idi merge_two_queues_into_1st_queue_seq( std::vector<Candidate> &queue1, const idi queue1_start, const idi queue1_size, std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size); // const idi limit_size); static idi merge_two_queues_into_1st_queue_para( std::vector<Candidate> &queue1, const idi queue1_start, const idi queue1_size, std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size); static void merge_two_bkt_arrays_into_1st_bkt_array_seq( std::vector< std::vector<Candidate> > &a_array, // idi a_bound, std::vector< std::vector<Candidate> > &b_array, const idi b_bound); static idi add_into_CandidatesBuckets( const Candidate &cand, std::vector< std::vector<Candidate> > &buckets, const distf bucket_lower, const distf overall_width, const distf bucket_width); idi merge_all_queues_para( std::vector< std::vector<Candidate> > &local_queues_list, std::vector<idi> &local_queues_ends, std::vector<Candidate> &set_L, const idi L); idi merge_all_buckets_para( std::vector< std::vector< std::vector<Candidate> > > &local_buckets_list, std::vector<idi> &local_insert_locations, std::vector< std::vector<Candidate> > &global_buckets, const idi num_buckets); public: // For Profiling // L3CacheMissRate cache_miss_kernel; uint64_t count_distance_computation_ = 0; ~Searching() { free(data_load_); data_load_ = nullptr; // free(queries_load_); // _mm_free(data_load_); free(queries_load_); queries_load_ = nullptr; // free(norms_); // free(nsg_graph_indices_); // free(nsg_graph_out_edges_); free(opt_nsg_graph_); opt_nsg_graph_ = nullptr; } void 
load_data_load(char *filename); void load_queries_load(char *filename); void load_nsg_graph(char *filename); // void build_opt_graph(); void prepare_init_ids( std::vector<unsigned> &init_ids, unsigned L) const; // void prepare_candidate_queue_list( // const float *query_load, // std::vector<std::vector<efanna2e::Neighbor> > &retset_list, // std::vector<boost::dynamic_bitset<> > &is_visited_list, // const std::vector<unsigned> &init_ids, // const boost::dynamic_bitset<> &flags, // unsigned batch_start, // unsigned batch_size, // unsigned L); // void search_in_batch( //// const float *query_load, // size_t K, // size_t L, // unsigned batch_start, // unsigned batch_size, // std::vector< std::vector<Candidate> > &set_L_list, // std::vector< boost::dynamic_bitset<> > &is_visited_list, // const std::vector<idi> &init_ids, // const boost::dynamic_bitset<> &is_visited, // std::vector<std::vector<idi> > &set_K_list); void search_in_sequential( idi query_id, idi K, idi L, std::vector<Candidate> &set_L, // boost::dynamic_bitset<> &is_visited, // boost::dynamic_bitset<> is_visited, // std::vector<idi> &init_ids, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // idi get_out_degree(idi v_id) const // { // if (v_id < num_v_ - 1) { // return nsg_graph_indices_[v_id + 1] - nsg_graph_indices_[v_id]; // } else { // return num_e_ - nsg_graph_indices_[v_id]; // } // } void search_with_top_m( idi M, idi query_id, idi K, idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // std::vector< std::vector<idi> > &top_m_list); void search_with_top_m_in_batch( PANNS::idi M, PANNS::idi batch_start, PANNS::idi batch_size, PANNS::idi K, PANNS::idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list); void para_search_with_top_m( idi M, idi query_id, idi K, idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // void 
para_prepare_init_ids( // std::vector<unsigned> &init_ids, // unsigned L) const; void load_true_NN( const char *filename, std::vector< std::vector<idi> > &true_nn_list); void get_recall_for_all_queries( const std::vector< std::vector<idi> > &true_nn_list, const std::vector<std::vector<unsigned>> &set_K_list, std::unordered_map<unsigned, double> &recalls) const; }; // Class Searching /** * Input the data from the file. * @param filename */ inline void Searching::load_data_load(char *filename) { auto old_d = dimension_; DiskIO::load_data( filename, data_load_, num_v_, dimension_); if (old_d) { if (old_d != dimension_) { std::cerr << "Error: data dimension " << dimension_ << " is not equal to query dimension " << old_d << "." << std::endl; exit(EXIT_FAILURE); } } } /** * Input queries from the file. * @param filename */ inline void Searching::load_queries_load(char *filename) { auto old_d = dimension_; DiskIO::load_data( filename, queries_load_, num_queries_, dimension_); if (old_d) { if (old_d != dimension_) { std::cerr << "Error: query dimension " << dimension_ << " is not equal to data dimension " << old_d << "." << std::endl; exit(EXIT_FAILURE); } } } /** * Input the NSG graph from the file. * Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp * @param filename */ inline void Searching::load_nsg_graph(char *filename) { std::ifstream fin(filename); if (!fin.is_open()) { std::cerr << "Error: cannot read file " << filename << " ." << std::endl; exit(EXIT_FAILURE); } fin.read(reinterpret_cast<char *>(&width_), sizeof(unsigned)); fin.read(reinterpret_cast<char *>(&ep_), sizeof(unsigned)); data_bytes_ = (1 + dimension_) * sizeof(dataf); neighbor_bytes_ = (1 + width_) * sizeof(idi); vertex_bytes_ = data_bytes_ + neighbor_bytes_; opt_nsg_graph_ = (char *) malloc(num_v_ * vertex_bytes_); if (!opt_nsg_graph_) { std::cerr << "Error: no enough memory for opt_nsg_graph_." 
<< std::endl; exit(EXIT_FAILURE); } idi v_id = 0; num_e_ = 0; char *base_location = opt_nsg_graph_; while (true) { idi degree; fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned)); if (fin.eof()) { break; } num_e_ += degree; // std::vector<idi> tmp_ngbrs(degree); // fin.read(reinterpret_cast<char *>(tmp_ngbrs.data()), degree * sizeof(unsigned)); // Norm and data distf norm = compute_norm(data_load_ + v_id * dimension_); // distf norm = compute_norm(v_id); std::memcpy(base_location, &norm, sizeof(distf)); // Norm memcpy(base_location + sizeof(distf), data_load_ + v_id * dimension_, dimension_ * sizeof(dataf)); // Data base_location += data_bytes_; // Neighbors memcpy(base_location, &degree, sizeof(idi)); // Number of neighbors fin.read(base_location + sizeof(idi), degree * sizeof(unsigned)); // Neighbors // memcpy(location + sizeof(idi), tmp_ngbrs.data(), degree * sizeof(unsigned)); base_location += neighbor_bytes_; ++v_id; } if (v_id != num_v_) { std::cerr << "Error: NSG data has " << v_id << " vertices, but origin data has " << num_v_ << " vertices." << std::endl; exit(EXIT_FAILURE); } free(data_load_); data_load_ = nullptr; // //////////////////////// // idi v_id = 0; // num_e_ = 0; // while (true) { // idi degree; // fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned)); // if (fin.eof()) { // break; // } // num_e_ += degree; // // std::vector<idi> ngbrs(degree); // fin.read(reinterpret_cast<char *>(ngbrs.data()), degree * sizeof(unsigned)); //// nsg_graph_.push_back(ngbrs); //// tmp_edge_list.push_back(ngbrs); // edge_list_.push_back(ngbrs); // ++v_id; // } // if (v_id != num_v_) { // std::cerr << "Error: NSG data has " << v_id // << " vertices, but origin data has " << num_v_ << " vertices." 
<< std::endl; // exit(EXIT_FAILURE); // } } /** * Load those true top-K neighbors (ground truth) of queries * @param filename * @param[out] true_nn_list */ inline void Searching::load_true_NN( const char *filename, std::vector< std::vector<idi> > &true_nn_list) // unsigned &t_K) { std::ifstream fin(filename); if (!fin.is_open()) { fprintf(stderr, "Error: cannot open file %s\n", filename); exit(EXIT_FAILURE); } idi t_query_num; idi t_K; // unsigned t_K; fin.read(reinterpret_cast<char *>(&t_query_num), sizeof(t_query_num)); fin.read(reinterpret_cast<char *>(&t_K), sizeof(t_K)); // if (t_query_num != query_num) { // fprintf(stderr, "Error: query_num %u is not equal to the record %u in true-NN file %s\n", // query_num, t_query_num, filename); // exit(EXIT_FAILURE); // } if (t_query_num < num_queries_) { fprintf(stderr, "Error: t_query_num %u is smaller than num_queries_ %u\n", t_query_num, num_queries_); exit(EXIT_FAILURE); } if (t_K < 100) { fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K); exit(EXIT_FAILURE); } // data = new unsigned[(size_t) t_query_num * (size_t) t_K]; true_nn_list.resize(t_query_num); for (idi q_i = 0; q_i < t_query_num; ++q_i) { true_nn_list[q_i].resize(t_K); } for (unsigned q_i = 0; q_i < t_query_num; ++q_i) { // size_t offset = q_i * t_K; for (unsigned n_i = 0; n_i < t_K; ++n_i) { unsigned id; float dist; fin.read(reinterpret_cast<char *>(&id), sizeof(id)); fin.read(reinterpret_cast<char *>(&dist), sizeof(dist)); // data[offset + n_i] = id; true_nn_list[q_i][n_i] = id; } } fin.close(); } inline void Searching::get_recall_for_all_queries( const std::vector< std::vector<idi> > &true_nn_list, const std::vector<std::vector<unsigned>> &set_K_list, std::unordered_map<unsigned, double> &recalls) const { // if (t_K < 100) { // fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K); // exit(EXIT_FAILURE); // } if (true_nn_list[0].size() < 100) { fprintf(stderr, "Error: Number of true nearest neighbors of a query is smaller than 
100.\n"); exit(EXIT_FAILURE); } recalls[5] = 0.0; recalls[10] = 0.0; recalls[20] = 0.0; recalls[50] = 0.0; recalls[100] = 0.0; for (unsigned q_i = 0; q_i < num_queries_; ++q_i) { // size_t offset = q_i * t_K; auto &set_K_q_i = set_K_list[q_i]; for (unsigned top_i = 0; top_i < 100; ++top_i) { unsigned true_id = true_nn_list[q_i][top_i]; for (unsigned n_i = 0; n_i < 100; ++n_i) { if (set_K_q_i[n_i] == true_id) { if (n_i < 5) recalls[5] += 1; if (n_i < 10) recalls[10] += 1; if (n_i < 20) recalls[20] += 1; if (n_i < 50) recalls[50] += 1; if (n_i < 100) recalls[100] += 1; } } } } recalls[5] /= 5.0 * num_queries_; recalls[10] /= 10.0 * num_queries_; recalls[20] /= 20.0 * num_queries_; recalls[50] /= 50.0 * num_queries_; recalls[100] /= 100.0 * num_queries_; } inline void Searching::search_in_sequential( const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, // boost::dynamic_bitset<> &is_visited, // boost::dynamic_bitset<> is_visited, // std::vector<idi> &init_ids, const std::vector<idi> &init_ids, std::vector<idi> &set_K) { // std::vector<Candidate> set_L(L+1); // std::vector<idi> init_ids(L); boost::dynamic_bitset<> is_visited(num_v_); for (idi v_i = 0; v_i < L; ++v_i) { is_visited[init_ids[v_i]] = true; } // { // idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_); // unsigned out_degree = *out_edges++; // idi tmp_l = 0; // for (; tmp_l < L && tmp_l < out_degree; tmp_l++) { // init_ids[tmp_l] = out_edges[tmp_l]; // } // // for (idi i = 0; i < tmp_l; ++i) { // is_visited[init_ids[i]] = true; // } // // // If ep_'s neighbors are not enough, add other random vertices // idi tmp_id = ep_ + 1; // use tmp_id to replace rand(). 
// while (tmp_l < L) { // tmp_id %= num_v_; // unsigned id = tmp_id++; // if (is_visited[id]) { // continue; // } // is_visited[id] = true; // init_ids[tmp_l] = id; // tmp_l++; // } // } // const std::vector<dataf> &query = queries_load_[query_id]; // std::vector<char> is_checked(L + 1, 0); // boost::dynamic_bitset<> is_checked(num_v_); // cache_miss_kernel.measure_stop(); // cache_miss_kernel.measure_start(); const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; // _mm_prefetch(reinterpret_cast<char *>(data_load_ + v_id * dimension_), _MM_HINT_T0); _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); // cache_miss_kernel.measure_stop(); // cache_miss_kernel.measure_start(); idi k = 0; // Index of every queue's first unchecked candidate. while (k < L) { Candidate &top_cand = set_L[k]; unsigned nk = L; if (!top_cand.is_checked_) { top_cand.is_checked_ = true; idi v_id = top_cand.id_; // Vertex ID. 
_mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } // Traverse v_id's all neighbors, pushing them into the queue for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // Compute the distance // ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } // if (dist >= set_L[L-1].distance_) { // continue; // } Candidate cand(nb_id, dist, false); // Insert into the queue idi r = insert_into_queue(set_L, L, cand); if (r < nk) { nk = r; } } } if (nk <= k) { k = nk; } else { ++k; } } // cache_miss_kernel.measure_stop(); for (size_t k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } } /** * Prepare init_ids and flags, as they are constant for all queries. 
* @param[out] init_ids * @param L */ inline void Searching::prepare_init_ids( std::vector<unsigned int> &init_ids, unsigned L) const { // idi num_ngbrs = get_out_degree(ep_); // edgei edge_start = nsg_graph_indices_[ep_]; // // Store ep_'s neighbors as candidates // idi tmp_l = 0; // for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) { // init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l]; // } // std::unordered_set<idi> visited_ids; boost::dynamic_bitset<> is_selected(num_v_); idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; idi init_ids_end = 0; // for (; tmp_l < L && tmp_l < out_degree; tmp_l++) { for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) { // idi v_id = out_edges[tmp_l]; idi v_id = out_edges[e_i]; if(is_selected[v_id]) { continue; } is_selected[v_id] = true; // init_ids[tmp_l] = v_id; init_ids[init_ids_end++] = v_id; // init_ids[tmp_l] = out_edges[tmp_l]; // visited_ids.insert(init_ids[tmp_l]); } // for (idi i = 0; i < tmp_l; ++i) { // is_visited[init_ids[i]] = true; // } // If ep_'s neighbors are not enough, add other random vertices idi tmp_id = ep_ + 1; // use tmp_id to replace rand(). 
while (init_ids_end < L) { tmp_id %= num_v_; idi v_id = tmp_id++; if (is_selected[v_id]) { continue; } // if (visited_ids.find(id) != visited_ids.end()) { // continue; // } is_selected[v_id] = true; // visited_ids.insert(id); init_ids[init_ids_end++] = v_id; // tmp_l++; } } // TODO: re-code in AVX-512 inline dataf Searching::compute_norm( const dataf *data) const // idi vertex_id) // const std::vector<PANNS::dataf> &data) // size_t loc_start, // idi dimension) { // const dataf *a = data.data() + loc_start; // const dataf *a = data_load_ + vertex_id * dimension_; // idi size = dimension_; dataf result = 0; //#define AVX_L2NORM(addr, dest, tmp) \ // tmp = _mm256_load_ps(addr); \ // tmp = _mm256_mul_ps(tmp, tmp); \ // dest = _mm256_add_ps(dest, tmp); #define AVX_L2NORM(addr, dest, tmp) \ tmp = _mm256_loadu_ps(addr); \ tmp = _mm256_mul_ps(tmp, tmp); \ dest = _mm256_add_ps(dest, tmp); __m256 sum; __m256 l0, l1; unsigned D = (dimension_ + 7) & ~7U; unsigned DR = D % 16; unsigned DD = D - DR; const float *l = data; const float *e_l = l + DD; float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0}; sum = _mm256_load_ps(unpack); // sum = _mm256_loadu_ps(unpack); if (DR) { AVX_L2NORM(e_l, sum, l0); } for (unsigned i = 0; i < DD; i += 16, l += 16) { AVX_L2NORM(l, sum, l0); AVX_L2NORM(l + 8, sum, l1); } _mm256_store_ps(unpack, sum); // _mm256_storeu_ps(unpack, sum); result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7]; return result; } inline dataf Searching::compute_distance_with_norm( const dataf *v_data, const dataf *q_data, // idi vertex_id, // idi query_id, // const std::vector<PANNS::dataf> &d_data, // const std::vector<PANNS::dataf> &q_data, // PANNS::idi d_start, // PANNS::idi q_start, dataf vertex_norm) const // idi dimension) { // idi size = dimension_; float result = 0; //#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \ // tmp1 = _mm256_load_ps(addr1);\ // tmp2 = _mm256_load_ps(addr2);\ // tmp1 = 
_mm256_mul_ps(tmp1, tmp2); \ // dest = _mm256_add_ps(dest, tmp1); #define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \ tmp1 = _mm256_loadu_ps(addr1);\ tmp2 = _mm256_loadu_ps(addr2);\ tmp1 = _mm256_mul_ps(tmp1, tmp2); \ dest = _mm256_add_ps(dest, tmp1); __m256 sum; __m256 l0, l1; __m256 r0, r1; unsigned D = (dimension_ + 7) & ~7U; unsigned DR = D % 16; unsigned DD = D - DR; const float *l = v_data; const float *r = q_data; // const float *l = (float *) (opt_nsg_graph_ + vertex_id * vertex_bytes_ + sizeof(distf)); // const float *r = queries_load_ + query_id * dimension_; const float *e_l = l + DD; const float *e_r = r + DD; float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0}; sum = _mm256_load_ps(unpack); // sum = _mm256_loadu_ps(unpack); if (DR) { AVX_DOT(e_l, e_r, sum, l0, r0); } for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) { AVX_DOT(l, r, sum, l0, r0); AVX_DOT(l + 8, r + 8, sum, l1, r1); } _mm256_store_ps(unpack, sum); // _mm256_storeu_ps(unpack, sum); result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7]; result = -2 * result + vertex_norm; return result; } /** * PANNS version of InsertIntoPool(): binary-search to find the insert place and then move. 
* @param[out] c_queue * @param c_queue_top * @param cand * @return */ inline idi Searching::insert_into_queue( std::vector<PANNS::Candidate> &c_queue, PANNS::idi c_queue_top, const PANNS::Candidate &cand) { if (c_queue[0].distance_ > cand.distance_) { // If the first memmove(reinterpret_cast<char *>(c_queue.data() + 1), reinterpret_cast<char *>(c_queue.data()), c_queue_top * sizeof(Candidate)); c_queue[0] = cand; return 0; } else if (c_queue[c_queue_top - 1].distance_ == cand.distance_) { // If the last if (c_queue[c_queue_top - 1].id_ > cand.id_) { // Use ID as the second metrics for ordering c_queue[c_queue_top - 1] = cand; return c_queue_top - 1; } else { return c_queue_top; } } idi left = 0; idi right = c_queue_top; while (left < right) { idi mid = (right - left) / 2 + left; if (c_queue[mid].distance_ > cand.distance_) { right = mid; } else { left = mid + 1; } } // If the distance is the same if (0 != left && c_queue[left - 1].distance_ != cand.distance_) { ; } else { while (0 != left && c_queue[left - 1].distance_ == cand.distance_ && c_queue[left - 1].id_ > cand.id_) { // Use ID as the second metrics for ordering --left; } } // Insert to left memmove(reinterpret_cast<char *>(c_queue.data() + left + 1), reinterpret_cast<char *>(c_queue.data() + left), (c_queue_top - left) * sizeof(Candidate)); c_queue[left] = cand; return left; } // The difference from insert_into_queue is that add_into_queue will increase the queue size by 1. 
inline idi Searching::add_into_queue(
        std::vector<PANNS::Candidate> &queue,
        idi &queue_top,
        const idi queue_size,
        const PANNS::Candidate &cand)
{
    // Empty queue: cand becomes the only element.
    if (0 == queue_top) {
        queue[queue_top++] = cand;
        return 0;
    }
    // Find the insert location
    auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand);
    idi insert_loc = it_loc - queue.begin();
    // cand would fall past the capacity: reject it.
    if (insert_loc == queue_size) {
        return queue_size;
    }

    // Insert
    if (queue_top == queue_size) {
        // If full already
        // drop the current last element to make room
        --queue_top;
    }
    // Shift [insert_loc, queue_top) one slot right, then write cand.
    // NOTE(review): memmove over Candidate objects assumes Candidate is
    // trivially copyable -- confirm against its definition in Candidate.h.
    memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1),
            reinterpret_cast<char *>(queue.data() + insert_loc),
            (queue_top - insert_loc) * sizeof(Candidate));
//    for (idi q_i = queue_top; q_i > insert_loc; --q_i) {
//        queue.at(q_i) = queue.at(q_i - 1);
//    }
    queue[insert_loc] = cand;
    ++queue_top;
    return insert_loc;
}

//template<typename T>
// Insert cand at insert_index (relative to queue_start) inside a fixed-size
// window of the queue; the window's last element is overwritten by the shift.
inline void Searching::insert_one_element_at(
//        const T &cand,
//        T *queue_base,
        const Candidate &cand,
        std::vector<Candidate> &queue,
        const idi insert_index,
        const idi queue_start,
        const idi queue_size)
{
    const idi dest_index = queue_start + insert_index;
    memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1),
            reinterpret_cast<char *>(queue.data() + dest_index),
            (queue_size - insert_index - 1) * sizeof(Candidate));
    queue[dest_index] = cand;
//    memmove(reinterpret_cast<char *>(queue_base + dest_index + 1),
//            reinterpret_cast<char *>(queue_base + dest_index),
//            (queue_size - insert_index - 1) * sizeof(T));
//    for (idi q_i = queue_size - 1; q_i > insert_index; --q_i) {
//        queue_base.at(q_i + queue_start) = queue_base.at(q_i - 1 + queue_start);
//    }
//    queue_base[dest_index] = cand;
}

// Merge the sorted window of queue2 into the sorted, fixed-size window of
// queue1; returns the lowest index in queue1 that was modified.
inline idi Searching::merge_two_queues_into_1st_queue_seq(
        std::vector<Candidate> &queue1,
        const idi queue1_start,
        const idi queue1_size,
        std::vector<Candidate> &queue2,
        const idi queue2_start,
        const idi queue2_size)
//        const idi limit_size)
{
    assert(queue1_size && queue2_size);
    // Record the lowest insert location.
auto it_loc = std::lower_bound( queue1.begin() + queue1_start, queue1.begin() + queue1_start + queue1_size, queue2[queue2_start]); idi insert_index = it_loc - (queue1.begin() + queue1_start); if (insert_index == queue1_size) { return insert_index; } else if (insert_index == queue1_size - 1) { queue1[queue1_start + insert_index] = queue2[queue2_start]; return insert_index; } // auto *queue1_base = queue1.data() + queue1_start; // Insert the 1st of queue2 insert_one_element_at( queue2[queue2_start], // queue1.data(), queue1, insert_index, queue1_start, queue1_size); if (queue2_size == 1) { return insert_index; } // memmove(reinterpret_cast<char *>(queue1_base + insert_index + 1), // reinterpret_cast<char *>(queue1_base + insert_index), // (queue1_size - insert_index) * sizeof(Candidate)); // queue1[insert_index] = queue2[queue2_start]; // Insert idi q_i_1 = insert_index + 1 + queue1_start; idi q_i_2 = queue2_start + 1; const idi q_i_1_bound = queue1_start + queue1_size; const idi q_i_2_bound = queue2_start + queue2_size; // const idi insert_i_bound = queue1_start + limit_size; for (idi insert_i = insert_index + 1; insert_i < queue1_size; ++insert_i) { // for (idi insert_i = insert_index + 1; insert_i < q_i_1_bound; ++insert_i) { if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) { // queue1 or queue2 finished traverse. Rest o break; } else if (queue1[q_i_1] < queue2[q_i_2]) { ++q_i_1; } else { // Insert queue2[q_i_2] into queue1 insert_one_element_at( queue2[q_i_2++], // queue1.data(), queue1, insert_i, queue1_start, queue1_size); ++q_i_1; } } // // // Merge queue1 and queue2 into tmp_queue. // std::vector<Candidate> tmp_queue(queue1_size + queue2_size); // std::merge(queue1.begin() + queue1_start, // queue1.begin() + queue1_start + queue1_size, // queue2.begin() + queue2_start, // queue2.begin() + queue2_start + queue2_size, // tmp_queue.begin()); // // Resize tmp_queue. 
// tmp_queue.resize(limit_size); // // // Swap queue1 and tmp_queue // queue1.swap(tmp_queue); return insert_index; } //// Backup //inline idi Searching::merge_two_queues_into_1st_queue_seq( // std::vector<Candidate> &queue1, // const idi queue1_start, // const idi queue1_size, // std::vector<Candidate> &queue2, // const idi queue2_start, // const idi queue2_size, // const idi limit_size) //{ // assert(queue1_size); // // Record the lowest insert location. // auto it_loc = std::lower_bound( // queue1.begin() + queue1_start, // queue1.begin() + queue1_start + queue1_size, // queue2[queue2_start]); // idi insert_loc = it_loc - (queue1.begin() + queue1_start); // // // Merge queue1 and queue2 into tmp_queue. // std::vector<Candidate> tmp_queue(queue1_size + queue2_size); // std::merge(queue1.begin() + queue1_start, // queue1.begin() + queue1_start + queue1_size, // queue2.begin() + queue2_start, // queue2.begin() + queue2_start + queue2_size, // tmp_queue.begin()); // // Resize tmp_queue. // tmp_queue.resize(limit_size); // // // Swap queue1 and tmp_queue // queue1.swap(tmp_queue); // // return insert_loc; //} inline idi Searching::merge_two_queues_into_1st_queue_para( std::vector<Candidate> &queue1, const idi queue1_start, const idi queue1_size, std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size) // const idi limit_size) { assert(queue1_size && queue2_size); if (queue2_size < 16) { // if (true) { // if (queue2_size < 1024) { return merge_two_queues_into_1st_queue_seq( queue1, queue1_start, queue1_size, queue2, queue2_start, queue2_size); } else { auto it1_begin = queue1.begin() + queue1_start; auto it1_end = it1_begin + queue1_size; // auto it2_begin = queue2.begin() + queue2_start; // auto it2_end = it2_begin + queue2_size; // Record the lowest insert location. 
idi lowest_insert_loc; { auto it_loc = std::lower_bound( it1_begin, it1_end, queue2[queue2_start]); lowest_insert_loc = it_loc - it1_begin; if (lowest_insert_loc == queue1_size) { return lowest_insert_loc; } else if (lowest_insert_loc == queue1_size - 1) { queue1[queue1_start + lowest_insert_loc] = queue2[queue2_start]; return lowest_insert_loc; } } // Partition queue2 and queue1 const idi stride = log2(static_cast<double>(queue2_size)); const idi num_partitions = (queue2_size - 1) / stride + 1; // Partitions in queue1 std::vector<idi> parts_in_q1(num_partitions); parts_in_q1[0] = 0; #pragma omp parallel for for (idi p_i = 1; p_i < num_partitions; ++p_i) { idi q_i = p_i * stride + queue2_start; auto it_loc = std::lower_bound( it1_begin, it1_end, queue2[q_i]); parts_in_q1[p_i] = it_loc - it1_begin; } // Partitions in queue2 std::vector<idi> parts_in_q2(num_partitions); parts_in_q2[0] = 0; #pragma omp parallel for for (idi p_i = 1; p_i < num_partitions; ++p_i) { parts_in_q2[p_i] = p_i * stride; } // Merge // Find partitions needed. std::vector<idi> offsets_queue3; // For the tmp_queue when merging. 
offsets_queue3.push_back(0); idi elements_count = 0; idi p_i_bound = 0; while (elements_count < queue1_size && p_i_bound < num_partitions) { idi q1_size; idi q2_size; if (p_i_bound != num_partitions - 1) { q1_size = parts_in_q1[p_i_bound + 1] - parts_in_q1[p_i_bound]; q2_size = parts_in_q2[p_i_bound + 1] - parts_in_q2[p_i_bound]; } else { q1_size = queue1_size - parts_in_q1[p_i_bound]; q2_size = queue2_size - parts_in_q2[p_i_bound]; } ++p_i_bound; idi tmp_size = q1_size + q2_size; elements_count += tmp_size; offsets_queue3.push_back(*offsets_queue3.rbegin() + tmp_size); } // Merge into tmp_queue in parallel std::vector<Candidate> tmp_queue(elements_count); #pragma omp parallel for for (idi p_i = 0; p_i < p_i_bound; ++p_i) { idi q1_start = parts_in_q1[p_i] + queue1_start; idi q2_start = parts_in_q2[p_i] + queue2_start; idi q1_end; idi q2_end; if (p_i != num_partitions - 1) { q1_end = parts_in_q1[p_i + 1] + queue1_start; q2_end = parts_in_q2[p_i + 1] + queue1_start; } else { q1_end = queue1_size + queue1_start; q2_end = queue2_size + queue2_start; } std::merge( queue1.begin() + q1_start, queue1.begin() + q1_end, queue2.begin() + q2_start, queue2.begin() + q2_end, tmp_queue.begin() + offsets_queue3[p_i]); } if (elements_count > queue1_size) { tmp_queue.resize(queue1_size); } tmp_queue.swap(queue1); //// Deprecated. Wrong implementation. 
////#pragma omp parallel for // for (idi p_i = 0; p_i < num_partitions; ++p_i) { // idi q1_start = parts_in_q1[p_i] + queue1_start; // idi q2_start = parts_in_q2[p_i] + queue2_start; // idi q1_size; // idi q2_size; // if (p_i != num_partitions - 1) { // q1_size = parts_in_q1[p_i + 1] - parts_in_q1[p_i]; // q2_size = parts_in_q2[p_i + 1] - parts_in_q2[p_i]; // } else { // q1_size = queue1_size - parts_in_q1[p_i]; // q2_size = queue2_size - parts_in_q2[p_i]; // } // if (0 == q1_size || 0 == q2_size) continue; // merge_two_queues_into_1st_queue_seq( // queue1, // q1_start, // q1_size, // queue2, // q2_start, // q2_size); // } return lowest_insert_loc; } // ////////////////////////////////////////// // //// Backup // // Record the lowest insert location. // auto it_loc = std::lower_bound( // queue1.begin() + queue1_start, // queue1.begin() + queue1_start + queue1_size, // queue2[queue2_start]); // idi insert_loc = it_loc - (queue1.begin() + queue1_start); // // auto *queue1_base = queue1.data() + queue1_start; // // Insert the 1st of queue2 // insert_one_element_at( // queue2[queue2_start], // queue1_base, // insert_loc, // queue1_size); // // // Insert // idi q_i_1 = insert_loc + 1; // idi q_i_2 = queue2_start + 1; // const idi q_i_1_bound = queue1_start + queue1_size; // const idi q_i_2_bound = queue2_start + queue2_size; //// const idi insert_i_bound = queue1_start + limit_size; // for (idi insert_i = insert_loc + 1; insert_i < q_i_1_bound; ++insert_i) { // if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) { // // queue1 or queue2 finished traverse. 
Rest o // break; // } else if (queue1[q_i_1] < queue2[q_i_2]) { // ++q_i_1; // } else { // // Insert queue2[q_i_2] into queue1 // insert_one_element_at( // queue2[q_i_2++], // queue1_base, // insert_i, // queue1_size); // ++q_i_1; // } // } // // return insert_loc; } inline void Searching::merge_two_bkt_arrays_into_1st_bkt_array_seq( std::vector< std::vector<Candidate> > &a_array, // const idi a_bound, std::vector< std::vector<Candidate> > &b_array, const idi b_bound) { // idi a_size = a_bucket.size(); // idi b_size = b_bucket.size(); // a_bucket.resize(a_size + b_size); // std::memmove(a_bucket.data() + a_size, b_bucket.data(), b_size * sizeof(b_bucket[0])); for (idi bk_i = 0; bk_i < b_bound; ++bk_i) { // for (idi bk_i = 0; bk_i < num_buckets; ++bk_i) { auto &b_bucket = b_array[bk_i]; if (b_bucket.empty()) continue; auto &a_bucket = a_array[bk_i]; // a_bucket.insert(a_bucket.end(), b_bucket.begin(), b_bucket.end()); // TODO: memory optimization needed. // { idi a_size = a_bucket.size(); idi b_size = b_bucket.size(); a_bucket.resize(a_size + b_size); std::memmove(a_bucket.data() + a_size, b_bucket.data(), b_size * sizeof(b_bucket[0])); // } b_bucket.clear(); } // if (a_bound < b_bound) { // a_bound = b_bound; // } // b_bound = 0; } inline idi Searching::add_into_CandidatesBuckets( const Candidate &cand, std::vector< std::vector<Candidate> > &buckets, const distf bucket_lower, const distf overall_width, const distf bucket_width) { idi b_i; if (cand.distance_ < bucket_lower) { b_i = 0; // buckets[0].push_back(cand); } else { b_i = (cand.distance_ - bucket_lower) / overall_width / bucket_width; // buckets[b_i].push_back(cand); } buckets[b_i].push_back(cand); return b_i; } //inline void Searching::cand_pushes_ngbrs_into_queue( // idi cand_id, // const dataf *query_data, // idi L, // idi &new_k, // boost::dynamic_bitset<> &is_visited, // std::vector<Candidate> &set_L) //{ // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi 
*out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist >= set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } //} //inline void Searching::search_in_sequential( // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) const //{ // boost::dynamic_bitset<> is_visited(num_v_); // // for (idi v_i = 0; v_i < L; ++v_i) { // is_visited[init_ids[v_i]] = true; // } // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. 
// while (k < L) { // Candidate &top_cand = set_L[k]; // unsigned nk = L; // if (!top_cand.is_checked_) { // top_cand.is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } // // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // Deprecated: cannot use std::set, because its element is constant. 
//inline void Searching::search_in_sequential( // const idi query_id, // const idi K, // const idi L, //// std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) const //{ // std::set<Candidate> set_L; // boost::dynamic_bitset<> is_visited(num_v_); // // for (idi v_i = 0; v_i < L; ++v_i) { // is_visited[init_ids[v_i]] = true; // } // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // set_L.emplace(v_id, dist, false); // } //// std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. // while (k < L) { //// Candidate &top_cand = set_L[k]; // std::set<Candidate>::iterator top_cand = std::next(set_L.begin(), k); // unsigned nk = L; // if (!top_cand->is_checked_) { // top_cand->is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. 
//            ... (remainder of a deprecated, commented-out std::set-based
//            search_in_sequential implementation elided in review; see VCS) ...
//}

/**
 * Best-first graph search for one query: repeatedly pick up to M unchecked
 * candidates from the sorted pool set_L, expand all their neighbors, and
 * insert improved candidates back into set_L. Terminates when the first L
 * entries are all checked; the best K ids are written to set_K.
 *
 * @param M         number of candidates expanded per iteration
 * @param query_id  index of the query vector in queries_load_
 * @param K         number of results to report
 * @param L         search-pool width (set_L must hold at least L entries;
 *                  insert_into_queue writes one slot past L — see its notes)
 * @param[out] set_L  candidate pool, reused as working memory
 * @param init_ids  L seed vertex ids
 * @param[out] set_K  result ids (best K)
 */
inline void Searching::search_with_top_m(
        const PANNS::idi M,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K)
{
    boost::dynamic_bitset<> is_visited(num_v_);

    {
        // (A commented-out variant that seeded init_ids from ep_'s neighbors
        // was elided in review; init_ids are assumed pre-filled by the caller.)
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++; // First value of a vertex record is its norm.
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);
    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;
        unsigned nk = L; // Lowest index modified this iteration.
        // Select up to M unchecked candidates starting at k.
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }
        // Push the M candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            // Adjacency list is stored right after the vertex data:
            // [out_degree, neighbor ids...].
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    // Cannot improve the pool; skip.
                    continue;
                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates
        // Resume from the lowest modified index, or just past the last
        // expanded candidate if nothing above it changed.
        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
        // (A commented-out single-candidate expansion variant was elided.)
    }

    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}

/**
 * Batched variant of search_with_top_m: runs batch_size queries together so
 * that a candidate selected by several queries has its adjacency list read
 * once (shared via a joint queue) while each query keeps its own pool,
 * visited set, and bookkeeping indices.
 *
 * @param M            candidates expanded per query per iteration
 * @param batch_start  index of the first query in queries_load_
 * @param batch_size   number of queries processed together
 * @param K, L         as in search_with_top_m
 * @param[out] set_L_list  one candidate pool per query
 * @param init_ids     shared seed ids for every query
 * @param[out] set_K_list  per-query result ids (indexed by global query id)
 */
inline void Searching::search_with_top_m_in_batch(
        const PANNS::idi M,
        const PANNS::idi batch_start,
        const PANNS::idi batch_size,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector< std::vector<Candidate> > &set_L_list,
        const std::vector<idi> &init_ids,
        std::vector< std::vector<idi> > &set_K_list)
{
    std::vector< boost::dynamic_bitset<> > is_visited_list(batch_size, boost::dynamic_bitset<> (num_v_));

    // Prepare the init_ids
    {
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            auto &is_visited = is_visited_list[q_i];
            for (idi c_i = 0; c_i < L; ++c_i) {
                is_visited[init_ids[c_i]] = true;
            }
        }
    }

    // Initialize set_L_list
    {
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            const dataf *query_data = queries_load_ + (q_i + batch_start) * dimension_;
            for (idi i = 0; i < L; i++) {
                idi v_id = init_ids[i];
                auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
                dataf norm = *v_data++;
                distf dist = compute_distance_with_norm(v_data, query_data, norm);
                set_L_list[q_i][i] = Candidate(v_id, dist, false); // False means not checked.
            }
            std::sort(set_L_list[q_i].begin(), set_L_list[q_i].begin() + L);
        }
    }

    {
        std::vector<idi> joint_queue(M * batch_size); // Joint queue for all shared top-M candidates
        idi joint_queue_end = 0;
        boost::dynamic_bitset<> is_in_joint_queue(num_v_);
        // cand_id -> list of local query ids that selected cand_id.
        std::unordered_map< idi, std::vector<idi> > cands_query_ids(batch_size * M);
        std::vector<idi> ks(batch_size, 0); // Indices of every queue's first unchecked candidate.
        std::vector<idi> nks(batch_size, L); // Indices of highest candidate inserted
        std::vector<idi> last_ks(batch_size, L); // Indices of lowest candidate unchecked
        std::vector<idi> queries_not_finished(batch_size);
        idi queries_not_finished_end = batch_size;
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            queries_not_finished[q_i] = q_i;
        }
        bool is_finished = false;

        idi counter_for_debug = 0;
        while (!is_finished) {
            ++counter_for_debug;
            // Build the new joint queue: every unfinished query contributes
            // up to M unchecked candidates; duplicates across queries are
            // deduplicated via is_in_joint_queue.
            for (idi q_i = 0; q_i < queries_not_finished_end; ++q_i) {
                idi q_local_id = queries_not_finished[q_i];
                auto &set_L = set_L_list[q_local_id];
                idi top_m_count = 0;
                for (idi c_i = ks[q_local_id]; c_i < L && top_m_count < M; ++c_i) {
                    if (set_L[c_i].is_checked_) {
                        continue;
                    }
                    set_L[c_i].is_checked_ = true;
                    last_ks[q_local_id] = c_i;
                    ++top_m_count;
                    idi cand_id = set_L[c_i].id_;
                    // Record which query selected cand_id
                    auto tmp_c = cands_query_ids.find(cand_id);
                    if (tmp_c != cands_query_ids.end()) {
                        tmp_c->second.push_back(q_local_id);
                    } else {
                        cands_query_ids.emplace(cand_id, std::vector<idi>());
                        cands_query_ids[cand_id].reserve(batch_size);
                        cands_query_ids[cand_id].push_back(q_local_id);
                    }
                    // Add candidate cand_id into the joint queue (once).
                    if (is_in_joint_queue[cand_id]) {
                        continue;
                    }
                    is_in_joint_queue[cand_id] = true;
                    joint_queue[joint_queue_end++] = cand_id;
                }
            }
            queries_not_finished_end = 0; // Clear queries_not_finished

            // Traverse every shared candidate: read its adjacency list once
            // and push neighbors into each selecting query's pool.
            for (idi c_i = 0; c_i < joint_queue_end; ++c_i) {
                idi cand_id = joint_queue[c_i];
                is_in_joint_queue[cand_id] = false; // Reset is_in_joint_queue
                idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
                idi out_degree = *out_edges++;
                const auto &query_local_ids = cands_query_ids[cand_id];
                for (idi q_local_id : query_local_ids) {
                    dataf *query_data = queries_load_ + (q_local_id + batch_start) * dimension_;
                    auto &is_visited = is_visited_list[q_local_id];
                    auto &set_L = set_L_list[q_local_id];
                    // Traverse cand_id's neighbors for this query.
                    for (idi e_i = 0; e_i < out_degree; ++e_i) {
                        idi nb_id = out_edges[e_i];
                        if (is_visited[nb_id]) {
                            continue;
                        }
                        is_visited[nb_id] = true;
                        auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                        dataf norm = *nb_data++;
                        distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                        if (dist > set_L[L-1].distance_) {
                            continue;
                        }
                        Candidate new_cand(nb_id, dist, false);
                        idi insert_loc = insert_into_queue(set_L, L, new_cand);
                        if (insert_loc < nks[q_local_id]) {
                            nks[q_local_id] = insert_loc;
                        }
                    }
                }
                cands_query_ids.erase(cand_id); // Clear cands_query_ids[cand_id]
            }
            joint_queue_end = 0; // Clear joint_queue

            // Advance each query's resume index; re-enqueue unfinished ones.
            for (idi q_local_id = 0; q_local_id < batch_size; ++q_local_id) {
                if (nks[q_local_id] <= last_ks[q_local_id]) {
                    ks[q_local_id] = nks[q_local_id];
                } else {
                    ks[q_local_id] = last_ks[q_local_id] + 1;
                }
                nks[q_local_id] = L;
                last_ks[q_local_id] = L;
                if (ks[q_local_id] < L) {
                    queries_not_finished[queries_not_finished_end++] = q_local_id;
                }
            }
            if (!queries_not_finished_end) {
                is_finished = true;
            }
        }
    }

    {
        for (idi q_i = 0; q_i < batch_size; ++q_i) {
            for (idi c_i = 0; c_i < K && c_i < L; ++c_i) {
                set_K_list[q_i + batch_start][c_i] = set_L_list[q_i][c_i].id_;
            }
        }
    }
}

/**
 * Reduce all per-thread bucket arrays into the last local array using a
 * binary reduction tree (parallel across pairs), fold any non-power-of-two
 * remainder in sequentially, then merge the last local array into
 * global_buckets.
 *
 * @return index of the first non-empty bucket of the final local array
 *         (scanned BEFORE the merge into global_buckets).
 */
inline idi Searching::merge_all_buckets_para(
        std::vector< std::vector< std::vector<Candidate> > > &local_buckets_list,
        std::vector<idi> &local_insert_locations,
        std::vector< std::vector<Candidate> > &global_buckets,
        const idi num_buckets)
{
    idi num_arrays = num_threads_;
    idi size = 1 << (static_cast<idi>(log2(num_arrays))); // Largest power of two <= num_arrays.
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        uint32_t by = 1 << (d + 1);
#pragma omp parallel for
        for (idi i = 0; i < size; i += by) {
            idi ai = i + (1 << (d + 1)) - 1;
            idi bi = i + (1 << d) - 1;
            auto &a_array = local_buckets_list[ai];
            auto &b_array = local_buckets_list[bi];
            idi &a_bound = local_insert_locations[ai];
            idi &b_bound = local_insert_locations[bi];
            // (Commented-out fine-grained per-bucket variant elided; it was
            // noted as slower.)
            merge_two_bkt_arrays_into_1st_bkt_array_seq(
                    a_array,
                    b_array,
                    num_buckets);
            if (a_bound < b_bound) {
                a_bound = b_bound;
            }
            b_bound = 0;
        }
    }
    if (size != num_arrays) {
        // Fold the remaining arrays in sequentially, chain-style.
        for (idi a_i = size; a_i < num_arrays; ++a_i) {
            idi ai = a_i;
            idi bi = ai - 1;
            auto &a_array = local_buckets_list[ai];
            auto &b_array = local_buckets_list[bi];
            idi &a_bound = local_insert_locations[ai];
            idi &b_bound = local_insert_locations[bi];
            merge_two_bkt_arrays_into_1st_bkt_array_seq(
                    a_array,
                    b_array,
                    num_buckets);
            if (a_bound < b_bound) {
                a_bound = b_bound;
            }
            b_bound = 0;
        }
    }

    // Merge into the global_buckets.
    idi first_bucket = 0;
    auto &b_array = *local_buckets_list.rbegin();
    while (first_bucket < num_buckets && b_array[first_bucket].size() == 0) {
        ++first_bucket;
    }
    idi &b_bound = *local_insert_locations.rbegin();
    merge_two_bkt_arrays_into_1st_bkt_array_seq(
            global_buckets,
            b_array,
            num_buckets);
    b_bound = 0;

    return first_bucket;
}

inline idi Searching::merge_all_queues_para(
        std::vector< std::vector<Candidate> > &local_queues_list,
        std::vector<idi> &local_queues_ends,
        std::vector<Candidate> &set_L,
        const idi L)
{
    int size = 1 << (static_cast<idi>(log2(num_threads_)));
    idi log2size = static_cast<idi>(log2(size));
    for (idi d = 0; d < log2size; ++d) {
        uint32_t by = 1 << (d + 1);
#pragma omp parallel for
        for (int i = 0; i < size; i += by) {
            idi ai = i + (1 << (d + 1)) - 1; // i +
2^(d+1) - 1 idi bi = i + (1 << d) - 1; // i + 2^d - 1 if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { local_queues_list[ai].swap(local_queues_list[bi]); std::swap(local_queues_ends[ai], local_queues_ends[bi]); continue; } // else if (local_queues_ends[ai] < L && local_queues_ends[bi] >= L) { // local_queues_list[ai].swap(local_queues_list[bi]); // std::swap(local_queues_ends[ai], local_queues_ends[bi]); // } // merge_two_queues_into_1st_queue_seq( // local_queues_list[ai], // 0, // local_queues_ends[ai], // local_queues_list[bi], // 0, // local_queues_ends[bi]); idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi]; std::vector<Candidate> tmp_queue(tmp_length); std::merge( local_queues_list[ai].begin(), local_queues_list[ai].begin() + local_queues_ends[ai], local_queues_list[bi].begin(), local_queues_list[bi].begin() + local_queues_ends[bi], tmp_queue.begin()); if (tmp_length > L) { tmp_queue.resize(L); tmp_length = L; } else if (tmp_length < L) { tmp_queue.resize(L); } local_queues_list[ai].swap(tmp_queue); local_queues_ends[ai] = tmp_length; } } // Remain, prefix-sum-like merge if (size != num_threads_) { for (int i = size; i < num_threads_; ++i) { idi ai = i; idi bi = i - 1; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { local_queues_list[ai].swap(local_queues_list[bi]); std::swap(local_queues_ends[ai], local_queues_ends[bi]); continue; } // else if (local_queues_ends[ai] < L && local_queues_ends[bi] >= L) { // local_queues_list[ai].swap(local_queues_list[bi]); // std::swap(local_queues_ends[ai], local_queues_ends[bi]); // } // merge_two_queues_into_1st_queue_seq( // local_queues_list[ai], // 0, // local_queues_ends[ai], // local_queues_list[bi], // 0, // local_queues_ends[bi]); idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi]; std::vector<Candidate> tmp_queue(tmp_length); std::merge( local_queues_list[ai].begin(), local_queues_list[ai].begin() + local_queues_ends[ai], 
local_queues_list[bi].begin(), local_queues_list[bi].begin() + local_queues_ends[bi], tmp_queue.begin()); if (tmp_length > L) { tmp_queue.resize(L); tmp_length = L; } else if (tmp_length < L) { tmp_queue.resize(L); } local_queues_list[ai].swap(tmp_queue); local_queues_ends[ai] = tmp_length; } } // Merge into set_L idi r = L; if (local_queues_ends[num_threads_ - 1]) { r = merge_two_queues_into_1st_queue_seq( set_L, 0, L, local_queues_list[num_threads_ - 1], 0, local_queues_ends[num_threads_ - 1]); } // Reset local_queues_ends std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); return r; } //// Using local buckets and then merge. inline void Searching::para_search_with_top_m( const PANNS::idi M, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K) // std::vector< std::vector<idi> > &top_m_list) { // const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_; // const idi local_queue_length = L; // std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length)); // std::vector<idi> local_queues_ends(num_threads_, 0); // boost::dynamic_bitset<> is_visited(num_v_); std::vector<uint8_t> is_visited(num_v_, 0); { #pragma omp parallel for for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = 1; } } const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // Get the distances of all candidates, store in the set set_L. 
distf dist_lower = DISTF_MAX;
distf dist_upper = DISTF_MIN;
// Compute the query's distance to every initial candidate in parallel,
// tracking the smallest/largest distance seen via OpenMP min/max
// reductions; these bounds size the distance buckets built below.
#pragma omp parallel for reduction(max : dist_upper) reduction(min : dist_lower)
for (unsigned i = 0; i < L; i++) {
    unsigned v_id = init_ids[i];
    // Vertex record layout: a leading norm (dataf), then the raw vector data.
    auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
    dataf norm = *v_data++;
//    ++count_distance_computation;
    distf dist = compute_distance_with_norm(v_data, query_data, norm);
    set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    dist_lower = std::min(dist_lower, dist);
    dist_upper = std::max(dist_upper, dist);
//    if (dist < dist_lower) {
//        dist_lower = dist;
//    }
//    else if (dist > dist_upper) {
////    if (dist > dist_upper) {
//        dist_upper = dist;
//    }
}
//    std::sort(set_L.begin(), set_L.begin() + L);

// Compute the width for buckets
// num_buckets equal-width buckets partition the normalized distance range;
// bucket_width is the fraction of that range covered by one bucket.
const idi num_buckets = 64;
const distf bucket_width = 1.0 / num_buckets;
//    CandidatesBuckets global_buckets(num_buckets, set_L.begin()->distance_, set_L.rbegin()->distance_);
// One shared bucket array plus one private bucket array per thread;
// local results are merged into global_buckets after each expansion round.
std::vector< std::vector<Candidate> > global_buckets(num_buckets);
std::vector< std::vector< std::vector<Candidate> > > local_buckets_list(
        num_threads_,
        std::vector< std::vector<Candidate> >(num_buckets));
std::vector<idi> local_insert_locations(num_threads_, 0); // Record a local array's highest bucket id that has new insertion.
distf bucket_lower = dist_lower;
distf bucket_upper = dist_upper >= 0 ? dist_upper + 1 : dist_upper - 1; // TODO: is it proper?
// NOTE(review): if dist_upper is negative, bucket_upper = dist_upper - 1 can
// fall below bucket_lower, making overall_width non-positive; the TODO above
// already questions this — confirm distances are always nonnegative here.
//    distf bucket_lower = set_L.begin()->distance_;
//    distf bucket_upper = set_L.rbegin()->distance_;
distf overall_width = bucket_upper - bucket_lower;
// Copy set_L into global_bucket.
for (idi q_i = 0; q_i < L; ++q_i) { // idi b_i = (set_L[q_i].distance_ - bucket_lower) / overall_width / bucket_width; // global_buckets[b_i].push_back(set_L[q_i]); add_into_CandidatesBuckets( set_L[q_i], global_buckets, bucket_lower, overall_width, bucket_width); } std::vector<idi> top_m_candidates(M); idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. idi b_index = 0; // Index of the first bucket that contains unchecked candidates. bool is_finished = false; idi tmp_count = 0; // for debug // while (k < L) { while (!is_finished) { ++tmp_count; // Select M candidates idi last_b = num_buckets; // The last index of the bucket that contains unchecked candidates. Used for updating b_index. for (idi b_i = b_index; b_i < num_buckets && top_m_candidates_end < M; ++b_i) { // Traverse bucket b_i auto &bucket_i = global_buckets[b_i]; idi q_i_bound = bucket_i.size(); for (idi q_i = 0; q_i < q_i_bound && top_m_candidates_end < M; ++q_i) { if (bucket_i[q_i].is_checked_) { continue; } last_b = b_i; bucket_i[q_i].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = bucket_i[q_i].id_; } } //// Backup // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } //// // Push M candidates' neighbors into the queue. 
#pragma omp parallel for for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { int tid = omp_get_thread_num(); idi cand_id = top_m_candidates[c_i]; idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; if (!AtomicOps::CAS(is_visited.data() + nb_id, static_cast<uint8_t>(0), static_cast<uint8_t>(1))) { continue; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { if (dist > dist_upper) { continue; } Candidate cand(nb_id, dist, false); // Add to the local buckets. idi r = add_into_CandidatesBuckets( cand, local_buckets_list[tid], bucket_lower, overall_width, bucket_width); if (r > local_insert_locations[tid]) { local_insert_locations[tid] = r; } // idi b_i = (cand.distance_ - bucket_lower) / overall_width / bucket_width; // local_buckets_list[tid][b_i].push_back(cand); // // Add to the local queue. // add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand); } } top_m_candidates_end = 0; // Clear top_m_candidates idi lowest_b_i = num_buckets; // The largest index of the bucket that has new candidates // Merge buckets. Merge all local buckets in parallel. 
{ // if (num_threads_ > 1) { lowest_b_i = merge_all_buckets_para( local_buckets_list, local_insert_locations, global_buckets, num_buckets); // } else { // auto &b_array = local_buckets_list[0]; // while (b_array[lowest_b_i].size() == 0) { // ++lowest_b_i; // } // for (idi bk_i = lowest_b_i; bk_i < num_buckets; ++bk_i) { // auto &b_bucket = b_array[bk_i]; // if (b_bucket.empty()) // continue; // auto &a_bucket = global_buckets[bk_i]; // a_bucket.insert(a_bucket.end(), b_bucket.begin(), b_bucket.end()); // } // } } {// Update the start bucket. if (lowest_b_i <= last_b) { b_index = lowest_b_i; } else { b_index = last_b + 1; } // TODO: dynamic buckets width. } {// Check the converge condition if (b_index >= num_buckets) { is_finished = true; } else { idi fixed_count = 0; for (idi b_i = 0; b_i < b_index && fixed_count < L; ++b_i) { fixed_count += global_buckets[b_i].size(); } if (fixed_count >= L) { is_finished = true; } // else { // // Update dist_upper // idi b_i = b_index; // for ( ; b_i < num_buckets && fixed_count < L; ++b_i) { // fixed_count += global_buckets[b_i].size(); // } // distf tmp_upper = b_i * bucket_width * overall_width + bucket_lower; // if (tmp_upper < dist_upper) { // dist_upper = tmp_upper; // } // } } } // {//test // // Print global_buckets' sizes // printf("sizes:"); // for (idi b_i = 0; b_i < num_buckets; ++b_i) { // printf(" %u:%lu", // b_i, global_buckets[b_i].size()); // } // printf("\n"); // } // idi nk = L; // // Merge. Parallel merging in every two queues. // { // for (int tid = 0; tid < num_threads_; ++tid) { // if (0 == local_queues_ends[tid]) continue; // idi r = merge_two_queues_into_1st_queue_para( // set_L, // 0, // L, // local_queues_list[tid], // 0, // local_queues_ends[tid]); //// idi r = merge_two_queues_into_1st_queue_seq( //// set_L, //// 0, //// L, //// local_queues_list[tid], //// 0, //// local_queues_ends[tid]); // local_queues_ends[tid] = 0; // Reset the local queue // if (r < nk) { // nk = r; // } // } // } // // Merge. 
Merge all queues in parallel. // { // if (num_threads_ > 1) { // idi r = merge_all_queues_para( // local_queues_list, // local_queues_ends, // set_L, // L); // if (r < nk) { // nk = r; // } // } else { // if (local_queues_ends[0]) { // idi r = merge_two_queues_into_1st_queue_seq( // set_L, // 0, // L, // local_queues_list[0], // 0, // local_queues_ends[0]); // local_queues_ends[0] = 0; // if (r < nk) { // nk = r; // } // } // } // } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } } //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } {// Move to set_K idi count = 0; idi last_b = 0; for ( ; last_b < num_buckets && count < K; ++last_b) { auto &bucket_i = global_buckets[last_b]; count += bucket_i.size(); std::sort(bucket_i.begin(), bucket_i.end()); } idi k_i = 0; for (idi b_i = 0; b_i < last_b && k_i < K; ++b_i) { const auto &bucket_i = global_buckets[b_i]; const idi size_b = bucket_i.size(); for (idi q_i = 0; q_i < size_b && k_i < K; ++q_i) { set_K[k_i++] = bucket_i[q_i].id_; } } } // {//// DEPRECATED. Cannot guarantee the accuracy. 
// idi count = 0; // for (idi b_i = 0; b_i < num_buckets && count < K; ++b_i) { // auto &bucket_i = global_buckets[b_i]; // idi size_b = bucket_i.size(); // if (count + size_b >= K) { // idi remain = K - count; //// std::copy_n(set_L.begin() + count, remain, bucket_i.begin()); // std::memmove(set_L.data() + count, bucket_i.data(), remain * sizeof(Candidate)); // count = K; // } else { //// std::copy_n(set_L.begin() + count, size_b, bucket_i.begin()); // std::memmove(set_L.data() + count, bucket_i.data(), size_b * sizeof(Candidate)); // count += size_b; // } // } // } // // {//test // for (idi k_i = 0; k_i < K; ++k_i) { // printf("query_id: %u " // "set_L[%u]: " // "id: %u " // "dist: %f\n", // query_id, // k_i, // set_L[k_i].id_, // set_L[k_i].distance_); // } // exit(1); // } } ////// Backup: using OpenMP critical clause //inline void Searching::para_search_with_top_m( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //// for (idi v_i = 0; v_i < L; ++v_i) { //// idi v_id = init_ids[v_i]; //// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); //// } // // Get the distances of all candidates, store in the set set_L. 
//#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
//#pragma omp parallel for ////#pragma omp parallel for reduction(min : nk) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; //#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} // DEPRECATED. No enough workload for OpenMP, and hard to implement efficiently. ///** // * Prepare init_ids and flags, as they are constant for all queries. 
// * @param[out] init_ids // * @param L // */ //inline void Searching::para_prepare_init_ids( // std::vector<unsigned int> &init_ids, // unsigned L) const //{ //// idi num_ngbrs = get_out_degree(ep_); //// edgei edge_start = nsg_graph_indices_[ep_]; //// // Store ep_'s neighbors as candidates //// idi tmp_l = 0; //// for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) { //// init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l]; //// } //// std::unordered_set<idi> visited_ids; // std::vector<uint8_t> is_selected(num_v_, 0); //// boost::dynamic_bitset<> is_selected(num_v_); // idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // idi init_ids_end = 0; //// idi e_i_bound = out_degree <= L ? out_degree : L; //#pragma omp parallel for // for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) { //// for (idi e_i = 0; e_i < e_i_bound; ++e_i) { // idi v_id = out_edges[e_i]; //// if(is_selected[v_id]) { //// continue; //// } //// is_selected[v_id] = 1; // // if (!AtomicOps::CAS(is_selected.data() + v_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // //// init_ids[init_ids_end++] = v_id; // volatile idi old_v = init_ids_end; // volatile idi new_v = old_v + 1; // while (!AtomicOps::CAS(&init_ids_end, old_v, new_v)) { // old_v = init_ids_end; // new_v = old_v + 1; // } // init_ids[old_v] = v_id; // } // //// for (idi i = 0; i < tmp_l; ++i) { //// is_visited[init_ids[i]] = true; //// } // // // If ep_'s neighbors are not enough, add other random vertices // idi tmp_id = ep_ + 1; // use tmp_id to replace rand(). // while (init_ids_end < L) { // tmp_id %= num_v_; // idi v_id = tmp_id++; // if (is_selected[v_id]) { // continue; // } //// if (visited_ids.find(id) != visited_ids.end()) { //// continue; //// } // is_selected[v_id] = 1; //// visited_ids.insert(id); // init_ids[init_ids_end++] = v_id; //// tmp_l++; // } //} } // namespace PANNS #endif //BATCH_SEARCHING_SEARCHING_H
DRB095-doall2-taskloop-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
 * Two-dimensional array computation with a collapsed OpenMP worksharing loop.
 *
 * Both loop iteration variables are privatized: collapse(2) makes i and j
 * implicitly private per the OpenMP specification, and private(j) states it
 * explicitly as well.  Every (i, j) iteration writes a distinct element
 * a[i][j], so this version of the kernel is free of data races.
 *
 * NOTE(review): the header comment previously documented data-race pairs on a
 * shared inner index j (Write_set = {j@69:14, j@69:30}, etc.) for the original
 * "omp taskloop" variant of DRB095, and the filename still carries the
 * "-yes" (race present) label.  The code below no longer matches that
 * description -- confirm which variant is intended before relying on the
 * filename's race label.
 */
#include <stdio.h>

int a[100][100];

int main()
{
  int i, j;
#pragma omp parallel
  {
#pragma omp for private(j) collapse(2)
    for (i = 0; i < 100; i++)
      for (j = 0; j < 100; j++)
        a[i][j]+=1;  /* each (i,j) pair is executed by exactly one thread */
  }
  printf ("a[50][50]=%d\n", a[50][50]);
  return 0;
}
GB_binop__bget_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__bget_int8) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__bget_int8) // A.*B function (eWiseMult): GB (_AemultB_03__bget_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__bget_int8) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((node)) // C+=B function (dense accum): GB (_Cdense_accumB__bget_int8) // C+=b function (dense accum): GB (_Cdense_accumb__bget_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bget_int8) // C=scalar+B GB (_bind1st__bget_int8) // C=scalar+B' GB (_bind1st_tran__bget_int8) // C=A+scalar GB (_bind2nd__bget_int8) // C=A'+scalar GB (_bind2nd_tran__bget_int8) // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = GB_BITGET (aij, bij, int8_t, 8) #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define 
GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int8_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_BITGET (x, y, int8_t, 8) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_BGET || GxB_NO_INT8 || GxB_NO_BGET_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__bget_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__bget_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__bget_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
// NOTE(review): this region appears to be machine-generated GraphBLAS kernel
// source (SuiteSparse:GraphBLAS factory output) specialized for the "bget"
// binary operator on int8 values: z = GB_BITGET (x, y, int8_t, 8).  Each
// function body is produced by textually including a shared template
// (#include "GB_*_template.c" / "GB_*_meta.c").  The GB name-mangling macro
// and the GB_DISABLE, GB_WERK_DECLARE, GB_FREE_WORK, GBB and GB_BITGET macros
// are defined in headers not visible in this excerpt — do not hand-edit
// these kernels; regenerate them instead.  TODO confirm against the
// GraphBLAS Generator templates.

// Tail of a kernel whose head lies above this excerpt (its name and leading
// parameters are cut off): column-scales C via GB_AxB_colscale_meta.c.
    *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// NOTE(review): the "(node)" token below looks like a garbled generated name,
// but the whole block is compiled out by "#if 0" so it has no effect; confirm
// against the code generator before enabling it.
#if 0
GrB_Info GB ((node))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Element-wise add over the union of patterns of A and B, optionally under a
// mask M; the actual work is done by the included GB_add_template.c.
GrB_Info GB (_AaddB__bget_int8)
(
    GrB_Matrix C, const int C_sparsity,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks, const int C_nthreads, GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix workspaces released by GB_FREE_WORK after the template runs
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// Element-wise multiply over the intersection of patterns of A and B.
GrB_Info GB (_AemultB_01__bget_int8)
(
    GrB_Matrix C, const int C_sparsity, const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks, const int C_nthreads, GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bget_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B, const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant.  For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__bget_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bget_int8)
(
    GrB_Matrix C, const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads, GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// For each entry p present in B (per the Bb bitmap, if any), computes
// Cx [p] = GB_BITGET (x, Bx [p], int8_t, 8), with the scalar x bound to the
// first operand.  The loop is parallelized with OpenMP over anz entries.
GrB_Info GB (_bind1st__bget_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t x = (*((int8_t *) x_input)) ;
    int8_t *Bx = (int8_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        int8_t bij = Bx [p] ;
        Cx [p] = GB_BITGET (x, bij, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Mirror of bind1st: the scalar y is bound to the second operand, so
// Cx [p] = GB_BITGET (Ax [p], y, int8_t, 8) for each entry p present in A.
GrB_Info GB (_bind2nd__bget_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int8_t *Cx = (int8_t *) Cx_output ;
    int8_t *Ax = (int8_t *) Ax_input ;
    int8_t y = (*((int8_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        int8_t aij = Ax [p] ;
        Cx [p] = GB_BITGET (aij, y, int8_t, 8) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is the per-entry hook consumed by GB_unop_transpose.c below.
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int8_t aij = Ax [pA] ;                          \
    Cx [pC] = GB_BITGET (x, aij, int8_t, 8) ;       \
}

GrB_Info GB (_bind1st_tran__bget_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any subsequent template expansions
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int8_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    int8_t aij = Ax [pA] ;                          \
    Cx [pC] = GB_BITGET (aij, y, int8_t, 8) ;       \
}

GrB_Info GB (_bind2nd_tran__bget_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

// closes a conditional-compilation guard opened above this excerpt
// (presumably the per-type kernel enable guard — TODO confirm)
#endif
// ---- begin: convolution_5x5_pack8_fp16s.h ----
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv5x5s1_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const __fp16* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); float16x8_t _bias0 = bias ? 
vld1q_f16(bias + p * 8) : vdupq_n_f16(0.f); out0.fill(_bias0); int q = 0; for (; q < inch; q++) { __fp16* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const __fp16* r0 = img0.row<const __fp16>(0); const __fp16* r1 = img0.row<const __fp16>(1); const __fp16* r2 = img0.row<const __fp16>(2); const __fp16* r3 = img0.row<const __fp16>(3); const __fp16* r4 = img0.row<const __fp16>(4); const __fp16* kptr = kernel.channel(p).row<const __fp16>(q); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0] \n" // sum0 sum1 sum2 sum3 "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v0.h[0] \n" "fmla v29.8h, v16.8h, v1.h[0] \n" "fmla v30.8h, v16.8h, v2.h[0] \n" "fmla v31.8h, v16.8h, v3.h[0] \n" "fmla v28.8h, v17.8h, v0.h[1] \n" "fmla v29.8h, v17.8h, v1.h[1] \n" "fmla v30.8h, v17.8h, v2.h[1] \n" "fmla v31.8h, v17.8h, v3.h[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v18.8h, v0.h[2] \n" "fmla v29.8h, v18.8h, v1.h[2] \n" "fmla v30.8h, v18.8h, v2.h[2] \n" "fmla v31.8h, v18.8h, v3.h[2] \n" "fmla v28.8h, v19.8h, v0.h[3] \n" "fmla v29.8h, v19.8h, v1.h[3] \n" "fmla v30.8h, v19.8h, v2.h[3] \n" "fmla v31.8h, v19.8h, v3.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v20.8h, v1.h[4] \n" "fmla v30.8h, v20.8h, v2.h[4] \n" "fmla v31.8h, v20.8h, v3.h[4] \n" "fmla v28.8h, v21.8h, v0.h[5] \n" "fmla v29.8h, v21.8h, v1.h[5] \n" "fmla v30.8h, v21.8h, v2.h[5] \n" "fmla v31.8h, v21.8h, v3.h[5] \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%1] \n" // r04 r05 r06 r07 "fmla v28.8h, v22.8h, v0.h[6] \n" "fmla v29.8h, v22.8h, v1.h[6] \n" "fmla v30.8h, 
v22.8h, v2.h[6] \n" "fmla v31.8h, v22.8h, v3.h[6] \n" "fmla v28.8h, v23.8h, v0.h[7] \n" "fmla v29.8h, v23.8h, v1.h[7] \n" "fmla v30.8h, v23.8h, v2.h[7] \n" "fmla v31.8h, v23.8h, v3.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v16.8h, v2.h[0] \n" "fmla v30.8h, v16.8h, v3.h[0] \n" "fmla v31.8h, v16.8h, v4.h[0] \n" "fmla v28.8h, v17.8h, v1.h[1] \n" "fmla v29.8h, v17.8h, v2.h[1] \n" "fmla v30.8h, v17.8h, v3.h[1] \n" "fmla v31.8h, v17.8h, v4.h[1] \n" "fmla v28.8h, v18.8h, v1.h[2] \n" "fmla v29.8h, v18.8h, v2.h[2] \n" "fmla v30.8h, v18.8h, v3.h[2] \n" "fmla v31.8h, v18.8h, v4.h[2] \n" "fmla v28.8h, v19.8h, v1.h[3] \n" "fmla v29.8h, v19.8h, v2.h[3] \n" "fmla v30.8h, v19.8h, v3.h[3] \n" "fmla v31.8h, v19.8h, v4.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v20.8h, v2.h[4] \n" "fmla v30.8h, v20.8h, v3.h[4] \n" "fmla v31.8h, v20.8h, v4.h[4] \n" "fmla v28.8h, v21.8h, v1.h[5] \n" "fmla v29.8h, v21.8h, v2.h[5] \n" "fmla v30.8h, v21.8h, v3.h[5] \n" "fmla v31.8h, v21.8h, v4.h[5] \n" "fmla v28.8h, v22.8h, v1.h[6] \n" "fmla v29.8h, v22.8h, v2.h[6] \n" "fmla v30.8h, v22.8h, v3.h[6] \n" "fmla v31.8h, v22.8h, v4.h[6] \n" "fmla v28.8h, v23.8h, v1.h[7] \n" "fmla v29.8h, v23.8h, v2.h[7] \n" "fmla v30.8h, v23.8h, v3.h[7] \n" "fmla v31.8h, v23.8h, v4.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v16.8h, v3.h[0] \n" "fmla v30.8h, v16.8h, v4.h[0] \n" "fmla v31.8h, v16.8h, v5.h[0] \n" "fmla v28.8h, v17.8h, v2.h[1] \n" "fmla v29.8h, v17.8h, v3.h[1] \n" "fmla v30.8h, v17.8h, v4.h[1] \n" "fmla v31.8h, v17.8h, v5.h[1] \n" "fmla v28.8h, v18.8h, v2.h[2] \n" "fmla v29.8h, v18.8h, v3.h[2] \n" "fmla v30.8h, v18.8h, v4.h[2] \n" "fmla v31.8h, v18.8h, v5.h[2] \n" "fmla v28.8h, v19.8h, v2.h[3] \n" "fmla v29.8h, v19.8h, 
v3.h[3] \n" "fmla v30.8h, v19.8h, v4.h[3] \n" "fmla v31.8h, v19.8h, v5.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v20.8h, v3.h[4] \n" "fmla v30.8h, v20.8h, v4.h[4] \n" "fmla v31.8h, v20.8h, v5.h[4] \n" "fmla v28.8h, v21.8h, v2.h[5] \n" "fmla v29.8h, v21.8h, v3.h[5] \n" "fmla v30.8h, v21.8h, v4.h[5] \n" "fmla v31.8h, v21.8h, v5.h[5] \n" "fmla v28.8h, v22.8h, v2.h[6] \n" "fmla v29.8h, v22.8h, v3.h[6] \n" "fmla v30.8h, v22.8h, v4.h[6] \n" "fmla v31.8h, v22.8h, v5.h[6] \n" "fmla v28.8h, v23.8h, v2.h[7] \n" "fmla v29.8h, v23.8h, v3.h[7] \n" "fmla v30.8h, v23.8h, v4.h[7] \n" "fmla v31.8h, v23.8h, v5.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v3.h[0] \n" "fmla v29.8h, v16.8h, v4.h[0] \n" "fmla v30.8h, v16.8h, v5.h[0] \n" "fmla v31.8h, v16.8h, v6.h[0] \n" "fmla v28.8h, v17.8h, v3.h[1] \n" "fmla v29.8h, v17.8h, v4.h[1] \n" "fmla v30.8h, v17.8h, v5.h[1] \n" "fmla v31.8h, v17.8h, v6.h[1] \n" "fmla v28.8h, v18.8h, v3.h[2] \n" "fmla v29.8h, v18.8h, v4.h[2] \n" "fmla v30.8h, v18.8h, v5.h[2] \n" "fmla v31.8h, v18.8h, v6.h[2] \n" "fmla v28.8h, v19.8h, v3.h[3] \n" "fmla v29.8h, v19.8h, v4.h[3] \n" "fmla v30.8h, v19.8h, v5.h[3] \n" "fmla v31.8h, v19.8h, v6.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v3.h[4] \n" "fmla v29.8h, v20.8h, v4.h[4] \n" "fmla v30.8h, v20.8h, v5.h[4] \n" "fmla v31.8h, v20.8h, v6.h[4] \n" "fmla v28.8h, v21.8h, v3.h[5] \n" "fmla v29.8h, v21.8h, v4.h[5] \n" "fmla v30.8h, v21.8h, v5.h[5] \n" "fmla v31.8h, v21.8h, v6.h[5] \n" "fmla v28.8h, v22.8h, v3.h[6] \n" "fmla v29.8h, v22.8h, v4.h[6] \n" "fmla v30.8h, v22.8h, v5.h[6] \n" "fmla v31.8h, v22.8h, v6.h[6] \n" "fmla v28.8h, v23.8h, v3.h[7] \n" "fmla v29.8h, v23.8h, v4.h[7] \n" "fmla v30.8h, v23.8h, v5.h[7] \n" "fmla v31.8h, v23.8h, v6.h[7] \n" "prfm pldl1keep, [%6, #512] \n" 
"ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v4.h[0] \n" "fmla v29.8h, v16.8h, v5.h[0] \n" "fmla v30.8h, v16.8h, v6.h[0] \n" "fmla v31.8h, v16.8h, v7.h[0] \n" "fmla v28.8h, v17.8h, v4.h[1] \n" "fmla v29.8h, v17.8h, v5.h[1] \n" "fmla v30.8h, v17.8h, v6.h[1] \n" "fmla v31.8h, v17.8h, v7.h[1] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v28.8h, v18.8h, v4.h[2] \n" "fmla v29.8h, v18.8h, v5.h[2] \n" "fmla v30.8h, v18.8h, v6.h[2] \n" "fmla v31.8h, v18.8h, v7.h[2] \n" "fmla v28.8h, v19.8h, v4.h[3] \n" "fmla v29.8h, v19.8h, v5.h[3] \n" "fmla v30.8h, v19.8h, v6.h[3] \n" "fmla v31.8h, v19.8h, v7.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v5.h[4] \n" "fmla v30.8h, v20.8h, v6.h[4] \n" "fmla v31.8h, v20.8h, v7.h[4] \n" "fmla v28.8h, v21.8h, v4.h[5] \n" "fmla v29.8h, v21.8h, v5.h[5] \n" "fmla v30.8h, v21.8h, v6.h[5] \n" "fmla v31.8h, v21.8h, v7.h[5] \n" "fmla v28.8h, v22.8h, v4.h[6] \n" "fmla v29.8h, v22.8h, v5.h[6] \n" "fmla v30.8h, v22.8h, v6.h[6] \n" "fmla v31.8h, v22.8h, v7.h[6] \n" "fmla v28.8h, v23.8h, v4.h[7] \n" "fmla v29.8h, v23.8h, v5.h[7] \n" "fmla v30.8h, v23.8h, v6.h[7] \n" "fmla v31.8h, v23.8h, v7.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v8.h[0] \n" "fmla v29.8h, v16.8h, v9.h[0] \n" "fmla v30.8h, v16.8h, v10.h[0] \n" "fmla v31.8h, v16.8h, v11.h[0] \n" "fmla v28.8h, v17.8h, v8.h[1] \n" "fmla v29.8h, v17.8h, v9.h[1] \n" "fmla v30.8h, v17.8h, v10.h[1] \n" "fmla v31.8h, v17.8h, v11.h[1] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%2] \n" // r14 r15 r16 r17 "fmla v28.8h, v18.8h, v8.h[2] \n" "fmla v29.8h, v18.8h, v9.h[2] \n" "fmla v30.8h, v18.8h, v10.h[2] \n" "fmla v31.8h, v18.8h, v11.h[2] \n" "fmla v28.8h, v19.8h, v8.h[3] \n" "fmla v29.8h, v19.8h, v9.h[3] \n" "fmla 
v30.8h, v19.8h, v10.h[3] \n" "fmla v31.8h, v19.8h, v11.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v8.h[4] \n" "fmla v29.8h, v20.8h, v9.h[4] \n" "fmla v30.8h, v20.8h, v10.h[4] \n" "fmla v31.8h, v20.8h, v11.h[4] \n" "fmla v28.8h, v21.8h, v8.h[5] \n" "fmla v29.8h, v21.8h, v9.h[5] \n" "fmla v30.8h, v21.8h, v10.h[5] \n" "fmla v31.8h, v21.8h, v11.h[5] \n" "fmla v28.8h, v22.8h, v8.h[6] \n" "fmla v29.8h, v22.8h, v9.h[6] \n" "fmla v30.8h, v22.8h, v10.h[6] \n" "fmla v31.8h, v22.8h, v11.h[6] \n" "fmla v28.8h, v23.8h, v8.h[7] \n" "fmla v29.8h, v23.8h, v9.h[7] \n" "fmla v30.8h, v23.8h, v10.h[7] \n" "fmla v31.8h, v23.8h, v11.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v9.h[0] \n" "fmla v29.8h, v16.8h, v10.h[0] \n" "fmla v30.8h, v16.8h, v11.h[0] \n" "fmla v31.8h, v16.8h, v12.h[0] \n" "fmla v28.8h, v17.8h, v9.h[1] \n" "fmla v29.8h, v17.8h, v10.h[1] \n" "fmla v30.8h, v17.8h, v11.h[1] \n" "fmla v31.8h, v17.8h, v12.h[1] \n" "fmla v28.8h, v18.8h, v9.h[2] \n" "fmla v29.8h, v18.8h, v10.h[2] \n" "fmla v30.8h, v18.8h, v11.h[2] \n" "fmla v31.8h, v18.8h, v12.h[2] \n" "fmla v28.8h, v19.8h, v9.h[3] \n" "fmla v29.8h, v19.8h, v10.h[3] \n" "fmla v30.8h, v19.8h, v11.h[3] \n" "fmla v31.8h, v19.8h, v12.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v9.h[4] \n" "fmla v29.8h, v20.8h, v10.h[4] \n" "fmla v30.8h, v20.8h, v11.h[4] \n" "fmla v31.8h, v20.8h, v12.h[4] \n" "fmla v28.8h, v21.8h, v9.h[5] \n" "fmla v29.8h, v21.8h, v10.h[5] \n" "fmla v30.8h, v21.8h, v11.h[5] \n" "fmla v31.8h, v21.8h, v12.h[5] \n" "fmla v28.8h, v22.8h, v9.h[6] \n" "fmla v29.8h, v22.8h, v10.h[6] \n" "fmla v30.8h, v22.8h, v11.h[6] \n" "fmla v31.8h, v22.8h, v12.h[6] \n" "fmla v28.8h, v23.8h, v9.h[7] \n" "fmla v29.8h, v23.8h, v10.h[7] \n" "fmla v30.8h, v23.8h, v11.h[7] \n" "fmla v31.8h, v23.8h, v12.h[7] \n" "prfm pldl1keep, 
[%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v10.h[0] \n" "fmla v29.8h, v16.8h, v11.h[0] \n" "fmla v30.8h, v16.8h, v12.h[0] \n" "fmla v31.8h, v16.8h, v13.h[0] \n" "fmla v28.8h, v17.8h, v10.h[1] \n" "fmla v29.8h, v17.8h, v11.h[1] \n" "fmla v30.8h, v17.8h, v12.h[1] \n" "fmla v31.8h, v17.8h, v13.h[1] \n" "fmla v28.8h, v18.8h, v10.h[2] \n" "fmla v29.8h, v18.8h, v11.h[2] \n" "fmla v30.8h, v18.8h, v12.h[2] \n" "fmla v31.8h, v18.8h, v13.h[2] \n" "fmla v28.8h, v19.8h, v10.h[3] \n" "fmla v29.8h, v19.8h, v11.h[3] \n" "fmla v30.8h, v19.8h, v12.h[3] \n" "fmla v31.8h, v19.8h, v13.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v10.h[4] \n" "fmla v29.8h, v20.8h, v11.h[4] \n" "fmla v30.8h, v20.8h, v12.h[4] \n" "fmla v31.8h, v20.8h, v13.h[4] \n" "fmla v28.8h, v21.8h, v10.h[5] \n" "fmla v29.8h, v21.8h, v11.h[5] \n" "fmla v30.8h, v21.8h, v12.h[5] \n" "fmla v31.8h, v21.8h, v13.h[5] \n" "fmla v28.8h, v22.8h, v10.h[6] \n" "fmla v29.8h, v22.8h, v11.h[6] \n" "fmla v30.8h, v22.8h, v12.h[6] \n" "fmla v31.8h, v22.8h, v13.h[6] \n" "fmla v28.8h, v23.8h, v10.h[7] \n" "fmla v29.8h, v23.8h, v11.h[7] \n" "fmla v30.8h, v23.8h, v12.h[7] \n" "fmla v31.8h, v23.8h, v13.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v11.h[0] \n" "fmla v29.8h, v16.8h, v12.h[0] \n" "fmla v30.8h, v16.8h, v13.h[0] \n" "fmla v31.8h, v16.8h, v14.h[0] \n" "fmla v28.8h, v17.8h, v11.h[1] \n" "fmla v29.8h, v17.8h, v12.h[1] \n" "fmla v30.8h, v17.8h, v13.h[1] \n" "fmla v31.8h, v17.8h, v14.h[1] \n" "fmla v28.8h, v18.8h, v11.h[2] \n" "fmla v29.8h, v18.8h, v12.h[2] \n" "fmla v30.8h, v18.8h, v13.h[2] \n" "fmla v31.8h, v18.8h, v14.h[2] \n" "fmla v28.8h, v19.8h, v11.h[3] \n" "fmla v29.8h, v19.8h, v12.h[3] \n" "fmla v30.8h, v19.8h, v13.h[3] \n" "fmla v31.8h, v19.8h, v14.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 
\n" "fmla v28.8h, v20.8h, v11.h[4] \n" "fmla v29.8h, v20.8h, v12.h[4] \n" "fmla v30.8h, v20.8h, v13.h[4] \n" "fmla v31.8h, v20.8h, v14.h[4] \n" "fmla v28.8h, v21.8h, v11.h[5] \n" "fmla v29.8h, v21.8h, v12.h[5] \n" "fmla v30.8h, v21.8h, v13.h[5] \n" "fmla v31.8h, v21.8h, v14.h[5] \n" "fmla v28.8h, v22.8h, v11.h[6] \n" "fmla v29.8h, v22.8h, v12.h[6] \n" "fmla v30.8h, v22.8h, v13.h[6] \n" "fmla v31.8h, v22.8h, v14.h[6] \n" "fmla v28.8h, v23.8h, v11.h[7] \n" "fmla v29.8h, v23.8h, v12.h[7] \n" "fmla v30.8h, v23.8h, v13.h[7] \n" "fmla v31.8h, v23.8h, v14.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v12.h[0] \n" "fmla v29.8h, v16.8h, v13.h[0] \n" "fmla v30.8h, v16.8h, v14.h[0] \n" "fmla v31.8h, v16.8h, v15.h[0] \n" "fmla v28.8h, v17.8h, v12.h[1] \n" "fmla v29.8h, v17.8h, v13.h[1] \n" "fmla v30.8h, v17.8h, v14.h[1] \n" "fmla v31.8h, v17.8h, v15.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v28.8h, v18.8h, v12.h[2] \n" "fmla v29.8h, v18.8h, v13.h[2] \n" "fmla v30.8h, v18.8h, v14.h[2] \n" "fmla v31.8h, v18.8h, v15.h[2] \n" "fmla v28.8h, v19.8h, v12.h[3] \n" "fmla v29.8h, v19.8h, v13.h[3] \n" "fmla v30.8h, v19.8h, v14.h[3] \n" "fmla v31.8h, v19.8h, v15.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v12.h[4] \n" "fmla v29.8h, v20.8h, v13.h[4] \n" "fmla v30.8h, v20.8h, v14.h[4] \n" "fmla v31.8h, v20.8h, v15.h[4] \n" "fmla v28.8h, v21.8h, v12.h[5] \n" "fmla v29.8h, v21.8h, v13.h[5] \n" "fmla v30.8h, v21.8h, v14.h[5] \n" "fmla v31.8h, v21.8h, v15.h[5] \n" "fmla v28.8h, v22.8h, v12.h[6] \n" "fmla v29.8h, v22.8h, v13.h[6] \n" "fmla v30.8h, v22.8h, v14.h[6] \n" "fmla v31.8h, v22.8h, v15.h[6] \n" "fmla v28.8h, v23.8h, v12.h[7] \n" "fmla v29.8h, v23.8h, v13.h[7] \n" "fmla v30.8h, v23.8h, v14.h[7] \n" "fmla v31.8h, v23.8h, v15.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, 
v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v0.h[0] \n" "fmla v29.8h, v16.8h, v1.h[0] \n" "fmla v30.8h, v16.8h, v2.h[0] \n" "fmla v31.8h, v16.8h, v3.h[0] \n" "fmla v28.8h, v17.8h, v0.h[1] \n" "fmla v29.8h, v17.8h, v1.h[1] \n" "fmla v30.8h, v17.8h, v2.h[1] \n" "fmla v31.8h, v17.8h, v3.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%3] \n" // r24 r25 r26 r27 "fmla v28.8h, v18.8h, v0.h[2] \n" "fmla v29.8h, v18.8h, v1.h[2] \n" "fmla v30.8h, v18.8h, v2.h[2] \n" "fmla v31.8h, v18.8h, v3.h[2] \n" "fmla v28.8h, v19.8h, v0.h[3] \n" "fmla v29.8h, v19.8h, v1.h[3] \n" "fmla v30.8h, v19.8h, v2.h[3] \n" "fmla v31.8h, v19.8h, v3.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v20.8h, v1.h[4] \n" "fmla v30.8h, v20.8h, v2.h[4] \n" "fmla v31.8h, v20.8h, v3.h[4] \n" "fmla v28.8h, v21.8h, v0.h[5] \n" "fmla v29.8h, v21.8h, v1.h[5] \n" "fmla v30.8h, v21.8h, v2.h[5] \n" "fmla v31.8h, v21.8h, v3.h[5] \n" "fmla v28.8h, v22.8h, v0.h[6] \n" "fmla v29.8h, v22.8h, v1.h[6] \n" "fmla v30.8h, v22.8h, v2.h[6] \n" "fmla v31.8h, v22.8h, v3.h[6] \n" "fmla v28.8h, v23.8h, v0.h[7] \n" "fmla v29.8h, v23.8h, v1.h[7] \n" "fmla v30.8h, v23.8h, v2.h[7] \n" "fmla v31.8h, v23.8h, v3.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v16.8h, v2.h[0] \n" "fmla v30.8h, v16.8h, v3.h[0] \n" "fmla v31.8h, v16.8h, v4.h[0] \n" "fmla v28.8h, v17.8h, v1.h[1] \n" "fmla v29.8h, v17.8h, v2.h[1] \n" "fmla v30.8h, v17.8h, v3.h[1] \n" "fmla v31.8h, v17.8h, v4.h[1] \n" "fmla v28.8h, v18.8h, v1.h[2] \n" "fmla v29.8h, v18.8h, v2.h[2] \n" "fmla v30.8h, v18.8h, v3.h[2] \n" "fmla v31.8h, v18.8h, v4.h[2] \n" "fmla v28.8h, v19.8h, v1.h[3] \n" "fmla v29.8h, v19.8h, v2.h[3] \n" "fmla v30.8h, v19.8h, v3.h[3] \n" "fmla v31.8h, v19.8h, v4.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, 
v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v20.8h, v2.h[4] \n" "fmla v30.8h, v20.8h, v3.h[4] \n" "fmla v31.8h, v20.8h, v4.h[4] \n" "fmla v28.8h, v21.8h, v1.h[5] \n" "fmla v29.8h, v21.8h, v2.h[5] \n" "fmla v30.8h, v21.8h, v3.h[5] \n" "fmla v31.8h, v21.8h, v4.h[5] \n" "fmla v28.8h, v22.8h, v1.h[6] \n" "fmla v29.8h, v22.8h, v2.h[6] \n" "fmla v30.8h, v22.8h, v3.h[6] \n" "fmla v31.8h, v22.8h, v4.h[6] \n" "fmla v28.8h, v23.8h, v1.h[7] \n" "fmla v29.8h, v23.8h, v2.h[7] \n" "fmla v30.8h, v23.8h, v3.h[7] \n" "fmla v31.8h, v23.8h, v4.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v16.8h, v3.h[0] \n" "fmla v30.8h, v16.8h, v4.h[0] \n" "fmla v31.8h, v16.8h, v5.h[0] \n" "fmla v28.8h, v17.8h, v2.h[1] \n" "fmla v29.8h, v17.8h, v3.h[1] \n" "fmla v30.8h, v17.8h, v4.h[1] \n" "fmla v31.8h, v17.8h, v5.h[1] \n" "fmla v28.8h, v18.8h, v2.h[2] \n" "fmla v29.8h, v18.8h, v3.h[2] \n" "fmla v30.8h, v18.8h, v4.h[2] \n" "fmla v31.8h, v18.8h, v5.h[2] \n" "fmla v28.8h, v19.8h, v2.h[3] \n" "fmla v29.8h, v19.8h, v3.h[3] \n" "fmla v30.8h, v19.8h, v4.h[3] \n" "fmla v31.8h, v19.8h, v5.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v20.8h, v3.h[4] \n" "fmla v30.8h, v20.8h, v4.h[4] \n" "fmla v31.8h, v20.8h, v5.h[4] \n" "fmla v28.8h, v21.8h, v2.h[5] \n" "fmla v29.8h, v21.8h, v3.h[5] \n" "fmla v30.8h, v21.8h, v4.h[5] \n" "fmla v31.8h, v21.8h, v5.h[5] \n" "fmla v28.8h, v22.8h, v2.h[6] \n" "fmla v29.8h, v22.8h, v3.h[6] \n" "fmla v30.8h, v22.8h, v4.h[6] \n" "fmla v31.8h, v22.8h, v5.h[6] \n" "fmla v28.8h, v23.8h, v2.h[7] \n" "fmla v29.8h, v23.8h, v3.h[7] \n" "fmla v30.8h, v23.8h, v4.h[7] \n" "fmla v31.8h, v23.8h, v5.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v3.h[0] \n" "fmla v29.8h, v16.8h, v4.h[0] \n" "fmla v30.8h, v16.8h, 
v5.h[0] \n" "fmla v31.8h, v16.8h, v6.h[0] \n" "fmla v28.8h, v17.8h, v3.h[1] \n" "fmla v29.8h, v17.8h, v4.h[1] \n" "fmla v30.8h, v17.8h, v5.h[1] \n" "fmla v31.8h, v17.8h, v6.h[1] \n" "fmla v28.8h, v18.8h, v3.h[2] \n" "fmla v29.8h, v18.8h, v4.h[2] \n" "fmla v30.8h, v18.8h, v5.h[2] \n" "fmla v31.8h, v18.8h, v6.h[2] \n" "fmla v28.8h, v19.8h, v3.h[3] \n" "fmla v29.8h, v19.8h, v4.h[3] \n" "fmla v30.8h, v19.8h, v5.h[3] \n" "fmla v31.8h, v19.8h, v6.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v3.h[4] \n" "fmla v29.8h, v20.8h, v4.h[4] \n" "fmla v30.8h, v20.8h, v5.h[4] \n" "fmla v31.8h, v20.8h, v6.h[4] \n" "fmla v28.8h, v21.8h, v3.h[5] \n" "fmla v29.8h, v21.8h, v4.h[5] \n" "fmla v30.8h, v21.8h, v5.h[5] \n" "fmla v31.8h, v21.8h, v6.h[5] \n" "fmla v28.8h, v22.8h, v3.h[6] \n" "fmla v29.8h, v22.8h, v4.h[6] \n" "fmla v30.8h, v22.8h, v5.h[6] \n" "fmla v31.8h, v22.8h, v6.h[6] \n" "fmla v28.8h, v23.8h, v3.h[7] \n" "fmla v29.8h, v23.8h, v4.h[7] \n" "fmla v30.8h, v23.8h, v5.h[7] \n" "fmla v31.8h, v23.8h, v6.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v4.h[0] \n" "fmla v29.8h, v16.8h, v5.h[0] \n" "fmla v30.8h, v16.8h, v6.h[0] \n" "fmla v31.8h, v16.8h, v7.h[0] \n" "fmla v28.8h, v17.8h, v4.h[1] \n" "fmla v29.8h, v17.8h, v5.h[1] \n" "fmla v30.8h, v17.8h, v6.h[1] \n" "fmla v31.8h, v17.8h, v7.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%4], #64 \n" // r30 r31 r32 r33 "fmla v28.8h, v18.8h, v4.h[2] \n" "fmla v29.8h, v18.8h, v5.h[2] \n" "fmla v30.8h, v18.8h, v6.h[2] \n" "fmla v31.8h, v18.8h, v7.h[2] \n" "fmla v28.8h, v19.8h, v4.h[3] \n" "fmla v29.8h, v19.8h, v5.h[3] \n" "fmla v30.8h, v19.8h, v6.h[3] \n" "fmla v31.8h, v19.8h, v7.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v5.h[4] \n" "fmla v30.8h, v20.8h, v6.h[4] 
\n" "fmla v31.8h, v20.8h, v7.h[4] \n" "fmla v28.8h, v21.8h, v4.h[5] \n" "fmla v29.8h, v21.8h, v5.h[5] \n" "fmla v30.8h, v21.8h, v6.h[5] \n" "fmla v31.8h, v21.8h, v7.h[5] \n" "fmla v28.8h, v22.8h, v4.h[6] \n" "fmla v29.8h, v22.8h, v5.h[6] \n" "fmla v30.8h, v22.8h, v6.h[6] \n" "fmla v31.8h, v22.8h, v7.h[6] \n" "fmla v28.8h, v23.8h, v4.h[7] \n" "fmla v29.8h, v23.8h, v5.h[7] \n" "fmla v30.8h, v23.8h, v6.h[7] \n" "fmla v31.8h, v23.8h, v7.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v8.h[0] \n" "fmla v29.8h, v16.8h, v9.h[0] \n" "fmla v30.8h, v16.8h, v10.h[0] \n" "fmla v31.8h, v16.8h, v11.h[0] \n" "fmla v28.8h, v17.8h, v8.h[1] \n" "fmla v29.8h, v17.8h, v9.h[1] \n" "fmla v30.8h, v17.8h, v10.h[1] \n" "fmla v31.8h, v17.8h, v11.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%4] \n" // r34 r35 r36 r37 "fmla v28.8h, v18.8h, v8.h[2] \n" "fmla v29.8h, v18.8h, v9.h[2] \n" "fmla v30.8h, v18.8h, v10.h[2] \n" "fmla v31.8h, v18.8h, v11.h[2] \n" "fmla v28.8h, v19.8h, v8.h[3] \n" "fmla v29.8h, v19.8h, v9.h[3] \n" "fmla v30.8h, v19.8h, v10.h[3] \n" "fmla v31.8h, v19.8h, v11.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v8.h[4] \n" "fmla v29.8h, v20.8h, v9.h[4] \n" "fmla v30.8h, v20.8h, v10.h[4] \n" "fmla v31.8h, v20.8h, v11.h[4] \n" "fmla v28.8h, v21.8h, v8.h[5] \n" "fmla v29.8h, v21.8h, v9.h[5] \n" "fmla v30.8h, v21.8h, v10.h[5] \n" "fmla v31.8h, v21.8h, v11.h[5] \n" "fmla v28.8h, v22.8h, v8.h[6] \n" "fmla v29.8h, v22.8h, v9.h[6] \n" "fmla v30.8h, v22.8h, v10.h[6] \n" "fmla v31.8h, v22.8h, v11.h[6] \n" "fmla v28.8h, v23.8h, v8.h[7] \n" "fmla v29.8h, v23.8h, v9.h[7] \n" "fmla v30.8h, v23.8h, v10.h[7] \n" "fmla v31.8h, v23.8h, v11.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v9.h[0] \n" "fmla v29.8h, v16.8h, v10.h[0] \n" "fmla v30.8h, v16.8h, 
v11.h[0] \n" "fmla v31.8h, v16.8h, v12.h[0] \n" "fmla v28.8h, v17.8h, v9.h[1] \n" "fmla v29.8h, v17.8h, v10.h[1] \n" "fmla v30.8h, v17.8h, v11.h[1] \n" "fmla v31.8h, v17.8h, v12.h[1] \n" "fmla v28.8h, v18.8h, v9.h[2] \n" "fmla v29.8h, v18.8h, v10.h[2] \n" "fmla v30.8h, v18.8h, v11.h[2] \n" "fmla v31.8h, v18.8h, v12.h[2] \n" "fmla v28.8h, v19.8h, v9.h[3] \n" "fmla v29.8h, v19.8h, v10.h[3] \n" "fmla v30.8h, v19.8h, v11.h[3] \n" "fmla v31.8h, v19.8h, v12.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v9.h[4] \n" "fmla v29.8h, v20.8h, v10.h[4] \n" "fmla v30.8h, v20.8h, v11.h[4] \n" "fmla v31.8h, v20.8h, v12.h[4] \n" "fmla v28.8h, v21.8h, v9.h[5] \n" "fmla v29.8h, v21.8h, v10.h[5] \n" "fmla v30.8h, v21.8h, v11.h[5] \n" "fmla v31.8h, v21.8h, v12.h[5] \n" "fmla v28.8h, v22.8h, v9.h[6] \n" "fmla v29.8h, v22.8h, v10.h[6] \n" "fmla v30.8h, v22.8h, v11.h[6] \n" "fmla v31.8h, v22.8h, v12.h[6] \n" "fmla v28.8h, v23.8h, v9.h[7] \n" "fmla v29.8h, v23.8h, v10.h[7] \n" "fmla v30.8h, v23.8h, v11.h[7] \n" "fmla v31.8h, v23.8h, v12.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v10.h[0] \n" "fmla v29.8h, v16.8h, v11.h[0] \n" "fmla v30.8h, v16.8h, v12.h[0] \n" "fmla v31.8h, v16.8h, v13.h[0] \n" "fmla v28.8h, v17.8h, v10.h[1] \n" "fmla v29.8h, v17.8h, v11.h[1] \n" "fmla v30.8h, v17.8h, v12.h[1] \n" "fmla v31.8h, v17.8h, v13.h[1] \n" "fmla v28.8h, v18.8h, v10.h[2] \n" "fmla v29.8h, v18.8h, v11.h[2] \n" "fmla v30.8h, v18.8h, v12.h[2] \n" "fmla v31.8h, v18.8h, v13.h[2] \n" "fmla v28.8h, v19.8h, v10.h[3] \n" "fmla v29.8h, v19.8h, v11.h[3] \n" "fmla v30.8h, v19.8h, v12.h[3] \n" "fmla v31.8h, v19.8h, v13.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v10.h[4] \n" "fmla v29.8h, v20.8h, v11.h[4] \n" "fmla v30.8h, v20.8h, v12.h[4] \n" "fmla v31.8h, v20.8h, v13.h[4] \n" "fmla v28.8h, v21.8h, 
v10.h[5] \n" "fmla v29.8h, v21.8h, v11.h[5] \n" "fmla v30.8h, v21.8h, v12.h[5] \n" "fmla v31.8h, v21.8h, v13.h[5] \n" "fmla v28.8h, v22.8h, v10.h[6] \n" "fmla v29.8h, v22.8h, v11.h[6] \n" "fmla v30.8h, v22.8h, v12.h[6] \n" "fmla v31.8h, v22.8h, v13.h[6] \n" "fmla v28.8h, v23.8h, v10.h[7] \n" "fmla v29.8h, v23.8h, v11.h[7] \n" "fmla v30.8h, v23.8h, v12.h[7] \n" "fmla v31.8h, v23.8h, v13.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v11.h[0] \n" "fmla v29.8h, v16.8h, v12.h[0] \n" "fmla v30.8h, v16.8h, v13.h[0] \n" "fmla v31.8h, v16.8h, v14.h[0] \n" "fmla v28.8h, v17.8h, v11.h[1] \n" "fmla v29.8h, v17.8h, v12.h[1] \n" "fmla v30.8h, v17.8h, v13.h[1] \n" "fmla v31.8h, v17.8h, v14.h[1] \n" "fmla v28.8h, v18.8h, v11.h[2] \n" "fmla v29.8h, v18.8h, v12.h[2] \n" "fmla v30.8h, v18.8h, v13.h[2] \n" "fmla v31.8h, v18.8h, v14.h[2] \n" "fmla v28.8h, v19.8h, v11.h[3] \n" "fmla v29.8h, v19.8h, v12.h[3] \n" "fmla v30.8h, v19.8h, v13.h[3] \n" "fmla v31.8h, v19.8h, v14.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v11.h[4] \n" "fmla v29.8h, v20.8h, v12.h[4] \n" "fmla v30.8h, v20.8h, v13.h[4] \n" "fmla v31.8h, v20.8h, v14.h[4] \n" "fmla v28.8h, v21.8h, v11.h[5] \n" "fmla v29.8h, v21.8h, v12.h[5] \n" "fmla v30.8h, v21.8h, v13.h[5] \n" "fmla v31.8h, v21.8h, v14.h[5] \n" "fmla v28.8h, v22.8h, v11.h[6] \n" "fmla v29.8h, v22.8h, v12.h[6] \n" "fmla v30.8h, v22.8h, v13.h[6] \n" "fmla v31.8h, v22.8h, v14.h[6] \n" "fmla v28.8h, v23.8h, v11.h[7] \n" "fmla v29.8h, v23.8h, v12.h[7] \n" "fmla v30.8h, v23.8h, v13.h[7] \n" "fmla v31.8h, v23.8h, v14.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v12.h[0] \n" "fmla v29.8h, v16.8h, v13.h[0] \n" "fmla v30.8h, v16.8h, v14.h[0] \n" "fmla v31.8h, v16.8h, v15.h[0] \n" "fmla v28.8h, v17.8h, v12.h[1] \n" "fmla v29.8h, v17.8h, v13.h[1] \n" "fmla v30.8h, 
v17.8h, v14.h[1] \n" "fmla v31.8h, v17.8h, v15.h[1] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%5], #64 \n" // r40 r41 r42 r43 "fmla v28.8h, v18.8h, v12.h[2] \n" "fmla v29.8h, v18.8h, v13.h[2] \n" "fmla v30.8h, v18.8h, v14.h[2] \n" "fmla v31.8h, v18.8h, v15.h[2] \n" "fmla v28.8h, v19.8h, v12.h[3] \n" "fmla v29.8h, v19.8h, v13.h[3] \n" "fmla v30.8h, v19.8h, v14.h[3] \n" "fmla v31.8h, v19.8h, v15.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v12.h[4] \n" "fmla v29.8h, v20.8h, v13.h[4] \n" "fmla v30.8h, v20.8h, v14.h[4] \n" "fmla v31.8h, v20.8h, v15.h[4] \n" "fmla v28.8h, v21.8h, v12.h[5] \n" "fmla v29.8h, v21.8h, v13.h[5] \n" "fmla v30.8h, v21.8h, v14.h[5] \n" "fmla v31.8h, v21.8h, v15.h[5] \n" "fmla v28.8h, v22.8h, v12.h[6] \n" "fmla v29.8h, v22.8h, v13.h[6] \n" "fmla v30.8h, v22.8h, v14.h[6] \n" "fmla v31.8h, v22.8h, v15.h[6] \n" "fmla v28.8h, v23.8h, v12.h[7] \n" "fmla v29.8h, v23.8h, v13.h[7] \n" "fmla v30.8h, v23.8h, v14.h[7] \n" "fmla v31.8h, v23.8h, v15.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v0.h[0] \n" "fmla v29.8h, v16.8h, v1.h[0] \n" "fmla v30.8h, v16.8h, v2.h[0] \n" "fmla v31.8h, v16.8h, v3.h[0] \n" "fmla v28.8h, v17.8h, v0.h[1] \n" "fmla v29.8h, v17.8h, v1.h[1] \n" "fmla v30.8h, v17.8h, v2.h[1] \n" "fmla v31.8h, v17.8h, v3.h[1] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%5] \n" // r44 r45 r46 r47 "fmla v28.8h, v18.8h, v0.h[2] \n" "fmla v29.8h, v18.8h, v1.h[2] \n" "fmla v30.8h, v18.8h, v2.h[2] \n" "fmla v31.8h, v18.8h, v3.h[2] \n" "fmla v28.8h, v19.8h, v0.h[3] \n" "fmla v29.8h, v19.8h, v1.h[3] \n" "fmla v30.8h, v19.8h, v2.h[3] \n" "fmla v31.8h, v19.8h, v3.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v20.8h, v1.h[4] \n" "fmla v30.8h, v20.8h, v2.h[4] \n" "fmla 
v31.8h, v20.8h, v3.h[4] \n" "fmla v28.8h, v21.8h, v0.h[5] \n" "fmla v29.8h, v21.8h, v1.h[5] \n" "fmla v30.8h, v21.8h, v2.h[5] \n" "fmla v31.8h, v21.8h, v3.h[5] \n" "fmla v28.8h, v22.8h, v0.h[6] \n" "fmla v29.8h, v22.8h, v1.h[6] \n" "fmla v30.8h, v22.8h, v2.h[6] \n" "fmla v31.8h, v22.8h, v3.h[6] \n" "fmla v28.8h, v23.8h, v0.h[7] \n" "fmla v29.8h, v23.8h, v1.h[7] \n" "fmla v30.8h, v23.8h, v2.h[7] \n" "fmla v31.8h, v23.8h, v3.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v16.8h, v2.h[0] \n" "fmla v30.8h, v16.8h, v3.h[0] \n" "fmla v31.8h, v16.8h, v4.h[0] \n" "fmla v28.8h, v17.8h, v1.h[1] \n" "fmla v29.8h, v17.8h, v2.h[1] \n" "fmla v30.8h, v17.8h, v3.h[1] \n" "fmla v31.8h, v17.8h, v4.h[1] \n" "fmla v28.8h, v18.8h, v1.h[2] \n" "fmla v29.8h, v18.8h, v2.h[2] \n" "fmla v30.8h, v18.8h, v3.h[2] \n" "fmla v31.8h, v18.8h, v4.h[2] \n" "fmla v28.8h, v19.8h, v1.h[3] \n" "fmla v29.8h, v19.8h, v2.h[3] \n" "fmla v30.8h, v19.8h, v3.h[3] \n" "fmla v31.8h, v19.8h, v4.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v20.8h, v2.h[4] \n" "fmla v30.8h, v20.8h, v3.h[4] \n" "fmla v31.8h, v20.8h, v4.h[4] \n" "fmla v28.8h, v21.8h, v1.h[5] \n" "fmla v29.8h, v21.8h, v2.h[5] \n" "fmla v30.8h, v21.8h, v3.h[5] \n" "fmla v31.8h, v21.8h, v4.h[5] \n" "fmla v28.8h, v22.8h, v1.h[6] \n" "fmla v29.8h, v22.8h, v2.h[6] \n" "fmla v30.8h, v22.8h, v3.h[6] \n" "fmla v31.8h, v22.8h, v4.h[6] \n" "fmla v28.8h, v23.8h, v1.h[7] \n" "fmla v29.8h, v23.8h, v2.h[7] \n" "fmla v30.8h, v23.8h, v3.h[7] \n" "fmla v31.8h, v23.8h, v4.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v16.8h, v3.h[0] \n" "fmla v30.8h, v16.8h, v4.h[0] \n" "fmla v31.8h, v16.8h, v5.h[0] \n" "fmla v28.8h, v17.8h, v2.h[1] \n" "fmla v29.8h, v17.8h, v3.h[1] \n" "fmla v30.8h, 
v17.8h, v4.h[1] \n" "fmla v31.8h, v17.8h, v5.h[1] \n" "fmla v28.8h, v18.8h, v2.h[2] \n" "fmla v29.8h, v18.8h, v3.h[2] \n" "fmla v30.8h, v18.8h, v4.h[2] \n" "fmla v31.8h, v18.8h, v5.h[2] \n" "fmla v28.8h, v19.8h, v2.h[3] \n" "fmla v29.8h, v19.8h, v3.h[3] \n" "fmla v30.8h, v19.8h, v4.h[3] \n" "fmla v31.8h, v19.8h, v5.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v20.8h, v3.h[4] \n" "fmla v30.8h, v20.8h, v4.h[4] \n" "fmla v31.8h, v20.8h, v5.h[4] \n" "fmla v28.8h, v21.8h, v2.h[5] \n" "fmla v29.8h, v21.8h, v3.h[5] \n" "fmla v30.8h, v21.8h, v4.h[5] \n" "fmla v31.8h, v21.8h, v5.h[5] \n" "fmla v28.8h, v22.8h, v2.h[6] \n" "fmla v29.8h, v22.8h, v3.h[6] \n" "fmla v30.8h, v22.8h, v4.h[6] \n" "fmla v31.8h, v22.8h, v5.h[6] \n" "fmla v28.8h, v23.8h, v2.h[7] \n" "fmla v29.8h, v23.8h, v3.h[7] \n" "fmla v30.8h, v23.8h, v4.h[7] \n" "fmla v31.8h, v23.8h, v5.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v3.h[0] \n" "fmla v29.8h, v16.8h, v4.h[0] \n" "fmla v30.8h, v16.8h, v5.h[0] \n" "fmla v31.8h, v16.8h, v6.h[0] \n" "fmla v28.8h, v17.8h, v3.h[1] \n" "fmla v29.8h, v17.8h, v4.h[1] \n" "fmla v30.8h, v17.8h, v5.h[1] \n" "fmla v31.8h, v17.8h, v6.h[1] \n" "fmla v28.8h, v18.8h, v3.h[2] \n" "fmla v29.8h, v18.8h, v4.h[2] \n" "fmla v30.8h, v18.8h, v5.h[2] \n" "fmla v31.8h, v18.8h, v6.h[2] \n" "fmla v28.8h, v19.8h, v3.h[3] \n" "fmla v29.8h, v19.8h, v4.h[3] \n" "fmla v30.8h, v19.8h, v5.h[3] \n" "fmla v31.8h, v19.8h, v6.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v3.h[4] \n" "fmla v29.8h, v20.8h, v4.h[4] \n" "fmla v30.8h, v20.8h, v5.h[4] \n" "fmla v31.8h, v20.8h, v6.h[4] \n" "fmla v28.8h, v21.8h, v3.h[5] \n" "fmla v29.8h, v21.8h, v4.h[5] \n" "fmla v30.8h, v21.8h, v5.h[5] \n" "fmla v31.8h, v21.8h, v6.h[5] \n" "fmla v28.8h, v22.8h, v3.h[6] \n" "fmla v29.8h, v22.8h, 
v4.h[6] \n" "fmla v30.8h, v22.8h, v5.h[6] \n" "fmla v31.8h, v22.8h, v6.h[6] \n" "fmla v28.8h, v23.8h, v3.h[7] \n" "fmla v29.8h, v23.8h, v4.h[7] \n" "fmla v30.8h, v23.8h, v5.h[7] \n" "fmla v31.8h, v23.8h, v6.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6] \n" "fmla v28.8h, v16.8h, v4.h[0] \n" "fmla v29.8h, v16.8h, v5.h[0] \n" "fmla v30.8h, v16.8h, v6.h[0] \n" "fmla v31.8h, v16.8h, v7.h[0] \n" "fmla v28.8h, v17.8h, v4.h[1] \n" "fmla v29.8h, v17.8h, v5.h[1] \n" "fmla v30.8h, v17.8h, v6.h[1] \n" "fmla v31.8h, v17.8h, v7.h[1] \n" "fmla v28.8h, v18.8h, v4.h[2] \n" "fmla v29.8h, v18.8h, v5.h[2] \n" "fmla v30.8h, v18.8h, v6.h[2] \n" "fmla v31.8h, v18.8h, v7.h[2] \n" "fmla v28.8h, v19.8h, v4.h[3] \n" "fmla v29.8h, v19.8h, v5.h[3] \n" "fmla v30.8h, v19.8h, v6.h[3] \n" "fmla v31.8h, v19.8h, v7.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v5.h[4] \n" "fmla v30.8h, v20.8h, v6.h[4] \n" "fmla v31.8h, v20.8h, v7.h[4] \n" "fmla v28.8h, v21.8h, v4.h[5] \n" "fmla v29.8h, v21.8h, v5.h[5] \n" "fmla v30.8h, v21.8h, v6.h[5] \n" "fmla v31.8h, v21.8h, v7.h[5] \n" "fmla v28.8h, v22.8h, v4.h[6] \n" "fmla v29.8h, v22.8h, v5.h[6] \n" "fmla v30.8h, v22.8h, v6.h[6] \n" "fmla v31.8h, v22.8h, v7.h[6] \n" "fmla v28.8h, v23.8h, v4.h[7] \n" "fmla v29.8h, v23.8h, v5.h[7] \n" "fmla v30.8h, v23.8h, v6.h[7] \n" "fmla v31.8h, v23.8h, v7.h[7] \n" "sub %6, %6, #3136 \n" // kptr -= 24.5 * 64; "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%0], #64 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31"); } for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.8h, v1.8h}, [%1], #32 \n" // r00 r01 
"prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "prfm pldl1keep, [%0, #256] \n" "ld1 {v30.8h, v31.8h}, [%0] \n" // sum0 sum1 "fmul v28.8h, v16.8h, v0.h[0] \n" "fmul v29.8h, v16.8h, v1.h[0] \n" "fmla v30.8h, v17.8h, v0.h[1] \n" "fmla v31.8h, v17.8h, v1.h[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v18.8h, v0.h[2] \n" "fmla v29.8h, v18.8h, v1.h[2] \n" "fmla v30.8h, v19.8h, v0.h[3] \n" "fmla v31.8h, v19.8h, v1.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v20.8h, v1.h[4] \n" "fmla v30.8h, v21.8h, v0.h[5] \n" "fmla v31.8h, v21.8h, v1.h[5] \n" "prfm pldl1keep, [%1, #512] \n" "ld1 {v2.8h, v3.8h, v4.8h, v5.8h}, [%1] \n" // r02 r03 r04 r05 "fmla v28.8h, v22.8h, v0.h[6] \n" "fmla v29.8h, v22.8h, v1.h[6] \n" "fmla v30.8h, v23.8h, v0.h[7] \n" "fmla v31.8h, v23.8h, v1.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v16.8h, v2.h[0] \n" "fmla v30.8h, v17.8h, v1.h[1] \n" "fmla v31.8h, v17.8h, v2.h[1] \n" "fmla v28.8h, v18.8h, v1.h[2] \n" "fmla v29.8h, v18.8h, v2.h[2] \n" "fmla v30.8h, v19.8h, v1.h[3] \n" "fmla v31.8h, v19.8h, v2.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v20.8h, v2.h[4] \n" "fmla v30.8h, v21.8h, v1.h[5] \n" "fmla v31.8h, v21.8h, v2.h[5] \n" "fmla v28.8h, v22.8h, v1.h[6] \n" "fmla v29.8h, v22.8h, v2.h[6] \n" "fmla v30.8h, v23.8h, v1.h[7] \n" "fmla v31.8h, v23.8h, v2.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v16.8h, v3.h[0] \n" "fmla v30.8h, v17.8h, v2.h[1] \n" "fmla v31.8h, v17.8h, v3.h[1] \n" "fmla v28.8h, v18.8h, v2.h[2] \n" "fmla v29.8h, v18.8h, v3.h[2] \n" "fmla v30.8h, v19.8h, 
v2.h[3] \n" "fmla v31.8h, v19.8h, v3.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v20.8h, v3.h[4] \n" "fmla v30.8h, v21.8h, v2.h[5] \n" "fmla v31.8h, v21.8h, v3.h[5] \n" "fmla v28.8h, v22.8h, v2.h[6] \n" "fmla v29.8h, v22.8h, v3.h[6] \n" "fmla v30.8h, v23.8h, v2.h[7] \n" "fmla v31.8h, v23.8h, v3.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v3.h[0] \n" "fmla v29.8h, v16.8h, v4.h[0] \n" "fmla v30.8h, v17.8h, v3.h[1] \n" "fmla v31.8h, v17.8h, v4.h[1] \n" "fmla v28.8h, v18.8h, v3.h[2] \n" "fmla v29.8h, v18.8h, v4.h[2] \n" "fmla v30.8h, v19.8h, v3.h[3] \n" "fmla v31.8h, v19.8h, v4.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v3.h[4] \n" "fmla v29.8h, v20.8h, v4.h[4] \n" "fmla v30.8h, v21.8h, v3.h[5] \n" "fmla v31.8h, v21.8h, v4.h[5] \n" "fmla v28.8h, v22.8h, v3.h[6] \n" "fmla v29.8h, v22.8h, v4.h[6] \n" "fmla v30.8h, v23.8h, v3.h[7] \n" "fmla v31.8h, v23.8h, v4.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v4.h[0] \n" "fmla v29.8h, v16.8h, v5.h[0] \n" "fmla v30.8h, v17.8h, v4.h[1] \n" "fmla v31.8h, v17.8h, v5.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v8.8h, v9.8h}, [%2], #32 \n" // r10 r11 "fmla v28.8h, v18.8h, v4.h[2] \n" "fmla v29.8h, v18.8h, v5.h[2] \n" "fmla v30.8h, v19.8h, v4.h[3] \n" "fmla v31.8h, v19.8h, v5.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v5.h[4] \n" "fmla v30.8h, v21.8h, v4.h[5] \n" "fmla v31.8h, v21.8h, v5.h[5] \n" "fmla v28.8h, v22.8h, v4.h[6] \n" "fmla v29.8h, v22.8h, v5.h[6] \n" "fmla v30.8h, v23.8h, v4.h[7] \n" "fmla v31.8h, v23.8h, v5.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla 
v30.8h, v16.8h, v8.h[0] \n" "fmla v31.8h, v16.8h, v9.h[0] \n" "fmla v30.8h, v17.8h, v8.h[1] \n" "fmla v31.8h, v17.8h, v9.h[1] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v10.8h, v11.8h, v12.8h, v13.8h}, [%2] \n" // r12 r13 r14 r15 "fmla v30.8h, v18.8h, v8.h[2] \n" "fmla v31.8h, v18.8h, v9.h[2] \n" "fmla v30.8h, v19.8h, v8.h[3] \n" "fmla v31.8h, v19.8h, v9.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v30.8h, v20.8h, v8.h[4] \n" "fmla v31.8h, v20.8h, v9.h[4] \n" "fmla v30.8h, v21.8h, v8.h[5] \n" "fmla v31.8h, v21.8h, v9.h[5] \n" "fmla v30.8h, v22.8h, v8.h[6] \n" "fmla v31.8h, v22.8h, v9.h[6] \n" "fmla v30.8h, v23.8h, v8.h[7] \n" "fmla v31.8h, v23.8h, v9.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v30.8h, v16.8h, v9.h[0] \n" "fmla v31.8h, v16.8h, v10.h[0] \n" "fmla v30.8h, v17.8h, v9.h[1] \n" "fmla v31.8h, v17.8h, v10.h[1] \n" "fmla v30.8h, v18.8h, v9.h[2] \n" "fmla v31.8h, v18.8h, v10.h[2] \n" "fmla v30.8h, v19.8h, v9.h[3] \n" "fmla v31.8h, v19.8h, v10.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v30.8h, v20.8h, v9.h[4] \n" "fmla v31.8h, v20.8h, v10.h[4] \n" "fmla v30.8h, v21.8h, v9.h[5] \n" "fmla v31.8h, v21.8h, v10.h[5] \n" "fmla v30.8h, v22.8h, v9.h[6] \n" "fmla v31.8h, v22.8h, v10.h[6] \n" "fmla v30.8h, v23.8h, v9.h[7] \n" "fmla v31.8h, v23.8h, v10.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v30.8h, v16.8h, v10.h[0] \n" "fmla v31.8h, v16.8h, v11.h[0] \n" "fmla v30.8h, v17.8h, v10.h[1] \n" "fmla v31.8h, v17.8h, v11.h[1] \n" "fmla v30.8h, v18.8h, v10.h[2] \n" "fmla v31.8h, v18.8h, v11.h[2] \n" "fmla v30.8h, v19.8h, v10.h[3] \n" "fmla v31.8h, v19.8h, v11.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v30.8h, v20.8h, v10.h[4] \n" "fmla v31.8h, v20.8h, v11.h[4] \n" "fmla v30.8h, v21.8h, v10.h[5] \n" 
"fmla v31.8h, v21.8h, v11.h[5] \n" "fmla v30.8h, v22.8h, v10.h[6] \n" "fmla v31.8h, v22.8h, v11.h[6] \n" "fmla v30.8h, v23.8h, v10.h[7] \n" "fmla v31.8h, v23.8h, v11.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v30.8h, v16.8h, v11.h[0] \n" "fmla v31.8h, v16.8h, v12.h[0] \n" "fmla v30.8h, v17.8h, v11.h[1] \n" "fmla v31.8h, v17.8h, v12.h[1] \n" "fmla v30.8h, v18.8h, v11.h[2] \n" "fmla v31.8h, v18.8h, v12.h[2] \n" "fmla v30.8h, v19.8h, v11.h[3] \n" "fmla v31.8h, v19.8h, v12.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v30.8h, v20.8h, v11.h[4] \n" "fmla v31.8h, v20.8h, v12.h[4] \n" "fmla v30.8h, v21.8h, v11.h[5] \n" "fmla v31.8h, v21.8h, v12.h[5] \n" "fmla v30.8h, v22.8h, v11.h[6] \n" "fmla v31.8h, v22.8h, v12.h[6] \n" "fmla v30.8h, v23.8h, v11.h[7] \n" "fmla v31.8h, v23.8h, v12.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v30.8h, v16.8h, v12.h[0] \n" "fmla v31.8h, v16.8h, v13.h[0] \n" "fmla v30.8h, v17.8h, v12.h[1] \n" "fmla v31.8h, v17.8h, v13.h[1] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.8h, v1.8h}, [%3], #32 \n" // r20 r21 "fmla v30.8h, v18.8h, v12.h[2] \n" "fmla v31.8h, v18.8h, v13.h[2] \n" "fmla v30.8h, v19.8h, v12.h[3] \n" "fmla v31.8h, v19.8h, v13.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v30.8h, v20.8h, v12.h[4] \n" "fmla v31.8h, v20.8h, v13.h[4] \n" "fmla v30.8h, v21.8h, v12.h[5] \n" "fmla v31.8h, v21.8h, v13.h[5] \n" "fmla v30.8h, v22.8h, v12.h[6] \n" "fmla v31.8h, v22.8h, v13.h[6] \n" "fmla v30.8h, v23.8h, v12.h[7] \n" "fmla v31.8h, v23.8h, v13.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v0.h[0] \n" "fmla v29.8h, v16.8h, v1.h[0] \n" "fmla v30.8h, v17.8h, v0.h[1] \n" "fmla v31.8h, v17.8h, v1.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v2.8h, v3.8h, v4.8h, 
v5.8h}, [%3] \n" // r22 r23 r24 r25 "fmla v28.8h, v18.8h, v0.h[2] \n" "fmla v29.8h, v18.8h, v1.h[2] \n" "fmla v30.8h, v19.8h, v0.h[3] \n" "fmla v31.8h, v19.8h, v1.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v20.8h, v1.h[4] \n" "fmla v30.8h, v21.8h, v0.h[5] \n" "fmla v31.8h, v21.8h, v1.h[5] \n" "fmla v28.8h, v22.8h, v0.h[6] \n" "fmla v29.8h, v22.8h, v1.h[6] \n" "fmla v30.8h, v23.8h, v0.h[7] \n" "fmla v31.8h, v23.8h, v1.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v16.8h, v2.h[0] \n" "fmla v30.8h, v17.8h, v1.h[1] \n" "fmla v31.8h, v17.8h, v2.h[1] \n" "fmla v28.8h, v18.8h, v1.h[2] \n" "fmla v29.8h, v18.8h, v2.h[2] \n" "fmla v30.8h, v19.8h, v1.h[3] \n" "fmla v31.8h, v19.8h, v2.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v20.8h, v2.h[4] \n" "fmla v30.8h, v21.8h, v1.h[5] \n" "fmla v31.8h, v21.8h, v2.h[5] \n" "fmla v28.8h, v22.8h, v1.h[6] \n" "fmla v29.8h, v22.8h, v2.h[6] \n" "fmla v30.8h, v23.8h, v1.h[7] \n" "fmla v31.8h, v23.8h, v2.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v16.8h, v3.h[0] \n" "fmla v30.8h, v17.8h, v2.h[1] \n" "fmla v31.8h, v17.8h, v3.h[1] \n" "fmla v28.8h, v18.8h, v2.h[2] \n" "fmla v29.8h, v18.8h, v3.h[2] \n" "fmla v30.8h, v19.8h, v2.h[3] \n" "fmla v31.8h, v19.8h, v3.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v20.8h, v3.h[4] \n" "fmla v30.8h, v21.8h, v2.h[5] \n" "fmla v31.8h, v21.8h, v3.h[5] \n" "fmla v28.8h, v22.8h, v2.h[6] \n" "fmla v29.8h, v22.8h, v3.h[6] \n" "fmla v30.8h, v23.8h, v2.h[7] \n" "fmla v31.8h, v23.8h, v3.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 
{v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v3.h[0] \n" "fmla v29.8h, v16.8h, v4.h[0] \n" "fmla v30.8h, v17.8h, v3.h[1] \n" "fmla v31.8h, v17.8h, v4.h[1] \n" "fmla v28.8h, v18.8h, v3.h[2] \n" "fmla v29.8h, v18.8h, v4.h[2] \n" "fmla v30.8h, v19.8h, v3.h[3] \n" "fmla v31.8h, v19.8h, v4.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v3.h[4] \n" "fmla v29.8h, v20.8h, v4.h[4] \n" "fmla v30.8h, v21.8h, v3.h[5] \n" "fmla v31.8h, v21.8h, v4.h[5] \n" "fmla v28.8h, v22.8h, v3.h[6] \n" "fmla v29.8h, v22.8h, v4.h[6] \n" "fmla v30.8h, v23.8h, v3.h[7] \n" "fmla v31.8h, v23.8h, v4.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v4.h[0] \n" "fmla v29.8h, v16.8h, v5.h[0] \n" "fmla v30.8h, v17.8h, v4.h[1] \n" "fmla v31.8h, v17.8h, v5.h[1] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v8.8h, v9.8h}, [%4], #32 \n" // r30 r31 "fmla v28.8h, v18.8h, v4.h[2] \n" "fmla v29.8h, v18.8h, v5.h[2] \n" "fmla v30.8h, v19.8h, v4.h[3] \n" "fmla v31.8h, v19.8h, v5.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v5.h[4] \n" "fmla v30.8h, v21.8h, v4.h[5] \n" "fmla v31.8h, v21.8h, v5.h[5] \n" "fmla v28.8h, v22.8h, v4.h[6] \n" "fmla v29.8h, v22.8h, v5.h[6] \n" "fmla v30.8h, v23.8h, v4.h[7] \n" "fmla v31.8h, v23.8h, v5.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v30.8h, v16.8h, v8.h[0] \n" "fmla v31.8h, v16.8h, v9.h[0] \n" "fmla v30.8h, v17.8h, v8.h[1] \n" "fmla v31.8h, v17.8h, v9.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v10.8h, v11.8h, v12.8h, v13.8h}, [%4] \n" // r32 r33 r34 r35 "fmla v30.8h, v18.8h, v8.h[2] \n" "fmla v31.8h, v18.8h, v9.h[2] \n" "fmla v30.8h, v19.8h, v8.h[3] \n" "fmla v31.8h, v19.8h, v9.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, 
[%6], #64 \n" "fmla v30.8h, v20.8h, v8.h[4] \n" "fmla v31.8h, v20.8h, v9.h[4] \n" "fmla v30.8h, v21.8h, v8.h[5] \n" "fmla v31.8h, v21.8h, v9.h[5] \n" "fmla v30.8h, v22.8h, v8.h[6] \n" "fmla v31.8h, v22.8h, v9.h[6] \n" "fmla v30.8h, v23.8h, v8.h[7] \n" "fmla v31.8h, v23.8h, v9.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v30.8h, v16.8h, v9.h[0] \n" "fmla v31.8h, v16.8h, v10.h[0] \n" "fmla v30.8h, v17.8h, v9.h[1] \n" "fmla v31.8h, v17.8h, v10.h[1] \n" "fmla v30.8h, v18.8h, v9.h[2] \n" "fmla v31.8h, v18.8h, v10.h[2] \n" "fmla v30.8h, v19.8h, v9.h[3] \n" "fmla v31.8h, v19.8h, v10.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v30.8h, v20.8h, v9.h[4] \n" "fmla v31.8h, v20.8h, v10.h[4] \n" "fmla v30.8h, v21.8h, v9.h[5] \n" "fmla v31.8h, v21.8h, v10.h[5] \n" "fmla v30.8h, v22.8h, v9.h[6] \n" "fmla v31.8h, v22.8h, v10.h[6] \n" "fmla v30.8h, v23.8h, v9.h[7] \n" "fmla v31.8h, v23.8h, v10.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v30.8h, v16.8h, v10.h[0] \n" "fmla v31.8h, v16.8h, v11.h[0] \n" "fmla v30.8h, v17.8h, v10.h[1] \n" "fmla v31.8h, v17.8h, v11.h[1] \n" "fmla v30.8h, v18.8h, v10.h[2] \n" "fmla v31.8h, v18.8h, v11.h[2] \n" "fmla v30.8h, v19.8h, v10.h[3] \n" "fmla v31.8h, v19.8h, v11.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v30.8h, v20.8h, v10.h[4] \n" "fmla v31.8h, v20.8h, v11.h[4] \n" "fmla v30.8h, v21.8h, v10.h[5] \n" "fmla v31.8h, v21.8h, v11.h[5] \n" "fmla v30.8h, v22.8h, v10.h[6] \n" "fmla v31.8h, v22.8h, v11.h[6] \n" "fmla v30.8h, v23.8h, v10.h[7] \n" "fmla v31.8h, v23.8h, v11.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v30.8h, v16.8h, v11.h[0] \n" "fmla v31.8h, v16.8h, v12.h[0] \n" "fmla v30.8h, v17.8h, v11.h[1] \n" "fmla v31.8h, v17.8h, v12.h[1] \n" "fmla v30.8h, v18.8h, v11.h[2] \n" 
"fmla v31.8h, v18.8h, v12.h[2] \n" "fmla v30.8h, v19.8h, v11.h[3] \n" "fmla v31.8h, v19.8h, v12.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v30.8h, v20.8h, v11.h[4] \n" "fmla v31.8h, v20.8h, v12.h[4] \n" "fmla v30.8h, v21.8h, v11.h[5] \n" "fmla v31.8h, v21.8h, v12.h[5] \n" "fmla v30.8h, v22.8h, v11.h[6] \n" "fmla v31.8h, v22.8h, v12.h[6] \n" "fmla v30.8h, v23.8h, v11.h[7] \n" "fmla v31.8h, v23.8h, v12.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v30.8h, v16.8h, v12.h[0] \n" "fmla v31.8h, v16.8h, v13.h[0] \n" "fmla v30.8h, v17.8h, v12.h[1] \n" "fmla v31.8h, v17.8h, v13.h[1] \n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.8h, v1.8h}, [%5], #32 \n" // r40 r41 "fmla v30.8h, v18.8h, v12.h[2] \n" "fmla v31.8h, v18.8h, v13.h[2] \n" "fmla v30.8h, v19.8h, v12.h[3] \n" "fmla v31.8h, v19.8h, v13.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v30.8h, v20.8h, v12.h[4] \n" "fmla v31.8h, v20.8h, v13.h[4] \n" "fmla v30.8h, v21.8h, v12.h[5] \n" "fmla v31.8h, v21.8h, v13.h[5] \n" "fmla v30.8h, v22.8h, v12.h[6] \n" "fmla v31.8h, v22.8h, v13.h[6] \n" "fmla v30.8h, v23.8h, v12.h[7] \n" "fmla v31.8h, v23.8h, v13.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v0.h[0] \n" "fmla v29.8h, v16.8h, v1.h[0] \n" "fmla v30.8h, v17.8h, v0.h[1] \n" "fmla v31.8h, v17.8h, v1.h[1] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v2.8h, v3.8h, v4.8h, v5.8h}, [%5] \n" // r42 r43 r44 r45 "fmla v28.8h, v18.8h, v0.h[2] \n" "fmla v29.8h, v18.8h, v1.h[2] \n" "fmla v30.8h, v19.8h, v0.h[3] \n" "fmla v31.8h, v19.8h, v1.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v20.8h, v1.h[4] \n" "fmla v30.8h, v21.8h, v0.h[5] \n" "fmla v31.8h, v21.8h, v1.h[5] \n" "fmla v28.8h, v22.8h, v0.h[6] \n" "fmla v29.8h, 
v22.8h, v1.h[6] \n" "fmla v30.8h, v23.8h, v0.h[7] \n" "fmla v31.8h, v23.8h, v1.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v16.8h, v2.h[0] \n" "fmla v30.8h, v17.8h, v1.h[1] \n" "fmla v31.8h, v17.8h, v2.h[1] \n" "fmla v28.8h, v18.8h, v1.h[2] \n" "fmla v29.8h, v18.8h, v2.h[2] \n" "fmla v30.8h, v19.8h, v1.h[3] \n" "fmla v31.8h, v19.8h, v2.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v20.8h, v2.h[4] \n" "fmla v30.8h, v21.8h, v1.h[5] \n" "fmla v31.8h, v21.8h, v2.h[5] \n" "fmla v28.8h, v22.8h, v1.h[6] \n" "fmla v29.8h, v22.8h, v2.h[6] \n" "fmla v30.8h, v23.8h, v1.h[7] \n" "fmla v31.8h, v23.8h, v2.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v16.8h, v3.h[0] \n" "fmla v30.8h, v17.8h, v2.h[1] \n" "fmla v31.8h, v17.8h, v3.h[1] \n" "fmla v28.8h, v18.8h, v2.h[2] \n" "fmla v29.8h, v18.8h, v3.h[2] \n" "fmla v30.8h, v19.8h, v2.h[3] \n" "fmla v31.8h, v19.8h, v3.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v20.8h, v3.h[4] \n" "fmla v30.8h, v21.8h, v2.h[5] \n" "fmla v31.8h, v21.8h, v3.h[5] \n" "fmla v28.8h, v22.8h, v2.h[6] \n" "fmla v29.8h, v22.8h, v3.h[6] \n" "fmla v30.8h, v23.8h, v2.h[7] \n" "fmla v31.8h, v23.8h, v3.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v3.h[0] \n" "fmla v29.8h, v16.8h, v4.h[0] \n" "fmla v30.8h, v17.8h, v3.h[1] \n" "fmla v31.8h, v17.8h, v4.h[1] \n" "fmla v28.8h, v18.8h, v3.h[2] \n" "fmla v29.8h, v18.8h, v4.h[2] \n" "fmla v30.8h, v19.8h, v3.h[3] \n" "fmla v31.8h, v19.8h, v4.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v3.h[4] \n" "fmla 
v29.8h, v20.8h, v4.h[4] \n" "fmla v30.8h, v21.8h, v3.h[5] \n" "fmla v31.8h, v21.8h, v4.h[5] \n" "fmla v28.8h, v22.8h, v3.h[6] \n" "fmla v29.8h, v22.8h, v4.h[6] \n" "fmla v30.8h, v23.8h, v3.h[7] \n" "fmla v31.8h, v23.8h, v4.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6] \n" "fmla v28.8h, v16.8h, v4.h[0] \n" "fmla v29.8h, v16.8h, v5.h[0] \n" "fmla v30.8h, v17.8h, v4.h[1] \n" "fmla v31.8h, v17.8h, v5.h[1] \n" "fmla v28.8h, v18.8h, v4.h[2] \n" "fmla v29.8h, v18.8h, v5.h[2] \n" "fmla v30.8h, v19.8h, v4.h[3] \n" "fmla v31.8h, v19.8h, v5.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v5.h[4] \n" "fmla v30.8h, v21.8h, v4.h[5] \n" "fmla v31.8h, v21.8h, v5.h[5] \n" "fmla v28.8h, v22.8h, v4.h[6] \n" "fmla v29.8h, v22.8h, v5.h[6] \n" "fmla v30.8h, v23.8h, v4.h[7] \n" "fmla v31.8h, v23.8h, v5.h[7] \n" "fadd v28.8h, v28.8h, v30.8h \n" "fadd v29.8h, v29.8h, v31.8h \n" "sub %6, %6, #3136 \n" // kptr -= 24.5 * 64; "st1 {v28.8h, v29.8h}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v8", "v9", "v10", "v11", "v12", "v13", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%1, #128] \n" "ld1 {v0.8h}, [%1], #16 \n" // r00 "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v31.8h}, [%0] \n" // sum0 "fmul v28.8h, v16.8h, v0.h[0] \n" "fmul v29.8h, v17.8h, v0.h[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmul v30.8h, v18.8h, v0.h[2] \n" "fmla v31.8h, v19.8h, v0.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v21.8h, v0.h[5] \n" 
"prfm pldl1keep, [%1, #512] \n" "ld1 {v1.8h, v2.8h, v3.8h, v4.8h}, [%1] \n" // r01 r02 r03 r04 "fmla v30.8h, v22.8h, v0.h[6] \n" "fmla v31.8h, v23.8h, v0.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v17.8h, v1.h[1] \n" "fmla v30.8h, v18.8h, v1.h[2] \n" "fmla v31.8h, v19.8h, v1.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v21.8h, v1.h[5] \n" "fmla v30.8h, v22.8h, v1.h[6] \n" "fmla v31.8h, v23.8h, v1.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v17.8h, v2.h[1] \n" "fmla v30.8h, v18.8h, v2.h[2] \n" "fmla v31.8h, v19.8h, v2.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v21.8h, v2.h[5] \n" "fmla v30.8h, v22.8h, v2.h[6] \n" "fmla v31.8h, v23.8h, v2.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v3.h[0] \n" "fmla v29.8h, v17.8h, v3.h[1] \n" "fmla v30.8h, v18.8h, v3.h[2] \n" "fmla v31.8h, v19.8h, v3.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v3.h[4] \n" "fmla v29.8h, v21.8h, v3.h[5] \n" "fmla v30.8h, v22.8h, v3.h[6] \n" "fmla v31.8h, v23.8h, v3.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v4.h[0] \n" "fmla v29.8h, v17.8h, v4.h[1] \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v8.8h}, [%2], #16 \n" // r10 "fmla v30.8h, v18.8h, v4.h[2] \n" "fmla v31.8h, v19.8h, v4.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v21.8h, v4.h[5] \n" "fmla v30.8h, v22.8h, v4.h[6] \n" "fmla v31.8h, v23.8h, v4.h[7] \n" 
"prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v8.h[0] \n" "fmla v29.8h, v17.8h, v8.h[1] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v9.8h, v10.8h, v11.8h, v12.8h}, [%2] \n" // r11 r12 r13 r14 "fmla v30.8h, v18.8h, v8.h[2] \n" "fmla v31.8h, v19.8h, v8.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v8.h[4] \n" "fmla v29.8h, v21.8h, v8.h[5] \n" "fmla v30.8h, v22.8h, v8.h[6] \n" "fmla v31.8h, v23.8h, v8.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v9.h[0] \n" "fmla v29.8h, v17.8h, v9.h[1] \n" "fmla v30.8h, v18.8h, v9.h[2] \n" "fmla v31.8h, v19.8h, v9.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v9.h[4] \n" "fmla v29.8h, v21.8h, v9.h[5] \n" "fmla v30.8h, v22.8h, v9.h[6] \n" "fmla v31.8h, v23.8h, v9.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v10.h[0] \n" "fmla v29.8h, v17.8h, v10.h[1] \n" "fmla v30.8h, v18.8h, v10.h[2] \n" "fmla v31.8h, v19.8h, v10.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v10.h[4] \n" "fmla v29.8h, v21.8h, v10.h[5] \n" "fmla v30.8h, v22.8h, v10.h[6] \n" "fmla v31.8h, v23.8h, v10.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v11.h[0] \n" "fmla v29.8h, v17.8h, v11.h[1] \n" "fmla v30.8h, v18.8h, v11.h[2] \n" "fmla v31.8h, v19.8h, v11.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v11.h[4] \n" "fmla v29.8h, v21.8h, v11.h[5] \n" "fmla v30.8h, v22.8h, v11.h[6] \n" "fmla v31.8h, v23.8h, v11.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v12.h[0] 
\n" "fmla v29.8h, v17.8h, v12.h[1] \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.8h}, [%3], #16 \n" // r20 "fmla v30.8h, v18.8h, v12.h[2] \n" "fmla v31.8h, v19.8h, v12.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v12.h[4] \n" "fmla v29.8h, v21.8h, v12.h[5] \n" "fmla v30.8h, v22.8h, v12.h[6] \n" "fmla v31.8h, v23.8h, v12.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v0.h[0] \n" "fmla v29.8h, v17.8h, v0.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v1.8h, v2.8h, v3.8h, v4.8h}, [%3] \n" // r21 r22 r23 r24 "fmla v30.8h, v18.8h, v0.h[2] \n" "fmla v31.8h, v19.8h, v0.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v21.8h, v0.h[5] \n" "fmla v30.8h, v22.8h, v0.h[6] \n" "fmla v31.8h, v23.8h, v0.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v17.8h, v1.h[1] \n" "fmla v30.8h, v18.8h, v1.h[2] \n" "fmla v31.8h, v19.8h, v1.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v21.8h, v1.h[5] \n" "fmla v30.8h, v22.8h, v1.h[6] \n" "fmla v31.8h, v23.8h, v1.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v17.8h, v2.h[1] \n" "fmla v30.8h, v18.8h, v2.h[2] \n" "fmla v31.8h, v19.8h, v2.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v21.8h, v2.h[5] \n" "fmla v30.8h, v22.8h, v2.h[6] \n" "fmla v31.8h, v23.8h, v2.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v3.h[0] \n" "fmla v29.8h, v17.8h, v3.h[1] \n" "fmla v30.8h, v18.8h, 
v3.h[2] \n" "fmla v31.8h, v19.8h, v3.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v3.h[4] \n" "fmla v29.8h, v21.8h, v3.h[5] \n" "fmla v30.8h, v22.8h, v3.h[6] \n" "fmla v31.8h, v23.8h, v3.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v4.h[0] \n" "fmla v29.8h, v17.8h, v4.h[1] \n" "prfm pldl1keep, [%4, #128] \n" "ld1 {v8.8h}, [%4], #16 \n" // r30 "fmla v30.8h, v18.8h, v4.h[2] \n" "fmla v31.8h, v19.8h, v4.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v21.8h, v4.h[5] \n" "fmla v30.8h, v22.8h, v4.h[6] \n" "fmla v31.8h, v23.8h, v4.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v8.h[0] \n" "fmla v29.8h, v17.8h, v8.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v9.8h, v10.8h, v11.8h, v12.8h}, [%4] \n" // r31 r32 r33 r34 "fmla v30.8h, v18.8h, v8.h[2] \n" "fmla v31.8h, v19.8h, v8.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v8.h[4] \n" "fmla v29.8h, v21.8h, v8.h[5] \n" "fmla v30.8h, v22.8h, v8.h[6] \n" "fmla v31.8h, v23.8h, v8.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v9.h[0] \n" "fmla v29.8h, v17.8h, v9.h[1] \n" "fmla v30.8h, v18.8h, v9.h[2] \n" "fmla v31.8h, v19.8h, v9.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v9.h[4] \n" "fmla v29.8h, v21.8h, v9.h[5] \n" "fmla v30.8h, v22.8h, v9.h[6] \n" "fmla v31.8h, v23.8h, v9.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v10.h[0] \n" "fmla v29.8h, v17.8h, v10.h[1] \n" "fmla v30.8h, v18.8h, v10.h[2] \n" "fmla v31.8h, v19.8h, v10.h[3] \n" "prfm pldl1keep, 
[%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v10.h[4] \n" "fmla v29.8h, v21.8h, v10.h[5] \n" "fmla v30.8h, v22.8h, v10.h[6] \n" "fmla v31.8h, v23.8h, v10.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v11.h[0] \n" "fmla v29.8h, v17.8h, v11.h[1] \n" "fmla v30.8h, v18.8h, v11.h[2] \n" "fmla v31.8h, v19.8h, v11.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v11.h[4] \n" "fmla v29.8h, v21.8h, v11.h[5] \n" "fmla v30.8h, v22.8h, v11.h[6] \n" "fmla v31.8h, v23.8h, v11.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v12.h[0] \n" "fmla v29.8h, v17.8h, v12.h[1] \n" "prfm pldl1keep, [%5, #128] \n" "ld1 {v0.8h}, [%5], #16 \n" // r40 "fmla v30.8h, v18.8h, v12.h[2] \n" "fmla v31.8h, v19.8h, v12.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v12.h[4] \n" "fmla v29.8h, v21.8h, v12.h[5] \n" "fmla v30.8h, v22.8h, v12.h[6] \n" "fmla v31.8h, v23.8h, v12.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v0.h[0] \n" "fmla v29.8h, v17.8h, v0.h[1] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v1.8h, v2.8h, v3.8h, v4.8h}, [%5] \n" // r41 r42 r43 r44 "fmla v30.8h, v18.8h, v0.h[2] \n" "fmla v31.8h, v19.8h, v0.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v21.8h, v0.h[5] \n" "fmla v30.8h, v22.8h, v0.h[6] \n" "fmla v31.8h, v23.8h, v0.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v17.8h, v1.h[1] \n" "fmla v30.8h, v18.8h, v1.h[2] \n" "fmla v31.8h, v19.8h, v1.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, 
v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v21.8h, v1.h[5] \n" "fmla v30.8h, v22.8h, v1.h[6] \n" "fmla v31.8h, v23.8h, v1.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v17.8h, v2.h[1] \n" "fmla v30.8h, v18.8h, v2.h[2] \n" "fmla v31.8h, v19.8h, v2.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v21.8h, v2.h[5] \n" "fmla v30.8h, v22.8h, v2.h[6] \n" "fmla v31.8h, v23.8h, v2.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v3.h[0] \n" "fmla v29.8h, v17.8h, v3.h[1] \n" "fmla v30.8h, v18.8h, v3.h[2] \n" "fmla v31.8h, v19.8h, v3.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v3.h[4] \n" "fmla v29.8h, v21.8h, v3.h[5] \n" "fmla v30.8h, v22.8h, v3.h[6] \n" "fmla v31.8h, v23.8h, v3.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6] \n" "fmla v28.8h, v16.8h, v4.h[0] \n" "fmla v29.8h, v17.8h, v4.h[1] \n" "fmla v30.8h, v18.8h, v4.h[2] \n" "fmla v31.8h, v19.8h, v4.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v21.8h, v4.h[5] \n" "fmla v30.8h, v22.8h, v4.h[6] \n" "fmla v31.8h, v23.8h, v4.h[7] \n" "fadd v28.8h, v28.8h, v29.8h \n" "fadd v30.8h, v30.8h, v31.8h \n" "fadd v28.8h, v28.8h, v30.8h \n" "sub %6, %6, #3136 \n" // kptr -= 24.5 * 64; "st1 {v28.8h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31"); } r0 += 4 * 8; r1 += 4 * 8; r2 += 4 * 8; r3 += 4 * 8; r4 += 4 * 8; } } } } 
static void conv5x5s2_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int tailstep = (w - 2 * outw + w) * 8; const __fp16* bias = _bias; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { Mat out0 = top_blob.channel(p); float16x8_t _bias0 = bias ? vld1q_f16(bias + p * 8) : vdupq_n_f16(0.f); out0.fill(_bias0); int q = 0; for (; q < inch; q++) { __fp16* outptr0 = out0; const Mat img0 = bottom_blob.channel(q); const __fp16* r0 = img0.row<const __fp16>(0); const __fp16* r1 = img0.row<const __fp16>(1); const __fp16* r2 = img0.row<const __fp16>(2); const __fp16* r3 = img0.row<const __fp16>(3); const __fp16* r4 = img0.row<const __fp16>(4); const __fp16* kptr = kernel.channel(p).row<const __fp16>(q); int i = 0; for (; i < outh; i++) { int j = 0; for (; j + 1 < outw; j += 2) { asm volatile( "prfm pldl1keep, [%1, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" // r00 r01 r02 r03 "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "prfm pldl1keep, [%0, #256] \n" "ld1 {v30.8h, v31.8h}, [%0] \n" // sum0 sum1 "fmul v28.8h, v16.8h, v0.h[0] \n" "fmul v29.8h, v16.8h, v2.h[0] \n" "fmla v30.8h, v17.8h, v0.h[1] \n" "fmla v31.8h, v17.8h, v2.h[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v18.8h, v0.h[2] \n" "fmla v29.8h, v18.8h, v2.h[2] \n" "fmla v30.8h, v19.8h, v0.h[3] \n" "fmla v31.8h, v19.8h, v2.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v20.8h, v2.h[4] \n" "fmla v30.8h, v21.8h, v0.h[5] \n" "fmla v31.8h, v21.8h, v2.h[5] \n" "fmla v28.8h, v22.8h, v0.h[6] \n" "fmla v29.8h, v22.8h, v2.h[6] \n" "fmla v30.8h, v23.8h, v0.h[7] \n" "fmla v31.8h, v23.8h, v2.h[7] \n" 
"prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v16.8h, v3.h[0] \n" "fmla v30.8h, v17.8h, v1.h[1] \n" "fmla v31.8h, v17.8h, v3.h[1] \n" "prfm pldl1keep, [%1, #384] \n" "ld1 {v4.8h, v5.8h, v6.8h}, [%1] \n" // r04 r05 r06 "fmla v28.8h, v18.8h, v1.h[2] \n" "fmla v29.8h, v18.8h, v3.h[2] \n" "fmla v30.8h, v19.8h, v1.h[3] \n" "fmla v31.8h, v19.8h, v3.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v20.8h, v3.h[4] \n" "fmla v30.8h, v21.8h, v1.h[5] \n" "fmla v31.8h, v21.8h, v3.h[5] \n" "fmla v28.8h, v22.8h, v1.h[6] \n" "fmla v29.8h, v22.8h, v3.h[6] \n" "fmla v30.8h, v23.8h, v1.h[7] \n" "fmla v31.8h, v23.8h, v3.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v16.8h, v4.h[0] \n" "fmla v30.8h, v17.8h, v2.h[1] \n" "fmla v31.8h, v17.8h, v4.h[1] \n" "fmla v28.8h, v18.8h, v2.h[2] \n" "fmla v29.8h, v18.8h, v4.h[2] \n" "fmla v30.8h, v19.8h, v2.h[3] \n" "fmla v31.8h, v19.8h, v4.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v20.8h, v4.h[4] \n" "fmla v30.8h, v21.8h, v2.h[5] \n" "fmla v31.8h, v21.8h, v4.h[5] \n" "fmla v28.8h, v22.8h, v2.h[6] \n" "fmla v29.8h, v22.8h, v4.h[6] \n" "fmla v30.8h, v23.8h, v2.h[7] \n" "fmla v31.8h, v23.8h, v4.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v3.h[0] \n" "fmla v29.8h, v16.8h, v5.h[0] \n" "fmla v30.8h, v17.8h, v3.h[1] \n" "fmla v31.8h, v17.8h, v5.h[1] \n" "fmla v28.8h, v18.8h, v3.h[2] \n" "fmla v29.8h, v18.8h, v5.h[2] \n" "fmla v30.8h, v19.8h, v3.h[3] \n" "fmla v31.8h, v19.8h, v5.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v3.h[4] \n" "fmla 
v29.8h, v20.8h, v5.h[4] \n" "fmla v30.8h, v21.8h, v3.h[5] \n" "fmla v31.8h, v21.8h, v5.h[5] \n" "fmla v28.8h, v22.8h, v3.h[6] \n" "fmla v29.8h, v22.8h, v5.h[6] \n" "fmla v30.8h, v23.8h, v3.h[7] \n" "fmla v31.8h, v23.8h, v5.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v4.h[0] \n" "fmla v29.8h, v16.8h, v6.h[0] \n" "fmla v30.8h, v17.8h, v4.h[1] \n" "fmla v31.8h, v17.8h, v6.h[1] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r10 r11 r12 r13 "fmla v28.8h, v18.8h, v4.h[2] \n" "fmla v29.8h, v18.8h, v6.h[2] \n" "fmla v30.8h, v19.8h, v4.h[3] \n" "fmla v31.8h, v19.8h, v6.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v6.h[4] \n" "fmla v30.8h, v21.8h, v4.h[5] \n" "fmla v31.8h, v21.8h, v6.h[5] \n" "fmla v28.8h, v22.8h, v4.h[6] \n" "fmla v29.8h, v22.8h, v6.h[6] \n" "fmla v30.8h, v23.8h, v4.h[7] \n" "fmla v31.8h, v23.8h, v6.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v8.h[0] \n" "fmla v29.8h, v16.8h, v10.h[0] \n" "fmla v30.8h, v17.8h, v8.h[1] \n" "fmla v31.8h, v17.8h, v10.h[1] \n" "fmla v28.8h, v18.8h, v8.h[2] \n" "fmla v29.8h, v18.8h, v10.h[2] \n" "fmla v30.8h, v19.8h, v8.h[3] \n" "fmla v31.8h, v19.8h, v10.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v8.h[4] \n" "fmla v29.8h, v20.8h, v10.h[4] \n" "fmla v30.8h, v21.8h, v8.h[5] \n" "fmla v31.8h, v21.8h, v10.h[5] \n" "fmla v28.8h, v22.8h, v8.h[6] \n" "fmla v29.8h, v22.8h, v10.h[6] \n" "fmla v30.8h, v23.8h, v8.h[7] \n" "fmla v31.8h, v23.8h, v10.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v9.h[0] \n" "fmla v29.8h, v16.8h, v11.h[0] \n" "fmla v30.8h, v17.8h, v9.h[1] \n" "fmla v31.8h, v17.8h, v11.h[1] \n" "prfm 
pldl1keep, [%2, #384] \n" "ld1 {v12.8h, v13.8h, v14.8h}, [%2] \n" // r14 r15 r16 "fmla v28.8h, v18.8h, v9.h[2] \n" "fmla v29.8h, v18.8h, v11.h[2] \n" "fmla v30.8h, v19.8h, v9.h[3] \n" "fmla v31.8h, v19.8h, v11.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v9.h[4] \n" "fmla v29.8h, v20.8h, v11.h[4] \n" "fmla v30.8h, v21.8h, v9.h[5] \n" "fmla v31.8h, v21.8h, v11.h[5] \n" "fmla v28.8h, v22.8h, v9.h[6] \n" "fmla v29.8h, v22.8h, v11.h[6] \n" "fmla v30.8h, v23.8h, v9.h[7] \n" "fmla v31.8h, v23.8h, v11.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v10.h[0] \n" "fmla v29.8h, v16.8h, v12.h[0] \n" "fmla v30.8h, v17.8h, v10.h[1] \n" "fmla v31.8h, v17.8h, v12.h[1] \n" "fmla v28.8h, v18.8h, v10.h[2] \n" "fmla v29.8h, v18.8h, v12.h[2] \n" "fmla v30.8h, v19.8h, v10.h[3] \n" "fmla v31.8h, v19.8h, v12.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v10.h[4] \n" "fmla v29.8h, v20.8h, v12.h[4] \n" "fmla v30.8h, v21.8h, v10.h[5] \n" "fmla v31.8h, v21.8h, v12.h[5] \n" "fmla v28.8h, v22.8h, v10.h[6] \n" "fmla v29.8h, v22.8h, v12.h[6] \n" "fmla v30.8h, v23.8h, v10.h[7] \n" "fmla v31.8h, v23.8h, v12.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v11.h[0] \n" "fmla v29.8h, v16.8h, v13.h[0] \n" "fmla v30.8h, v17.8h, v11.h[1] \n" "fmla v31.8h, v17.8h, v13.h[1] \n" "fmla v28.8h, v18.8h, v11.h[2] \n" "fmla v29.8h, v18.8h, v13.h[2] \n" "fmla v30.8h, v19.8h, v11.h[3] \n" "fmla v31.8h, v19.8h, v13.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v11.h[4] \n" "fmla v29.8h, v20.8h, v13.h[4] \n" "fmla v30.8h, v21.8h, v11.h[5] \n" "fmla v31.8h, v21.8h, v13.h[5] \n" "fmla v28.8h, v22.8h, v11.h[6] \n" "fmla v29.8h, v22.8h, v13.h[6] \n" "fmla v30.8h, v23.8h, 
v11.h[7] \n" "fmla v31.8h, v23.8h, v13.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v12.h[0] \n" "fmla v29.8h, v16.8h, v14.h[0] \n" "fmla v30.8h, v17.8h, v12.h[1] \n" "fmla v31.8h, v17.8h, v14.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%3], #64 \n" // r20 r21 r22 r23 "fmla v28.8h, v18.8h, v12.h[2] \n" "fmla v29.8h, v18.8h, v14.h[2] \n" "fmla v30.8h, v19.8h, v12.h[3] \n" "fmla v31.8h, v19.8h, v14.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v12.h[4] \n" "fmla v29.8h, v20.8h, v14.h[4] \n" "fmla v30.8h, v21.8h, v12.h[5] \n" "fmla v31.8h, v21.8h, v14.h[5] \n" "fmla v28.8h, v22.8h, v12.h[6] \n" "fmla v29.8h, v22.8h, v14.h[6] \n" "fmla v30.8h, v23.8h, v12.h[7] \n" "fmla v31.8h, v23.8h, v14.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v0.h[0] \n" "fmla v29.8h, v16.8h, v2.h[0] \n" "fmla v30.8h, v17.8h, v0.h[1] \n" "fmla v31.8h, v17.8h, v2.h[1] \n" "fmla v28.8h, v18.8h, v0.h[2] \n" "fmla v29.8h, v18.8h, v2.h[2] \n" "fmla v30.8h, v19.8h, v0.h[3] \n" "fmla v31.8h, v19.8h, v2.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v20.8h, v2.h[4] \n" "fmla v30.8h, v21.8h, v0.h[5] \n" "fmla v31.8h, v21.8h, v2.h[5] \n" "fmla v28.8h, v22.8h, v0.h[6] \n" "fmla v29.8h, v22.8h, v2.h[6] \n" "fmla v30.8h, v23.8h, v0.h[7] \n" "fmla v31.8h, v23.8h, v2.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v16.8h, v3.h[0] \n" "fmla v30.8h, v17.8h, v1.h[1] \n" "fmla v31.8h, v17.8h, v3.h[1] \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v4.8h, v5.8h, v6.8h}, [%3] \n" // r24 r25 r26 "fmla v28.8h, v18.8h, v1.h[2] \n" "fmla v29.8h, v18.8h, v3.h[2] \n" "fmla v30.8h, v19.8h, v1.h[3] \n" 
"fmla v31.8h, v19.8h, v3.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v20.8h, v3.h[4] \n" "fmla v30.8h, v21.8h, v1.h[5] \n" "fmla v31.8h, v21.8h, v3.h[5] \n" "fmla v28.8h, v22.8h, v1.h[6] \n" "fmla v29.8h, v22.8h, v3.h[6] \n" "fmla v30.8h, v23.8h, v1.h[7] \n" "fmla v31.8h, v23.8h, v3.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v16.8h, v4.h[0] \n" "fmla v30.8h, v17.8h, v2.h[1] \n" "fmla v31.8h, v17.8h, v4.h[1] \n" "fmla v28.8h, v18.8h, v2.h[2] \n" "fmla v29.8h, v18.8h, v4.h[2] \n" "fmla v30.8h, v19.8h, v2.h[3] \n" "fmla v31.8h, v19.8h, v4.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v20.8h, v4.h[4] \n" "fmla v30.8h, v21.8h, v2.h[5] \n" "fmla v31.8h, v21.8h, v4.h[5] \n" "fmla v28.8h, v22.8h, v2.h[6] \n" "fmla v29.8h, v22.8h, v4.h[6] \n" "fmla v30.8h, v23.8h, v2.h[7] \n" "fmla v31.8h, v23.8h, v4.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v3.h[0] \n" "fmla v29.8h, v16.8h, v5.h[0] \n" "fmla v30.8h, v17.8h, v3.h[1] \n" "fmla v31.8h, v17.8h, v5.h[1] \n" "fmla v28.8h, v18.8h, v3.h[2] \n" "fmla v29.8h, v18.8h, v5.h[2] \n" "fmla v30.8h, v19.8h, v3.h[3] \n" "fmla v31.8h, v19.8h, v5.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v3.h[4] \n" "fmla v29.8h, v20.8h, v5.h[4] \n" "fmla v30.8h, v21.8h, v3.h[5] \n" "fmla v31.8h, v21.8h, v5.h[5] \n" "fmla v28.8h, v22.8h, v3.h[6] \n" "fmla v29.8h, v22.8h, v5.h[6] \n" "fmla v30.8h, v23.8h, v3.h[7] \n" "fmla v31.8h, v23.8h, v5.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v4.h[0] \n" "fmla v29.8h, v16.8h, v6.h[0] \n" "fmla v30.8h, v17.8h, 
v4.h[1] \n" "fmla v31.8h, v17.8h, v6.h[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%4], #64 \n" // r30 r31 r32 r33 "fmla v28.8h, v18.8h, v4.h[2] \n" "fmla v29.8h, v18.8h, v6.h[2] \n" "fmla v30.8h, v19.8h, v4.h[3] \n" "fmla v31.8h, v19.8h, v6.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v6.h[4] \n" "fmla v30.8h, v21.8h, v4.h[5] \n" "fmla v31.8h, v21.8h, v6.h[5] \n" "fmla v28.8h, v22.8h, v4.h[6] \n" "fmla v29.8h, v22.8h, v6.h[6] \n" "fmla v30.8h, v23.8h, v4.h[7] \n" "fmla v31.8h, v23.8h, v6.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v8.h[0] \n" "fmla v29.8h, v16.8h, v10.h[0] \n" "fmla v30.8h, v17.8h, v8.h[1] \n" "fmla v31.8h, v17.8h, v10.h[1] \n" "fmla v28.8h, v18.8h, v8.h[2] \n" "fmla v29.8h, v18.8h, v10.h[2] \n" "fmla v30.8h, v19.8h, v8.h[3] \n" "fmla v31.8h, v19.8h, v10.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v8.h[4] \n" "fmla v29.8h, v20.8h, v10.h[4] \n" "fmla v30.8h, v21.8h, v8.h[5] \n" "fmla v31.8h, v21.8h, v10.h[5] \n" "fmla v28.8h, v22.8h, v8.h[6] \n" "fmla v29.8h, v22.8h, v10.h[6] \n" "fmla v30.8h, v23.8h, v8.h[7] \n" "fmla v31.8h, v23.8h, v10.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v9.h[0] \n" "fmla v29.8h, v16.8h, v11.h[0] \n" "fmla v30.8h, v17.8h, v9.h[1] \n" "fmla v31.8h, v17.8h, v11.h[1] \n" "prfm pldl1keep, [%4, #384] \n" "ld1 {v12.8h, v13.8h, v14.8h}, [%4] \n" // r34 r35 r36 "fmla v28.8h, v18.8h, v9.h[2] \n" "fmla v29.8h, v18.8h, v11.h[2] \n" "fmla v30.8h, v19.8h, v9.h[3] \n" "fmla v31.8h, v19.8h, v11.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v9.h[4] \n" "fmla v29.8h, v20.8h, v11.h[4] \n" "fmla v30.8h, v21.8h, v9.h[5] \n" 
"fmla v31.8h, v21.8h, v11.h[5] \n" "fmla v28.8h, v22.8h, v9.h[6] \n" "fmla v29.8h, v22.8h, v11.h[6] \n" "fmla v30.8h, v23.8h, v9.h[7] \n" "fmla v31.8h, v23.8h, v11.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v10.h[0] \n" "fmla v29.8h, v16.8h, v12.h[0] \n" "fmla v30.8h, v17.8h, v10.h[1] \n" "fmla v31.8h, v17.8h, v12.h[1] \n" "fmla v28.8h, v18.8h, v10.h[2] \n" "fmla v29.8h, v18.8h, v12.h[2] \n" "fmla v30.8h, v19.8h, v10.h[3] \n" "fmla v31.8h, v19.8h, v12.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v10.h[4] \n" "fmla v29.8h, v20.8h, v12.h[4] \n" "fmla v30.8h, v21.8h, v10.h[5] \n" "fmla v31.8h, v21.8h, v12.h[5] \n" "fmla v28.8h, v22.8h, v10.h[6] \n" "fmla v29.8h, v22.8h, v12.h[6] \n" "fmla v30.8h, v23.8h, v10.h[7] \n" "fmla v31.8h, v23.8h, v12.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v11.h[0] \n" "fmla v29.8h, v16.8h, v13.h[0] \n" "fmla v30.8h, v17.8h, v11.h[1] \n" "fmla v31.8h, v17.8h, v13.h[1] \n" "fmla v28.8h, v18.8h, v11.h[2] \n" "fmla v29.8h, v18.8h, v13.h[2] \n" "fmla v30.8h, v19.8h, v11.h[3] \n" "fmla v31.8h, v19.8h, v13.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v11.h[4] \n" "fmla v29.8h, v20.8h, v13.h[4] \n" "fmla v30.8h, v21.8h, v11.h[5] \n" "fmla v31.8h, v21.8h, v13.h[5] \n" "fmla v28.8h, v22.8h, v11.h[6] \n" "fmla v29.8h, v22.8h, v13.h[6] \n" "fmla v30.8h, v23.8h, v11.h[7] \n" "fmla v31.8h, v23.8h, v13.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v12.h[0] \n" "fmla v29.8h, v16.8h, v14.h[0] \n" "fmla v30.8h, v17.8h, v12.h[1] \n" "fmla v31.8h, v17.8h, v14.h[1] \n" "prfm pldl1keep, [%5, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%5], #64 \n" // r40 r41 r42 r43 "fmla v28.8h, v18.8h, v12.h[2] \n" "fmla 
v29.8h, v18.8h, v14.h[2] \n" "fmla v30.8h, v19.8h, v12.h[3] \n" "fmla v31.8h, v19.8h, v14.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v12.h[4] \n" "fmla v29.8h, v20.8h, v14.h[4] \n" "fmla v30.8h, v21.8h, v12.h[5] \n" "fmla v31.8h, v21.8h, v14.h[5] \n" "fmla v28.8h, v22.8h, v12.h[6] \n" "fmla v29.8h, v22.8h, v14.h[6] \n" "fmla v30.8h, v23.8h, v12.h[7] \n" "fmla v31.8h, v23.8h, v14.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v0.h[0] \n" "fmla v29.8h, v16.8h, v2.h[0] \n" "fmla v30.8h, v17.8h, v0.h[1] \n" "fmla v31.8h, v17.8h, v2.h[1] \n" "fmla v28.8h, v18.8h, v0.h[2] \n" "fmla v29.8h, v18.8h, v2.h[2] \n" "fmla v30.8h, v19.8h, v0.h[3] \n" "fmla v31.8h, v19.8h, v2.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v20.8h, v2.h[4] \n" "fmla v30.8h, v21.8h, v0.h[5] \n" "fmla v31.8h, v21.8h, v2.h[5] \n" "fmla v28.8h, v22.8h, v0.h[6] \n" "fmla v29.8h, v22.8h, v2.h[6] \n" "fmla v30.8h, v23.8h, v0.h[7] \n" "fmla v31.8h, v23.8h, v2.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v16.8h, v3.h[0] \n" "fmla v30.8h, v17.8h, v1.h[1] \n" "fmla v31.8h, v17.8h, v3.h[1] \n" "prfm pldl1keep, [%5, #384] \n" "ld1 {v4.8h, v5.8h, v6.8h}, [%5] \n" // r44 r45 r46 "fmla v28.8h, v18.8h, v1.h[2] \n" "fmla v29.8h, v18.8h, v3.h[2] \n" "fmla v30.8h, v19.8h, v1.h[3] \n" "fmla v31.8h, v19.8h, v3.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v20.8h, v3.h[4] \n" "fmla v30.8h, v21.8h, v1.h[5] \n" "fmla v31.8h, v21.8h, v3.h[5] \n" "fmla v28.8h, v22.8h, v1.h[6] \n" "fmla v29.8h, v22.8h, v3.h[6] \n" "fmla v30.8h, v23.8h, v1.h[7] \n" "fmla v31.8h, v23.8h, v3.h[7] \n" "prfm pldl1keep, [%6, 
#512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v16.8h, v4.h[0] \n" "fmla v30.8h, v17.8h, v2.h[1] \n" "fmla v31.8h, v17.8h, v4.h[1] \n" "fmla v28.8h, v18.8h, v2.h[2] \n" "fmla v29.8h, v18.8h, v4.h[2] \n" "fmla v30.8h, v19.8h, v2.h[3] \n" "fmla v31.8h, v19.8h, v4.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v20.8h, v4.h[4] \n" "fmla v30.8h, v21.8h, v2.h[5] \n" "fmla v31.8h, v21.8h, v4.h[5] \n" "fmla v28.8h, v22.8h, v2.h[6] \n" "fmla v29.8h, v22.8h, v4.h[6] \n" "fmla v30.8h, v23.8h, v2.h[7] \n" "fmla v31.8h, v23.8h, v4.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v3.h[0] \n" "fmla v29.8h, v16.8h, v5.h[0] \n" "fmla v30.8h, v17.8h, v3.h[1] \n" "fmla v31.8h, v17.8h, v5.h[1] \n" "fmla v28.8h, v18.8h, v3.h[2] \n" "fmla v29.8h, v18.8h, v5.h[2] \n" "fmla v30.8h, v19.8h, v3.h[3] \n" "fmla v31.8h, v19.8h, v5.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v3.h[4] \n" "fmla v29.8h, v20.8h, v5.h[4] \n" "fmla v30.8h, v21.8h, v3.h[5] \n" "fmla v31.8h, v21.8h, v5.h[5] \n" "fmla v28.8h, v22.8h, v3.h[6] \n" "fmla v29.8h, v22.8h, v5.h[6] \n" "fmla v30.8h, v23.8h, v3.h[7] \n" "fmla v31.8h, v23.8h, v5.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6] \n" "fmla v28.8h, v16.8h, v4.h[0] \n" "fmla v29.8h, v16.8h, v6.h[0] \n" "fmla v30.8h, v17.8h, v4.h[1] \n" "fmla v31.8h, v17.8h, v6.h[1] \n" "fmla v28.8h, v18.8h, v4.h[2] \n" "fmla v29.8h, v18.8h, v6.h[2] \n" "fmla v30.8h, v19.8h, v4.h[3] \n" "fmla v31.8h, v19.8h, v6.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v20.8h, v6.h[4] \n" "fmla v30.8h, v21.8h, v4.h[5] \n" "fmla v31.8h, v21.8h, v6.h[5] \n" "fmla v28.8h, v22.8h, v4.h[6] \n" "fmla v29.8h, v22.8h, v6.h[6] \n" "fmla v30.8h, v23.8h, v4.h[7] \n" 
"fmla v31.8h, v23.8h, v6.h[7] \n" "fadd v28.8h, v28.8h, v30.8h \n" "fadd v29.8h, v29.8h, v31.8h \n" "sub %6, %6, #3136 \n" // kptr -= 24.5 * 64; "st1 {v28.8h, v29.8h}, [%0], #32 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31"); } for (; j < outw; j++) { asm volatile( "prfm pldl1keep, [%1, #256] \n" "ld1 {v0.8h, v1.8h}, [%1], #32 \n" // r00 r01 "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "prfm pldl1keep, [%0, #128] \n" "ld1 {v31.8h}, [%0] \n" // sum0 "fmul v28.8h, v16.8h, v0.h[0] \n" "fmul v29.8h, v17.8h, v0.h[1] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmul v30.8h, v18.8h, v0.h[2] \n" "fmla v31.8h, v19.8h, v0.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v21.8h, v0.h[5] \n" "fmla v30.8h, v22.8h, v0.h[6] \n" "fmla v31.8h, v23.8h, v0.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v17.8h, v1.h[1] \n" "prfm pldl1keep, [%1, #384] \n" "ld1 {v2.8h, v3.8h, v4.8h}, [%1] \n" // r02 r03 r04 "fmla v30.8h, v18.8h, v1.h[2] \n" "fmla v31.8h, v19.8h, v1.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v21.8h, v1.h[5] \n" "fmla v30.8h, v22.8h, v1.h[6] \n" "fmla v31.8h, v23.8h, v1.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v17.8h, v2.h[1] \n" "fmla v30.8h, v18.8h, v2.h[2] \n" "fmla v31.8h, 
v19.8h, v2.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v21.8h, v2.h[5] \n" "fmla v30.8h, v22.8h, v2.h[6] \n" "fmla v31.8h, v23.8h, v2.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v3.h[0] \n" "fmla v29.8h, v17.8h, v3.h[1] \n" "fmla v30.8h, v18.8h, v3.h[2] \n" "fmla v31.8h, v19.8h, v3.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v3.h[4] \n" "fmla v29.8h, v21.8h, v3.h[5] \n" "fmla v30.8h, v22.8h, v3.h[6] \n" "fmla v31.8h, v23.8h, v3.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v4.h[0] \n" "fmla v29.8h, v17.8h, v4.h[1] \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v8.8h, v9.8h}, [%2], #32 \n" // r10 r11 "fmla v30.8h, v18.8h, v4.h[2] \n" "fmla v31.8h, v19.8h, v4.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v21.8h, v4.h[5] \n" "fmla v30.8h, v22.8h, v4.h[6] \n" "fmla v31.8h, v23.8h, v4.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v8.h[0] \n" "fmla v29.8h, v17.8h, v8.h[1] \n" "fmla v30.8h, v18.8h, v8.h[2] \n" "fmla v31.8h, v19.8h, v8.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v8.h[4] \n" "fmla v29.8h, v21.8h, v8.h[5] \n" "fmla v30.8h, v22.8h, v8.h[6] \n" "fmla v31.8h, v23.8h, v8.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v9.h[0] \n" "fmla v29.8h, v17.8h, v9.h[1] \n" "prfm pldl1keep, [%2, #384] \n" "ld1 {v10.8h, v11.8h, v12.8h}, [%2] \n" // r12 r13 r14 "fmla v30.8h, v18.8h, v9.h[2] \n" "fmla v31.8h, v19.8h, v9.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, 
v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v9.h[4] \n" "fmla v29.8h, v21.8h, v9.h[5] \n" "fmla v30.8h, v22.8h, v9.h[6] \n" "fmla v31.8h, v23.8h, v9.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v10.h[0] \n" "fmla v29.8h, v17.8h, v10.h[1] \n" "fmla v30.8h, v18.8h, v10.h[2] \n" "fmla v31.8h, v19.8h, v10.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v10.h[4] \n" "fmla v29.8h, v21.8h, v10.h[5] \n" "fmla v30.8h, v22.8h, v10.h[6] \n" "fmla v31.8h, v23.8h, v10.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v11.h[0] \n" "fmla v29.8h, v17.8h, v11.h[1] \n" "fmla v30.8h, v18.8h, v11.h[2] \n" "fmla v31.8h, v19.8h, v11.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v11.h[4] \n" "fmla v29.8h, v21.8h, v11.h[5] \n" "fmla v30.8h, v22.8h, v11.h[6] \n" "fmla v31.8h, v23.8h, v11.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v12.h[0] \n" "fmla v29.8h, v17.8h, v12.h[1] \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.8h, v1.8h}, [%3], #32 \n" // r20 r21 "fmla v30.8h, v18.8h, v12.h[2] \n" "fmla v31.8h, v19.8h, v12.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v12.h[4] \n" "fmla v29.8h, v21.8h, v12.h[5] \n" "fmla v30.8h, v22.8h, v12.h[6] \n" "fmla v31.8h, v23.8h, v12.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v0.h[0] \n" "fmla v29.8h, v17.8h, v0.h[1] \n" "fmla v30.8h, v18.8h, v0.h[2] \n" "fmla v31.8h, v19.8h, v0.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v21.8h, v0.h[5] \n" "fmla v30.8h, v22.8h, 
v0.h[6] \n" "fmla v31.8h, v23.8h, v0.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v17.8h, v1.h[1] \n" "prfm pldl1keep, [%3, #384] \n" "ld1 {v2.8h, v3.8h, v4.8h}, [%3] \n" // r22 r23 r24 "fmla v30.8h, v18.8h, v1.h[2] \n" "fmla v31.8h, v19.8h, v1.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v21.8h, v1.h[5] \n" "fmla v30.8h, v22.8h, v1.h[6] \n" "fmla v31.8h, v23.8h, v1.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v17.8h, v2.h[1] \n" "fmla v30.8h, v18.8h, v2.h[2] \n" "fmla v31.8h, v19.8h, v2.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v21.8h, v2.h[5] \n" "fmla v30.8h, v22.8h, v2.h[6] \n" "fmla v31.8h, v23.8h, v2.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v3.h[0] \n" "fmla v29.8h, v17.8h, v3.h[1] \n" "fmla v30.8h, v18.8h, v3.h[2] \n" "fmla v31.8h, v19.8h, v3.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v3.h[4] \n" "fmla v29.8h, v21.8h, v3.h[5] \n" "fmla v30.8h, v22.8h, v3.h[6] \n" "fmla v31.8h, v23.8h, v3.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v4.h[0] \n" "fmla v29.8h, v17.8h, v4.h[1] \n" "prfm pldl1keep, [%4, #256] \n" "ld1 {v8.8h, v9.8h}, [%4], #32 \n" // r30 r31 "fmla v30.8h, v18.8h, v4.h[2] \n" "fmla v31.8h, v19.8h, v4.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v21.8h, v4.h[5] \n" "fmla v30.8h, v22.8h, v4.h[6] \n" "fmla v31.8h, v23.8h, v4.h[7] \n" "prfm pldl1keep, [%6, 
#512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v8.h[0] \n" "fmla v29.8h, v17.8h, v8.h[1] \n" "fmla v30.8h, v18.8h, v8.h[2] \n" "fmla v31.8h, v19.8h, v8.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v8.h[4] \n" "fmla v29.8h, v21.8h, v8.h[5] \n" "fmla v30.8h, v22.8h, v8.h[6] \n" "fmla v31.8h, v23.8h, v8.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v9.h[0] \n" "fmla v29.8h, v17.8h, v9.h[1] \n" "prfm pldl1keep, [%4, #384] \n" "ld1 {v10.8h, v11.8h, v12.8h}, [%4] \n" // r32 r33 r34 "fmla v30.8h, v18.8h, v9.h[2] \n" "fmla v31.8h, v19.8h, v9.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v9.h[4] \n" "fmla v29.8h, v21.8h, v9.h[5] \n" "fmla v30.8h, v22.8h, v9.h[6] \n" "fmla v31.8h, v23.8h, v9.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v10.h[0] \n" "fmla v29.8h, v17.8h, v10.h[1] \n" "fmla v30.8h, v18.8h, v10.h[2] \n" "fmla v31.8h, v19.8h, v10.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v10.h[4] \n" "fmla v29.8h, v21.8h, v10.h[5] \n" "fmla v30.8h, v22.8h, v10.h[6] \n" "fmla v31.8h, v23.8h, v10.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v11.h[0] \n" "fmla v29.8h, v17.8h, v11.h[1] \n" "fmla v30.8h, v18.8h, v11.h[2] \n" "fmla v31.8h, v19.8h, v11.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v11.h[4] \n" "fmla v29.8h, v21.8h, v11.h[5] \n" "fmla v30.8h, v22.8h, v11.h[6] \n" "fmla v31.8h, v23.8h, v11.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v12.h[0] \n" "fmla v29.8h, v17.8h, v12.h[1] 
\n" "prfm pldl1keep, [%5, #256] \n" "ld1 {v0.8h, v1.8h}, [%5], #32 \n" // r40 r41 "fmla v30.8h, v18.8h, v12.h[2] \n" "fmla v31.8h, v19.8h, v12.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v12.h[4] \n" "fmla v29.8h, v21.8h, v12.h[5] \n" "fmla v30.8h, v22.8h, v12.h[6] \n" "fmla v31.8h, v23.8h, v12.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v0.h[0] \n" "fmla v29.8h, v17.8h, v0.h[1] \n" "fmla v30.8h, v18.8h, v0.h[2] \n" "fmla v31.8h, v19.8h, v0.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v0.h[4] \n" "fmla v29.8h, v21.8h, v0.h[5] \n" "fmla v30.8h, v22.8h, v0.h[6] \n" "fmla v31.8h, v23.8h, v0.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v1.h[0] \n" "fmla v29.8h, v17.8h, v1.h[1] \n" "prfm pldl1keep, [%5, #384] \n" "ld1 {v2.8h, v3.8h, v4.8h}, [%5] \n" // r42 r43 r44 "fmla v30.8h, v18.8h, v1.h[2] \n" "fmla v31.8h, v19.8h, v1.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v1.h[4] \n" "fmla v29.8h, v21.8h, v1.h[5] \n" "fmla v30.8h, v22.8h, v1.h[6] \n" "fmla v31.8h, v23.8h, v1.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v2.h[0] \n" "fmla v29.8h, v17.8h, v2.h[1] \n" "fmla v30.8h, v18.8h, v2.h[2] \n" "fmla v31.8h, v19.8h, v2.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v2.h[4] \n" "fmla v29.8h, v21.8h, v2.h[5] \n" "fmla v30.8h, v22.8h, v2.h[6] \n" "fmla v31.8h, v23.8h, v2.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6], #64 \n" "fmla v28.8h, v16.8h, v3.h[0] \n" "fmla v29.8h, v17.8h, v3.h[1] \n" "fmla v30.8h, v18.8h, v3.h[2] \n" "fmla v31.8h, v19.8h, 
v3.h[3] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%6], #64 \n" "fmla v28.8h, v20.8h, v3.h[4] \n" "fmla v29.8h, v21.8h, v3.h[5] \n" "fmla v30.8h, v22.8h, v3.h[6] \n" "fmla v31.8h, v23.8h, v3.h[7] \n" "prfm pldl1keep, [%6, #512] \n" "ld1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%6] \n" "fmla v28.8h, v16.8h, v4.h[0] \n" "fmla v29.8h, v17.8h, v4.h[1] \n" "fmla v30.8h, v18.8h, v4.h[2] \n" "fmla v31.8h, v19.8h, v4.h[3] \n" "fmla v28.8h, v20.8h, v4.h[4] \n" "fmla v29.8h, v21.8h, v4.h[5] \n" "fmla v30.8h, v22.8h, v4.h[6] \n" "fmla v31.8h, v23.8h, v4.h[7] \n" "fadd v28.8h, v28.8h, v29.8h \n" "fadd v30.8h, v30.8h, v31.8h \n" "fadd v28.8h, v28.8h, v30.8h \n" "sub %6, %6, #3136 \n" // kptr -= 24.5 * 64; "st1 {v28.8h}, [%0], #16 \n" : "=r"(outptr0), // %0 "=r"(r0), // %1 "=r"(r1), // %2 "=r"(r2), // %3 "=r"(r3), // %4 "=r"(r4), // %5 "=r"(kptr) // %6 : "0"(outptr0), "1"(r0), "2"(r1), "3"(r2), "4"(r3), "5"(r4), "6"(kptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v28", "v29", "v30", "v31"); } r0 += tailstep; r1 += tailstep; r2 += tailstep; r3 += tailstep; r4 += tailstep; } } } }
LoadSubgraph.h
#ifndef __GPU_COMMON_LOAD_SUBGRAPH_H__ #define __GPU_COMMON_LOAD_SUBGRAPH_H__ #include <future> #include <thread> #include "CPUGraph.h" #include "Task.h" static uintV* LoadSubgraph(Graph* cpu_relation, size_t load_vertex_count, size_t load_edge_count, uintV* load_vertex_ids, uintE* load_row_ptrs, size_t thread_num) { uintE* graph_row_ptrs = cpu_relation->GetRowPtrs(); uintV* graph_cols = cpu_relation->GetCols(); uintV* load_cols = new uintV[load_edge_count]; if (thread_num == 1) { size_t off = 0; for (size_t i = 0; i < load_vertex_count; ++i) { uintV u = load_vertex_ids[i]; for (uintE j = graph_row_ptrs[u]; j < graph_row_ptrs[u + 1]; ++j) { uintV v = graph_cols[j]; load_cols[off] = v; ++off; } } } else { #pragma omp parallel for num_threads(thread_num) for (size_t e = 0; e < load_edge_count; ++e) { size_t index = std::upper_bound(load_row_ptrs, load_row_ptrs + load_vertex_count + 1, e) - load_row_ptrs; --index; uintV u = load_vertex_ids[index]; size_t v_off = e - load_row_ptrs[index]; uintV v = graph_cols[graph_row_ptrs[u] + v_off]; load_cols[e] = v; } } return load_cols; } #endif
Kmeans.c
#include <stdio.h> #include <stdlib.h> #include "kmeans.h" void addPixelCluster(cluster* cl, pixel* px){ if(cl->size==cl->nbPixel){ realloc(cl->list,cl->size*2); cl->size*=2; } cl->list[cl->nbPixel]=*px; cl->nbPixel++; } pixel randomPixel(pixel** matrix,unsigned int height,unsigned int length){ int L=rand()%length; int H=rand()%height; return matrix[H][L]; } pixel* initRandom(unsigned int K, pixel** pixelMatrix,unsigned int height, unsigned int length){ pixel* clusterKernel=malloc(K*sizeof(pixel)); unsigned int i; for (i = 0; i < K; i++) { clusterKernel[i]=randomPixel(pixelMatrix,height,length); } return clusterKernel; } cluster* initCluster(unsigned int K){ int i; cluster* cl=malloc(K*sizeof(struct strCluster)); for (i = 0; i < K; i++) { cl[i].list=malloc(pow(2,16)*sizeof(pixel)); cl[i].size=pow(2,16); cl[i].nbPixel=0; } return cl; } unsigned int distancePixel(pixel* px1,pixel* px2){ return sqrt(((px1->R - px2->R)*(px1->R - px2->R))+ ((px1->G - px2->G)*(px1->G - px2->G))+ ((px1->B - px2->B)*(px1->B - px2->B))); } void segmentation(pixel* clusterKernel,cluster* cluster,unsigned nbCluster, pixel** matrix,unsigned int height, unsigned int length){ int i,j,k; int tmpDistance=-1; int minDistance=-1; int minCluster=-1; #pragma omp parallel for for (i = 0; i < height; i++) { for (j = 0; j < length; j++) { for (k= 0; k < nbCluster; k++) { tmpDistance=distancePixel(&matrix[i][j],&clusterKernel[k]); if(k==0) minDistance=tmpDistance; if (tmpDistance<minDistance) { minCluster=k; } addPixelCluster(&cluster[minCluster],&matrix[i][j]); } } } } void updateKernel(pixel* clusterKernel,cluster* cluster,unsigned int K){ int i; int R=0; int G=0; int B=0; int size=cluster[K].size; for (i = 0; i < size; i++) { R+=cluster[K].list[i].R; G+=cluster[K].list[i].G; B+=cluster[K].list[i].B; } R/=size; G/=size; B/=size; clusterKernel[K].R=R; clusterKernel[K].G=G; clusterKernel[K].B=B; } void kmeans(pixel* clusterKernel,cluster* cluster,Image toSegment,unsigned K){ int i,k; for (i = 0; i < 10; 
i++) { for (k = 0; k < K; k++) { cluster[K].nbPixel=0; } segmentation(clusterKernel,cluster,K,toSegment->matrix,toSegment->height,toSegment->length); #pragma omp parallel for for (k = 0; k < K; k++) { updateKernel(clusterKernel,cluster,k); } } }
GrB_Semiring_wait.c
//------------------------------------------------------------------------------ // GrB_Semiring_wait: wait for a user-defined GrB_Semiring to complete //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // In SuiteSparse:GraphBLAS, a user-defined GrB_Semiring has no pending // operations to wait for. All this method does is verify that the semiring is // properly initialized, and then it does an OpenMP flush. #include "GB.h" GrB_Info GrB_Semiring_wait // no work, just check if the GrB_Semiring is valid ( #if (GxB_IMPLEMENTATION_MAJOR <= 5) GrB_Semiring *semiring #else GrB_Semiring semiring, GrB_WaitMode waitmode #endif ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- #if (GxB_IMPLEMENTATION_MAJOR <= 5) GB_WHERE1 ("GrB_Semiring_wait (&semiring)") ; GB_RETURN_IF_NULL (semiring) ; GB_RETURN_IF_NULL_OR_FAULTY (*semiring) ; #else GB_WHERE1 ("GrB_Semiring_wait (semiring, mode)") ; GB_RETURN_IF_NULL_OR_FAULTY (semiring) ; #endif //-------------------------------------------------------------------------- // return result //-------------------------------------------------------------------------- #pragma omp flush return (GrB_SUCCESS) ; }
declare_simd_aarch64.c
// REQUIRES: aarch64-registered-target
// -fopenmp and -fopenmp-simd behavior are expected to be the same.

// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon -fopenmp -x c -emit-llvm %s -o - -femit-all-decls | FileCheck %s --check-prefix=AARCH64
// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon -fopenmp-simd -x c -emit-llvm %s -o - -femit-all-decls | FileCheck %s --check-prefix=AARCH64

// NOTE(review): this is a clang/FileCheck lit test for the AArch64 vector
// function ABI name mangling produced by `#pragma omp declare simd`.  The
// check-prefix comment lines below are the test's expected output and must
// not be edited.

#pragma omp declare simd
#pragma omp declare simd simdlen(2)
#pragma omp declare simd simdlen(6)
#pragma omp declare simd simdlen(8)
double foo(float x);

// AARCH64: "_ZGVnM2v_foo" "_ZGVnM4v_foo" "_ZGVnM8v_foo" "_ZGVnN2v_foo" "_ZGVnN4v_foo" "_ZGVnN8v_foo"
// AARCH64-NOT: _ZGVnN6v_foo

void foo_loop(double *x, float *y, int N) {
  for (int i = 0; i < N; ++i) {
    x[i] = foo(y[i]);
  }
}

// make sure that the following two functions by default get generated
// with 4 and 2 lanes, as described in the vector ABI
#pragma omp declare simd notinbranch
float bar(double x);
#pragma omp declare simd notinbranch
double baz(float x);

// AARCH64: "_ZGVnN2v_baz" "_ZGVnN4v_baz"
// AARCH64-NOT: baz
// AARCH64: "_ZGVnN2v_bar" "_ZGVnN4v_bar"
// AARCH64-NOT: bar

void baz_bar_loop(double *x, float *y, int N) {
  for (int i = 0; i < N; ++i) {
    x[i] = baz(y[i]);
    y[i] = bar(x[i]);
  }
}

/***************************/
/* 32-bit integer tests    */
/***************************/

#pragma omp declare simd
#pragma omp declare simd simdlen(2)
#pragma omp declare simd simdlen(6)
#pragma omp declare simd simdlen(8)
long foo_int(int x);

// AARCH64: "_ZGVnN2v_foo_int" "_ZGVnN4v_foo_int" "_ZGVnN8v_foo_int"
// No non power of two
// AARCH64-NOT: _ZGVnN6v_foo_int

void foo_int_loop(long *x, int *y, int N) {
  for (int i = 0; i < N; ++i) {
    x[i] = foo_int(y[i]);
  }
}

#pragma omp declare simd
char simple_8bit(char);
// AARCH64: "_ZGVnM16v_simple_8bit" "_ZGVnM8v_simple_8bit" "_ZGVnN16v_simple_8bit" "_ZGVnN8v_simple_8bit"
#pragma omp declare simd
short simple_16bit(short);
// AARCH64: "_ZGVnM4v_simple_16bit" "_ZGVnM8v_simple_16bit" "_ZGVnN4v_simple_16bit" "_ZGVnN8v_simple_16bit"
#pragma omp declare simd
int simple_32bit(int);
// AARCH64: "_ZGVnM2v_simple_32bit" "_ZGVnM4v_simple_32bit" "_ZGVnN2v_simple_32bit" "_ZGVnN4v_simple_32bit"
#pragma omp declare simd
long simple_64bit(long);
// AARCH64: "_ZGVnM2v_simple_64bit" "_ZGVnN2v_simple_64bit"

#pragma omp declare simd
#pragma omp declare simd simdlen(32)
char a01(int x);
// AARCH64: "_ZGVnN16v_a01" "_ZGVnN32v_a01" "_ZGVnN8v_a01"
// AARCH64-NOT: a01

#pragma omp declare simd
#pragma omp declare simd simdlen(2)
long a02(short x);
// AARCH64: "_ZGVnN2v_a02" "_ZGVnN4v_a02" "_ZGVnN8v_a02"
// AARCH64-NOT: a02

/************/
/* pointers */
/************/

#pragma omp declare simd
int b01(int *x);
// AARCH64: "_ZGVnN4v_b01"
// AARCH64-NOT: b01

#pragma omp declare simd
char b02(char *);
// AARCH64: "_ZGVnN16v_b02" "_ZGVnN8v_b02"
// AARCH64-NOT: b02

#pragma omp declare simd
double *b03(double *);
// AARCH64: "_ZGVnN2v_b03"
// AARCH64-NOT: b03

/***********/
/* masking */
/***********/

#pragma omp declare simd inbranch
int c01(double *x, short y);
// AARCH64: "_ZGVnM8vv_c01"
// AARCH64-NOT: c01

#pragma omp declare simd inbranch uniform(x)
double c02(double *x, char y);
// AARCH64: "_ZGVnM16uv_c02" "_ZGVnM8uv_c02"
// AARCH64-NOT: c02

/*************************/
/* sincos-like signature */
/*************************/
#pragma omp declare simd linear(sin) linear(cos)
void sincos(double in, double *sin, double *cos);
// AARCH64: "_ZGVnN2vll_sincos"
// AARCH64-NOT: sincos

#pragma omp declare simd linear(sin : 1) linear(cos : 2)
void SinCos(double in, double *sin, double *cos);
// AARCH64: "_ZGVnN2vll2_SinCos"
// AARCH64-NOT: SinCos

// Selection of tests based on the examples provided in chapter 5 of
// the Vector Function ABI specifications for AArch64, at
// https://developer.arm.com/products/software-development-tools/hpc/arm-compiler-for-hpc/vector-function-abi.

// Listing 2, p. 18
#pragma omp declare simd inbranch uniform(x) linear(val(i) : 4)
int foo2(int *x, int i);
// AARCH64: "_ZGVnM2ul4_foo2" "_ZGVnM4ul4_foo2"
// AARCH64-NOT: foo2

// Listing 3, p. 18
#pragma omp declare simd inbranch uniform(x, c) linear(i \
                                                       : c)
int foo3(int *x, int i, unsigned char c);
// AARCH64: "_ZGVnM16uls2u_foo3" "_ZGVnM8uls2u_foo3"
// AARCH64-NOT: foo3

// Listing 6, p. 19
#pragma omp declare simd linear(x) aligned(x : 16) simdlen(4)
int foo4(int *x, float y);
// AARCH64: "_ZGVnM4la16v_foo4" "_ZGVnN4la16v_foo4"
// AARCH64-NOT: foo4

static int *I;
static char *C;
static short *S;
static long *L;
static float *F;
static double *D;

// Reference every declared-simd function so that -femit-all-decls emits
// the vector variants being checked above.
void do_something() {
  simple_8bit(*C);
  simple_16bit(*S);
  simple_32bit(*I);
  simple_64bit(*L);
  *C = a01(*I);
  *L = a02(*S);
  *I = b01(I);
  *C = b02(C);
  D = b03(D);
  *I = c01(D, *S);
  *D = c02(D, *S);
  sincos(*D, D, D);
  SinCos(*D, D, D);
  foo2(I, *I);
  foo3(I, *I, *C);
  foo4(I, *F);
}

typedef struct S {
  char R, G, B;
} STy;
#pragma omp declare simd notinbranch
STy DoRGB(STy x);
// AARCH64: "_ZGVnN2v_DoRGB"

static STy *RGBData;

void do_rgb_stuff() {
  DoRGB(*RGBData);
}
GB_AxB_dot3.c
//------------------------------------------------------------------------------
// GB_AxB_dot3: compute C<M> = A'*B in parallel
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// This function only computes C<M>=A'*B.  The mask must be present, and not
// complemented.  The mask is always applied.

#include "GB_mxm.h"
#ifndef GBCOMPACT
#include "GB_AxB__include.h"
#endif

// free the task list only
#define GB_FREE_WORK                                                        \
{                                                                           \
    GB_FREE_MEMORY (TaskList, max_ntasks+1, sizeof (GB_task_struct)) ;      \
}

// free the task list and the output matrix (used on error paths)
#define GB_FREE_ALL                                                         \
{                                                                           \
    GB_FREE_WORK ;                                                          \
    GB_MATRIX_FREE (Chandle) ;                                              \
}

GB_PUBLIC                           // accessed by the MATLAB tests in GraphBLAS/Test only
GrB_Info GB_AxB_dot3                // C<M> = A'*B using dot product method
(
    GrB_Matrix *Chandle,            // output matrix
    const GrB_Matrix M,             // mask matrix
    const bool Mask_struct,         // if true, use only the structure of M
    const GrB_Matrix A,             // input matrix
    const GrB_Matrix B,             // input matrix
    const GrB_Semiring semiring,    // semiring that defines C=A*B
    const bool flipxy,              // if true, do z=fmult(b,a) vs fmult(a,b)
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    GrB_Info info ;
    ASSERT (Chandle != NULL) ;
    ASSERT (*Chandle == NULL) ;
    ASSERT_MATRIX_OK (M, "M for dot3 A'*B", GB0) ;
    ASSERT_MATRIX_OK (A, "A for dot3 A'*B", GB0) ;
    ASSERT_MATRIX_OK (B, "B for dot3 A'*B", GB0) ;
    ASSERT (!GB_PENDING (M)) ;
    ASSERT (!GB_ZOMBIES (M)) ;
    ASSERT (!GB_PENDING (A)) ;
    ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (!GB_PENDING (B)) ;
    ASSERT (!GB_ZOMBIES (B)) ;
    ASSERT_SEMIRING_OK (semiring, "semiring for numeric A'*B", GB0) ;
    ASSERT (A->vlen == B->vlen) ;

    int ntasks, max_ntasks = 0, nthreads ;
    GB_task_struct *TaskList = NULL ;

    //--------------------------------------------------------------------------
    // get the semiring operators
    //--------------------------------------------------------------------------

    GrB_BinaryOp mult = semiring->multiply ;
    GrB_Monoid add = semiring->add ;
    ASSERT (mult->ztype == add->op->ztype) ;

    bool op_is_first  = mult->opcode == GB_FIRST_opcode ;
    bool op_is_second = mult->opcode == GB_SECOND_opcode ;
    bool op_is_pair   = mult->opcode == GB_PAIR_opcode ;

    // a "pattern" input has values that the multiply operator never reads,
    // so the numerical part of that matrix can be ignored entirely
    bool A_is_pattern = false ;
    bool B_is_pattern = false ;

    if (flipxy)
    {
        // z = fmult (b,a) will be computed
        A_is_pattern = op_is_first  || op_is_pair ;
        B_is_pattern = op_is_second || op_is_pair ;
        ASSERT (GB_IMPLIES (!A_is_pattern,
            GB_Type_compatible (A->type, mult->ytype))) ;
        ASSERT (GB_IMPLIES (!B_is_pattern,
            GB_Type_compatible (B->type, mult->xtype))) ;
    }
    else
    {
        // z = fmult (a,b) will be computed
        A_is_pattern = op_is_second || op_is_pair ;
        B_is_pattern = op_is_first  || op_is_pair ;
        ASSERT (GB_IMPLIES (!A_is_pattern,
            GB_Type_compatible (A->type, mult->xtype))) ;
        ASSERT (GB_IMPLIES (!B_is_pattern,
            GB_Type_compatible (B->type, mult->ytype))) ;
    }

    (*Chandle) = NULL ;

    //--------------------------------------------------------------------------
    // get M, A, and B
    //--------------------------------------------------------------------------

    const int64_t *GB_RESTRICT Mp = M->p ;
    const int64_t *GB_RESTRICT Mh = M->h ;
    const int64_t *GB_RESTRICT Mi = M->i ;
    // Mx is NULL for a structural mask: only the pattern of M is consulted
    const GB_void *GB_RESTRICT Mx = (Mask_struct ? NULL : (M->x)) ;
    const size_t msize = M->type->size ;
    const int64_t mvlen = M->vlen ;
    const int64_t mvdim = M->vdim ;
    const int64_t mnz = GB_NNZ (M) ;
    const int64_t mnvec = M->nvec ;
    const bool M_is_hyper = M->is_hyper ;

    const int64_t *GB_RESTRICT Ap = A->p ;
    const int64_t *GB_RESTRICT Ah = A->h ;
    // const int64_t *GB_RESTRICT Ai = A->i ;
    // const int64_t avlen = A->vlen ;
    // const int64_t avdim = A->vdim ;
    // const int64_t anz = GB_NNZ (A) ;
    const int64_t anvec = A->nvec ;
    const bool A_is_hyper = A->is_hyper ;

    const int64_t *GB_RESTRICT Bp = B->p ;
    const int64_t *GB_RESTRICT Bh = B->h ;
    // const int64_t *GB_RESTRICT Bi = B->i ;
    // const int64_t bvlen = B->vlen ;
    // const int64_t bvdim = B->vdim ;
    // const int64_t bnz = GB_NNZ (B) ;
    const int64_t bnvec = B->nvec ;
    const bool B_is_hyper = B->is_hyper ;

    //--------------------------------------------------------------------------
    // allocate C, the same size and # of entries as M
    //--------------------------------------------------------------------------

    GrB_Type ctype = add->op->ztype ;
    int64_t cvlen = mvlen ;
    int64_t cvdim = mvdim ;
    int64_t cnz = mnz ;
    int64_t cnvec = mnvec ;

    // NOTE(review): GB_CREATE presumably assigns its status into the local
    // `info` declared above, which is tested just below — confirm against
    // the macro's definition.
    GB_CREATE (Chandle, ctype, cvlen, cvdim, GB_Ap_malloc, true,
        GB_SAME_HYPER_AS (M_is_hyper), M->hyper_ratio, cnvec,
        cnz+1,  // add one to cnz for GB_cumsum of Cwork in GB_AxB_dot3_slice
        true, Context) ;
    if (info != GrB_SUCCESS)
    {
        // out of memory
        GB_FREE_ALL ;
        return (info) ;
    }

    GrB_Matrix C = (*Chandle) ;

    int64_t *GB_RESTRICT Cp = C->p ;
    int64_t *GB_RESTRICT Ch = C->h ;
    int64_t *GB_RESTRICT Cwork = C->i ;     // use C->i as workspace

    //--------------------------------------------------------------------------
    // determine the # of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    //--------------------------------------------------------------------------
    // copy Mp and Mh into C
    //--------------------------------------------------------------------------

    // FUTURE:: C->p and C->h could be shallow copies of M->p and M->h, which
    // could save some time and memory if C is then, say, transposed by
    // GB_accum_mask later on.

    nthreads = GB_nthreads (cnvec, chunk, nthreads_max) ;

    GB_memcpy (Cp, Mp, (cnvec+1) * sizeof (int64_t), nthreads) ;
    if (M_is_hyper)
    {
        GB_memcpy (Ch, Mh, cnvec * sizeof (int64_t), nthreads) ;
    }
    C->magic = GB_MAGIC ;
    C->nvec_nonempty = M->nvec_nonempty ;
    C->nvec = M->nvec ;

    //--------------------------------------------------------------------------
    // construct the tasks for the first phase
    //--------------------------------------------------------------------------

    nthreads = GB_nthreads (cnz, chunk, nthreads_max) ;

    GB_OK (GB_AxB_dot3_one_slice (&TaskList, &max_ntasks, &ntasks, &nthreads,
        M, Context)) ;

    //--------------------------------------------------------------------------
    // phase1: estimate the work to compute each entry in C
    //--------------------------------------------------------------------------

    // The work to compute C(i,j) is held in Cwork [p], if C(i,j) appears in
    // as the pth entry in C.

    int taskid;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        // GB_GET_TASK_DESCRIPTOR ;
        int64_t kfirst = TaskList [taskid].kfirst ;
        int64_t klast  = TaskList [taskid].klast ;
        bool fine_task = (klast == -1) ;
        if (fine_task)
        {
            // a fine task operates on a slice of a single vector
            klast = kfirst ;
        }
        int64_t bpleft = 0 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get j, the kth vector of C and M
            //------------------------------------------------------------------

            int64_t j = (Mh == NULL) ? k : Mh [k] ;
            GB_GET_VECTOR (pM, pM_end, pM, pM_end, Mp, k) ;

            //------------------------------------------------------------------
            // get B(:,j)
            //------------------------------------------------------------------

            int64_t pB, pB_end ;
            GB_lookup (B_is_hyper, Bh, Bp, &bpleft, bnvec-1, j, &pB, &pB_end) ;
            int64_t bjnz = pB_end - pB ;

            //------------------------------------------------------------------
            // estimate the work to compute each entry of C(:,j)
            //------------------------------------------------------------------

            // A decent estimate of the work to compute the dot product C(i,j)
            // = A(:,i)'*B(:,j) is min (|A(:,i)|, |B(:,j)|) + 1.  This is a
            // lower bound.  The actual work could require a binary search of
            // either A(:,i) or B(:,j), or a merge of the two vectors.  Or it
            // could require no work at all if all entries in A(:,i) appear
            // before all entries in B(:,j), or vice versa.  No work is done if
            // M(i,j)=0.  A more accurate estimate is possible to compute,
            // following the different methods used in
            // Template/GB_AxB_dot_cij.c.

            if (bjnz == 0)
            {
                // B(:,j) is empty, so C(:,j) is empty as well.  No work is to
                // be done, but it still takes unit work to flag each C(:,j) as
                // a zombie
                for ( ; pM < pM_end ; pM++)
                {
                    Cwork [pM] = 1 ;
                }
            }
            else
            {
                int64_t apleft = 0 ;
                for ( ; pM < pM_end ; pM++)
                {
                    int64_t work = 1 ;
                    if (GB_mcast (Mx, pM, msize))
                    {
                        int64_t pA, pA_end, i = Mi [pM] ;
                        GB_lookup (A_is_hyper, Ah, Ap, &apleft, anvec-1, i,
                            &pA, &pA_end) ;
                        int64_t ajnz = pA_end - pA ;
                        work += GB_IMIN (ajnz, bjnz) ;
                    }
                    Cwork [pM] = work ;
                }
            }
        }
    }

    //--------------------------------------------------------------------------
    // free the current tasks and construct the tasks for the second phase
    //--------------------------------------------------------------------------

    GB_FREE_MEMORY (TaskList, max_ntasks+1, sizeof (GB_task_struct)) ;
    GB_OK (GB_AxB_dot3_slice (&TaskList, &max_ntasks, &ntasks, &nthreads,
        C, Context)) ;

    GBBURBLE ("nthreads %d ntasks %d ", nthreads, ntasks) ;

    //--------------------------------------------------------------------------
    // C<M> = A'*B, via masked dot product method and built-in semiring
    //--------------------------------------------------------------------------

    bool done = false ;

#ifndef GBCOMPACT

    //--------------------------------------------------------------------------
    // define the worker for the switch factory
    //--------------------------------------------------------------------------

    #define GB_Adot3B(add,mult,xyname) GB_Adot3B_ ## add ## mult ## xyname

    #define GB_AxB_WORKER(add,mult,xyname)                              \
    {                                                                   \
        info = GB_Adot3B (add,mult,xyname) (C, M, Mask_struct,          \
            A, A_is_pattern, B, B_is_pattern,                           \
            TaskList, ntasks, nthreads) ;                               \
        done = (info != GrB_NO_VALUE) ;                                 \
    }                                                                   \
    break ;

    //--------------------------------------------------------------------------
    // launch the switch factory
    //--------------------------------------------------------------------------

    GB_Opcode mult_opcode, add_opcode ;
    GB_Type_code xycode, zcode ;

    if (GB_AxB_semiring_builtin (A, A_is_pattern, B, B_is_pattern, semiring,
        flipxy, &mult_opcode, &add_opcode, &xycode, &zcode))
    {
        #include "GB_AxB_factory.c"
    }

#endif

    //--------------------------------------------------------------------------
    // C<M> = A'*B, via masked dot product method and typecasting
    //--------------------------------------------------------------------------

    if (!done)
    {
        GB_BURBLE_MATRIX (C, "generic ") ;

        //----------------------------------------------------------------------
        // get operators, functions, workspace, contents of A, B, C, and M
        //----------------------------------------------------------------------

        GxB_binary_function fmult = mult->function ;
        GxB_binary_function fadd  = add->op->function ;

        size_t csize = C->type->size ;
        size_t asize = A_is_pattern ? 0 : A->type->size ;
        size_t bsize = B_is_pattern ? 0 : B->type->size ;

        size_t xsize = mult->xtype->size ;
        size_t ysize = mult->ytype->size ;

        // scalar workspace: because of typecasting, the x/y types need not
        // be the same as the size of the A and B types.
        // flipxy false: aki = (xtype) A(k,i) and bkj = (ytype) B(k,j)
        // flipxy true:  aki = (ytype) A(k,i) and bkj = (xtype) B(k,j)
        size_t aki_size = flipxy ? ysize : xsize ;
        size_t bkj_size = flipxy ? xsize : ysize ;

        GB_void *GB_RESTRICT terminal = add->terminal ;

        GB_cast_function cast_A, cast_B ;
        if (flipxy)
        {
            // A is typecasted to y, and B is typecasted to x
            cast_A = A_is_pattern ? NULL :
                     GB_cast_factory (mult->ytype->code, A->type->code) ;
            cast_B = B_is_pattern ? NULL :
                     GB_cast_factory (mult->xtype->code, B->type->code) ;
        }
        else
        {
            // A is typecasted to x, and B is typecasted to y
            cast_A = A_is_pattern ? NULL :
                     GB_cast_factory (mult->xtype->code, A->type->code) ;
            cast_B = B_is_pattern ? NULL :
                     GB_cast_factory (mult->ytype->code, B->type->code) ;
        }

        //----------------------------------------------------------------------
        // C<M> = A'*B via dot products, function pointers, and typecasting
        //----------------------------------------------------------------------

        // aki = A(k,i), located in Ax [pA]
        #define GB_GETA(aki,Ax,pA)                                          \
            GB_void aki [GB_VLA(aki_size)] ;                                \
            if (!A_is_pattern) cast_A (aki, Ax +((pA)*asize), asize)

        // bkj = B(k,j), located in Bx [pB]
        #define GB_GETB(bkj,Bx,pB)                                          \
            GB_void bkj [GB_VLA(bkj_size)] ;                                \
            if (!B_is_pattern) cast_B (bkj, Bx +((pB)*bsize), bsize)

        // break if cij reaches the terminal value
        #define GB_DOT_TERMINAL(cij)                                        \
            if (terminal != NULL && memcmp (cij, terminal, csize) == 0)     \
            {                                                               \
                break ;                                                     \
            }

        // C(i,j) = A(i,k) * B(k,j)
        #define GB_MULT(cij, aki, bkj)                                      \
            GB_MULTIPLY (cij, aki, bkj)

        // C(i,j) += A(i,k) * B(k,j)
        #define GB_MULTADD(cij, aki, bkj)                                   \
            GB_void zwork [GB_VLA(csize)] ;                                 \
            GB_MULTIPLY (zwork, aki, bkj) ;                                 \
            fadd (cij, cij, zwork)

        // define cij for each task
        #define GB_CIJ_DECLARE(cij)                                         \
            GB_void cij [GB_VLA(csize)]

        // address of Cx [p]
        #define GB_CX(p) Cx +((p)*csize)

        // save the value of C(i,j)
        #define GB_CIJ_SAVE(cij,p)                                          \
            memcpy (GB_CX (p), cij, csize)

        #define GB_ATYPE GB_void
        #define GB_BTYPE GB_void
        #define GB_CTYPE GB_void

        // no vectorization
        #define GB_PRAGMA_VECTORIZE
        #define GB_PRAGMA_VECTORIZE_DOT

        if (flipxy)
        {
            #define GB_MULTIPLY(z,x,y) fmult (z,y,x)
            #include "GB_AxB_dot3_template.c"
            #undef GB_MULTIPLY
        }
        else
        {
            #define GB_MULTIPLY(z,x,y) fmult (z,x,y)
            #include "GB_AxB_dot3_template.c"
            #undef GB_MULTIPLY
        }
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    if (C->nzombies > 0)
    {
        // C has been created with zombies, so place it in the queue
        GB_CRITICAL (GB_queue_insert (C)) ;
    }
    GB_FREE_WORK ;
    ASSERT_MATRIX_OK (C, "dot3: C<M> = A'*B output", GB0) ;
    ASSERT (*Chandle == C) ;
    ASSERT (GB_ZOMBIES_OK (C)) ;
    ASSERT (!GB_PENDING (C)) ;
    return (GrB_SUCCESS) ;
}
evolve_kepler.c
/*
 * The Kepler solver evolves the two-body problem, using standard Huayno sys structures.
 * TODO add warning when using the kepler solver with softening?
 */
#include <stdio.h>
#include <stdlib.h>

#include "evolve.h"
#include "universal_kepler_solver.h"

#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Advance an isolated two-body system over dt with the universal-variable
 * Kepler solver.  The pair is split into relative + center-of-mass
 * coordinates, the relative orbit is solved analytically, the center of
 * mass drifts linearly, and the result is mapped back to the original
 * frame.  If the total mass is zero, both particles simply drift.
 * Aborts unless s.n == 2.
 */
static void evolve_kepler_2(int clevel,struct sys s, DOUBLE stime, DOUBLE etime, DOUBLE dt)
{
  struct particle *ipart,*jpart;
  CHECK_TIMESTEP(etime,stime,dt,clevel);
  if (s.n != 2) ENDRUN("two-body solver was called with sys.n=%u\n", s.n);
  // translate coordinates original frame to 2-body frame
  int k;
  ipart=GETPART(s,0);
  jpart=GETPART(s,1);
  DOUBLE dpos[3],dpos0[3],pos_cm[3];
  DOUBLE dvel[3],dvel0[3],vel_cm[3];
  DOUBLE m1 = ipart->mass;
  DOUBLE m2 = jpart->mass;
  DOUBLE mtot = ipart->mass + jpart->mass;
  // NOTE(review): f1,f2 are computed before the mtot>0 test; for mtot==0 this
  // divides by zero (inf/nan), but both values are unused on that branch.
  DOUBLE f1 = m2 / mtot;
  DOUBLE f2 = m1 / mtot;
  if(mtot>0.)
  {
    for(k=0;k<3;k++) dpos0[k] = ipart->pos[k] - jpart->pos[k];
    for(k=0;k<3;k++) dvel0[k] = ipart->vel[k] - jpart->vel[k];
    for(k=0;k<3;k++) pos_cm[k] = (m1 * ipart->pos[k] + m2 * jpart->pos[k]) / mtot;
    for(k=0;k<3;k++) vel_cm[k] = (m1 * ipart->vel[k] + m2 * jpart->vel[k]) / mtot;
    // evolve center of mass for dt
    for(k=0;k<3;k++) pos_cm[k] += vel_cm[k] * dt;
    // call kepler solver
    int err=universal_kepler_solver(dt,mtot,eps2,
                                    dpos0[0],dpos0[1],dpos0[2],
                                    dvel0[0],dvel0[1],dvel0[2],
                                    &dpos[0],&dpos[1],&dpos[2],
                                    &dvel[0],&dvel[1],&dvel[2]);
    if (err != 0) ENDRUN("kepler solver failure"); // failure of the kepler solver should be very rare now
    // translate coordinates from 2-body frame to original frame
    // (f1 + f2 == 1, so ipart->pos - jpart->pos reproduces dpos exactly)
    for(k=0;k<3;k++) ipart->pos[k] = pos_cm[k] + f1 * dpos[k];
    for(k=0;k<3;k++) ipart->vel[k] = vel_cm[k] + f1 * dvel[k];
    for(k=0;k<3;k++) jpart->pos[k] = pos_cm[k] - f2 * dpos[k];
    for(k=0;k<3;k++) jpart->vel[k] = vel_cm[k] - f2 * dvel[k];
    // positions were reconstructed exactly, so reset the compensated-summation
    // error accumulators (they only track error from incremental COMPSUM updates)
#ifdef COMPENSATED_SUMMP
    for(k=0;k<3;k++) ipart->pos_e[k]=0.;
    for(k=0;k<3;k++) jpart->pos_e[k]=0.;
#endif
#ifdef COMPENSATED_SUMMV
    for(k=0;k<3;k++) ipart->vel_e[k]=0.;
    for(k=0;k<3;k++) jpart->vel_e[k]=0.;
#endif
  } else
  {
    // massless pair: no mutual force, plain linear drift of both particles
    for(k=0;k<3;k++) COMPSUMP(ipart->pos[k],ipart->pos_e[k],dt*ipart->vel[k]);
    for(k=0;k<3;k++) COMPSUMP(jpart->pos[k],jpart->pos_e[k],dt*jpart->vel[k]);
  }
  ipart->postime=etime;
  jpart->postime=etime;
  diag->cecount[clevel]++;
}

/*
 * Advance a system with exactly one massive particle and s.nzero massless
 * orbiters: each orbiter follows an independent Kepler orbit around the
 * central particle, which itself drifts linearly.  The orbiter loop may run
 * in parallel under OpenMP; solver failures are OR-ed into err and reported
 * once after the loop.
 * NOTE(review): assumes the single massive particle is stored at index 0
 * (GETPART(s,0)) — confirm the caller guarantees this ordering.
 */
static void evolve_kepler_n(int clevel,struct sys s, DOUBLE stime, DOUBLE etime, DOUBLE dt)
{
  struct particle *ipart, *spart;
  DOUBLE dpos[3],dpos0[3],spos[3];
  DOUBLE dvel[3],dvel0[3];
  UINT err;
  CHECK_TIMESTEP(etime,stime,dt,clevel);
  if (s.n-s.nzero > 1) ENDRUN("kepler-n solver was called with too many massive particles sys.n=%u\n", s.n-s.nzero);
  if (s.n-s.nzero < 1) ENDRUN("kepler-n solver was called with too few massive particles sys.n=%u\n", s.n-s.nzero);
  // NOTE(review): unreachable — s.n==s.nzero implies s.n-s.nzero<1, which
  // already triggered the ENDRUN above; confirm intended guard order.
  if(s.n==s.nzero)
  {
    drift(clevel,s,etime, dt);
    return;
  }
  spart=GETPART(s,0);
  for(int k=0;k<3;k++) spos[k]= spart->pos[k]; // save initial pos
  for(int k=0;k<3;k++) COMPSUMP(spart->pos[k],spart->pos_e[k],dt*spart->vel[k]); //evolve central
  spart->postime=etime;
  err=0;
  // NOTE(review): omp_get_num_threads() returns the current team size, which
  // is 1 when evaluated in a serial region — omp_get_max_threads() may have
  // been intended here; confirm the parallelization threshold.
#pragma omp parallel for if((ULONG) s.n>omp_get_num_threads() && !omp_in_parallel()) default(none) \
  private(ipart, dpos,dvel,dpos0,dvel0) shared(etime,clevel, dt,spos, s, eps2, spart) reduction(|: err)
  for(UINT i=1;i<s.n;i++)
  {
    ipart=GETPART(s,i);
    // relative separation uses the saved pre-drift central position (spos);
    // the final mapping below uses the already-drifted spart->pos
    for(int k=0;k<3;k++) dpos0[k] = spos[k] - ipart->pos[k];
    for(int k=0;k<3;k++) dvel0[k] = spart->vel[k] - ipart->vel[k];
    err|=universal_kepler_solver(dt,spart->mass,eps2,
                                 dpos0[0],dpos0[1],dpos0[2],
                                 dvel0[0],dvel0[1],dvel0[2],
                                 &dpos[0],&dpos[1],&dpos[2],
                                 &dvel[0],&dvel[1],&dvel[2]);
    for(int k=0;k<3;k++) ipart->pos[k] = spart->pos[k] - dpos[k];
    for(int k=0;k<3;k++) ipart->vel[k] = spart->vel[k] - dvel[k];
    // positions reconstructed exactly; reset compensated-summation error terms
#ifdef COMPENSATED_SUMMP
    for(int k=0;k<3;k++) ipart->pos_e[k]=0.;
#endif
#ifdef COMPENSATED_SUMMV
    for(int k=0;k<3;k++) ipart->vel_e[k]=0.;
#endif
    ipart->postime=etime;
  }
  if (err != 0)
  {
    ENDRUN("kepler solver failure"); // failure of the kepler solver should be very rare now
  }
  diag->cecount[clevel]+=s.nzero;
}

/*
 * Entry point: dispatch to the appropriate Kepler evolver based on how many
 * massive (s.n-s.nzero) and massless (s.nzero) particles the system holds.
 * NOTE(review): when s.n-s.nzero==2 with s.nzero>0, evolve_kepler_2 is
 * called with s.n>2 and will abort on its s.n!=2 check — confirm callers
 * never pass that shape.
 */
void evolve_kepler(int clevel,struct sys s, DOUBLE stime, DOUBLE etime, DOUBLE dt)
{
  if(s.n-s.nzero==2) // 2 body
  {
    evolve_kepler_2(clevel,s,stime,etime,dt);
    return;
  }
  if(s.n-s.nzero==1 && s.nzero>0) // 1 massive, n orbiters
  {
    evolve_kepler_n(clevel,s,stime,etime,dt);
    return;
  }
  if(s.n-s.nzero>1) // more than 1 massive particle, consider heaviest as central;
  {
    ENDRUN("evolve_kepler called for a system with more than 1 massive particle");
    return;
  }
  drift(clevel,s,etime, dt); // 1 massive or only zero mass
}
/* ===== file: conv_dw_kernel_x86.c ===== */
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * License); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*
 * Copyright (c) 2020, OPEN AI LAB
 * Author: qtang@openailab.com
 */

#include <stdint.h>
#include <stdlib.h>
#include <string.h> /* memcpy — used by pad(); was missing */
#include <math.h>

#include "conv_dw_kernel_x86.h"

#if __SSE2__
#include <emmintrin.h>
#endif
#if __AVX__
#include <immintrin.h>
#endif

#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ?
(a) : (b))

/*
 * In-place activation: clamp each of the first `size` floats at zero (ReLU);
 * when activation > 0, additionally clamp from above at `activation`
 * (ReLU6-style saturation).
 */
static void relu(float* data, int size, int activation)
{
    for (int i = 0; i < size; i++)
    {
        data[i] = max(data[i], ( float )0);
        if (activation > 0)
        {
            data[i] = min(data[i], ( float )activation);
        }
    }
}

/*
 * Copy an in_h x in_w image into an out_h x out_w buffer, offset by `top`
 * rows and `left` columns, filling all border pixels with the constant `v`.
 * Wide rows (in_w >= 12) are copied with memcpy; narrow rows use a scalar
 * loop.  Assumes out_h >= top + in_h and out_w >= left + in_w — otherwise
 * the center copy overruns the output row (TODO confirm callers guarantee
 * this).
 */
static void pad(float* input, float* output, int in_h, int in_w, int out_h, int out_w, int top, int left, float v)
{
    float* ptr = input;
    float* outptr = output;
    int y = 0;
    // fill top
    for (; y < top; y++)
    {
        int x = 0;
        for (; x < out_w; x++)
        {
            outptr[x] = v;
        }
        outptr += out_w;
    }
    // fill center
    for (; y < (top + in_h); y++)
    {
        int x = 0;
        for (; x < left; x++)
        {
            outptr[x] = v;
        }
        if (in_w < 12)
        {
            for (; x < (left + in_w); x++)
            {
                outptr[x] = ptr[x - left];
            }
        }
        else
        {
            memcpy(outptr + left, ptr, in_w * sizeof(float));
            x += in_w;
        }
        for (; x < out_w; x++)
        {
            outptr[x] = v;
        }
        ptr += in_w;
        outptr += out_w;
    }
    // fill bottom
    for (; y < out_h; y++)
    {
        int x = 0;
        for (; x < out_w; x++)
        {
            outptr[x] = v;
        }
        outptr += out_w;
    }
}

#if __AVX__
/*
 * 3x3 stride-1 depthwise convolution, AVX path.  Channels are repacked into
 * groups of 8 so one 8-wide vector register holds one pixel of 8 channels
 * and each FMA advances 8 channels at once; one extra zero-padded group
 * absorbs the inc % 8 remainder channels.
 */
static void convdw3x3s1(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread)
{
    int inwh = inw * inh;
    int outwh = outw * outh;
    int channel_count = inc >> 3;                     // full groups of 8 channels
    int channel_remain = inc - (channel_count << 3);  // leftover channels (< 8)
    // generate the image tmp
    // NOTE(review): sys_malloc results are used unchecked — confirm the
    // project's allocator aborts on failure.
    float* img_tmp = ( float* )sys_malloc(8 * inwh * (channel_count + 1) * sizeof(float));
    float* kernel_tmp = ( float* )sys_malloc(8 * 9 * (channel_count + 1) * sizeof(float));
    float* bias_tmp = ( float* )sys_malloc(8 * (channel_count + 1) * sizeof(float));
    {
        // interleave 8 source channels per group: tmp layout is
        // [pixel][channel-lane 0..7] for image, kernel, and bias alike
        for (int i = 0; i < channel_count; i++)
        {
            int ii = i * 8;
            const float* k0 = img_data + (ii + 0) * inwh;
            const float* k1 = img_data + (ii + 1) * inwh;
            const float* k2 = img_data + (ii + 2) * inwh;
            const float* k3 = img_data + (ii + 3) * inwh;
            const float* k4 = img_data + (ii + 4) * inwh;
            const float* k5 = img_data + (ii + 5) * inwh;
            const float* k6 = img_data + (ii + 6) * inwh;
            const float* k7 = img_data + (ii + 7) * inwh;
            const float* f0 = kernel_data + (ii + 0) * 9;
            const float* f1 =
kernel_data + (ii + 1) * 9; const float* f2 = kernel_data + (ii + 2) * 9; const float* f3 = kernel_data + (ii + 3) * 9; const float* f4 = kernel_data + (ii + 4) * 9; const float* f5 = kernel_data + (ii + 5) * 9; const float* f6 = kernel_data + (ii + 6) * 9; const float* f7 = kernel_data + (ii + 7) * 9; const float* b0 = bias_data + (ii + 0); const float* b1 = bias_data + (ii + 1); const float* b2 = bias_data + (ii + 2); const float* b3 = bias_data + (ii + 3); const float* b4 = bias_data + (ii + 4); const float* b5 = bias_data + (ii + 5); const float* b6 = bias_data + (ii + 6); const float* b7 = bias_data + (ii + 7); float* tmp0 = img_tmp + ii * inwh; float* tmp1 = kernel_tmp + ii * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0[4] = k4[0]; tmp0[5] = k5[0]; tmp0[6] = k6[0]; tmp0[7] = k7[0]; tmp0 += 8; k0++; k1++; k2++; k3++; k4++; k5++; k6++; k7++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1[4] = f4[0]; tmp1[5] = f5[0]; tmp1[6] = f6[0]; tmp1[7] = f7[0]; tmp1 += 8; f0++; f1++; f2++; f3++; f4++; f5++; f6++; f7++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; tmp2[4] = b4[0]; tmp2[5] = b5[0]; tmp2[6] = b6[0]; tmp2[7] = b7[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; tmp2[4] = 0; tmp2[5] = 0; tmp2[6] = 0; tmp2[7] = 0; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float* k0 = img_data + (ii + 0) * inwh; float* k1 = img_data + (ii + 1) * inwh; float* k2 = img_data + (ii + 2) * inwh; float* k3 = img_data + (ii + 3) * inwh; float* f0 = kernel_data + (ii + 0) * 9; float* f1 = kernel_data + (ii + 1) * 9; float* f2 = kernel_data + (ii + 2) * 9; float* f3 = kernel_data + (ii + 3) * 9; float* b0 = bias_data + (ii + 0); float* b1 = bias_data + (ii + 1); float* b2 = bias_data + (ii + 2); float* b3 = bias_data + (ii + 3); float* 
tmp0 = img_tmp + channel_count * 8 * inwh; float* tmp1 = kernel_tmp + channel_count * 8 * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 8; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 8; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float* k0 = img_data + ii * inwh; float* f0 = kernel_data + ii * 9; float* b0 = bias_data + ii; float* tmp0 = img_tmp + channel_count * 8 * inwh; float* tmp1 = kernel_tmp + channel_count * 8 * 9; float* tmp2 = bias_tmp + channel_count * 8; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 8; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 8; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float* output_tmp = ( float* )sys_malloc(outwh * (channel_count + 1) * 8 * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float* ktmp = kernel_tmp + c * 8 * 9; float* btmp = bias_tmp + c * 8; for (int i = 0; i < outh; i++) { int j = 0; float* itmp0 = img_tmp + c * 8 * inwh + 8 * i * inw; float* itmp1 = img_tmp + c * 8 * inwh + 8 * (i + 1) * inw; float* itmp2 = img_tmp + c * 8 * inwh + 8 * (i + 2) * inw; float* otmp = output_tmp + c * 8 * outwh + 8 * i * outw; for (; j + 7 < outw; j += 8) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _sum2 = _mm256_loadu_ps(btmp); __m256 _sum3 = _mm256_loadu_ps(btmp); __m256 _sum4 = _mm256_loadu_ps(btmp); __m256 _sum5 = _mm256_loadu_ps(btmp); __m256 _sum6 = _mm256_loadu_ps(btmp); __m256 _sum7 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = 
_mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _va5 = _mm256_loadu_ps(itmp0 + 40); __m256 _va6 = _mm256_loadu_ps(itmp0 + 48); __m256 _va7 = _mm256_loadu_ps(itmp0 + 56); __m256 _va8 = _mm256_loadu_ps(itmp0 + 64); __m256 _va9 = _mm256_loadu_ps(itmp0 + 72); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5); _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4); _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5); _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4); _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6); _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7); _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5); _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6); _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7); _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6); _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _va5 = _mm256_loadu_ps(itmp1 + 40); _va6 = _mm256_loadu_ps(itmp1 + 48); _va7 = _mm256_loadu_ps(itmp1 + 56); _va8 = _mm256_loadu_ps(itmp1 + 64); _va9 = _mm256_loadu_ps(itmp1 + 72); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, 
_vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5); _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4); _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5); _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4); _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6); _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7); _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5); _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6); _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7); _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6); _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _va5 = _mm256_loadu_ps(itmp2 + 40); _va6 = _mm256_loadu_ps(itmp2 + 48); _va7 = _mm256_loadu_ps(itmp2 + 56); _va8 = _mm256_loadu_ps(itmp2 + 64); _va9 = _mm256_loadu_ps(itmp2 + 72); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, 
_vb2, _sum3); _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5); _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4); _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5); _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4); _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6); _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7); _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5); _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6); _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7); _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6); _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); _mm256_storeu_ps(otmp + 16, _sum2); _mm256_storeu_ps(otmp + 24, _sum3); _mm256_storeu_ps(otmp + 32, _sum4); _mm256_storeu_ps(otmp + 40, _sum5); _mm256_storeu_ps(otmp + 48, _sum6); _mm256_storeu_ps(otmp + 56, _sum7); itmp0 += 64; itmp1 += 64; itmp2 += 64; otmp += 64; } for (; j + 3 < outw; j += 4) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _sum2 = _mm256_loadu_ps(btmp); __m256 _sum3 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _va5 = _mm256_loadu_ps(itmp0 + 40); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = 
_mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _va5 = _mm256_loadu_ps(itmp1 + 40); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _va5 = _mm256_loadu_ps(itmp2 + 40); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); _mm256_storeu_ps(otmp + 16, _sum2); _mm256_storeu_ps(otmp + 24, _sum3); itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 
_va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); itmp0 += 16; itmp1 += 16; itmp2 += 16; otmp += 16; } for (; j < outw; j++) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = 
_mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _mm256_storeu_ps(otmp, _sum0); itmp0 += 8; itmp1 += 8; itmp2 += 8; otmp += 8; } } } // load_data { for (int i = 0; i < channel_count; i++) { float* otmp = output_tmp + i * 8 * outwh; float* tmp0 = output + i * 8 * outwh; float* tmp1 = output + i * 8 * outwh + 1 * outwh; float* tmp2 = output + i * 8 * outwh + 2 * outwh; float* tmp3 = output + i * 8 * outwh + 3 * outwh; float* tmp4 = output + i * 8 * outwh + 4 * outwh; float* tmp5 = output + i * 8 * outwh + 5 * outwh; float* tmp6 = output + i * 8 * outwh + 6 * outwh; float* tmp7 = output + i * 8 * outwh + 7 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; tmp4[0] = otmp[4]; tmp5[0] = otmp[5]; tmp6[0] = otmp[6]; tmp7[0] = otmp[7]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; tmp4++; tmp5++; tmp6++; tmp7++; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float* otmp = output_tmp + ii * outwh; float* tmp0 = output + ii * outwh; float* tmp1 = output + ii * outwh + 1 * outwh; float* tmp2 = output + ii * outwh + 2 * outwh; float* tmp3 = output + ii * outwh + 3 * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 8; tmp0++; tmp1++; tmp2++; 
tmp3++; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float* otmp = output_tmp + channel_count * 8 * outwh; float* tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 8; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } static void convdw3x3s2(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread) { int inwh = inw * inh; int outwh = outw * outh; int channel_count = inc >> 3; int channel_remain = inc - (channel_count << 3); // generate the image tmp float* img_tmp = ( float* )sys_malloc(8 * inwh * (channel_count + 1) * sizeof(float)); float* kernel_tmp = ( float* )sys_malloc(8 * 9 * (channel_count + 1) * sizeof(float)); float* bias_tmp = ( float* )sys_malloc(8 * (channel_count + 1) * sizeof(float)); { for (int i = 0; i < channel_count; i++) { int ii = i * 8; const float* k0 = img_data + (ii + 0) * inwh; const float* k1 = img_data + (ii + 1) * inwh; const float* k2 = img_data + (ii + 2) * inwh; const float* k3 = img_data + (ii + 3) * inwh; const float* k4 = img_data + (ii + 4) * inwh; const float* k5 = img_data + (ii + 5) * inwh; const float* k6 = img_data + (ii + 6) * inwh; const float* k7 = img_data + (ii + 7) * inwh; const float* f0 = kernel_data + (ii + 0) * 9; const float* f1 = kernel_data + (ii + 1) * 9; const float* f2 = kernel_data + (ii + 2) * 9; const float* f3 = kernel_data + (ii + 3) * 9; const float* f4 = kernel_data + (ii + 4) * 9; const float* f5 = kernel_data + (ii + 5) * 9; const float* f6 = kernel_data + (ii + 6) * 9; const float* f7 = kernel_data + (ii + 7) * 9; const float* b0 = bias_data + (ii + 0); const float* b1 = bias_data + (ii + 1); const float* b2 = bias_data + (ii + 2); const float* b3 = bias_data + (ii + 3); const float* b4 = bias_data + (ii + 4); const float* b5 = bias_data + (ii + 5); const float* b6 = bias_data + (ii + 6); const float* b7 = bias_data 
+ (ii + 7); float* tmp0 = img_tmp + ii * inwh; float* tmp1 = kernel_tmp + ii * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0[4] = k4[0]; tmp0[5] = k5[0]; tmp0[6] = k6[0]; tmp0[7] = k7[0]; tmp0 += 8; k0++; k1++; k2++; k3++; k4++; k5++; k6++; k7++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1[4] = f4[0]; tmp1[5] = f5[0]; tmp1[6] = f6[0]; tmp1[7] = f7[0]; tmp1 += 8; f0++; f1++; f2++; f3++; f4++; f5++; f6++; f7++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; tmp2[4] = b4[0]; tmp2[5] = b5[0]; tmp2[6] = b6[0]; tmp2[7] = b7[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; tmp2[4] = 0; tmp2[5] = 0; tmp2[6] = 0; tmp2[7] = 0; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float* k0 = img_data + (ii + 0) * inwh; float* k1 = img_data + (ii + 1) * inwh; float* k2 = img_data + (ii + 2) * inwh; float* k3 = img_data + (ii + 3) * inwh; float* f0 = kernel_data + (ii + 0) * 9; float* f1 = kernel_data + (ii + 1) * 9; float* f2 = kernel_data + (ii + 2) * 9; float* f3 = kernel_data + (ii + 3) * 9; float* b0 = bias_data + (ii + 0); float* b1 = bias_data + (ii + 1); float* b2 = bias_data + (ii + 2); float* b3 = bias_data + (ii + 3); float* tmp0 = img_tmp + channel_count * 8 * inwh; float* tmp1 = kernel_tmp + channel_count * 8 * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 8; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 8; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; 
float* k0 = img_data + ii * inwh; float* f0 = kernel_data + ii * 9; float* b0 = bias_data + ii; float* tmp0 = img_tmp + channel_count * 8 * inwh; float* tmp1 = kernel_tmp + channel_count * 8 * 9; float* tmp2 = bias_tmp + channel_count * 8; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 8; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 8; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float* output_tmp = ( float* )sys_malloc(outwh * (channel_count + 1) * 8 * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float* ktmp = kernel_tmp + c * 8 * 9; float* btmp = bias_tmp + c * 8; for (int i = 0; i < outh; i++) { int j = 0; float* itmp0 = img_tmp + c * 8 * inwh + 8 * i * 2 * inw; float* itmp1 = img_tmp + c * 8 * inwh + 8 * (i * 2 + 1) * inw; float* itmp2 = img_tmp + c * 8 * inwh + 8 * (i * 2 + 2) * inw; float* otmp = output_tmp + c * 8 * outwh + 8 * i * outw; for (; j + 3 < outw; j += 4) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _sum2 = _mm256_loadu_ps(btmp); __m256 _sum3 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _va5 = _mm256_loadu_ps(itmp0 + 40); __m256 _va6 = _mm256_loadu_ps(itmp0 + 48); __m256 _va7 = _mm256_loadu_ps(itmp0 + 56); __m256 _va8 = _mm256_loadu_ps(itmp0 + 64); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2); _sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2); _sum2 = _mm256_fmadd_ps(_va6, _vb2, 
_sum2); _sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3); _sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3); _sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _va5 = _mm256_loadu_ps(itmp1 + 40); _va6 = _mm256_loadu_ps(itmp1 + 48); _va7 = _mm256_loadu_ps(itmp1 + 56); _va8 = _mm256_loadu_ps(itmp1 + 64); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2); _sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2); _sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3); _sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3); _sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _va5 = _mm256_loadu_ps(itmp2 + 40); _va6 = _mm256_loadu_ps(itmp2 + 48); _va7 = _mm256_loadu_ps(itmp2 + 56); _va8 = _mm256_loadu_ps(itmp2 + 64); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2); _sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2); _sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3); _sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3); 
_sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); _mm256_storeu_ps(otmp + 16, _sum2); _mm256_storeu_ps(otmp + 24, _sum3); itmp0 += 64; itmp1 += 64; itmp2 += 64; otmp += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = 
_mm256_fmadd_ps(_va4, _vb2, _sum1); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 16; } for (; j < outw; j++) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _mm256_storeu_ps(otmp, _sum0); itmp0 += 16; itmp1 += 16; itmp2 += 16; otmp += 8; } } } // load_data { for (int i = 0; i < channel_count; i++) { float* otmp = output_tmp + i * 8 * outwh; float* tmp0 = output + i * 8 * outwh; float* tmp1 = output + i * 8 * outwh + 1 * outwh; float* tmp2 = output + i * 8 * outwh + 2 * outwh; float* tmp3 = output + i * 8 * outwh + 3 * outwh; float* tmp4 = output + i * 8 * outwh + 4 * outwh; float* tmp5 = output + i * 8 * outwh + 5 * outwh; float* tmp6 = output + i * 8 * outwh + 6 * outwh; float* tmp7 = output + i * 8 * outwh + 7 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; tmp4[0] = otmp[4]; tmp5[0] = otmp[5]; tmp6[0] 
// --- tail of the AVX2 branch (function opened above this chunk): scatter the
// 8-wide interleaved output_tmp back into planar per-channel layout, then
// release the scratch buffers. ---
= otmp[6];
                tmp7[0] = otmp[7];
                otmp += 8;
                tmp0++; tmp1++; tmp2++; tmp3++; tmp4++; tmp5++; tmp6++; tmp7++;
            }
        }
        int i = 0;
        // leftover channels, four at a time
        for (; i + 3 < channel_remain; i += 4)
        {
            int ii = channel_count * 8 + i;
            float* otmp = output_tmp + ii * outwh;
            float* tmp0 = output + ii * outwh;
            float* tmp1 = output + ii * outwh + 1 * outwh;
            float* tmp2 = output + ii * outwh + 2 * outwh;
            float* tmp3 = output + ii * outwh + 3 * outwh;
            for (int j = 0; j < outwh; j++)
            {
                tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3];
                otmp += 8;
                tmp0++; tmp1++; tmp2++; tmp3++;
            }
        }
        // final channels one at a time (lane i of the last packed group)
        for (; i < channel_remain; i++)
        {
            int ii = channel_count * 8 + i;
            float* otmp = output_tmp + channel_count * 8 * outwh;
            float* tmp0 = output + ii * outwh;
            for (int j = 0; j < outwh; j++)
            {
                tmp0[0] = otmp[i];
                otmp += 8;
                tmp0++;
            }
        }
    }
    sys_free(output_tmp);
    sys_free(img_tmp);
    sys_free(kernel_tmp);
    sys_free(bias_tmp);
}
#elif __SSE2__
// Depthwise 3x3 convolution, stride 1, SSE path.
// Channels are repacked four-wide (planar NCHW -> interleaved groups of 4) so
// that each __m128 lane processes one channel; the last partial group of < 4
// channels runs through one extra padded group.
// NOTE(review): itmp2 below reads input row i + 2, so the input passed in must
// already be padded to at least outh + 2 rows and outw + 2 columns — confirm
// against the caller (conv_dw_run pads before dispatching).
static void convdw3x3s1(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw,
                        int outh, int outw, int num_thread)
{
    int inwh = inw * inh;
    int outwh = outw * outh;
    int channel_count = inc >> 2;                       // full groups of 4 channels
    int channel_remain = inc - (channel_count << 2);    // 0..3 leftover channels
    // generate the image tmp
    float* img_tmp = ( float* )sys_malloc(4 * inwh * (channel_count + 1) * sizeof(float));
    float* kernel_tmp = ( float* )sys_malloc(4 * 9 * (channel_count + 1) * sizeof(float));
    float* bias_tmp = ( float* )sys_malloc(4 * (channel_count + 1) * sizeof(float));
    {
        // interleave image, kernel and bias data of each group of 4 channels
        for (int i = 0; i < channel_count; i++)
        {
            int ii = i * 4;
            float* k0 = img_data + (ii + 0) * inwh;
            float* k1 = img_data + (ii + 1) * inwh;
            float* k2 = img_data + (ii + 2) * inwh;
            float* k3 = img_data + (ii + 3) * inwh;
            float* f0 = kernel_data + (ii + 0) * 9;
            float* f1 = kernel_data + (ii + 1) * 9;
            float* f2 = kernel_data + (ii + 2) * 9;
            float* f3 = kernel_data + (ii + 3) * 9;
            // NOTE(review): these pointers are formed even when bias_data is
            // NULL (they are only dereferenced under the if below)
            float* b0 = bias_data + (ii + 0);
            float* b1 = bias_data + (ii + 1);
            float* b2 = bias_data + (ii + 2);
            float* b3 = bias_data + (ii + 3);
            float* tmp0 = img_tmp + ii * inwh;
            float* tmp1 = kernel_tmp + ii * 9;
            float* tmp2 = bias_tmp + ii;
            for (int j = 0; j < inwh; j++)
            {
                tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0];
                tmp0 += 4;
                k0++; k1++; k2++; k3++;
            }
            for (int j = 0; j < 9; j++)
            {
                tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0];
                tmp1 += 4;
                f0++; f1++; f2++; f3++;
            }
            if (bias_data)
            {
                tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0];
            }
            else
            {
                tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0;
            }
        }
        // leftover channels go into lane i of the extra (channel_count-th) group
        for (int i = 0; i < channel_remain; i++)
        {
            int ii = channel_count * 4 + i;
            float* k0 = img_data + ii * inwh;
            float* f0 = kernel_data + ii * 9;
            float* b0 = bias_data + ii;
            float* tmp0 = img_tmp + channel_count * 4 * inwh;
            float* tmp1 = kernel_tmp + channel_count * 4 * 9;
            float* tmp2 = bias_tmp + channel_count * 4;
            for (int j = 0; j < inwh; j++)
            {
                tmp0[i] = k0[0];
                tmp0 += 4;
                k0++;
            }
            for (int j = 0; j < 9; j++)
            {
                tmp1[i] = f0[0];
                tmp1 += 4;
                f0++;
            }
            if (bias_data)
            {
                tmp2[i] = b0[0];
            }
            else
            {
                tmp2[i] = 0;
            }
        }
    }
    float* output_tmp = ( float* )sys_malloc(outwh * 4 * (channel_count + 1) * sizeof(float));
    // convolution: one iteration per packed group (the +1 covers the remainder group)
    for (int c = 0; c < channel_count + 1; c++)
    {
        float* ktmp = kernel_tmp + c * 4 * 9;
        float* btmp = bias_tmp + c * 4;
        for (int i = 0; i < outh; i++)
        {
            int j = 0;
            float* itmp0 = img_tmp + c * 4 * inwh + 4 * i * inw;        // input row i
            float* itmp1 = img_tmp + c * 4 * inwh + 4 * (i + 1) * inw;  // input row i+1
            float* itmp2 = img_tmp + c * 4 * inwh + 4 * (i + 2) * inw;  // input row i+2
            float* otmp = output_tmp + c * 4 * outwh + 4 * i * outw;
            // 8 output columns per iteration
            for (; j + 7 < outw; j += 8)
            {
#if __SSE__
                __m128 _sum0 = _mm_loadu_ps(btmp);
                __m128 _sum1 = _mm_loadu_ps(btmp);
                __m128 _sum2 = _mm_loadu_ps(btmp);
                __m128 _sum3 = _mm_loadu_ps(btmp);
                __m128 _sum4 = _mm_loadu_ps(btmp);
                __m128 _sum5 = _mm_loadu_ps(btmp);
                __m128 _sum6 = _mm_loadu_ps(btmp);
                __m128 _sum7 = _mm_loadu_ps(btmp);
                // kernel row 0
                __m128 _va0 = _mm_loadu_ps(itmp0);
                __m128 _va1 = _mm_loadu_ps(itmp0 + 4);
                __m128 _va2 = _mm_loadu_ps(itmp0 + 8);
                __m128 _va3 = _mm_loadu_ps(itmp0 + 12);
                __m128 _va4 = _mm_loadu_ps(itmp0 + 16);
                __m128 _va5 = _mm_loadu_ps(itmp0 + 20);
                __m128 _va6 = _mm_loadu_ps(itmp0 + 24);
                __m128 _va7 = _mm_loadu_ps(itmp0 + 28);
                __m128 _va8 = _mm_loadu_ps(itmp0 + 32);
                __m128 _va9 = _mm_loadu_ps(itmp0 + 36);
                __m128 _vb0 = _mm_loadu_ps(ktmp);
                __m128 _vb1 = _mm_loadu_ps(ktmp + 4);
                __m128 _vb2 = _mm_loadu_ps(ktmp + 8);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
                _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5);
                _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4); _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5);
                _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4); _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6);
                _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7); _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5);
                _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7);
                _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7);
                // kernel row 1
                _va0 = _mm_loadu_ps(itmp1);
                _va1 = _mm_loadu_ps(itmp1 + 4);
                _va2 = _mm_loadu_ps(itmp1 + 8);
                _va3 = _mm_loadu_ps(itmp1 + 12);
                _va4 = _mm_loadu_ps(itmp1 + 16);
                _va5 = _mm_loadu_ps(itmp1 + 20);
                _va6 = _mm_loadu_ps(itmp1 + 24);
                _va7 = _mm_loadu_ps(itmp1 + 28);
                _va8 = _mm_loadu_ps(itmp1 + 32);
                _va9 = _mm_loadu_ps(itmp1 + 36);
                _vb0 = _mm_loadu_ps(ktmp + 12);
                _vb1 = _mm_loadu_ps(ktmp + 16);
                _vb2 = _mm_loadu_ps(ktmp + 20);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
                _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5);
                _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4); _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5);
                _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4); _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6);
                _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7); _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5);
                _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7);
                _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7);
                // kernel row 2
                _va0 = _mm_loadu_ps(itmp2);
                _va1 = _mm_loadu_ps(itmp2 + 4);
                _va2 = _mm_loadu_ps(itmp2 + 8);
                _va3 = _mm_loadu_ps(itmp2 + 12);
                _va4 = _mm_loadu_ps(itmp2 + 16);
                _va5 = _mm_loadu_ps(itmp2 + 20);
                _va6 = _mm_loadu_ps(itmp2 + 24);
                _va7 = _mm_loadu_ps(itmp2 + 28);
                _va8 = _mm_loadu_ps(itmp2 + 32);
                _va9 = _mm_loadu_ps(itmp2 + 36);
                _vb0 = _mm_loadu_ps(ktmp + 24);
                _vb1 = _mm_loadu_ps(ktmp + 28);
                _vb2 = _mm_loadu_ps(ktmp + 32);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
                _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5);
                _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4); _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5);
                _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4); _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6);
                _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7); _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5);
                _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7);
                _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7);
                _mm_storeu_ps(otmp, _sum0);
                _mm_storeu_ps(otmp + 4, _sum1);
                _mm_storeu_ps(otmp + 8, _sum2);
                _mm_storeu_ps(otmp + 12, _sum3);
                _mm_storeu_ps(otmp + 16, _sum4);
                _mm_storeu_ps(otmp + 20, _sum5);
                _mm_storeu_ps(otmp + 24, _sum6);
                _mm_storeu_ps(otmp + 28, _sum7);
#else
                // scalar fallback: same math, 4 lanes at a time
                float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum4[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum5[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum6[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum7[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                for (int k = 0; k < 4; k++)
                {
                    sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24];
                    sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28];
                    sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32];
                    sum1[k] += itmp0[k + 4] * ktmp[k]; sum1[k] += itmp1[k + 4] * ktmp[k + 12]; sum1[k] += itmp2[k + 4] * ktmp[k + 24];
                    sum1[k] += itmp0[k + 8] * ktmp[k + 4]; sum1[k] += itmp1[k + 8] * ktmp[k + 16]; sum1[k] += itmp2[k + 8] * ktmp[k + 28];
                    sum1[k] += itmp0[k + 12] * ktmp[k + 8]; sum1[k] += itmp1[k + 12] * ktmp[k + 20]; sum1[k] += itmp2[k + 12] * ktmp[k + 32];
                    sum2[k] += itmp0[k + 8] * ktmp[k]; sum2[k] += itmp1[k + 8] * ktmp[k + 12]; sum2[k] += itmp2[k + 8] * ktmp[k + 24];
                    sum2[k] += itmp0[k + 12] * ktmp[k + 4]; sum2[k] += itmp1[k + 12] * ktmp[k + 16]; sum2[k] += itmp2[k + 12] * ktmp[k + 28];
                    sum2[k] += itmp0[k + 16] * ktmp[k + 8]; sum2[k] += itmp1[k + 16] * ktmp[k + 20]; sum2[k] += itmp2[k + 16] * ktmp[k + 32];
                    sum3[k] += itmp0[k + 12] * ktmp[k]; sum3[k] += itmp1[k + 12] * ktmp[k + 12]; sum3[k] += itmp2[k + 12] * ktmp[k + 24];
                    sum3[k] += itmp0[k + 16] * ktmp[k + 4]; sum3[k] += itmp1[k + 16] * ktmp[k + 16]; sum3[k] += itmp2[k + 16] * ktmp[k + 28];
                    sum3[k] += itmp0[k + 20] * ktmp[k + 8]; sum3[k] += itmp1[k + 20] * ktmp[k + 20]; sum3[k] += itmp2[k + 20] * ktmp[k + 32];
                    sum4[k] += itmp0[k + 16] * ktmp[k]; sum4[k] += itmp1[k + 16] * ktmp[k + 12]; sum4[k] += itmp2[k + 16] * ktmp[k + 24];
                    sum4[k] += itmp0[k + 20] * ktmp[k + 4]; sum4[k] += itmp1[k + 20] * ktmp[k + 16]; sum4[k] += itmp2[k + 20] * ktmp[k + 28];
                    sum4[k] += itmp0[k + 24] * ktmp[k + 8]; sum4[k] += itmp1[k + 24] * ktmp[k + 20]; sum4[k] += itmp2[k + 24] * ktmp[k + 32];
                    sum5[k] += itmp0[k + 20] * ktmp[k]; sum5[k] += itmp1[k + 20] * ktmp[k + 12]; sum5[k] += itmp2[k + 20] * ktmp[k + 24];
                    sum5[k] += itmp0[k + 24] * ktmp[k + 4]; sum5[k] += itmp1[k + 24] * ktmp[k + 16]; sum5[k] += itmp2[k + 24] * ktmp[k + 28];
                    sum5[k] += itmp0[k + 28] * ktmp[k + 8]; sum5[k] += itmp1[k + 28] * ktmp[k + 20]; sum5[k] += itmp2[k + 28] * ktmp[k + 32];
                    sum6[k] += itmp0[k + 24] * ktmp[k]; sum6[k] += itmp1[k + 24] * ktmp[k + 12]; sum6[k] += itmp2[k + 24] * ktmp[k + 24];
                    sum6[k] += itmp0[k + 28] * ktmp[k + 4]; sum6[k] += itmp1[k + 28] * ktmp[k + 16]; sum6[k] += itmp2[k + 28] * ktmp[k + 28];
                    sum6[k] += itmp0[k + 32] * ktmp[k + 8]; sum6[k] += itmp1[k + 32] * ktmp[k + 20]; sum6[k] += itmp2[k + 32] * ktmp[k + 32];
                    sum7[k] += itmp0[k + 28] * ktmp[k]; sum7[k] += itmp1[k + 28] * ktmp[k + 12]; sum7[k] += itmp2[k + 28] * ktmp[k + 24];
                    sum7[k] += itmp0[k + 32] * ktmp[k + 4]; sum7[k] += itmp1[k + 32] * ktmp[k + 16]; sum7[k] += itmp2[k + 32] * ktmp[k + 28];
                    sum7[k] += itmp0[k + 36] * ktmp[k + 8]; sum7[k] += itmp1[k + 36] * ktmp[k + 20]; sum7[k] += itmp2[k + 36] * ktmp[k + 32];
                }
                for (int k = 0; k < 4; k++)
                {
                    otmp[k] = sum0[k]; otmp[k + 4] = sum1[k]; otmp[k + 8] = sum2[k]; otmp[k + 12] = sum3[k];
                    otmp[k + 16] = sum4[k]; otmp[k + 20] = sum5[k]; otmp[k + 24] = sum6[k]; otmp[k + 28] = sum7[k];
                }
#endif
                itmp0 += 32; itmp1 += 32; itmp2 += 32;
                otmp += 32;
            }
            // 4 output columns per iteration
            for (; j + 3 < outw; j += 4)
            {
#if __SSE__
                __m128 _sum0 = _mm_loadu_ps(btmp);
                __m128 _sum1 = _mm_loadu_ps(btmp);
                __m128 _sum2 = _mm_loadu_ps(btmp);
                __m128 _sum3 = _mm_loadu_ps(btmp);
                // kernel row 0
                __m128 _va0 = _mm_loadu_ps(itmp0);
                __m128 _va1 = _mm_loadu_ps(itmp0 + 4);
                __m128 _va2 = _mm_loadu_ps(itmp0 + 8);
                __m128 _va3 = _mm_loadu_ps(itmp0 + 12);
                __m128 _va4 = _mm_loadu_ps(itmp0 + 16);
                __m128 _va5 = _mm_loadu_ps(itmp0 + 20);
                __m128 _vb0 = _mm_loadu_ps(ktmp);
                __m128 _vb1 = _mm_loadu_ps(ktmp + 4);
                __m128 _vb2 = _mm_loadu_ps(ktmp + 8);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
                // kernel row 1
                _va0 = _mm_loadu_ps(itmp1);
                _va1 = _mm_loadu_ps(itmp1 + 4);
                _va2 = _mm_loadu_ps(itmp1 + 8);
                _va3 = _mm_loadu_ps(itmp1 + 12);
                _va4 = _mm_loadu_ps(itmp1 + 16);
                _va5 = _mm_loadu_ps(itmp1 + 20);
                _vb0 = _mm_loadu_ps(ktmp + 12);
                _vb1 = _mm_loadu_ps(ktmp + 16);
                _vb2 = _mm_loadu_ps(ktmp + 20);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
                // kernel row 2
                _va0 = _mm_loadu_ps(itmp2);
                _va1 = _mm_loadu_ps(itmp2 + 4);
                _va2 = _mm_loadu_ps(itmp2 + 8);
                _va3 = _mm_loadu_ps(itmp2 + 12);
                _va4 = _mm_loadu_ps(itmp2 + 16);
                _va5 = _mm_loadu_ps(itmp2 + 20);
                _vb0 = _mm_loadu_ps(ktmp + 24);
                _vb1 = _mm_loadu_ps(ktmp + 28);
                _vb2 = _mm_loadu_ps(ktmp + 32);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1);
                _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2);
                _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3);
                _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3);
                _mm_storeu_ps(otmp, _sum0);
                _mm_storeu_ps(otmp + 4, _sum1);
                _mm_storeu_ps(otmp + 8, _sum2);
                _mm_storeu_ps(otmp + 12, _sum3);
#else
                float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]};
                for (int k = 0; k < 4; k++)
                {
                    sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24];
                    sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28];
                    sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32];
                    sum1[k] += itmp0[k + 4] * ktmp[k]; sum1[k] += itmp1[k + 4] * ktmp[k + 12]; sum1[k] += itmp2[k + 4] * ktmp[k + 24];
                    sum1[k] += itmp0[k + 8] * ktmp[k + 4]; sum1[k] += itmp1[k + 8] * ktmp[k + 16]; sum1[k] += itmp2[k + 8] * ktmp[k + 28];
                    sum1[k] += itmp0[k + 12] * ktmp[k + 8]; sum1[k] += itmp1[k + 12] * ktmp[k + 20]; sum1[k] += itmp2[k + 12] * ktmp[k + 32];
                    sum2[k] += itmp0[k + 8] * ktmp[k]; sum2[k] += itmp1[k + 8] * ktmp[k + 12]; sum2[k] += itmp2[k + 8] * ktmp[k + 24];
                    sum2[k] += itmp0[k + 12] * ktmp[k + 4]; sum2[k] += itmp1[k + 12] * ktmp[k + 16]; sum2[k] += itmp2[k + 12] * ktmp[k + 28];
                    sum2[k] += itmp0[k + 16] * ktmp[k + 8]; sum2[k] += itmp1[k + 16] * ktmp[k + 20]; sum2[k] += itmp2[k + 16] * ktmp[k + 32];
                    sum3[k] += itmp0[k + 12] * ktmp[k]; sum3[k] += itmp1[k + 12] * ktmp[k + 12]; sum3[k] += itmp2[k + 12] * ktmp[k + 24];
                    sum3[k] += itmp0[k + 16] * ktmp[k + 4]; sum3[k] += itmp1[k + 16] * ktmp[k + 16]; sum3[k] += itmp2[k + 16] * ktmp[k + 28];
                    sum3[k] += itmp0[k + 20] * ktmp[k + 8]; sum3[k] += itmp1[k + 20] * ktmp[k + 20]; sum3[k] += itmp2[k + 20] * ktmp[k + 32];
                }
                for (int k = 0; k < 4; k++)
                {
                    otmp[k] = sum0[k]; otmp[k + 4] = sum1[k]; otmp[k + 8] = sum2[k]; otmp[k + 12] = sum3[k];
                }
#endif
                itmp0 += 16; itmp1 += 16; itmp2 += 16;
                otmp += 16;
            }
            // one output column per iteration
            for (; j < outw; j++)
            {
                __m128 _sum0 = _mm_loadu_ps(btmp);
                __m128 _va0 = _mm_loadu_ps(itmp0);
                __m128 _va1 = _mm_loadu_ps(itmp0 + 8);
                __m128 _va2 = _mm_loadu_ps(itmp0 + 16);
                __m128 _vb0 = _mm_loadu_ps(ktmp);
                __m128 _vb1 = _mm_loadu_ps(ktmp + 8);
                __m128 _vb2 = _mm_loadu_ps(ktmp + 16);
                _sum0 = _mm_fmadd_ps(_va0, _vb0, _sum0);
                _sum0 = _mm_fmadd_ps(_va1, _vb1, _sum0);
                _sum0 = _mm_fmadd_ps(_va2, _vb2, _sum0);
                _mm_storeu_ps(otmp, _sum0);
                itmp0 += 4; itmp1 += 4; itmp2 += 4;
                otmp += 4;
            }
        }
    }
float* tmp2 = bias_tmp + channel_count * 4; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 4; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 4; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float* output_tmp = ( float* )sys_malloc(outwh * 4 * (channel_count + 1) * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float* ktmp = kernel_tmp + c * 4 * 9; float* btmp = bias_tmp + c * 4; for (int i = 0; i < outh; i++) { int j = 0; float* itmp0 = img_tmp + c * 4 * inwh + 4 * i * 2 * inw; float* itmp1 = img_tmp + c * 4 * inwh + 4 * (i * 2 + 1) * inw; float* itmp2 = img_tmp + c * 4 * inwh + 4 * (i * 2 + 2) * inw; float* otmp = output_tmp + c * 4 * outwh + 4 * i * outw; for (; j + 3 < outw; j += 4) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _sum1 = _mm_loadu_ps(btmp); __m128 _sum2 = _mm_loadu_ps(btmp); __m128 _sum3 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _va3 = _mm_loadu_ps(itmp0 + 12); __m128 _va4 = _mm_loadu_ps(itmp0 + 16); __m128 _va5 = _mm_loadu_ps(itmp0 + 20); __m128 _va6 = _mm_loadu_ps(itmp0 + 24); __m128 _va7 = _mm_loadu_ps(itmp0 + 28); __m128 _va8 = _mm_loadu_ps(itmp0 + 32); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2)); _va0 = 
_mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _va3 = _mm_loadu_ps(itmp1 + 12); _va4 = _mm_loadu_ps(itmp1 + 16); _va5 = _mm_loadu_ps(itmp1 + 20); _va6 = _mm_loadu_ps(itmp1 + 24); _va7 = _mm_loadu_ps(itmp1 + 28); _va8 = _mm_loadu_ps(itmp1 + 32); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2)); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _va3 = _mm_loadu_ps(itmp2 + 12); _va4 = _mm_loadu_ps(itmp2 + 16); _va5 = _mm_loadu_ps(itmp2 + 20); _va6 = _mm_loadu_ps(itmp2 + 24); _va7 = _mm_loadu_ps(itmp2 + 28); _va8 = _mm_loadu_ps(itmp2 + 32); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2)); 
_mm_storeu_ps(otmp, _sum0); _mm_storeu_ps(otmp + 4, _sum1); _mm_storeu_ps(otmp + 8, _sum2); _mm_storeu_ps(otmp + 12, _sum3); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; sum1[k] += itmp0[k + 8] * ktmp[k]; sum1[k] += itmp1[k + 8] * ktmp[k + 12]; sum1[k] += itmp2[k + 8] * ktmp[k + 24]; sum1[k] += itmp0[k + 12] * ktmp[k + 4]; sum1[k] += itmp1[k + 12] * ktmp[k + 16]; sum1[k] += itmp2[k + 12] * ktmp[k + 28]; sum1[k] += itmp0[k + 16] * ktmp[k + 8]; sum1[k] += itmp1[k + 16] * ktmp[k + 20]; sum1[k] += itmp2[k + 16] * ktmp[k + 32]; sum2[k] += itmp0[k + 16] * ktmp[k]; sum2[k] += itmp1[k + 16] * ktmp[k + 12]; sum2[k] += itmp2[k + 16] * ktmp[k + 24]; sum2[k] += itmp0[k + 20] * ktmp[k + 4]; sum2[k] += itmp1[k + 20] * ktmp[k + 16]; sum2[k] += itmp2[k + 20] * ktmp[k + 28]; sum2[k] += itmp0[k + 24] * ktmp[k + 8]; sum2[k] += itmp1[k + 24] * ktmp[k + 20]; sum2[k] += itmp2[k + 24] * ktmp[k + 32]; sum3[k] += itmp0[k + 24] * ktmp[k]; sum3[k] += itmp1[k + 24] * ktmp[k + 12]; sum3[k] += itmp2[k + 24] * ktmp[k + 24]; sum3[k] += itmp0[k + 28] * ktmp[k + 4]; sum3[k] += itmp1[k + 28] * ktmp[k + 16]; sum3[k] += itmp2[k + 28] * ktmp[k + 28]; sum3[k] += itmp0[k + 32] * ktmp[k + 8]; sum3[k] += itmp1[k + 32] * ktmp[k + 20]; sum3[k] += itmp2[k + 32] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; otmp[k + 4] = sum1[k]; otmp[k + 8] = sum2[k]; otmp[k + 12] = sum3[k]; } #endif itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 16; } for (; j < 
outw; j++) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _mm_storeu_ps(otmp, _sum0); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; } #endif itmp0 += 8; itmp1 += 8; itmp2 += 8; otmp += 4; } } } { for (int i = 0; i < channel_count; i++) { float* otmp = output_tmp + i * 4 * outwh; float* tmp0 = output + i * 4 * outwh; float* tmp1 = output + i * 4 * outwh + 1 * outwh; float* tmp2 = output + i * 4 * outwh + 2 * outwh; float* tmp3 = output + i * 4 * outwh + 3 * outwh; for (int i = 0; i < outwh; i++) { 
tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 4; tmp0++; tmp1++; tmp2++; tmp3++; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float* otmp = output_tmp + channel_count * 4 * outwh; float* tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 4; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } #else static void convdw3x3s1(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w, int out_h, int out_w, int num_thread) { int w = in_w; int h = in_h; int c_step_in = w * h; int outw = out_w; int outh = out_h; int c_step_out = outw * outh; const int group = channel; const float* kernel = _kernel; #pragma omp parallel for num_threads(num_thread) for (int g = 0; g < group; g++) { float* out = output + g * c_step_out; float* outptr = out; float* outptr2 = outptr + outw; const float bias0 = _bias ? _bias[g] : 0.f; const float* kernel0 = kernel + g * 9; const float* img0 = input + g * c_step_in; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* r3 = img0 + w * 3; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; int i = 0; for (; i + 1 < outh; i += 2) { int remain = outw; for (; remain > 0; remain--) { float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; float sum2 = bias0; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; *outptr = sum; *outptr2 = sum2; r0++; r1++; r2++; r3++; outptr++; outptr2++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr += outw; outptr2 += outw; } for 
(; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr = sum; r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w, int out_h, int out_w, int num_thread) { int w = in_w; int h = in_h; int c_step_in = w * h; int outw = out_w; int outh = out_h; int c_step_out = outw * outh; const int group = channel; const int tailstep = w - 2 * outw + w; const float* kernel = _kernel; #pragma omp parallel for num_threads(num_thread) for (int g = 0; g < group; g++) { float* out = output + g * c_step_out; float* outptr = out; const float* kernel0 = kernel + g * 9; const float bias0 = _bias ? _bias[g] : 0.f; const float* img0 = input + g * c_step_in; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr = sum; r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } } #endif int conv_dw_run(struct ir_tensor* input_tensor, struct ir_tensor* weight_tensor, struct ir_tensor* bias_tensor, struct ir_tensor* output_tensor, struct conv_priv_info* conv_info, struct conv_param* param, int num_thread, int cpu_affinity) { float* input = ( float* )input_tensor->data; float* output = ( float* )output_tensor->data; float* kernel = ( float* )weight_tensor->data; float* biases = NULL; if 
(bias_tensor) biases = ( float* )bias_tensor->data; int batch_number = input_tensor->dims[0]; int inc = input_tensor->dims[1]; int inh = input_tensor->dims[2]; int inw = input_tensor->dims[3]; int in_chw = inc * inh * inw; int outc = output_tensor->dims[1]; int outh = output_tensor->dims[2]; int outw = output_tensor->dims[3]; int out_hw = outh * outw; int out_chw = out_hw * outc; int ksize_h = param->kernel_h; int ksize_w = param->kernel_w; int pad_w = param->pad_w0; int pad_h = param->pad_h0; int stride_w = param->stride_w; int stride_h = param->stride_h; int dilation_w = param->dilation_w; int dilation_h = param->dilation_h; int group = param->group; int activation = param->activation; /* pading */ int inh_tmp = inh + pad_h + pad_h; int inw_tmp = inw + pad_w + pad_w; float* input_tmp = NULL; if (inh_tmp == inh && inw_tmp == inw) input_tmp = input; else { input_tmp = ( float* )sys_malloc(inh_tmp * inw_tmp * group * sizeof(float)); #pragma omp parallel for num_threads(num_thread) for (int g = 0; g < group; g++) { float* pad_in = input + g * inh * inw; float* pad_out = input_tmp + g * inh_tmp * inw_tmp; pad(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0.f); } } /* process */ for (int i = 0; i < batch_number; i++) { if (stride_h == 1) convdw3x3s1(output, input_tmp, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread); else convdw3x3s2(output, input_tmp, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread); } /* relu */ if (activation >= 0) relu(output, batch_number * out_chw, activation); if (!(inh_tmp == inh && inw_tmp == inw)) sys_free(input_tmp); return 0; }
hello.c
#include <stdio.h>
#include <omp.h>

/* Spawn an OpenMP parallel region and have every thread announce itself. */
int main(int argc, char **argv)
{
#pragma omp parallel
    {
        int thread_id = omp_get_thread_num();
        int thread_count = omp_get_num_threads();
        printf("Hello, multithreaded world: thread %d of %d\n", thread_id,
               thread_count);
    }
    return 0;
}
A1_3.c
// gcc -std=c99 -Wall -lm -fopenmp -o go A1_3.c
#include <stdio.h>
#include <omp.h>
#include <stdlib.h>
#include <time.h>

void display(int, int *);
void swap(int *, int *);

/*
 * Read the CPU timestamp counter.
 * NOTE(review): the "=A" constraint pairs edx:eax only on 32-bit x86; on
 * x86-64 it does not capture the full 64-bit counter.  Confirm the target
 * architecture before trusting these timings.
 */
unsigned long long tick(void)
{
    unsigned long long d;
    __asm__ __volatile__("rdtsc": "=A"(d));
    return d;
}

/*
 * Fill an array with random values, sort it with gnome sort, and report the
 * elapsed time in timestamp-counter ticks.
 */
int main(int argc, char *argv[])
{
    int n = 10;
    /* BUG FIX: the original allocated only n BYTES (malloc(n)) for an array
       of n ints, overflowing the heap buffer on every element past n/4.
       Allocate n * sizeof *v and check the result. */
    int *v = malloc(n * sizeof *v);
    if (v == NULL) {
        fprintf(stderr, "allocation of %d ints failed\n", n);
        return 1;
    }

    // initialize random seed based on current time
    srand((unsigned) time(NULL));

    for (int i = 0; i < n; ++i) {
        v[i] = rand() % 20;
    }

    display(n, v);

    unsigned long long start = tick();

    /* Gnome sort (dubbed stupid sort): walk forward while adjacent pairs are
       ordered; on a misordered pair, swap and step back, resuming from the
       saved position j once the element has sunk into place. */
    int i = 1, j = 2;
    while (i < n) {
        if (v[i - 1] < v[i]) {
            i = j++;
        } else {
            // swap [i-1] and [i]
            swap(&v[i], &v[i - 1]);
            if (--i == 0) {
                i = j++;
            }
        }
    }
    /* TODO(exercise): parallelize the sort with an OpenMP worksharing loop. */

    /* renamed from `time` to avoid shadowing time(3) */
    double elapsed = (double)(tick() - start);

    display(n, v);
    printf("Time: %.2f\n", elapsed);

    free(v);
    return 0;
}

/* Print the n elements of v separated by tabs, ending with a newline. */
void display(int n, int *v)
{
    for (int i = 0; i < n; ++i)
        printf("%d\t", v[i]);
    printf("\n");
}

/* Exchange the two ints pointed to by x and y. */
void swap(int *x, int *y)
{
    int z = *x;
    *x = *y;
    *y = z;
}
MinCostFlow.h
#ifndef _MINCostFlow
#define _MINCostFlow
#include "strPrint.h"
#include <algorithm>
#include <vector>
#include <queue>
#include <assert.h>

/**
 * @brief Min-Cost Flow algorithm for simple bipartite graph
 *
 * Builds a residual graph edge-by-edge via addEdge(), then computes the cost
 * of pushing a required amount of flow with successive shortest-path
 * augmentation (SPFA for path finding, since edge costs may be negative in
 * the residual graph).
 */
class MinCostFlow
{
public:
    /**
     * @brief An edge is represented as a struct
     * destination - denotes the ending node of an edge. For example, 'v' in u-->v
     * capacity - the maximum capacity of an edge
     * residualFlow - the residual amount of flow that can flow through the edge
     * counterEdge - a pointer to the counter edge in residual graph for performance optimization
     */
    struct Edge
    {
        int destination;
        int capacity;
        int residualFlow;
        float cost;
        Edge *counterEdge;
    };

    /**
     * @brief A graph is represented as a struct
     *
     * numVertices - denotes the number of vertices in the graph
     * adj - Adjacency list : Collection of unordered lists one for each vertex
     */
    struct Graph
    {
        int numVertices;
        std::vector<Edge *> *adj;
    };

    // Vertices are indexed 0..numVertices-1; flow/source/destination are
    // stored for the caller's convenience but the algorithm takes them as
    // explicit arguments to calcMinCostFlow().
    MinCostFlow(int numVertices, int flow, int source, int destination)
        : numVertices(numVertices), flow(flow), source(source), destination(destination)
    {
        // Initialize the graphs
        resGraph.numVertices = numVertices;
        resGraph.adj = new std::vector<Edge *>[numVertices];
    }

    // Frees every dynamically allocated edge, then the adjacency array.
    ~MinCostFlow()
    {
        for (int u = 0; u < numVertices; u++)
        {
            for (unsigned int e = 0; e < resGraph.adj[u].size(); e++)
            {
                delete resGraph.adj[u][e];
            }
        }
        delete[] resGraph.adj;
    }

    // Adds a forward edge tu->tv with capacity tcap and cost tcost, plus its
    // zero-capacity reverse (residual) edge; the two are cross-linked.
    // NOTE(review): the +0.01 perturbation breaks zero-cost cycles but makes
    // the reported total cost slightly overestimate the true cost
    // (~0.01 per edge per unit of flow).
    void addEdge(int tu, int tv, int tcap, float tcost)
    {
        Edge *tmpEdge1 = genEdge(tv, tcap, tcap, tcost + 0.01); // avoid edge cycle
        Edge *tmpEdge2 = genEdge(tu, tcap, 0, -tcost);
        tmpEdge1->counterEdge = tmpEdge2;
        tmpEdge2->counterEdge = tmpEdge1;
        resGraph.adj[tu].push_back(tmpEdge1);
        resGraph.adj[tv].push_back(tmpEdge2);
    }

    /**
     * @brief Calculates the cost of flow 'requiredFlow' from 's' to 't'
     *
     * Repeatedly finds a shortest (cheapest) augmenting path with SPFA and
     * saturates it, until the required flow is reached or no path remains.
     *
     * @param s source
     * @param t sink
     * @param requiredFlow
     * @return float Returns 'MAX_VAL' if such a flow is not possible
     */
    float calcMinCostFlow(int s, int t, int requiredFlow)
    {
        // int u = -1;
        int v = -1, currFlow = 0;
        float runningCost = 0;
        Edge *te1, *te2;

        // Run shortest path augmentation
        // NOTE(review): variable-length arrays are a GCC/Clang extension,
        // not ISO C++ — consider std::vector for portability.
        int parent[resGraph.numVertices];
        Edge *parentEdge[resGraph.numVertices];
        int stepProgress = requiredFlow / 100;
        if (stepProgress < 1)
            stepProgress = 1;
        while (SPFA(resGraph, s, t, parent, parentEdge))
        {
            // Bottleneck capacity along the found path
            int path_flow = MAX_VAL;
            for (v = t; v != s; v = parent[v])
            {
                assert(v >= 0);
                assert(v < numVertices);
                assert(v != parent[v]);
                // u = parent[v];
                te1 = parentEdge[v];
                path_flow = std::min(path_flow, te1->residualFlow);
            }
            // Never push more than what is still needed
            path_flow = std::min(path_flow, requiredFlow - currFlow);
            // Apply the augmentation along the path and accumulate its cost
            for (v = t; v != s; v = parent[v])
            {
                // u = parent[v];
                te1 = parentEdge[v];
                te2 = te1->counterEdge;
                te1->residualFlow -= path_flow;
                te2->residualFlow += path_flow;
                runningCost += path_flow * (te1->cost);
            }
            currFlow += path_flow;
            assert(path_flow > 0);
            // if (currFlow % (5 * stepProgress) == 0)
            // {
            //     printProgress((double)currFlow / requiredFlow);
            //     printf(" path_flow: %d", path_flow);
            // }
            if (currFlow == requiredFlow)
            {
                break;
            }
        }
        if (currFlow == requiredFlow)
        {
            return runningCost;
        }
        else
        {
            // Could not route the requested amount of flow
            return MAX_VAL;
        }
    }

    Graph resGraph;

private:
    int numVertices, flow, source, destination;
    int MAX_VAL = 200000000;    // sentinel: "infinite" distance / infeasible cost
    float eps = 1e-5;           // slack to suppress float round-off re-relaxations

    // Generates a new edge (allocating space dynamically) and returns a pointer to the edge
    Edge *genEdge(int destination, int capacity, int residualFlow, float cost)
    {
        Edge *e1 = new Edge;
        e1->destination = destination;
        e1->capacity = capacity;
        e1->residualFlow = residualFlow;
        e1->cost = cost;
        return e1;
    }

    /**
     * @brief Finds the shortest path from source to sink
     *
     * Bellman-Ford queue variant (SPFA); handles the negative residual costs
     * that Dijkstra cannot. Only edges with residualFlow > 0 are relaxed.
     *
     * @param resGraph input graph
     * @param source source node
     * @param sink sink node
     * @param parentVertex parentVertex and parentEdge are updated and can be used to reconstruct the shortest path
     * @param parentEdge parentVertex and parentEdge are updated and can be used to reconstruct the shortest path
     * @return true if there is a path from source to sink
     * @return false if no path exists from source to sink
     */
    bool SPFA(Graph resGraph, int source, int sink, int parentVertex[], Edge *parentEdge[])
    {
        // Initialize variables that will be needed
        int numVertices = resGraph.numVertices;
        std::vector<Edge *> *adj = resGraph.adj;
        // NOTE(review): VLAs again — GCC/Clang extension, not ISO C++.
        float distance[numVertices];
        int inQueueCnt[numVertices];

        // Initialize visited, parentVertex and distance
        for (int i = 0; i < numVertices; i++)
        {
            parentVertex[i] = -1;
            distance[i] = MAX_VAL;
            inQueueCnt[i] = 0;
        }
        distance[source] = 0;

        std::queue<int> nodeQ;
        std::vector<bool> nodesInQ;    // membership flags to avoid duplicate queue entries
        nodesInQ.resize(numVertices, false);
        while (!nodeQ.empty())
            nodeQ.pop();
        nodeQ.push(source);
        nodesInQ[source] = true;
        inQueueCnt[source] = 1;
        while (!nodeQ.empty())
        {
            int u = nodeQ.front();
            nodeQ.pop();
            int numEdge = adj[u].size();
            //#pragma omp parallel for
            for (int e = 0; e < numEdge; e++)
            {
                if (adj[u][e]->residualFlow > 0)
                {
                    int v = adj[u][e]->destination;
                    float w = adj[u][e]->cost;
                    // Relax only on a strictly-better-than-eps improvement
                    if (distance[v] > distance[u] + w + eps)
                    {
                        distance[v] = distance[u] + w;
                        parentVertex[v] = u;
                        parentEdge[v] = adj[u][e];
                        //#pragma omp critical
                        {
                            if (!nodesInQ[v])
                            {
                                nodesInQ[v] = true;
                                nodeQ.push(v);
                                inQueueCnt[v]++;
                                // a vertex re-queued more than V times would imply
                                // a negative cycle
                                assert(inQueueCnt[v] <= numVertices);
                            }
                        }
                        assert(distance[v] > -0.00001);
                    }
                }
            }
            nodesInQ[u] = false;
        }

        // Sink unreached <=> no augmenting path remains
        if (parentVertex[sink] == -1)
        {
            return false;
        }
        else
        {
            return true;
        }
    }
};
#endif
morphology.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y % % MM MM O O R R P P H H O O L O O G Y Y % % M M M O O RRRR PPPP HHHHH O O L O O G GGG Y % % M M O O R R P H H O O L O O G G Y % % M M OOO R R P H H OOO LLLLL OOO GGG Y % % % % % % MagickCore Morphology Methods % % % % Software Design % % Anthony Thyssen % % January 2010 % % % % % % Copyright 1999-2013 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Morpology is the the application of various kernels, of any size and even % shape, to a image in various ways (typically binary, but not always). % % Convolution (weighted sum or average) is just one specific type of % morphology. Just one that is very common for image bluring and sharpening % effects. Not only 2D Gaussian blurring, but also 2-pass 1D Blurring. % % This module provides not only a general morphology function, and the ability % to apply more advanced or iterative morphologies, but also functions for the % generation of many different types of kernel arrays from user supplied % arguments. Prehaps even the generation of a kernel from a small image. */ /* Include declarations. 
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/color-private.h"
#include "magick/channel.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor-private.h"
#include "magick/morphology.h"
#include "magick/morphology-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/prepress.h"
#include "magick/quantize.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"

/*
  Other global definitions used by module.
*/
/* Return the smaller of two doubles. */
static inline double MagickMin(const double x,const double y)
{
  return( x < y ? x : y);
}
/* Return the larger of two doubles. */
static inline double MagickMax(const double x,const double y)
{
  return( x > y ? x : y);
}
/* In-place clamp helpers built on the min/max functions above. */
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)

/* Integer Factorial Function - for a Binomial kernel.
   NOTE: the iterative version overflows a 64-bit size_t for n > 20. */
#if 1
static inline size_t fact(size_t n)
{
  size_t l,f;
  for(f=1, l=2; l <= n; f=f*l, l++);
  return(f);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
#define fact(n) ((size_t)lgamma((double)n+1))
#endif

/* Currently these are only internal to this module */
static void
  CalcKernelMetaData(KernelInfo *),
  ExpandMirrorKernelInfo(KernelInfo *),
  ExpandRotateKernelInfo(KernelInfo *, const double),
  RotateKernelInfo(KernelInfo *, double);

/* Quick function to find last kernel in a kernel list */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
  while (kernel->next != (KernelInfo *) NULL)
    kernel = kernel->next;
  return(kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A c q u i r e K e r n e l I n f o                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireKernelInfo() takes the given string (generally supplied by the
%  user) and converts it into a Morphology/Convolution Kernel.  This allows
%  users to specify a kernel from a number of pre-defined kernels, or to fully
%  specify their own kernel for a specific Convolution or Morphology
%  Operation.
%
%  The kernel so generated can be any rectangular array of floating point
%  values (doubles) with the 'control point' or 'pixel being affected'
%  anywhere within that array of values.
%
%  Previously IM was restricted to a square of odd size using the exact
%  center as origin, this is no longer the case, and any rectangular kernel
%  with any value being declared the origin. This in turn allows the use of
%  highly asymmetrical kernels.
% % The floating point values in the kernel can also include a special value % known as 'nan' or 'not a number' to indicate that this value is not part % of the kernel array. This allows you to shaped the kernel within its % rectangular area. That is 'nan' values provide a 'mask' for the kernel % shape. However at least one non-nan value must be provided for correct % working of a kernel. % % The returned kernel should be freed using the DestroyKernelInfo() when you % are finished with it. Do not free this memory yourself. % % Input kernel defintion strings can consist of any of three types. % % "name:args[[@><]" % Select from one of the built in kernels, using the name and % geometry arguments supplied. See AcquireKernelBuiltIn() % % "WxH[+X+Y][@><]:num, num, num ..." % a kernel of size W by H, with W*H floating point numbers following. % the 'center' can be optionally be defined at +X+Y (such that +0+0 % is top left corner). If not defined the pixel in the center, for % odd sizes, or to the immediate top or left of center for even sizes % is automatically selected. % % "num, num, num, num, ..." % list of floating point numbers defining an 'old style' odd sized % square kernel. At least 9 values should be provided for a 3x3 % square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc. % Values can be space or comma separated. This is not recommended. % % You can define a 'list of kernels' which can be used by some morphology % operators A list is defined as a semi-colon separated list kernels. % % " kernel ; kernel ; kernel ; " % % Any extra ';' characters, at start, end or between kernel defintions are % simply ignored. % % The special flags will expand a single kernel, into a list of rotated % kernels. A '@' flag will expand a 3x3 kernel into a list of 45-degree % cyclic rotations, while a '>' will generate a list of 90-degree rotations. 
%  The '<' also expands using 90-degree rotates, but giving a 180-degree
%  reflected kernel before the +/- 90-degree rotations, which can be important
%  for Thinning operations.
%
%  Note that 'name' kernels will start with an alphabetic character while the
%  new kernel specification has a ':' character in its specification string.
%  If neither is the case, it is assumed an old style of a simple list of
%  numbers generating a odd-sized square kernel has been given.
%
%  The format of the AcquireKernal method is:
%
%      KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
%  A description of each parameter follows:
%
%    o kernel_string: the Morphology/Convolution kernel wanted.
%
*/

/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
  KernelInfo
    *kernel;

  char
    token[MaxTextExtent];

  const char
    *p,
    *end;

  register ssize_t
    i;

  double
    nan = sqrt((double)-1.0);  /* Special Value : Not A Number */

  MagickStatusType
    flags;

  GeometryInfo
    args;

  /* allocate and zero a fresh kernel structure */
  kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (kernel == (KernelInfo *)NULL)
    return(kernel);
  (void) ResetMagickMemory(kernel,0,sizeof(*kernel));
  kernel->minimum = kernel->maximum = kernel->angle = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  kernel->type = UserDefinedKernel;
  kernel->next = (KernelInfo *) NULL;
  kernel->signature = MagickSignature;
  /* a NULL string deliberately yields an empty (zeroed) kernel */
  if (kernel_string == (const char *) NULL)
    return(kernel);

  /* find end of this specific kernel definition string */
  end = strchr(kernel_string, ';');
  if ( end == (char *) NULL )
    end = strchr(kernel_string, '\0');

  /* clear flags - for Expanding kernel lists through rotations */
  flags = NoValue;

  /* Has a ':' in argument - New user kernel specification
     FUTURE: this split on ':' could be done by StringToken()
  */
  p = strchr(kernel_string, ':');
  if ( p != (char *) NULL && p < end)
    {
      /* ParseGeometry() needs the geometry separated! -- Arrgghh */
      memcpy(token, kernel_string, (size_t) (p-kernel_string));
      token[p-kernel_string] = '\0';
      SetGeometryInfo(&args);
      flags = ParseGeometry(token, &args);

      /* Size handling and checks of geometry settings */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then  width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 1.0;                /* then  width = 1 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then  height = width */
      kernel->width = (size_t)args.rho;
      kernel->height = (size_t)args.sigma;

      /* Offset Handling and Checks */
      if ( args.xi < 0.0 || args.psi < 0.0 )
        return(DestroyKernelInfo(kernel));
      /* default origin: pixel at (or just above-left of) the center */
      kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
                                        : (ssize_t) (kernel->width-1)/2;
      kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
                                        : (ssize_t) (kernel->height-1)/2;
      if ( kernel->x >= (ssize_t) kernel->width ||
           kernel->y >= (ssize_t) kernel->height )
        return(DestroyKernelInfo(kernel));

      p++; /* advance beyond the ':' */
    }
  else
    { /* ELSE - Old old specification, forming odd-square kernel */
      /* count up number of values given */
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
      for (i=0; p < end; i++)
      {
        GetMagickToken(p,&p,token);
        if (*token == ',')
          GetMagickToken(p,&p,token);
      }
      /* set the size of the kernel - old sized square */
      kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
      kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
      /* rewind to re-read the same values into the kernel array below */
      p=(const char *) kernel_string;
      while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
        p++;  /* ignore "'" chars for convolve filter usage - Cristy */
    }

  /* Read in the kernel values from rest of input string argument */
  kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
    kernel->width,kernel->height*sizeof(*kernel->values)));
  if (kernel->values == (double *) NULL)
    return(DestroyKernelInfo(kernel));
  kernel->minimum = +MagickHuge;
  kernel->maximum = -MagickHuge;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
  {
    GetMagickToken(p,&p,token);
    if (*token == ',')
      GetMagickToken(p,&p,token);
    if ( LocaleCompare("nan",token) == 0
         || LocaleCompare("-",token) == 0 )
      {
        kernel->values[i] = nan; /* this value is not part of neighbourhood */
      }
    else
      {
        kernel->values[i] = StringToDouble(token,(char **) NULL);
        /* accumulate the separate negative/positive sums and the min/max */
        ( kernel->values[i] < 0)
            ? ( kernel->negative_range += kernel->values[i] )
            : ( kernel->positive_range += kernel->values[i] );
        Minimize(kernel->minimum, kernel->values[i]);
        Maximize(kernel->maximum, kernel->values[i]);
      }
  }

  /* sanity check -- no more values in kernel definition */
  GetMagickToken(p,&p,token);
  if ( *token != '\0' && *token != ';' && *token != '\'' )
    return(DestroyKernelInfo(kernel));

#if 0
  /* this was the old method of handling a incomplete kernel */
  if ( i < (ssize_t) (kernel->width*kernel->height) ) {
    Minimize(kernel->minimum, kernel->values[i]);
    Maximize(kernel->maximum, kernel->values[i]);
    for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
      kernel->values[i]=0.0;
  }
#else
  /* Number of values for kernel was not enough - Report Error */
  if ( i < (ssize_t) (kernel->width*kernel->height) )
    return(DestroyKernelInfo(kernel));
#endif

  /* check that we received at least one real (non-nan) value! */
  if ( kernel->minimum == MagickHuge )
    return(DestroyKernelInfo(kernel));

  /* expand a single kernel into a rotated kernel list when requested */
  if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel size */
    ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
  else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
    ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
  else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
    ExpandMirrorKernelInfo(kernel);       /* 90 degree mirror rotate */

  return(kernel);
}

/* Parse a 'named' built-in kernel specification ("name:args"), filling in
** per-kernel-type default arguments before delegating to
** AcquireKernelBuiltIn().  Returns NULL when the name is not a valid
** built-in kernel.
*/
static KernelInfo *ParseKernelName(const char *kernel_string)
{
  char
    token[MaxTextExtent];

  const char
    *p,
    *end;

  GeometryInfo
    args;

  KernelInfo
    *kernel;

  MagickStatusType
    flags;

  ssize_t
    type;

  /* Parse special 'named' kernel */
  GetMagickToken(kernel_string,&p,token);
  type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
  if ( type < 0 || type == UserDefinedKernel )
    return((KernelInfo *)NULL);  /* not a valid named kernel */

  /* skip separators between the name and its arguments */
  while (((isspace((int) ((unsigned char) *p)) != 0) ||
          (*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
    p++;

  end = strchr(p, ';'); /* end of this kernel definition */
  if ( end == (char *) NULL )
    end = strchr(p, '\0');

  /* ParseGeometry() needs the geometry separated! -- Arrgghh */
  memcpy(token, p, (size_t) (end-p));
  token[end-p] = '\0';
  SetGeometryInfo(&args);
  flags = ParseGeometry(token, &args);

#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
    flags, args.rho, args.sigma, args.xi, args.psi );
#endif

  /* special handling of missing values in input string */
  switch( type ) {
    /* Shape Kernel Defaults */
    case UnityKernel:
      if ( (flags & WidthValue) == 0 )
        args.rho = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case SquareKernel:
    case DiamondKernel:
    case OctagonKernel:
    case DiskKernel:
    case PlusKernel:
    case CrossKernel:
      if ( (flags & HeightValue) == 0 )
        args.sigma = 1.0;    /* Default scale = 1.0, zero is valid */
      break;
    case RingKernel:
      if ( (flags & XValue) == 0 )
        args.xi = 1.0;       /* Default scale = 1.0, zero is valid */
      break;
    case RectangleKernel:    /* Rectangle - set size defaults */
      if ( (flags & WidthValue) == 0 ) /* if no width then */
        args.rho = args.sigma;         /* then  width = height */
      if ( args.rho < 1.0 )            /* if width too small */
        args.rho = 3;                  /* then  width = 3 */
      if ( args.sigma < 1.0 )          /* if height too small */
        args.sigma = args.rho;         /* then  height = width */
      if ( (flags & XValue) == 0 )     /* center offset if not defined */
        args.xi = (double)(((ssize_t)args.rho-1)/2);
      if ( (flags & YValue) == 0 )
        args.psi = (double)(((ssize_t)args.sigma-1)/2);
      break;
    /* Distance Kernel Defaults */
    case ChebyshevKernel:
    case ManhattanKernel:
    case OctagonalKernel:
    case EuclideanKernel:
      if ( (flags & HeightValue) == 0 )           /* no distance scale */
        args.sigma = 100.0;                       /* default distance scaling */
      else if ( (flags & AspectValue ) != 0 )     /* '!' flag */
        args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
      else if ( (flags & PercentValue ) != 0 )    /* '%' flag */
        args.sigma *= QuantumRange/100.0;         /* percentage of color range */
      break;
    default:
      break;
  }

  kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args);
  if ( kernel == (KernelInfo *) NULL )
    return(kernel);

  /* global expand to rotated kernel list - only for single kernels */
  if ( kernel->next == (KernelInfo *) NULL ) {
    if ( (flags & AreaValue) != 0 )         /* '@' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 45.0);
    else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
      ExpandRotateKernelInfo(kernel, 90.0);
    else if ( (flags & LessValue) != 0 )    /* '<' symbol in kernel args */
      ExpandMirrorKernelInfo(kernel);
  }

  return(kernel);
}

/* Parse a (possibly ';'-separated) list of kernel specifications into a
** linked list of KernelInfo structures; see the documentation block above.
** Returns NULL (after freeing any partial list) when any member fails to
** parse.
*/
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string)
{
  KernelInfo
    *kernel,
    *new_kernel;

  char
    token[MaxTextExtent];

  const char
    *p;

  size_t
    kernel_number;

  /* NULL is forwarded on purpose: ParseKernelArray() returns an empty,
     zeroed kernel for a NULL string */
  if (kernel_string == (const char *) NULL)
    return(ParseKernelArray(kernel_string));
  p = kernel_string;
  kernel = NULL;
  kernel_number = 0;

  while ( GetMagickToken(p,NULL,token), *token != '\0' ) {

    /* ignore extra or multiple ';' kernel separators */
    if ( *token != ';' ) {

      /* tokens starting with alpha is a Named kernel */
      if (isalpha((int) *token) != 0)
        new_kernel = ParseKernelName(p);
      else /* otherwise a user defined kernel array */
        new_kernel = ParseKernelArray(p);

      /* Error handling -- this is not proper error handling! */
      if ( new_kernel == (KernelInfo *) NULL ) {
        (void) FormatLocaleFile(stderr, "Failed to parse kernel number #%.20g\n",
          (double) kernel_number);
        if ( kernel != (KernelInfo *) NULL )
          kernel=DestroyKernelInfo(kernel);
        return((KernelInfo *) NULL);
      }

      /* initialise or append the kernel list */
      if ( kernel == (KernelInfo *) NULL )
        kernel = new_kernel;
      else
        LastKernelInfo(kernel)->next = new_kernel;
    }

    /* look for the next kernel in list */
    p = strchr(p, ';');
    if ( p == (char *) NULL )
      break;
    p++;
  }
  return(kernel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A c q u i r e K e r n e l B u i l t I n                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireKernelBuiltIn() returned one of the 'named' built-in types of
%  kernels used for special purposes such as gaussian blurring, skeleton
%  pruning, and edge distance determination.
%
%  They take a KernelType, and a set of geometry style arguments, which were
%  typically decoded from a user supplied string, or from a more complex
%  Morphology Method that was requested.
%
%  The format of the AcquireKernalBuiltIn method is:
%
%      KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
%           const GeometryInfo args)
%
%  A description of each parameter follows:
%
%    o type: the pre-defined type of kernel wanted
%
%    o args: arguments defining or modifying the kernel
%
%  Convolution Kernels
%
%    Unity
%       The a No-Op or Scaling single element kernel.
%
%    Gaussian:{radius},{sigma}
%       Generate a two-dimensional gaussian kernel, as used by -gaussian.
%       The sigma for the curve is required.  The resulting kernel is
%       normalized,
%
%       If 'sigma' is zero, you get a single pixel on a field of zeros.
%
%       NOTE: that the 'radius' is optional, but if provided can limit (clip)
%       the final size of the resulting kernel to a square 2*radius+1 in size.
% The radius should be at least 2 times that of the sigma value, or % sever clipping and aliasing may result. If not given or set to 0 the % radius will be determined so as to produce the best minimal error % result, which is usally much larger than is normally needed. % % LoG:{radius},{sigma} % "Laplacian of a Gaussian" or "Mexician Hat" Kernel. % The supposed ideal edge detection, zero-summing kernel. % % An alturnative to this kernel is to use a "DoG" with a sigma ratio of % approx 1.6 (according to wikipedia). % % DoG:{radius},{sigma1},{sigma2} % "Difference of Gaussians" Kernel. % As "Gaussian" but with a gaussian produced by 'sigma2' subtracted % from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1. % The result is a zero-summing kernel. % % Blur:{radius},{sigma}[,{angle}] % Generates a 1 dimensional or linear gaussian blur, at the angle given % (current restricted to orthogonal angles). If a 'radius' is given the % kernel is clipped to a width of 2*radius+1. Kernel can be rotated % by a 90 degree angle. % % If 'sigma' is zero, you get a single pixel on a field of zeros. % % Note that two convolutions with two "Blur" kernels perpendicular to % each other, is equivalent to a far larger "Gaussian" kernel with the % same sigma value, However it is much faster to apply. This is how the % "-blur" operator actually works. % % Comet:{width},{sigma},{angle} % Blur in one direction only, much like how a bright object leaves % a comet like trail. The Kernel is actually half a gaussian curve, % Adding two such blurs in opposite directions produces a Blur Kernel. % Angle can be rotated in multiples of 90 degrees. % % Note that the first argument is the width of the kernel and not the % radius of the kernel. % % Binomial:[{radius}] % Generate a discrete kernel using a 2 dimentional Pascel's Triangle % of values. Used for special forma of image filters % % # Still to be implemented... 
% # % # Filter2D % # Filter1D % # Set kernel values using a resize filter, and given scale (sigma) % # Cylindrical or Linear. Is this possible with an image? % # % % Named Constant Convolution Kernels % % All these are unscaled, zero-summing kernels by default. As such for % non-HDRI version of ImageMagick some form of normalization, user scaling, % and biasing the results is recommended, to prevent the resulting image % being 'clipped'. % % The 3x3 kernels (most of these) can be circularly rotated in multiples of % 45 degrees to generate the 8 angled varients of each of the kernels. % % Laplacian:{type} % Discrete Lapacian Kernels, (without normalization) % Type 0 : 3x3 with center:8 surounded by -1 (8 neighbourhood) % Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood) % Type 2 : 3x3 with center:4 edge:1 corner:-2 % Type 3 : 3x3 with center:4 edge:-2 corner:1 % Type 5 : 5x5 laplacian % Type 7 : 7x7 laplacian % Type 15 : 5x5 LoG (sigma approx 1.4) % Type 19 : 9x9 LoG (sigma approx 1.4) % % Sobel:{angle} % Sobel 'Edge' convolution kernel (3x3) % | -1, 0, 1 | % | -2, 0,-2 | % | -1, 0, 1 | % % Roberts:{angle} % Roberts convolution kernel (3x3) % | 0, 0, 0 | % | -1, 1, 0 | % | 0, 0, 0 | % % Prewitt:{angle} % Prewitt Edge convolution kernel (3x3) % | -1, 0, 1 | % | -1, 0, 1 | % | -1, 0, 1 | % % Compass:{angle} % Prewitt's "Compass" convolution kernel (3x3) % | -1, 1, 1 | % | -1,-2, 1 | % | -1, 1, 1 | % % Kirsch:{angle} % Kirsch's "Compass" convolution kernel (3x3) % | -3,-3, 5 | % | -3, 0, 5 | % | -3,-3, 5 | % % FreiChen:{angle} % Frei-Chen Edge Detector is based on a kernel that is similar to % the Sobel Kernel, but is designed to be isotropic. That is it takes % into account the distance of the diagonal in the kernel. % % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | % | 1, 0, -1 | % % FreiChen:{type},{angle} % % Frei-Chen Pre-weighted kernels... % % Type 0: default un-nomalized version shown above. 
% % Type 1: Orthogonal Kernel (same as type 11 below) % | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 2: Diagonal form of Kernel... % | 1, sqrt(2), 0 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 0, -sqrt(2) -1 | % % However this kernel is als at the heart of the FreiChen Edge Detection % Process which uses a set of 9 specially weighted kernel. These 9 % kernels not be normalized, but directly applied to the image. The % results is then added together, to produce the intensity of an edge in % a specific direction. The square root of the pixel value can then be % taken as the cosine of the edge, and at least 2 such runs at 90 degrees % from each other, both the direction and the strength of the edge can be % determined. % % Type 10: All 9 of the following pre-weighted kernels... % % Type 11: | 1, 0, -1 | % | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2) % | 1, 0, -1 | % % Type 12: | 1, sqrt(2), 1 | % | 0, 0, 0 | / 2*sqrt(2) % | 1, sqrt(2), 1 | % % Type 13: | sqrt(2), -1, 0 | % | -1, 0, 1 | / 2*sqrt(2) % | 0, 1, -sqrt(2) | % % Type 14: | 0, 1, -sqrt(2) | % | -1, 0, 1 | / 2*sqrt(2) % | sqrt(2), -1, 0 | % % Type 15: | 0, -1, 0 | % | 1, 0, 1 | / 2 % | 0, -1, 0 | % % Type 16: | 1, 0, -1 | % | 0, 0, 0 | / 2 % | -1, 0, 1 | % % Type 17: | 1, -2, 1 | % | -2, 4, -2 | / 6 % | -1, -2, 1 | % % Type 18: | -2, 1, -2 | % | 1, 4, 1 | / 6 % | -2, 1, -2 | % % Type 19: | 1, 1, 1 | % | 1, 1, 1 | / 3 % | 1, 1, 1 | % % The first 4 are for edge detection, the next 4 are for line detection % and the last is to add a average component to the results. % % Using a special type of '-1' will return all 9 pre-weighted kernels % as a multi-kernel list, so that you can use them directly (without % normalization) with the special "-set option:morphology:compose Plus" % setting to apply the full FreiChen Edge Detection Technique. % % If 'type' is large it will be taken to be an actual rotation angle for % the default FreiChen (type 0) kernel. 
As such FreiChen:45 will look % like a Sobel:45 but with 'sqrt(2)' instead of '2' values. % % WARNING: The above was layed out as per % http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf % But rotated 90 degrees so direction is from left rather than the top. % I have yet to find any secondary confirmation of the above. The only % other source found was actual source code at % http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf % Neigher paper defineds the kernels in a way that looks locical or % correct when taken as a whole. % % Boolean Kernels % % Diamond:[{radius}[,{scale}]] % Generate a diamond shaped kernel with given radius to the points. % Kernel size will again be radius*2+1 square and defaults to radius 1, % generating a 3x3 kernel that is slightly larger than a square. % % Square:[{radius}[,{scale}]] % Generate a square shaped kernel of size radius*2+1, and defaulting % to a 3x3 (radius 1). % % Octagon:[{radius}[,{scale}]] % Generate octagonal shaped kernel of given radius and constant scale. % Default radius is 3 producing a 7x7 kernel. A radius of 1 will result % in "Diamond" kernel. % % Disk:[{radius}[,{scale}]] % Generate a binary disk, thresholded at the radius given, the radius % may be a float-point value. Final Kernel size is floor(radius)*2+1 % square. A radius of 5.3 is the default. % % NOTE: That a low radii Disk kernels produce the same results as % many of the previously defined kernels, but differ greatly at larger % radii. Here is a table of equivalences... % "Disk:1" => "Diamond", "Octagon:1", or "Cross:1" % "Disk:1.5" => "Square" % "Disk:2" => "Diamond:2" % "Disk:2.5" => "Octagon" % "Disk:2.9" => "Square:2" % "Disk:3.5" => "Octagon:3" % "Disk:4.5" => "Octagon:4" % "Disk:5.4" => "Octagon:5" % "Disk:6.4" => "Octagon:6" % All other Disk shapes are unique to this kernel, but because a "Disk" % is more circular when using a larger radius, using a larger radius is % preferred over iterating the morphological operation. 
% % Rectangle:{geometry} % Simply generate a rectangle of 1's with the size given. You can also % specify the location of the 'control point', otherwise the closest % pixel to the center of the rectangle is selected. % % Properly centered and odd sized rectangles work the best. % % Symbol Dilation Kernels % % These kernels are not good general morphological kernels, but are used % more for highlighting and marking any single pixels in an image using % a "Dilate" method as appropriate. % % For the same reasons iterating these kernels does not produce the % same result as using a larger radius for the symbol. % % Plus:[{radius}[,{scale}]] % Cross:[{radius}[,{scale}]] % Generate a kernel in the shape of a 'plus' or a 'cross' with % each arm the length of the given radius (default 2). % % NOTE: "plus:1" is equivalent to a "Diamond" kernel. % % Ring:{radius1},{radius2}[,{scale}] % A ring of the values given that falls between the two radii. % Defaults to a ring of approximately 3 radius in a 7x7 kernel. % This is the 'edge' pixels of the default "Disk" kernel. % More specifically, "Ring" -> "Ring:2.5,3.5,1.0" % % Hit and Miss Kernels % % Peak:radius1,radius2 % Find any peak larger than the pixels that fall between the two radii. % The default ring of pixels is as per "Ring". 
% Edges % Find flat orthogonal edges of a binary shape % Corners % Find 90 degree corners of a binary shape % Diagonals:type % A special kernel to thin the 'outside' of diagonals % LineEnds:type % Find end points of lines (for pruning a skeleton) % Two types of line ends (default to both) can be searched for % Type 0: All line ends % Type 1: single kernel for 4-connected line ends % Type 2: single kernel for simple line ends % LineJunctions % Find three line junctions (within a skeleton) % Type 0: all line junctions % Type 1: Y Junction kernel % Type 2: Diagonal T Junction kernel % Type 3: Orthogonal T Junction kernel % Type 4: Diagonal X Junction kernel % Type 5: Orthogonal + Junction kernel % Ridges:type % Find single pixel ridges or thin lines % Type 1: Find single pixel thick lines and ridges % Type 2: Find two pixel thick lines and ridges % ConvexHull % Octagonal Thickening Kernel, to generate convex hulls of 45 degrees % Skeleton:type % Traditional skeleton generating kernels. % Type 1: Traditional Skeleton kernel (4 connected skeleton) % Type 2: HIPR2 Skeleton kernel (8 connected skeleton) % Type 3: Thinning skeleton based on a research paper by % Dan S. Bloomberg (Default Type) % ThinSE:type % A huge variety of Thinning Kernels designed to preserve connectivity. % Many other kernel sets use these kernels as source definitions. % Type numbers are 41-49, 81-89, 481, and 482 which are based on % the super and sub notations used in the source research paper. % % Distance Measuring Kernels % % Different types of distance measuring methods, which are used with % a 'Distance' morphology method for generating a gradient based on % distance from an edge of a binary shape, though there is a technique % for handling an anti-aliased shape. % % See the 'Distance' Morphological Method, for information on how it is % applied. 
% % Chebyshev:[{radius}][x{scale}[%!]] % Chebyshev Distance (also known as Tchebychev or Chessboard distance) % is a value of one to any neighbour, orthogonal or diagonal. One way % of thinking of it is the number of squares a 'King' or 'Queen' in % chess needs to traverse to reach any other position on a chess board. % It results in a 'square' like distance function, but one where % diagonals are given a value that is closer than expected. % % Manhattan:[{radius}][x{scale}[%!]] % Manhattan Distance (also known as Rectilinear, City Block, or the Taxi % Cab distance metric), it is the distance needed when you can only % travel in horizontal or vertical directions. It is the % distance a 'Rook' in chess would have to travel, and results in a % diamond like distance, where diagonals are further than expected. % % Octagonal:[{radius}][x{scale}[%!]] % An interleaving of Manhattan and Chebyshev metrics producing an % increasing octagonally shaped distance. Distances match those of % the "Octagon" shaped kernel of the same radius. The minimum radius % and default is 2, producing a 5x5 kernel. % % Euclidean:[{radius}][x{scale}[%!]] % Euclidean distance is the 'direct' or 'as the crow flies' distance. % However by default the kernel size only has a radius of 1, which % limits the distance to 'Knight' like moves, with only orthogonal and % diagonal measurements being correct. As such for the default kernel % you will get an octagonal like distance function. % % However using a larger radius such as "Euclidean:4" you will get a % much smoother distance gradient from the edge of the shape. Especially % if the image is pre-processed to include any anti-aliasing pixels. % Of course a larger kernel is slower to use, and not always needed. % % The first three Distance Measuring Kernels will only generate distances % of exact multiples of {scale} in binary images. As such you can use a % scale of 1 without losing any information. 
However you also need some % scaling when handling non-binary anti-aliased shapes. % % The "Euclidean" Distance Kernel however does generate a non-integer % fractional results, and as such scaling is vital even for binary shapes. % */ MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type, const GeometryInfo *args) { KernelInfo *kernel; register ssize_t i; register ssize_t u, v; double nan = sqrt((double)-1.0); /* Special Value : Not A Number */ /* Generate a new empty kernel if needed */ kernel=(KernelInfo *) NULL; switch(type) { case UndefinedKernel: /* These should not call this function */ case UserDefinedKernel: assert("Should not call this function" != (char *)NULL); break; case LaplacianKernel: /* Named Descrete Convolution Kernels */ case SobelKernel: /* these are defined using other kernels */ case RobertsKernel: case PrewittKernel: case CompassKernel: case KirschKernel: case FreiChenKernel: case EdgesKernel: /* Hit and Miss kernels */ case CornersKernel: case DiagonalsKernel: case LineEndsKernel: case LineJunctionsKernel: case RidgesKernel: case ConvexHullKernel: case SkeletonKernel: case ThinSEKernel: break; /* A pre-generated kernel is not needed */ #if 0 /* set to 1 to do a compile-time check that we haven't missed anything */ case UnityKernel: case GaussianKernel: case DoGKernel: case LoGKernel: case BlurKernel: case CometKernel: case BinomialKernel: case DiamondKernel: case SquareKernel: case RectangleKernel: case OctagonKernel: case DiskKernel: case PlusKernel: case CrossKernel: case RingKernel: case PeaksKernel: case ChebyshevKernel: case ManhattanKernel: case OctangonalKernel: case EuclideanKernel: #else default: #endif /* Generate the base Kernel Structure */ kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel)); if (kernel == (KernelInfo *) NULL) return(kernel); (void) ResetMagickMemory(kernel,0,sizeof(*kernel)); kernel->minimum = kernel->maximum = kernel->angle = 0.0; kernel->negative_range = kernel->positive_range = 0.0; 
kernel->type = type; kernel->next = (KernelInfo *) NULL; kernel->signature = MagickSignature; break; } switch(type) { /* Convolution Kernels */ case UnityKernel: { kernel->height = kernel->width = (size_t) 1; kernel->x = kernel->y = (ssize_t) 0; kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(1, sizeof(*kernel->values))); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); kernel->maximum = kernel->values[0] = args->rho; break; } break; case GaussianKernel: case DoGKernel: case LoGKernel: { double sigma = fabs(args->sigma), sigma2 = fabs(args->xi), A, B, R; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else if ( (type != DoGKernel) || (sigma >= sigma2) ) kernel->width = GetOptimalKernelWidth2D(args->rho,sigma); else kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2); kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory( kernel->width,kernel->height*sizeof(*kernel->values))); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* WARNING: The following generates a 'sampled gaussian' kernel. * What we really want is a 'discrete gaussian' kernel. 
* * How to do this is I don't know, but appears to be basied on the * Error Function 'erf()' (intergral of a gaussian) */ if ( type == GaussianKernel || type == DoGKernel ) { /* Calculate a Gaussian, OR positive half of a DoG */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ { (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } if ( type == DoGKernel ) { /* Subtract a Negative Gaussian for "Difference of Gaussian" */ if ( sigma2 > MagickEpsilon ) { sigma = sigma2; /* simplify loop expressions */ A = 1.0/(2.0*sigma*sigma); B = (double) (1.0/(Magick2PI*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B; } else /* limiting case - a unity (normalized Dirac) kernel */ kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0; } if ( type == LoGKernel ) { /* Calculate a Laplacian of a Gaussian - Or Mexician Hat */ if ( sigma > MagickEpsilon ) { A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { R = ((double)(u*u+v*v))*A; kernel->values[i] = (1-R)*exp(-R)*B; } } else /* special case - generate a unity kernel */ { (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } } /* Note the above kernels may have been 'clipped' by a user defined ** radius, 
producing a smaller (darker) kernel. Also for very small ** sigma's (> 0.1) the central value becomes larger than one, and thus ** producing a very bright kernel. ** ** Normalization will still be needed. */ /* Normalize the 2D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. */ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); break; } case BlurKernel: { double sigma = fabs(args->sigma), alpha, beta; if ( args->rho >= 1.0 ) kernel->width = (size_t)args->rho*2+1; else kernel->width = GetOptimalKernelWidth1D(args->rho,sigma); kernel->height = 1; kernel->x = (ssize_t) (kernel->width-1)/2; kernel->y = 0; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); #if 1 #define KernelRank 3 /* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix). ** It generates a gaussian 3 times the width, and compresses it into ** the expected range. This produces a closer normalization of the ** resulting kernel, especially for very low sigma values. ** As such while wierd it is prefered. ** ** I am told this method originally came from Photoshop. ** ** A properly normalized curve is generated (apart from edge clipping) ** even though we later normalize the result (for edge clipping) ** to allow the correct generation of a "Difference of Blurs". 
*/ /* initialize */ v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */ (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); /* Calculate a Positive 1D Gaussian */ if ( sigma > MagickEpsilon ) { sigma *= KernelRank; /* simplify loop expressions */ alpha = 1.0/(2.0*sigma*sigma); beta= (double) (1.0/(MagickSQ2PI*sigma )); for ( u=-v; u <= v; u++) { kernel->values[(u+v)/KernelRank] += exp(-((double)(u*u))*alpha)*beta; } } else /* special case - generate a unity kernel */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; #else /* Direct calculation without curve averaging This is equivelent to a KernelRank of 1 */ /* Calculate a Positive Gaussian */ if ( sigma > MagickEpsilon ) { alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */ beta = 1.0/(MagickSQ2PI*sigma); for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = exp(-((double)(u*u))*alpha)*beta; } else /* special case - generate a unity kernel */ { (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; } #endif /* Note the above kernel may have been 'clipped' by a user defined ** radius, producing a smaller (darker) kernel. Also for very small ** sigma's (< 0.1) the central value becomes larger than one, as a ** result of not generating a actual 'discrete' kernel, and thus ** producing a very bright 'impulse'. ** ** Becuase of these two factors Normalization is required! */ /* Normalize the 1D Gaussian Kernel ** ** NB: a CorrelateNormalize performs a normal Normalize if ** there are no negative values. 
*/ CalcKernelMetaData(kernel); /* the other kernel meta-data */ ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue); /* rotate the 1D kernel by given angle */ RotateKernelInfo(kernel, args->xi ); break; } case CometKernel: { double sigma = fabs(args->sigma), A; if ( args->rho < 1.0 ) kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1; else kernel->width = (size_t)args->rho; kernel->x = kernel->y = 0; kernel->height = 1; kernel->negative_range = kernel->positive_range = 0.0; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* A comet blur is half a 1D gaussian curve, so that the object is ** blurred in one direction only. This may not be quite the right ** curve to use so may change in the future. The function must be ** normalised after generation, which also resolves any clipping. ** ** As we are normalizing and not subtracting gaussians, ** there is no need for a divisor in the gaussian formula ** ** It is less comples */ if ( sigma > MagickEpsilon ) { #if 1 #define KernelRank 3 v = (ssize_t) kernel->width*KernelRank; /* start/end points */ (void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*sizeof(*kernel->values)); sigma *= KernelRank; /* simplify the loop expression */ A = 1.0/(2.0*sigma*sigma); /* B = 1.0/(MagickSQ2PI*sigma); */ for ( u=0; u < v; u++) { kernel->values[u/KernelRank] += exp(-((double)(u*u))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ } for (i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i]; #else A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */ /* B = 1.0/(MagickSQ2PI*sigma); */ for ( i=0; i < (ssize_t) kernel->width; i++) kernel->positive_range += kernel->values[i] = exp(-((double)(i*i))*A); /* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */ #endif } else /* special case - generate a unity kernel */ { 
(void) ResetMagickMemory(kernel->values,0, (size_t) kernel->width*kernel->height*sizeof(*kernel->values)); kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; } kernel->minimum = 0.0; kernel->maximum = kernel->values[0]; kernel->negative_range = 0.0; ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */ RotateKernelInfo(kernel, args->xi); /* Rotate by angle */ break; } case BinomialKernel: { size_t order_f; if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; order_f = fact(kernel->width-1); kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=0; v < (ssize_t)kernel->height; v++) { size_t alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) ); for ( u=0; u < (ssize_t)kernel->width; u++, i++) kernel->positive_range += kernel->values[i] = (double) (alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) )); } kernel->minimum = 1.0; kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width]; kernel->negative_range = 0.0; break; } /* Convolution Kernels - Well Known Named Constant Kernels */ case LaplacianKernel: { switch ( (int) args->rho ) { case 0: default: /* laplacian square filter -- default */ kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1"); break; case 1: /* laplacian diamond filter */ kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0"); break; case 2: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); break; case 3: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1"); break; case 5: /* a 5x5 laplacian */ kernel=ParseKernelArray( "5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4"); break; case 7: /* a 7x7 laplacian */ 
kernel=ParseKernelArray( "7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" ); break; case 15: /* a 5x5 LoG (sigma approx 1.4) */ kernel=ParseKernelArray( "5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0"); break; case 19: /* a 9x9 LoG (sigma approx 1.4) */ /* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */ kernel=ParseKernelArray( "9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; break; } case SobelKernel: { /* Simple Sobel Kernel */ kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case RobertsKernel: { kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case PrewittKernel: { kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case CompassKernel: { kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case KirschKernel: { kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->rho); break; } case FreiChenKernel: /* Direction is set to be left to right positive */ /* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? 
*/ /* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */ { switch ( (int) args->rho ) { default: case 0: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +MagickSQ2; kernel->values[5] = -MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ break; case 2: kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = kernel->values[3]= +MagickSQ2; kernel->values[5] = kernel->values[7]= -MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 10: kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19"); if (kernel == (KernelInfo *) NULL) return(kernel); break; case 1: case 11: kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[3] = +MagickSQ2; kernel->values[5] = -MagickSQ2; CalcKernelMetaData(kernel); /* recalculate meta-data */ ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 12: kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[1] = +MagickSQ2; kernel->values[7] = +MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 13: kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->values[0] = +MagickSQ2; kernel->values[8] = -MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 14: kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; 
kernel->values[2] = -MagickSQ2; kernel->values[6] = +MagickSQ2; CalcKernelMetaData(kernel); ScaleKernelInfo(kernel, (double) (1.0/2.0*MagickSQ2), NoValue); break; case 15: kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 16: kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/2.0, NoValue); break; case 17: kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 18: kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/6.0, NoValue); break; case 19: kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ScaleKernelInfo(kernel, 1.0/3.0, NoValue); break; } if ( fabs(args->sigma) >= MagickEpsilon ) /* Rotate by correctly supplied 'angle' */ RotateKernelInfo(kernel, args->sigma); else if ( args->rho > 30.0 || args->rho < -30.0 ) /* Rotate by out of bounds 'type' */ RotateKernelInfo(kernel, args->rho); break; } /* Boolean or Shaped Kernels */ case DiamondKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values within diamond area to scale given */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x) 
kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case SquareKernel: case RectangleKernel: { double scale; if ( type == SquareKernel ) { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = (size_t) (2*args->rho+1); kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; scale = args->sigma; } else { /* NOTE: user defaults set in "AcquireKernelInfo()" */ if ( args->rho < 1.0 || args->sigma < 1.0 ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->width = (size_t)args->rho; kernel->height = (size_t)args->sigma; if ( args->xi < 0.0 || args->xi > (double)kernel->width || args->psi < 0.0 || args->psi > (double)kernel->height ) return(DestroyKernelInfo(kernel)); /* invalid args given */ kernel->x = (ssize_t) args->xi; kernel->y = (ssize_t) args->psi; scale = 1.0; } kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values to scale given */ u=(ssize_t) (kernel->width*kernel->height); for ( i=0; i < u; i++) kernel->values[i] = scale; kernel->minimum = kernel->maximum = scale; /* a flat shape */ kernel->positive_range = scale*u; break; } case OctagonKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius = 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ( (labs((long) u)+labs((long) v)) <= ((long)kernel->x + (long)(kernel->x/2)) ) 
kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case DiskKernel: { ssize_t limit = (ssize_t)(args->rho*args->rho); if (args->rho < 0.4) /* default radius approx 4.3 */ kernel->width = kernel->height = 9L, limit = 18L; else kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) if ((u*u+v*v) <= limit) kernel->positive_range += kernel->values[i] = args->sigma; else kernel->values[i] = nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ break; } case PlusKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == 0 || v == 0) ? 
args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } case CrossKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 5; /* default radius 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set all kernel values along axises to given scale */ for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->values[i] = (u == v || u == -v) ? args->sigma : nan; kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */ kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0); break; } /* HitAndMiss Kernels */ case RingKernel: case PeaksKernel: { ssize_t limit1, limit2, scale; if (args->rho < args->sigma) { kernel->width = ((size_t)args->sigma)*2+1; limit1 = (ssize_t)(args->rho*args->rho); limit2 = (ssize_t)(args->sigma*args->sigma); } else { kernel->width = ((size_t)args->rho)*2+1; limit1 = (ssize_t)(args->sigma*args->sigma); limit2 = (ssize_t)(args->rho*args->rho); } if ( limit2 <= 0 ) kernel->width = 7L, limit1 = 7L, limit2 = 11L; kernel->height = kernel->width; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); /* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */ scale = (ssize_t) (( type == PeaksKernel) ? 
0.0 : args->xi); for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { ssize_t radius=u*u+v*v; if (limit1 < radius && radius <= limit2) kernel->positive_range += kernel->values[i] = (double) scale; else kernel->values[i] = nan; } kernel->minimum = kernel->maximum = (double) scale; if ( type == PeaksKernel ) { /* set the central point in the middle */ kernel->values[kernel->x+kernel->y*kernel->width] = 1.0; kernel->positive_range = 1.0; kernel->maximum = 1.0; } break; } case EdgesKernel: { kernel=AcquireKernelInfo("ThinSE:482"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */ break; } case CornersKernel: { kernel=AcquireKernelInfo("ThinSE:87"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */ break; } case DiagonalsKernel: { switch ( (int) args->rho ) { case 0: default: { KernelInfo *new_kernel; kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; ExpandMirrorKernelInfo(kernel); return(kernel); } case 1: kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-"); break; case 2: kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineEndsKernel: { /* Kernels for finding the end of thin lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all end of lines */ return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>")); case 1: /* kernel for 4-connected line ends - no rotation */ kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-"); break; 
case 2: /* kernel to add for 8-connected lines - no rotation */ kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1"); break; case 3: /* kernel to add for orthogonal line ends - does not find corners */ kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0"); break; case 4: /* traditional line end - fails on last T end */ kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case LineJunctionsKernel: { /* kernels for finding the junctions of multiple lines */ switch ( (int) args->rho ) { case 0: default: /* set of kernels to find all line junctions */ return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>")); case 1: /* Y Junction */ kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-"); break; case 2: /* Diagonal T Junctions */ kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1"); break; case 3: /* Orthogonal T Junctions */ kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-"); break; case 4: /* Diagonal X Junctions */ kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1"); break; case 5: /* Orthogonal X Junctions - minimal diamond kernel */ kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-"); break; } if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } case RidgesKernel: { /* Ridges - Ridge finding kernels */ KernelInfo *new_kernel; switch ( (int) args->rho ) { case 1: default: kernel=ParseKernelArray("3x1:0,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */ break; case 2: kernel=ParseKernelArray("4x1:0,1,1,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */ /* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */ /* Unfortunatally we can not yet rotate a non-square kernel */ /* But then we can't flip a 
non-symetrical kernel either */ new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; LastKernelInfo(kernel)->next = new_kernel; break; } break; } case ConvexHullKernel: { KernelInfo *new_kernel; /* first set of 8 kernels */ kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* append the mirror versions too - no flip function yet */ 
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0"); if (new_kernel == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); new_kernel->type = type; ExpandRotateKernelInfo(new_kernel, 90.0); LastKernelInfo(kernel)->next = new_kernel; break; } case SkeletonKernel: { switch ( (int) args->rho ) { case 1: default: /* Traditional Skeleton... ** A cyclically rotated single kernel */ kernel=AcquireKernelInfo("ThinSE:482"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */ break; case 2: /* HIPR Variation of the cyclic skeleton ** Corners of the traditional method made more forgiving, ** but the retain the same cyclic order. */ kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;"); if (kernel == (KernelInfo *) NULL) return(kernel); if (kernel->next == (KernelInfo *) NULL) return(DestroyKernelInfo(kernel)); kernel->type = type; kernel->next->type = type; ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */ break; case 3: /* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf */ kernel=AcquireKernelInfo( "ThinSE:41; ThinSE:42; ThinSE:43"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; kernel->next->type = type; kernel->next->next->type = type; ExpandMirrorKernelInfo(kernel); /* 12 kernels total */ break; } break; } case ThinSEKernel: { /* Special kernels for general thinning, while preserving connections ** "Connectivity-Preserving Morphological Image Thransformations" ** by Dan S. Bloomberg, available on Leptonica, Selected Papers, ** http://www.leptonica.com/papers/conn.pdf ** And ** http://tpgit.github.com/Leptonica/ccthin_8c_source.html ** ** Note kernels do not specify the origin pixel, allowing them ** to be used for both thickening and thinning operations. 
*/ switch ( (int) args->rho ) { /* SE for 4-connected thinning */ case 41: /* SE_4_1 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1"); break; case 42: /* SE_4_2 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-"); break; case 43: /* SE_4_3 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1"); break; case 44: /* SE_4_4 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-"); break; case 45: /* SE_4_5 */ kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-"); break; case 46: /* SE_4_6 */ kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1"); break; case 47: /* SE_4_7 */ kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-"); break; case 48: /* SE_4_8 */ kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1"); break; case 49: /* SE_4_9 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1"); break; /* SE for 8-connected thinning - negatives of the above */ case 81: /* SE_8_0 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-"); break; case 82: /* SE_8_2 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-"); break; case 83: /* SE_8_3 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-"); break; case 84: /* SE_8_4 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-"); break; case 85: /* SE_8_5 */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-"); break; case 86: /* SE_8_6 */ kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1"); break; case 87: /* SE_8_7 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-"); break; case 88: /* SE_8_8 */ kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-"); break; case 89: /* SE_8_9 */ kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-"); break; /* Special combined SE kernels */ case 423: /* SE_4_2 , SE_4_3 Combined Kernel */ kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-"); break; case 823: /* SE_8_2 , SE_8_3 Combined Kernel */ kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-"); break; case 481: /* SE_48_1 - General Connected Corner Kernel */ kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-"); break; default: case 482: /* SE_48_2 - General Edge Kernel */ kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1"); break; } 
if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = type; RotateKernelInfo(kernel, args->sigma); break; } /* Distance Measuring Kernels */ case ChebyshevKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*MagickMax(fabs((double)u),fabs((double)v)) ); kernel->maximum = kernel->values[0]; break; } case ManhattanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*(labs((long) u)+labs((long) v)) ); kernel->maximum = kernel->values[0]; break; } case OctagonalKernel: { if (args->rho < 2.0) kernel->width = kernel->height = 5; /* default/minimum radius = 2 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) { double r1 
= MagickMax(fabs((double)u),fabs((double)v)), r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5); kernel->positive_range += kernel->values[i] = args->sigma*MagickMax(r1,r2); } kernel->maximum = kernel->values[0]; break; } case EuclideanKernel: { if (args->rho < 1.0) kernel->width = kernel->height = 3; /* default radius = 1 */ else kernel->width = kernel->height = ((size_t)args->rho)*2+1; kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2; kernel->values=(double *) AcquireAlignedMemory(kernel->width, kernel->height*sizeof(*kernel->values)); if (kernel->values == (double *) NULL) return(DestroyKernelInfo(kernel)); for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++) for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++) kernel->positive_range += ( kernel->values[i] = args->sigma*sqrt((double)(u*u+v*v)) ); kernel->maximum = kernel->values[0]; break; } default: { /* No-Op Kernel - Basically just a single pixel on its own */ kernel=ParseKernelArray("1:1"); if (kernel == (KernelInfo *) NULL) return(kernel); kernel->type = UndefinedKernel; break; } break; } return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneKernelInfo() creates a new clone of the given Kernel List so that its % can be modified without effecting the original. The cloned kernel should % be destroyed using DestoryKernelInfo() when no longer needed. 
%
%  The format of the CloneKernelInfo method is:
%
%      KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to be cloned
%
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
  register ssize_t
    i;

  KernelInfo
    *new_kernel;

  assert(kernel != (KernelInfo *) NULL);
  new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
  if (new_kernel == (KernelInfo *) NULL)
    return(new_kernel);
  *new_kernel=(*kernel);  /* shallow copy of all structure members */
  /*
    Detach the clone from the source list immediately.  The shallow copy
    left new_kernel->next pointing at the SOURCE kernel's successor list;
    if an allocation below fails, DestroyKernelInfo(new_kernel) would
    recursively free that list out from under the caller (use-after-free).
    On success ->next is reassigned below, so this is behavior-preserving.
  */
  new_kernel->next=(KernelInfo *) NULL;
  /* replace the values with a copy of the values */
  new_kernel->values=(double *) AcquireAlignedMemory(kernel->width,
    kernel->height*sizeof(*kernel->values));
  if (new_kernel->values == (double *) NULL)
    return(DestroyKernelInfo(new_kernel));
  for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
    new_kernel->values[i]=kernel->values[i];
  /* Also clone the next kernel in the kernel list */
  if ( kernel->next != (KernelInfo *) NULL ) {
    new_kernel->next = CloneKernelInfo(kernel->next);
    if ( new_kernel->next == (KernelInfo *) NULL )
      return(DestroyKernelInfo(new_kernel));
  }
  return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y K e r n e l I n f o                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyKernelInfo() frees the memory used by a Convolution/Morphology
%  kernel.
% % The format of the DestroyKernelInfo method is: % % KernelInfo *DestroyKernelInfo(KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel to be destroyed % */ MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel) { assert(kernel != (KernelInfo *) NULL); if ( kernel->next != (KernelInfo *) NULL ) kernel->next=DestroyKernelInfo(kernel->next); kernel->values=(double *) RelinquishAlignedMemory(kernel->values); kernel=(KernelInfo *) RelinquishMagickMemory(kernel); return(kernel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + E x p a n d M i r r o r K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExpandMirrorKernelInfo() takes a single kernel, and expands it into a % sequence of 90-degree rotated kernels but providing a reflected 180 % rotatation, before the -/+ 90-degree rotations. % % This special rotation order produces a better, more symetrical thinning of % objects. % % The format of the ExpandMirrorKernelInfo method is: % % void ExpandMirrorKernelInfo(KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % This function is only internel to this module, as it is not finalized, % especially with regard to non-orthogonal angles, and rotation of larger % 2D kernels. */ #if 0 static void FlopKernelInfo(KernelInfo *kernel) { /* Do a Flop by reversing each row. 
*/ size_t y; register ssize_t x,r; register double *k,t; for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width) for ( x=0, r=kernel->width-1; x<kernel->width/2; x++, r--) t=k[x], k[x]=k[r], k[r]=t; kernel->x = kernel->width - kernel->x - 1; angle = fmod(angle+180.0, 360.0); } #endif static void ExpandMirrorKernelInfo(KernelInfo *kernel) { KernelInfo *clone, *last; last = kernel; clone = CloneKernelInfo(last); RotateKernelInfo(clone, 180); /* flip */ LastKernelInfo(last)->next = clone; last = clone; clone = CloneKernelInfo(last); RotateKernelInfo(clone, 90); /* transpose */ LastKernelInfo(last)->next = clone; last = clone; clone = CloneKernelInfo(last); RotateKernelInfo(clone, 180); /* flop */ LastKernelInfo(last)->next = clone; return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + E x p a n d R o t a t e K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating % incrementally by the angle given, until the kernel repeats. % % WARNING: 45 degree rotations only works for 3x3 kernels. % While 90 degree roatations only works for linear and square kernels % % The format of the ExpandRotateKernelInfo method is: % % void ExpandRotateKernelInfo(KernelInfo *kernel, double angle) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % % o angle: angle to rotate in degrees % % This function is only internel to this module, as it is not finalized, % especially with regard to non-orthogonal angles, and rotation of larger % 2D kernels. 
*/

/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
     const KernelInfo *kernel2)
{
  register size_t
    i;

  /* check size and origin location */
  if (    kernel1->width != kernel2->width
       || kernel1->height != kernel2->height
       || kernel1->x != kernel2->x
       || kernel1->y != kernel2->y )
    return MagickFalse;

  /* check actual kernel values */
  for (i=0; i < (kernel1->width*kernel1->height); i++) {
    /* Test for Nan equivalence (NaN compares unequal to everything,
       so mismatched NaN positions must be rejected explicitly) */
    if ( IsNaN(kernel1->values[i]) && !IsNaN(kernel2->values[i]) )
      return MagickFalse;
    if ( IsNaN(kernel2->values[i]) && !IsNaN(kernel1->values[i]) )
      return MagickFalse;
    /* Test actual values are equivalent */
    if ( fabs(kernel1->values[i] - kernel2->values[i]) >= MagickEpsilon )
      return MagickFalse;
  }

  return MagickTrue;
}

static void ExpandRotateKernelInfo(KernelInfo *kernel, const double angle)
{
  KernelInfo
    *clone,
    *last;

  last = kernel;
  while(1) {
    clone = CloneKernelInfo(last);
    /*
      CloneKernelInfo() returns NULL on memory exhaustion; the original
      code dereferenced the unchecked result in RotateKernelInfo() and
      would have asserted inside DestroyKernelInfo(NULL) below.  Stop
      expanding instead; the list built so far remains valid.
    */
    if (clone == (KernelInfo *) NULL)
      break;
    RotateKernelInfo(clone, angle);
    if ( SameKernelInfo(kernel, clone) == MagickTrue )
      break;
    LastKernelInfo(last)->next = clone;
    last = clone;
  }
  /* kernel has repeated - junk the clone */
  if (clone != (KernelInfo *) NULL)
    clone = DestroyKernelInfo(clone);
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+     C a l c K e r n e l M e t a D a t a                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
%  only, using the kernel values.  This should only be used if it is not
%  possible to calculate that meta-data in some easier way.
%
%  It is important that the meta-data is correct before ScaleKernelInfo() is
%  used to perform kernel normalization.
%
%  The format of the CalcKernelMetaData method is:
%
%      void CalcKernelMetaData(KernelInfo *kernel, const double scale )
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to modify
%
%  WARNING: Minimum and Maximum values are assumed to include zero, even if
%  zero is not part of the kernel (as in Gaussian Derived kernels).  This
%  however is not true for flat-shaped morphological kernels.
%
%  WARNING: Only the specific kernel pointed to is modified, not a list of
%  multiple kernels.
%
%  This is an internal function and not expected to be useful outside this
%  module.  This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
  register size_t
    n;

  /* Reset all ranges, then accumulate them from the raw kernel values. */
  kernel->minimum = kernel->maximum = 0.0;
  kernel->negative_range = kernel->positive_range = 0.0;
  for (n=0; n < (kernel->width*kernel->height); n++)
    {
      /* snap near-zero values to exactly zero before classifying them */
      if ( fabs(kernel->values[n]) < MagickEpsilon )
        kernel->values[n] = 0.0;
      if ( kernel->values[n] < 0 )
        kernel->negative_range += kernel->values[n];
      else
        kernel->positive_range += kernel->values[n];
      Minimize(kernel->minimum, kernel->values[n]);
      Maximize(kernel->maximum, kernel->values[n]);
    }

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o r p h o l o g y A p p l y                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MorphologyApply() applies a morphological method, multiple times using
%  a list of multiple kernels.  This is the method that should be called by
%  other 'operators' that internally use morphology operations as part of
%  their processing.
%
%  It is basically equivalent to MorphologyImage() (see below) but
%  without any user controls.  This allows internal programs to use this
%  function, to actually perform a specific task without possible interference
%  by any API user supplied settings.
%
%  It is MorphologyImage()'s task to extract any such user controls, and
%  pass them to this function for processing.
%
%  More specifically, all given kernels should already be scaled, normalised,
%  and blended appropriately before being passed to this routine.  The
%  appropriate bias, and compose (typically 'UndefinedComposeOp') given.
%
%  The format of the MorphologyApply method is:
%
%      Image *MorphologyApply(const Image *image,MorphologyMethod method,
%        const ChannelType channel, const ssize_t iterations,
%        const KernelInfo *kernel, const CompositeMethod compose,
%        const double bias, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the source image
%
%    o method: the morphology method to be applied.
%
%    o channel: the channels to which the operations are applied
%               The channel 'sync' flag determines if 'alpha weighting' is
%               applied for convolution style operations.
%
%    o iterations: apply the operation this many times (or no change).
%               A value of -1 means loop until no change found.
%               How this is applied may depend on the morphology method.
%               Typically this is a value of 1.
%
%    o channel: the channel type.
%
%    o kernel: An array of double representing the morphology kernel.
%
%    o compose: How to handle or merge multi-kernel results.
%               If 'UndefinedCompositeOp' use default for the Morphology method.
%               If 'NoCompositeOp' force image to be re-iterated by each kernel.
%               Otherwise merge the results using the compose method given.
%
%    o bias: Convolution Output Bias.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Apply a Morphology Primitive to an image using the given kernel.
** Two pre-created images must be provided, and no image is created.
** It returns the number of pixels that changed between the images
** for result convergence determination.
*/ static ssize_t MorphologyPrimitive(const Image *image, Image *result_image, const MorphologyMethod method, const ChannelType channel, const KernelInfo *kernel,const double bias,ExceptionInfo *exception) { #define MorphologyTag "Morphology/Image" CacheView *p_view, *q_view; register ssize_t i; size_t *changes, changed, virt_width; ssize_t y, offx, offy; MagickBooleanType status; MagickOffsetType progress; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(result_image != (Image *) NULL); assert(result_image->signature == MagickSignature); assert(kernel != (KernelInfo *) NULL); assert(kernel->signature == MagickSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); status=MagickTrue; progress=0; p_view=AcquireVirtualCacheView(image,exception); q_view=AcquireAuthenticCacheView(result_image,exception); virt_width=image->columns+kernel->width-1; /* Some methods (including convolve) needs use a reflected kernel. * Adjust 'origin' offsets to loop though kernel as a reflection. 
*/ offx = kernel->x; offy = kernel->y; switch(method) { case ConvolveMorphology: case DilateMorphology: case DilateIntensityMorphology: case IterativeDistanceMorphology: /* kernel needs to used with reflection about origin */ offx = (ssize_t) kernel->width-offx-1; offy = (ssize_t) kernel->height-offy-1; break; case ErodeMorphology: case ErodeIntensityMorphology: case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: /* kernel is used as is, without reflection */ break; default: assert("Not a Primitive Morphology Method" != (char *) NULL); break; } changed=0; changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(), sizeof(*changes)); if (changes == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++) changes[i]=0; if ( method == ConvolveMorphology && kernel->width == 1 ) { /* Special handling (for speed) of vertical (blur) kernels. ** This performs its handling in columns rather than in rows. 
** This is only done for convolve as it is the only method that ** generates very large 1-D vertical kernels (such as a 'BlurKernel') ** ** Timing tests (on single CPU laptop) ** Using a vertical 1-d Blue with normal row-by-row (below) ** time convert logo: -morphology Convolve Blur:0x10+90 null: ** 0.807u ** Using this column method ** time convert logo: -morphology Convolve Blur:0x10+90 null: ** 0.620u ** ** Anthony Thyssen, 14 June 2010 */ register ssize_t x; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,result_image,image->columns,1) #endif for (x=0; x < (ssize_t) image->columns; x++) { const int id = GetOpenMPThreadId(); register const PixelPacket *restrict p; register const IndexPacket *restrict p_indexes; register PixelPacket *restrict q; register IndexPacket *restrict q_indexes; register ssize_t y; ssize_t r; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(p_view, x, -offy,1, image->rows+kernel->height-1, exception); q=GetCacheViewAuthenticPixels(q_view,x,0,1,result_image->rows,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } p_indexes=GetCacheViewVirtualIndexQueue(p_view); q_indexes=GetCacheViewAuthenticIndexQueue(q_view); /* offset to origin in 'p'. 
while 'q' points to it directly */ r = offy; for (y=0; y < (ssize_t) image->rows; y++) { DoublePixelPacket result; register ssize_t v; register const double *restrict k; register const PixelPacket *restrict k_pixels; register const IndexPacket *restrict k_indexes; /* Copy input image to the output image for unused channels * This removes need for 'cloning' a new image every iteration */ *q = p[r]; if (image->colorspace == CMYKColorspace) SetPixelIndex(q_indexes+y,GetPixelIndex(p_indexes+r)); /* Set the bias of the weighted average output */ result.red = result.green = result.blue = result.opacity = result.index = bias; /* Weighted Average of pixels using reflected kernel ** ** NOTE for correct working of this operation for asymetrical ** kernels, the kernel needs to be applied in its reflected form. ** That is its values needs to be reversed. */ k = &kernel->values[ kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes; if ( ((channel & SyncChannels) == 0 ) || (image->matte == MagickFalse) ) { /* No 'Sync' involved. 
** Convolution is simple greyscale channel operation */ for (v=0; v < (ssize_t) kernel->height; v++) { if ( IsNaN(*k) ) continue; result.red += (*k)*GetPixelRed(k_pixels); result.green += (*k)*GetPixelGreen(k_pixels); result.blue += (*k)*GetPixelBlue(k_pixels); result.opacity += (*k)*GetPixelOpacity(k_pixels); if ( image->colorspace == CMYKColorspace) result.index += (*k)*(*k_indexes); k--; k_pixels++; k_indexes++; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(result.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(result.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(result.blue)); if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue)) SetPixelOpacity(q,ClampToQuantum(result.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(q_indexes+x,ClampToQuantum(result.index)); } else { /* Channel 'Sync' Flag, and Alpha Channel enabled. ** Weight the color channels with Alpha Channel so that ** transparent pixels are not part of the results. */ double gamma; /* divisor, sum of color alpha weighting */ MagickRealType alpha; /* alpha weighting for colors : alpha */ size_t count; /* alpha valus collected, number kernel values */ count=0; gamma=0.0; for (v=0; v < (ssize_t) kernel->height; v++) { if ( IsNaN(*k) ) continue; alpha=QuantumScale*(QuantumRange-GetPixelOpacity(k_pixels)); gamma += alpha; /* normalize alpha weights only */ count++; /* number of alpha values collected */ alpha*=(*k); /* include kernel weighting now */ result.red += alpha*GetPixelRed(k_pixels); result.green += alpha*GetPixelGreen(k_pixels); result.blue += alpha*GetPixelBlue(k_pixels); result.opacity += (*k)*GetPixelOpacity(k_pixels); if ( image->colorspace == CMYKColorspace) result.index += alpha*(*k_indexes); k--; k_pixels++; k_indexes++; } /* Sync'ed channels, all channels are modified */ gamma=(double) count/(fabs((double) gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); SetPixelRed(q,ClampToQuantum(gamma*result.red)); SetPixelGreen(q,ClampToQuantum(gamma*result.green)); SetPixelBlue(q,ClampToQuantum(gamma*result.blue)); SetPixelOpacity(q,ClampToQuantum(result.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(q_indexes+x,ClampToQuantum(gamma*result.index)); } /* Count up changed pixels */ if ( ( p[r].red != GetPixelRed(q)) || ( p[r].green != GetPixelGreen(q)) || ( p[r].blue != GetPixelBlue(q)) || ( p[r].opacity != GetPixelOpacity(q)) || ( image->colorspace == CMYKColorspace && GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x) ) ) changes[id]++; p++; q++; } /* y */ if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_MorphologyPrimitive) #endif proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } /* x */ result_image->type=image->type; q_view=DestroyCacheView(q_view); p_view=DestroyCacheView(p_view); for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++) changed+=changes[i]; changes=(size_t *) RelinquishMagickMemory(changes); return(status ? 
(ssize_t) changed : 0); } /* ** Normal handling of horizontal or rectangular kernels (row by row) */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,result_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const PixelPacket *restrict p; register const IndexPacket *restrict p_indexes; register PixelPacket *restrict q; register IndexPacket *restrict q_indexes; register ssize_t x; size_t r; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(p_view, -offx, y-offy, virt_width, kernel->height, exception); q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } p_indexes=GetCacheViewVirtualIndexQueue(p_view); q_indexes=GetCacheViewAuthenticIndexQueue(q_view); /* offset to origin in 'p'. while 'q' points to it directly */ r = virt_width*offy + offx; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t v; register ssize_t u; register const double *restrict k; register const PixelPacket *restrict k_pixels; register const IndexPacket *restrict k_indexes; DoublePixelPacket result, min, max; /* Copy input image to the output image for unused channels * This removes need for 'cloning' a new image every iteration */ *q = p[r]; if (image->colorspace == CMYKColorspace) SetPixelIndex(q_indexes+x,GetPixelIndex(p_indexes+r)); /* Defaults */ min.red = min.green = min.blue = min.opacity = min.index = (double) QuantumRange; max.red = max.green = max.blue = max.opacity = max.index = 0.0; /* default result is the original pixel value */ result.red = (double) p[r].red; result.green = (double) p[r].green; result.blue = (double) p[r].blue; result.opacity = QuantumRange - (double) p[r].opacity; result.index = 0.0; if ( image->colorspace == CMYKColorspace) result.index = (double) GetPixelIndex(p_indexes+r); 
switch (method) { case ConvolveMorphology: /* Set the bias of the weighted average output */ result.red = result.green = result.blue = result.opacity = result.index = bias; break; case DilateIntensityMorphology: case ErodeIntensityMorphology: /* use a boolean flag indicating when first match found */ result.red = 0.0; /* result is not used otherwise */ break; default: break; } switch ( method ) { case ConvolveMorphology: /* Weighted Average of pixels using reflected kernel ** ** NOTE for correct working of this operation for asymetrical ** kernels, the kernel needs to be applied in its reflected form. ** That is its values needs to be reversed. ** ** Correlation is actually the same as this but without reflecting ** the kernel, and thus 'lower-level' that Convolution. However ** as Convolution is the more common method used, and it does not ** really cost us much in terms of processing to use a reflected ** kernel, so it is Convolution that is implemented. ** ** Correlation will have its kernel reflected before calling ** this function to do a Convolve. ** ** For more details of Correlation vs Convolution see ** http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf */ k = &kernel->values[ kernel->width*kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes; if ( ((channel & SyncChannels) == 0 ) || (image->matte == MagickFalse) ) { /* No 'Sync' involved. 
** Convolution is simple greyscale channel operation */ for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) ) continue; result.red += (*k)*k_pixels[u].red; result.green += (*k)*k_pixels[u].green; result.blue += (*k)*k_pixels[u].blue; result.opacity += (*k)*k_pixels[u].opacity; if ( image->colorspace == CMYKColorspace) result.index += (*k)*GetPixelIndex(k_indexes+u); } k_pixels += virt_width; k_indexes += virt_width; } if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum((MagickRealType) result.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum((MagickRealType) result.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum((MagickRealType) result.blue)); if (((channel & OpacityChannel) != 0) && (image->matte == MagickTrue)) SetPixelOpacity(q,ClampToQuantum((MagickRealType) result.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(q_indexes+x,ClampToQuantum(result.index)); } else { /* Channel 'Sync' Flag, and Alpha Channel enabled. ** Weight the color channels with Alpha Channel so that ** transparent pixels are not part of the results. 
*/ double alpha, /* alpha weighting for colors : alpha */ gamma; /* divisor, sum of color alpha weighting */ size_t count; /* alpha valus collected, number kernel values */ count=0; gamma=0.0; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) ) continue; alpha=QuantumScale*(QuantumRange-k_pixels[u].opacity); gamma += alpha; /* normalize alpha weights only */ count++; /* number of alpha values collected */ alpha=alpha*(*k); /* include kernel weighting now */ result.red += alpha*k_pixels[u].red; result.green += alpha*k_pixels[u].green; result.blue += alpha*k_pixels[u].blue; result.opacity += (*k)*k_pixels[u].opacity; if ( image->colorspace == CMYKColorspace) result.index+=alpha*GetPixelIndex(k_indexes+u); } k_pixels += virt_width; k_indexes += virt_width; } /* Sync'ed channels, all channels are modified */ gamma=(double) count/(fabs((double) gamma) < MagickEpsilon ? MagickEpsilon : gamma); SetPixelRed(q,ClampToQuantum((MagickRealType) (gamma*result.red))); SetPixelGreen(q,ClampToQuantum((MagickRealType) (gamma*result.green))); SetPixelBlue(q,ClampToQuantum((MagickRealType) (gamma*result.blue))); SetPixelOpacity(q,ClampToQuantum(result.opacity)); if (image->colorspace == CMYKColorspace) SetPixelIndex(q_indexes+x,ClampToQuantum((MagickRealType) (gamma* result.index))); } break; case ErodeMorphology: /* Minimum Value within kernel neighbourhood ** ** NOTE that the kernel is not reflected for this operation! ** ** NOTE: in normal Greyscale Morphology, the kernel value should ** be added to the real value, this is currently not done, due to ** the nature of the boolean kernels being used. 
*/ k = kernel->values; k_pixels = p; k_indexes = p_indexes; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k++) { if ( IsNaN(*k) || (*k) < 0.5 ) continue; Minimize(min.red, (double) k_pixels[u].red); Minimize(min.green, (double) k_pixels[u].green); Minimize(min.blue, (double) k_pixels[u].blue); Minimize(min.opacity, QuantumRange-(double) k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Minimize(min.index,(double) GetPixelIndex( k_indexes+u)); } k_pixels += virt_width; k_indexes += virt_width; } break; case DilateMorphology: /* Maximum Value within kernel neighbourhood ** ** NOTE for correct working of this operation for asymetrical ** kernels, the kernel needs to be applied in its reflected form. ** That is its values needs to be reversed. ** ** NOTE: in normal Greyscale Morphology, the kernel value should ** be added to the real value, this is currently not done, due to ** the nature of the boolean kernels being used. ** */ k = &kernel->values[ kernel->width*kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) || (*k) < 0.5 ) continue; Maximize(max.red, (double) k_pixels[u].red); Maximize(max.green, (double) k_pixels[u].green); Maximize(max.blue, (double) k_pixels[u].blue); Maximize(max.opacity, QuantumRange-(double) k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Maximize(max.index, (double) GetPixelIndex( k_indexes+u)); } k_pixels += virt_width; k_indexes += virt_width; } break; case HitAndMissMorphology: case ThinningMorphology: case ThickenMorphology: /* Minimum of Foreground Pixel minus Maxumum of Background Pixels ** ** NOTE that the kernel is not reflected for this operation, ** and consists of both foreground and background pixel ** neighbourhoods, 0.0 for background, and 1.0 for foreground ** with either Nan or 0.5 values for don't care. 
** ** Note that this will never produce a meaningless negative ** result. Such results can cause Thinning/Thicken to not work ** correctly when used against a greyscale image. */ k = kernel->values; k_pixels = p; k_indexes = p_indexes; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k++) { if ( IsNaN(*k) ) continue; if ( (*k) > 0.7 ) { /* minimim of foreground pixels */ Minimize(min.red, (double) k_pixels[u].red); Minimize(min.green, (double) k_pixels[u].green); Minimize(min.blue, (double) k_pixels[u].blue); Minimize(min.opacity, QuantumRange-(double) k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Minimize(min.index,(double) GetPixelIndex( k_indexes+u)); } else if ( (*k) < 0.3 ) { /* maximum of background pixels */ Maximize(max.red, (double) k_pixels[u].red); Maximize(max.green, (double) k_pixels[u].green); Maximize(max.blue, (double) k_pixels[u].blue); Maximize(max.opacity, QuantumRange-(double) k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Maximize(max.index, (double) GetPixelIndex( k_indexes+u)); } } k_pixels += virt_width; k_indexes += virt_width; } /* Pattern Match if difference is positive */ min.red -= max.red; Maximize( min.red, 0.0 ); min.green -= max.green; Maximize( min.green, 0.0 ); min.blue -= max.blue; Maximize( min.blue, 0.0 ); min.opacity -= max.opacity; Maximize( min.opacity, 0.0 ); min.index -= max.index; Maximize( min.index, 0.0 ); break; case ErodeIntensityMorphology: /* Select Pixel with Minimum Intensity within kernel neighbourhood ** ** WARNING: the intensity test fails for CMYK and does not ** take into account the moderating effect of the alpha channel ** on the intensity. ** ** NOTE that the kernel is not reflected for this operation! 
*/ k = kernel->values; k_pixels = p; k_indexes = p_indexes; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k++) { if ( IsNaN(*k) || (*k) < 0.5 ) continue; if ( result.red == 0.0 || GetPixelIntensity(image,&(k_pixels[u])) < GetPixelIntensity(result_image,q) ) { /* copy the whole pixel - no channel selection */ *q = k_pixels[u]; if ( result.red > 0.0 ) changes[id]++; result.red = 1.0; } } k_pixels += virt_width; k_indexes += virt_width; } break; case DilateIntensityMorphology: /* Select Pixel with Maximum Intensity within kernel neighbourhood ** ** WARNING: the intensity test fails for CMYK and does not ** take into account the moderating effect of the alpha channel ** on the intensity (yet). ** ** NOTE for correct working of this operation for asymetrical ** kernels, the kernel needs to be applied in its reflected form. ** That is its values needs to be reversed. */ k = &kernel->values[ kernel->width*kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) || (*k) < 0.5 ) continue; /* boolean kernel */ if ( result.red == 0.0 || GetPixelIntensity(image,&(k_pixels[u])) > GetPixelIntensity(result_image,q) ) { /* copy the whole pixel - no channel selection */ *q = k_pixels[u]; if ( result.red > 0.0 ) changes[id]++; result.red = 1.0; } } k_pixels += virt_width; k_indexes += virt_width; } break; case IterativeDistanceMorphology: /* Work out an iterative distance from black edge of a white image ** shape. Essentually white values are decreased to the smallest ** 'distance from edge' it can find. ** ** It works by adding kernel values to the neighbourhood, and and ** select the minimum value found. The kernel is rotated before ** use, so kernel distances match resulting distances, when a user ** provided asymmetric kernel is applied. ** ** ** This code is almost identical to True GrayScale Morphology But ** not quite. 
** ** GreyDilate Kernel values added, maximum value found Kernel is ** rotated before use. ** ** GrayErode: Kernel values subtracted and minimum value found No ** kernel rotation used. ** ** Note the the Iterative Distance method is essentially a ** GrayErode, but with negative kernel values, and kernel ** rotation applied. */ k = &kernel->values[ kernel->width*kernel->height-1 ]; k_pixels = p; k_indexes = p_indexes; for (v=0; v < (ssize_t) kernel->height; v++) { for (u=0; u < (ssize_t) kernel->width; u++, k--) { if ( IsNaN(*k) ) continue; Minimize(result.red, (*k)+k_pixels[u].red); Minimize(result.green, (*k)+k_pixels[u].green); Minimize(result.blue, (*k)+k_pixels[u].blue); Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity); if ( image->colorspace == CMYKColorspace) Minimize(result.index,(*k)+GetPixelIndex( k_indexes+u)); } k_pixels += virt_width; k_indexes += virt_width; } break; case UndefinedMorphology: default: break; /* Do nothing */ } /* Final mathematics of results (combine with original image?) ** ** NOTE: Difference Morphology operators Edge* and *Hat could also ** be done here but works better with iteration as a image difference ** in the controlling function (below). Thicken and Thinning however ** should be done here so thay can be iterated correctly. 
*/ switch ( method ) { case HitAndMissMorphology: case ErodeMorphology: result = min; /* minimum of neighbourhood */ break; case DilateMorphology: result = max; /* maximum of neighbourhood */ break; case ThinningMorphology: /* subtract pattern match from original */ result.red -= min.red; result.green -= min.green; result.blue -= min.blue; result.opacity -= min.opacity; result.index -= min.index; break; case ThickenMorphology: /* Add the pattern matchs to the original */ result.red += min.red; result.green += min.green; result.blue += min.blue; result.opacity += min.opacity; result.index += min.index; break; default: /* result directly calculated or assigned */ break; } /* Assign the resulting pixel values - Clamping Result */ switch ( method ) { case UndefinedMorphology: case ConvolveMorphology: case DilateIntensityMorphology: case ErodeIntensityMorphology: break; /* full pixel was directly assigned - not a channel method */ default: if ((channel & RedChannel) != 0) SetPixelRed(q,ClampToQuantum(result.red)); if ((channel & GreenChannel) != 0) SetPixelGreen(q,ClampToQuantum(result.green)); if ((channel & BlueChannel) != 0) SetPixelBlue(q,ClampToQuantum(result.blue)); if ((channel & OpacityChannel) != 0 && image->matte == MagickTrue ) SetPixelAlpha(q,ClampToQuantum(result.opacity)); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) SetPixelIndex(q_indexes+x,ClampToQuantum(result.index)); break; } /* Count up changed pixels */ if ( ( p[r].red != GetPixelRed(q) ) || ( p[r].green != GetPixelGreen(q) ) || ( p[r].blue != GetPixelBlue(q) ) || ( p[r].opacity != GetPixelOpacity(q) ) || ( image->colorspace == CMYKColorspace && GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x) ) ) changes[id]++; p++; q++; } /* x */ if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp 
critical (MagickCore_MorphologyPrimitive)
#endif
        proceed=SetImageProgress(image,MorphologyTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  } /* y */
  q_view=DestroyCacheView(q_view);
  p_view=DestroyCacheView(p_view);
  /* Sum the per-thread change counters into a single total. */
  for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
    changed+=changes[i];
  changes=(size_t *) RelinquishMagickMemory(changes);
  /* Return number of pixels changed, or -1 on failure. */
  return(status ? (ssize_t)changed : -1);
}

/* This is almost identical to the MorphologyPrimitive() function above,
** but will apply the primitive directly to the actual image using two
** passes, once in each direction, with the results of the previous (and
** current) row being re-used.
**
** That is, after each row is 'Sync'ed back into the image, the next row
** will make use of those values as part of the calculation of the next
** row.  It then repeats, but going in the opposite (bottom-up) direction.
**
** Because of this 're-use of results' this function can not make use
** of multi-threaded, parallel processing.
*/
static ssize_t MorphologyPrimitiveDirect(Image *image,
  const MorphologyMethod method, const ChannelType channel,
  const KernelInfo *kernel,ExceptionInfo *exception)
{
  CacheView
    *auth_view,   /* authentic (writable) view into 'image' */
    *virt_view;   /* virtual (read-only, edge-extended) view into 'image' */

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y, offx, offy;   /* offx/offy: kernel origin offsets (maybe reflected) */

  size_t
    changed,         /* number of pixels modified by this primitive */
    virt_width;      /* width of a virtual pixel row: columns + kernel - 1 */

  status=MagickTrue;
  changed=0;
  progress=0;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  assert(kernel != (KernelInfo *) NULL);
  assert(kernel->signature == MagickSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  /* Some methods (including convolve) need to use a reflected kernel.
   * Adjust 'origin' offsets so the loops walk the kernel as a reflection.
   */
  offx = kernel->x;
  offy = kernel->y;
  switch(method) {
    case DistanceMorphology:
    case VoronoiMorphology:
      /* kernel needs to be used with reflection about its origin */
      offx = (ssize_t) kernel->width-offx-1;
      offy = (ssize_t) kernel->height-offy-1;
      break;
#if 0
    case ?????Morphology:
      /* kernel is used as is, without reflection */
      break;
#endif
    default:
      /* NOTE(review): this assert can never fire - the address of a string
      ** literal is never NULL.  It documents intent only; an unsupported
      ** method falls through and is effectively a no-op below. */
      assert("Not a PrimativeDirect Morphology Method" != (char *) NULL);
      break;
  }
  /* DO NOT THREAD THIS CODE!  Rows depend on previously written rows. */
  /* two views into the same image (virtual, and actual) */
  virt_view=AcquireVirtualCacheView(image,exception);
  auth_view=AcquireAuthenticCacheView(image,exception);
  virt_width=image->columns+kernel->width-1;

  /* Pass 1: top-down, left-to-right; only the top half of the kernel is
  ** applied so each pixel sees already-updated neighbours above/left. */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *restrict p;

    register const IndexPacket
      *restrict p_indexes;

    register PixelPacket
      *restrict q;

    register IndexPacket
      *restrict q_indexes;

    register ssize_t
      x;

    ssize_t
      r;   /* offset of the origin pixel within the virtual row 'p' */

    /* NOTE: read virtual pixels, and authentic pixels, from the SAME image!
    ** We read using the virtual view to get virtual pixel handling, but
    ** write back into the same image.
    **
    ** Only the top half of the kernel is processed as we do a single pass
    ** downward through the image, iterating the distance function as we go.
    */
    if (status == MagickFalse)
      break;
    p=GetCacheViewVirtualPixels(virt_view, -offx, y-offy, virt_width,
      (size_t) offy+1, exception);
    q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      status=MagickFalse;
    if (status == MagickFalse)
      break;
    p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);

    /* offset to origin in 'p'. while 'q' points to it directly */
    r = (ssize_t) virt_width*offy + offx;

    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        v;

      register ssize_t
        u;

      register const double
        *restrict k;          /* current kernel value */

      register const PixelPacket
        *restrict k_pixels;   /* pixel row under the kernel */

      register const IndexPacket
        *restrict k_indexes;  /* index row under the kernel */

      MagickPixelPacket
        result;

      /* Starting Defaults */
      GetMagickPixelPacket(image,&result);
      SetMagickPixelPacket(image,q,q_indexes,&result);
      if ( method != VoronoiMorphology )
        result.opacity = QuantumRange - result.opacity;   /* work in alpha */

      switch ( method ) {
        case DistanceMorphology:
          /* Add kernel Value and select the minimum value found. */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes;
          for (v=0; v <= (ssize_t) offy; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;   /* NaN kernel value = don't care */
              Minimize(result.red, (*k)+k_pixels[u].red);
              Minimize(result.green, (*k)+k_pixels[u].green);
              Minimize(result.blue, (*k)+k_pixels[u].blue);
              Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=0; u < (ssize_t) offx; u++, k--) {
            if ( x+u-offx < 0 ) continue;  /* off the edge! */
            if ( IsNaN(*k) ) continue;
            Minimize(result.red, (*k)+k_pixels[u].red);
            Minimize(result.green, (*k)+k_pixels[u].green);
            Minimize(result.blue, (*k)+k_pixels[u].blue);
            Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
            if ( image->colorspace == CMYKColorspace)
              Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
          }
          break;
        case VoronoiMorphology:
          /* Apply Distance to the 'Matte' channel, while copying the color
          ** values of the closest pixel.
          **
          ** This is experimental; really the 'alpha' component should be a
          ** completely separate 'masking' channel so that alpha can also
          ** be used as part of the results.
          */
          k = &kernel->values[ kernel->width*kernel->height-1 ];
          k_pixels = p;
          k_indexes = p_indexes;
          for (v=0; v <= (ssize_t) offy; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              if( result.opacity > (*k)+k_pixels[u].opacity )
                {
                  /* closer pixel found: copy its color, update distance */
                  SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                    &result);
                  result.opacity += *k;
                }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=0; u < (ssize_t) offx; u++, k--) {
            if ( x+u-offx < 0 ) continue;  /* off the edge! */
            if ( IsNaN(*k) ) continue;
            if( result.opacity > (*k)+k_pixels[u].opacity )
              {
                SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                  &result);
                result.opacity += *k;
              }
          }
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }
      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case VoronoiMorphology:
          /* whole pixel (color + distance) was selected above */
          SetPixelPacket(image,&result,q,q_indexes);
          break;
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if (((channel & OpacityChannel) != 0) &&
              (image->matte == MagickTrue))
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }
      /* Count up changed pixels */
      if ( ( p[r].red != GetPixelRed(q) ) ||
           ( p[r].green != GetPixelGreen(q) ) ||
           ( p[r].blue != GetPixelBlue(q) ) ||
           ( p[r].opacity != GetPixelOpacity(q) ) ||
           ( image->colorspace == CMYKColorspace &&
             GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x) ) )
        changed++;  /* The pixel was changed in some way! */
      p++; /* increment pixel buffers */
      q++;
    } /* x */
    if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
           == MagickFalse )
        status=MagickFalse;
  } /* y */

  /* Do the reversed (bottom-up, right-to-left) pass through the image */
  for (y=(ssize_t)image->rows-1; y >= 0; y--)
  {
    register const PixelPacket
      *restrict p;

    register const IndexPacket
      *restrict p_indexes;

    register PixelPacket
      *restrict q;

    register IndexPacket
      *restrict q_indexes;

    register ssize_t
      x;

    ssize_t
      r;

    if (status == MagickFalse)
      break;
    /* NOTE: read virtual pixels, and authentic pixels, from the SAME image!
    ** We read using the virtual view to get virtual pixel handling, but
    ** write back into the same image.
    **
    ** Only the bottom half of the kernel will be processed as we move
    ** up the image.
    */
    p=GetCacheViewVirtualPixels(virt_view, -offx, y, virt_width, (size_t)
      kernel->y+1, exception);
    q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      status=MagickFalse;
    if (status == MagickFalse)
      break;
    p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
    q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);

    /* adjust positions to end of row */
    p += image->columns-1;
    q += image->columns-1;

    /* offset to origin in 'p'. while 'q' points to it directly */
    r = offx;

    for (x=(ssize_t)image->columns-1; x >= 0; x--)
    {
      ssize_t
        v;

      register ssize_t
        u;

      register const double
        *restrict k;

      register const PixelPacket
        *restrict k_pixels;

      register const IndexPacket
        *restrict k_indexes;

      MagickPixelPacket
        result;

      /* Default - previously modified pixel */
      GetMagickPixelPacket(image,&result);
      SetMagickPixelPacket(image,q,q_indexes,&result);
      if ( method != VoronoiMorphology )
        result.opacity = QuantumRange - result.opacity;

      switch ( method ) {
        case DistanceMorphology:
          /* Add kernel Value and select the minimum value found. */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = p;
          k_indexes = p_indexes;
          for (v=offy; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              Minimize(result.red, (*k)+k_pixels[u].red);
              Minimize(result.green, (*k)+k_pixels[u].green);
              Minimize(result.blue, (*k)+k_pixels[u].blue);
              Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
              if ( image->colorspace == CMYKColorspace)
                Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
            if ( (x+u-offx) >= (ssize_t)image->columns ) continue;  /* edge */
            if ( IsNaN(*k) ) continue;
            Minimize(result.red, (*k)+k_pixels[u].red);
            Minimize(result.green, (*k)+k_pixels[u].green);
            Minimize(result.blue, (*k)+k_pixels[u].blue);
            Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
            if ( image->colorspace == CMYKColorspace)
              Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
          }
          break;
        case VoronoiMorphology:
          /* Apply Distance to the 'Matte' channel, copying the closest
          ** color.
          **
          ** This is experimental; really the 'alpha' component should be a
          ** completely separate 'masking' channel.
          */
          k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
          k_pixels = p;
          k_indexes = p_indexes;
          for (v=offy; v < (ssize_t) kernel->height; v++) {
            for (u=0; u < (ssize_t) kernel->width; u++, k--) {
              if ( IsNaN(*k) ) continue;
              if( result.opacity > (*k)+k_pixels[u].opacity )
                {
                  SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                    &result);
                  result.opacity += *k;
                }
            }
            k_pixels += virt_width;
            k_indexes += virt_width;
          }
          /* repeat with the just processed pixels of this row */
          k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
          k_pixels = q-offx;
          k_indexes = q_indexes-offx;
          for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
            if ( (x+u-offx) >= (ssize_t)image->columns ) continue;  /* edge */
            if ( IsNaN(*k) ) continue;
            if( result.opacity > (*k)+k_pixels[u].opacity )
              {
                SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
                  &result);
                result.opacity += *k;
              }
          }
          break;
        default:
          /* result directly calculated or assigned */
          break;
      }
      /* Assign the resulting pixel values - Clamping Result */
      switch ( method ) {
        case VoronoiMorphology:
          SetPixelPacket(image,&result,q,q_indexes);
          break;
        default:
          if ((channel & RedChannel) != 0)
            SetPixelRed(q,ClampToQuantum(result.red));
          if ((channel & GreenChannel) != 0)
            SetPixelGreen(q,ClampToQuantum(result.green));
          if ((channel & BlueChannel) != 0)
            SetPixelBlue(q,ClampToQuantum(result.blue));
          if (((channel & OpacityChannel) != 0) &&
              (image->matte == MagickTrue))
            SetPixelAlpha(q,ClampToQuantum(result.opacity));
          if (((channel & IndexChannel) != 0) &&
              (image->colorspace == CMYKColorspace))
            SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
          break;
      }
      /* Count up changed pixels */
      if ( ( p[r].red != GetPixelRed(q) ) ||
           ( p[r].green != GetPixelGreen(q) ) ||
           ( p[r].blue != GetPixelBlue(q) ) ||
           ( p[r].opacity != GetPixelOpacity(q) ) ||
           ( image->colorspace == CMYKColorspace &&
             GetPixelIndex(p_indexes+r) != GetPixelIndex(q_indexes+x) ) )
        changed++; /* The pixel was changed in some way! */
      p--; /* go backward through pixel buffers */
      q--;
    } /* x */
    if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      if ( SetImageProgress(image,MorphologyTag,progress++,image->rows)
           == MagickFalse )
        status=MagickFalse;
  } /* y */

  auth_view=DestroyCacheView(auth_view);
  virt_view=DestroyCacheView(virt_view);
  /* Return the number of changed pixels, or -1 on failure. */
  return(status ? (ssize_t) changed : -1);
}

/* Apply a Morphology by calling one of the above low level primitive
** application functions.  This function handles any iteration loops,
** composition or re-iteration of results, and compound morphology methods
** that are based on multiple low-level (staged) morphology methods.
**
** Basically this provides the complex glue between the requested morphology
** method and the raw low-level implementation (above).
*/
MagickExport Image *MorphologyApply(const Image *image,
  const ChannelType channel,const MorphologyMethod method,
  const ssize_t iterations, const KernelInfo *kernel,
  const CompositeOperator compose, const double bias,
  ExceptionInfo *exception)
{
  CompositeOperator
    curr_compose;

  Image
    *curr_image,    /* Image we are working with or iterating */
    *work_image,    /* secondary image for primitive iteration */
    *save_image,    /* saved image - for 'edge' method only */
    *rslt_image;    /* resultant image - after multi-kernel handling */

  KernelInfo
    *reflected_kernel, /* A reflected copy of the kernel (if needed) */
    *norm_kernel,      /* the current normal un-reflected kernel */
    *rflt_kernel,      /* the current reflected kernel (if needed) */
    *this_kernel;      /* the kernel being applied */

  MorphologyMethod
    primitive;      /* the current morphology primitive being applied */

  CompositeOperator
    rslt_compose;   /* multi-kernel compose method for results to use */

  MagickBooleanType
    special,        /* do we use a direct modify function?
*/ verbose; /* verbose output of results */ size_t method_loop, /* Loop 1: number of compound method iterations (norm 1) */ method_limit, /* maximum number of compound method iterations */ kernel_number, /* Loop 2: the kernel number being applied */ stage_loop, /* Loop 3: primitive loop for compound morphology */ stage_limit, /* how many primitives are in this compound */ kernel_loop, /* Loop 4: iterate the kernel over image */ kernel_limit, /* number of times to iterate kernel */ count, /* total count of primitive steps applied */ kernel_changed, /* total count of changed using iterated kernel */ method_changed; /* total count of changed over method iteration */ ssize_t changed; /* number pixels changed by last primitive operation */ char v_info[80]; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); assert(kernel != (KernelInfo *) NULL); assert(kernel->signature == MagickSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); count = 0; /* number of low-level morphology primitives performed */ if ( iterations == 0 ) return((Image *)NULL); /* null operation - nothing to do! */ kernel_limit = (size_t) iterations; if ( iterations < 0 ) /* negative interations = infinite (well alomst) */ kernel_limit = image->columns>image->rows ? 
image->columns : image->rows; verbose = IsMagickTrue(GetImageArtifact(image,"verbose")); /* initialise for cleanup */ curr_image = (Image *) image; curr_compose = image->compose; (void) curr_compose; work_image = save_image = rslt_image = (Image *) NULL; reflected_kernel = (KernelInfo *) NULL; /* Initialize specific methods * + which loop should use the given iteratations * + how many primitives make up the compound morphology * + multi-kernel compose method to use (by default) */ method_limit = 1; /* just do method once, unless otherwise set */ stage_limit = 1; /* assume method is not a compound */ special = MagickFalse; /* assume it is NOT a direct modify primitive */ rslt_compose = compose; /* and we are composing multi-kernels as given */ switch( method ) { case SmoothMorphology: /* 4 primitive compound morphology */ stage_limit = 4; break; case OpenMorphology: /* 2 primitive compound morphology */ case OpenIntensityMorphology: case TopHatMorphology: case CloseMorphology: case CloseIntensityMorphology: case BottomHatMorphology: case EdgeMorphology: stage_limit = 2; break; case HitAndMissMorphology: rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */ /* FALL THUR */ case ThinningMorphology: case ThickenMorphology: method_limit = kernel_limit; /* iterate the whole method */ kernel_limit = 1; /* do not do kernel iteration */ break; case DistanceMorphology: case VoronoiMorphology: special = MagickTrue; /* use special direct primative */ break; default: break; } /* Apply special methods with special requirments ** For example, single run only, or post-processing requirements */ if ( special == MagickTrue ) { rslt_image=CloneImage(image,0,0,MagickTrue,exception); if (rslt_image == (Image *) NULL) goto error_cleanup; if (SetImageStorageClass(rslt_image,DirectClass) == MagickFalse) { InheritException(exception,&rslt_image->exception); goto error_cleanup; } changed = MorphologyPrimitiveDirect(rslt_image, method, channel, kernel, exception); if ( 
verbose == MagickTrue ) (void) (void) FormatLocaleFile(stderr, "%s:%.20g.%.20g #%.20g => Changed %.20g\n", CommandOptionToMnemonic(MagickMorphologyOptions, method), 1.0,0.0,1.0, (double) changed); if ( changed < 0 ) goto error_cleanup; if ( method == VoronoiMorphology ) { /* Preserve the alpha channel of input image - but turned off */ (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel); (void) CompositeImageChannel(rslt_image, DefaultChannels, CopyOpacityCompositeOp, image, 0, 0); (void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel); } goto exit_cleanup; } /* Handle user (caller) specified multi-kernel composition method */ if ( compose != UndefinedCompositeOp ) rslt_compose = compose; /* override default composition for method */ if ( rslt_compose == UndefinedCompositeOp ) rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */ /* Some methods require a reflected kernel to use with primitives. * Create the reflected kernel for those methods. */ switch ( method ) { case CorrelateMorphology: case CloseMorphology: case CloseIntensityMorphology: case BottomHatMorphology: case SmoothMorphology: reflected_kernel = CloneKernelInfo(kernel); if (reflected_kernel == (KernelInfo *) NULL) goto error_cleanup; RotateKernelInfo(reflected_kernel,180); break; default: break; } /* Loops around more primitive morpholgy methods ** erose, dilate, open, close, smooth, edge, etc... 
*/ /* Loop 1: iterate the compound method */ method_loop = 0; method_changed = 1; while ( method_loop < method_limit && method_changed > 0 ) { method_loop++; method_changed = 0; /* Loop 2: iterate over each kernel in a multi-kernel list */ norm_kernel = (KernelInfo *) kernel; this_kernel = (KernelInfo *) kernel; rflt_kernel = reflected_kernel; kernel_number = 0; while ( norm_kernel != NULL ) { /* Loop 3: Compound Morphology Staging - Select Primative to apply */ stage_loop = 0; /* the compound morphology stage number */ while ( stage_loop < stage_limit ) { stage_loop++; /* The stage of the compound morphology */ /* Select primitive morphology for this stage of compound method */ this_kernel = norm_kernel; /* default use unreflected kernel */ primitive = method; /* Assume method is a primitive */ switch( method ) { case ErodeMorphology: /* just erode */ case EdgeInMorphology: /* erode and image difference */ primitive = ErodeMorphology; break; case DilateMorphology: /* just dilate */ case EdgeOutMorphology: /* dilate and image difference */ primitive = DilateMorphology; break; case OpenMorphology: /* erode then dialate */ case TopHatMorphology: /* open and image difference */ primitive = ErodeMorphology; if ( stage_loop == 2 ) primitive = DilateMorphology; break; case OpenIntensityMorphology: primitive = ErodeIntensityMorphology; if ( stage_loop == 2 ) primitive = DilateIntensityMorphology; break; case CloseMorphology: /* dilate, then erode */ case BottomHatMorphology: /* close and image difference */ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = DilateMorphology; if ( stage_loop == 2 ) primitive = ErodeMorphology; break; case CloseIntensityMorphology: this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = DilateIntensityMorphology; if ( stage_loop == 2 ) primitive = ErodeIntensityMorphology; break; case SmoothMorphology: /* open, close */ switch ( stage_loop ) { case 1: /* start an open method, which starts with Erode */ 
primitive = ErodeMorphology; break; case 2: /* now Dilate the Erode */ primitive = DilateMorphology; break; case 3: /* Reflect kernel a close */ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = DilateMorphology; break; case 4: /* Finish the Close */ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = ErodeMorphology; break; } break; case EdgeMorphology: /* dilate and erode difference */ primitive = DilateMorphology; if ( stage_loop == 2 ) { save_image = curr_image; /* save the image difference */ curr_image = (Image *) image; primitive = ErodeMorphology; } break; case CorrelateMorphology: /* A Correlation is a Convolution with a reflected kernel. ** However a Convolution is a weighted sum using a reflected ** kernel. It may seem stange to convert a Correlation into a ** Convolution as the Correlation is the simplier method, but ** Convolution is much more commonly used, and it makes sense to ** implement it directly so as to avoid the need to duplicate the ** kernel when it is not required (which is typically the ** default). 
*/ this_kernel = rflt_kernel; /* use the reflected kernel */ primitive = ConvolveMorphology; break; default: break; } assert( this_kernel != (KernelInfo *) NULL ); /* Extra information for debugging compound operations */ if ( verbose == MagickTrue ) { if ( stage_limit > 1 ) (void) FormatLocaleString(v_info,MaxTextExtent,"%s:%.20g.%.20g -> ", CommandOptionToMnemonic(MagickMorphologyOptions,method),(double) method_loop,(double) stage_loop); else if ( primitive != method ) (void) FormatLocaleString(v_info, MaxTextExtent, "%s:%.20g -> ", CommandOptionToMnemonic(MagickMorphologyOptions, method),(double) method_loop); else v_info[0] = '\0'; } /* Loop 4: Iterate the kernel with primitive */ kernel_loop = 0; kernel_changed = 0; changed = 1; while ( kernel_loop < kernel_limit && changed > 0 ) { kernel_loop++; /* the iteration of this kernel */ /* Create a clone as the destination image, if not yet defined */ if ( work_image == (Image *) NULL ) { work_image=CloneImage(image,0,0,MagickTrue,exception); if (work_image == (Image *) NULL) goto error_cleanup; if (SetImageStorageClass(work_image,DirectClass) == MagickFalse) { InheritException(exception,&work_image->exception); goto error_cleanup; } /* work_image->type=image->type; ??? */ } /* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */ count++; changed = MorphologyPrimitive(curr_image, work_image, primitive, channel, this_kernel, bias, exception); if ( verbose == MagickTrue ) { if ( kernel_loop > 1 ) (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */ (void) (void) FormatLocaleFile(stderr, "%s%s%s:%.20g.%.20g #%.20g => Changed %.20g", v_info,CommandOptionToMnemonic(MagickMorphologyOptions, primitive),(this_kernel == rflt_kernel ) ? 
"*" : "", (double) (method_loop+kernel_loop-1),(double) kernel_number, (double) count,(double) changed); } if ( changed < 0 ) goto error_cleanup; kernel_changed += changed; method_changed += changed; /* prepare next loop */ { Image *tmp = work_image; /* swap images for iteration */ work_image = curr_image; curr_image = tmp; } if ( work_image == image ) work_image = (Image *) NULL; /* replace input 'image' */ } /* End Loop 4: Iterate the kernel with primitive */ if ( verbose == MagickTrue && kernel_changed != (size_t)changed ) (void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed); if ( verbose == MagickTrue && stage_loop < stage_limit ) (void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */ #if 0 (void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image); (void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image); (void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image); (void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image); (void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image); #endif } /* End Loop 3: Primative (staging) Loop for Coumpound Methods */ /* Final Post-processing for some Compound Methods ** ** The removal of any 'Sync' channel flag in the Image Compositon ** below ensures the methematical compose method is applied in a ** purely mathematical way, and only to the selected channels. ** Turn off SVG composition 'alpha blending'. 
*/ switch( method ) { case EdgeOutMorphology: case EdgeInMorphology: case TopHatMorphology: case BottomHatMorphology: if ( verbose == MagickTrue ) (void) FormatLocaleFile(stderr, "\n%s: Difference with original image", CommandOptionToMnemonic(MagickMorphologyOptions, method) ); (void) CompositeImageChannel(curr_image, (ChannelType) (channel & ~SyncChannels), DifferenceCompositeOp, image, 0, 0); break; case EdgeMorphology: if ( verbose == MagickTrue ) (void) FormatLocaleFile(stderr, "\n%s: Difference of Dilate and Erode", CommandOptionToMnemonic(MagickMorphologyOptions, method) ); (void) CompositeImageChannel(curr_image, (ChannelType) (channel & ~SyncChannels), DifferenceCompositeOp, save_image, 0, 0); save_image = DestroyImage(save_image); /* finished with save image */ break; default: break; } /* multi-kernel handling: re-iterate, or compose results */ if ( kernel->next == (KernelInfo *) NULL ) rslt_image = curr_image; /* just return the resulting image */ else if ( rslt_compose == NoCompositeOp ) { if ( verbose == MagickTrue ) { if ( this_kernel->next != (KernelInfo *) NULL ) (void) FormatLocaleFile(stderr, " (re-iterate)"); else (void) FormatLocaleFile(stderr, " (done)"); } rslt_image = curr_image; /* return result, and re-iterate */ } else if ( rslt_image == (Image *) NULL) { if ( verbose == MagickTrue ) (void) FormatLocaleFile(stderr, " (save for compose)"); rslt_image = curr_image; curr_image = (Image *) image; /* continue with original image */ } else { /* Add the new 'current' result to the composition ** ** The removal of any 'Sync' channel flag in the Image Compositon ** below ensures the methematical compose method is applied in a ** purely mathematical way, and only to the selected channels. ** IE: Turn off SVG composition 'alpha blending'. 
*/ if ( verbose == MagickTrue ) (void) FormatLocaleFile(stderr, " (compose \"%s\")", CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) ); (void) CompositeImageChannel(rslt_image, (ChannelType) (channel & ~SyncChannels), rslt_compose, curr_image, 0, 0); curr_image = DestroyImage(curr_image); curr_image = (Image *) image; /* continue with original image */ } if ( verbose == MagickTrue ) (void) FormatLocaleFile(stderr, "\n"); /* loop to the next kernel in a multi-kernel list */ norm_kernel = norm_kernel->next; if ( rflt_kernel != (KernelInfo *) NULL ) rflt_kernel = rflt_kernel->next; kernel_number++; } /* End Loop 2: Loop over each kernel */ } /* End Loop 1: compound method interation */ goto exit_cleanup; /* Yes goto's are bad, but it makes cleanup lot more efficient */ error_cleanup: if ( curr_image == rslt_image ) curr_image = (Image *) NULL; if ( rslt_image != (Image *) NULL ) rslt_image = DestroyImage(rslt_image); exit_cleanup: if ( curr_image == rslt_image || curr_image == image ) curr_image = (Image *) NULL; if ( curr_image != (Image *) NULL ) curr_image = DestroyImage(curr_image); if ( work_image != (Image *) NULL ) work_image = DestroyImage(work_image); if ( save_image != (Image *) NULL ) save_image = DestroyImage(save_image); if ( reflected_kernel != (KernelInfo *) NULL ) reflected_kernel = DestroyKernelInfo(reflected_kernel); return(rslt_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o r p h o l o g y I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MorphologyImageChannel() applies a user supplied kernel to the image % according to the given mophology method. % % This function applies any and all user defined settings before calling % the above internal function MorphologyApply(). % % User defined settings include... 
% * Output Bias for Convolution and correlation ("-bias") % * Kernel Scale/normalize settings ("-set 'option:convolve:scale'") % This can also includes the addition of a scaled unity kernel. % * Show Kernel being applied ("-set option:showkernel 1") % % The format of the MorphologyImage method is: % % Image *MorphologyImage(const Image *image,MorphologyMethod method, % const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception) % % Image *MorphologyImageChannel(const Image *image, const ChannelType % channel,MorphologyMethod method,const ssize_t iterations, % KernelInfo *kernel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o method: the morphology method to be applied. % % o iterations: apply the operation this many times (or no change). % A value of -1 means loop until no change found. % How this is applied may depend on the morphology method. % Typically this is a value of 1. % % o channel: the channel type. % % o kernel: An array of double representing the morphology kernel. % Warning: kernel may be normalized for the Convolve method. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MorphologyImageChannel(const Image *image, const ChannelType channel,const MorphologyMethod method, const ssize_t iterations,const KernelInfo *kernel,ExceptionInfo *exception) { KernelInfo *curr_kernel; CompositeOperator compose; Image *morphology_image; /* Apply Convolve/Correlate Normalization and Scaling Factors. * This is done BEFORE the ShowKernelInfo() function is called so that * users can see the results of the 'option:convolve:scale' option. 
*/ curr_kernel = (KernelInfo *) kernel; if ( method == ConvolveMorphology || method == CorrelateMorphology ) { const char *artifact; artifact = GetImageArtifact(image,"convolve:scale"); if ( artifact != (const char *)NULL ) { if ( curr_kernel == kernel ) curr_kernel = CloneKernelInfo(kernel); if (curr_kernel == (KernelInfo *) NULL) { curr_kernel=DestroyKernelInfo(curr_kernel); return((Image *) NULL); } ScaleGeometryKernelInfo(curr_kernel, artifact); } } /* display the (normalized) kernel via stderr */ if ( IsMagickTrue(GetImageArtifact(image,"showkernel")) || IsMagickTrue(GetImageArtifact(image,"convolve:showkernel")) || IsMagickTrue(GetImageArtifact(image,"morphology:showkernel")) ) ShowKernelInfo(curr_kernel); /* Override the default handling of multi-kernel morphology results * If 'Undefined' use the default method * If 'None' (default for 'Convolve') re-iterate previous result * Otherwise merge resulting images using compose method given. * Default for 'HitAndMiss' is 'Lighten'. */ { const char *artifact; compose = UndefinedCompositeOp; /* use default for method */ artifact = GetImageArtifact(image,"morphology:compose"); if ( artifact != (const char *) NULL) compose = (CompositeOperator) ParseCommandOption( MagickComposeOptions,MagickFalse,artifact); } /* Apply the Morphology */ morphology_image = MorphologyApply(image, channel, method, iterations, curr_kernel, compose, image->bias, exception); /* Cleanup and Exit */ if ( curr_kernel != kernel ) curr_kernel=DestroyKernelInfo(curr_kernel); return(morphology_image); } MagickExport Image *MorphologyImage(const Image *image, const MorphologyMethod method, const ssize_t iterations,const KernelInfo *kernel, ExceptionInfo *exception) { Image *morphology_image; morphology_image=MorphologyImageChannel(image,DefaultChannels,method, iterations,kernel,exception); return(morphology_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R o t a t e K e r n e l I n f o % % % 
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RotateKernelInfo() rotates the kernel by the angle given.
%
%  Currently it is restricted to 90 degree angles, of either 1D kernels
%  or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
%  It will ignore useless rotations for specific 'named' built-in kernels.
%
%  The format of the RotateKernelInfo method is:
%
%       void RotateKernelInfo(KernelInfo *kernel, double angle)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
  /* angle the lower kernels first: recurse down the multi-kernel list so
  ** every kernel in the list receives the same rotation */
  if ( kernel->next != (KernelInfo *) NULL)
    RotateKernelInfo(kernel->next, angle);

  /* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
  **
  ** TODO: expand beyond simple 90 degree rotates, flips and flops
  */

  /* Modulus the angle into the range [0, 360) */
  angle = fmod(angle, 360.0);
  if ( angle < 0 )
    angle += 360.0;

  if ( 337.5 < angle || angle <= 22.5 )
    return;   /* Near zero angle - no change! - At least not at this time */

  /* Handle special cases */
  switch (kernel->type) {
    /* These built-in kernels are cylindrical kernels, rotating is useless */
    case GaussianKernel:
    case DoGKernel:
    case LoGKernel:
    case DiskKernel:
    case PeaksKernel:
    case LaplacianKernel:
    case ChebyshevKernel:
    case ManhattanKernel:
    case EuclideanKernel:
      return;

    /* These may be rotatable at non-90 angles in the future */
    /* but simply rotating them in multiples of 90 degrees is useless */
    case SquareKernel:
    case DiamondKernel:
    case PlusKernel:
    case CrossKernel:
      return;

    /* These only allows a +/-90 degree rotation (by transpose) */
    /* A 180 degree rotation is useless */
    case BlurKernel:
      if ( 135.0 < angle && angle <= 225.0 )
        return;
      if ( 225.0 < angle && angle <= 315.0 )
        angle -= 180;
      break;

    default:
      break;
  }
  /* Attempt rotations by 45 degrees -- 3x3 kernels only.
  ** The eight outer cells of a 3x3 kernel are cycled one position
  ** counter-clockwise; the center value (index 4) stays put. */
  if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
    {
      if ( kernel->width == 3 && kernel->height == 3 )
        { /* Rotate a 3x3 square by 45 degree angle */
          double t = kernel->values[0];
          kernel->values[0] = kernel->values[3];
          kernel->values[3] = kernel->values[6];
          kernel->values[6] = kernel->values[7];
          kernel->values[7] = kernel->values[8];
          kernel->values[8] = kernel->values[5];
          kernel->values[5] = kernel->values[2];
          kernel->values[2] = kernel->values[1];
          kernel->values[1] = t;
          /* rotate non-centered origin around the ring in the same way */
          if ( kernel->x != 1 || kernel->y != 1 ) {
            ssize_t x,y;
            x = (ssize_t) kernel->x-1;
            y = (ssize_t) kernel->y-1;
                 if ( x == y  ) x = 0;
            else if ( x == 0  ) x = -y;
            else if ( x == -y ) y = 0;
            else if ( y == 0  ) y = x;
            kernel->x = (ssize_t) x+1;
            kernel->y = (ssize_t) y+1;
          }
          angle = fmod(angle+315.0, 360.0);  /* angle reduced 45 degrees */
          kernel->angle = fmod(kernel->angle+45.0, 360.0);
        }
      else
        perror("Unable to rotate non-3x3 kernel by 45 degrees");
    }
  if ( 45.0 < fmod(angle, 180.0)  && fmod(angle,180.0) <= 135.0 )
    {
      if ( kernel->width == 1 || kernel->height == 1 )
        { /* Do a transpose of a 1 dimensional kernel,
          ** which results in a fast 90 degree rotation of some type.
          */
          ssize_t
            t;
          t = (ssize_t) kernel->width;
          kernel->width = kernel->height;
          kernel->height = (size_t) t;
          t = kernel->x;
          kernel->x = kernel->y;
          kernel->y = t;
          /* track how much rotation remains to be performed below */
          if ( kernel->width == 1 ) {
            angle = fmod(angle+270.0, 360.0);  /* angle reduced 90 degrees */
            kernel->angle = fmod(kernel->angle+90.0, 360.0);
          } else {
            angle = fmod(angle+90.0, 360.0);   /* angle increased 90 degrees */
            kernel->angle = fmod(kernel->angle+270.0, 360.0);
          }
        }
      else if ( kernel->width == kernel->height )
        { /* Rotate a square array of values by 90 degrees:
          ** a four-way cyclic exchange of the ring cells, in place */
          {
            register size_t
              i,j,x,y;

            register double
              *k,t;

            k=kernel->values;
            for( i=0, x=kernel->width-1;  i<=x;   i++, x--)
              for( j=0, y=kernel->height-1;  j<y;   j++, y--)
                { t                    = k[i+j*kernel->width];
                  k[i+j*kernel->width] = k[j+x*kernel->width];
                  k[j+x*kernel->width] = k[x+y*kernel->width];
                  k[x+y*kernel->width] = k[y+i*kernel->width];
                  k[y+i*kernel->width] = t;
                }
          }
          /* rotate the origin - relative to center of array */
          { register ssize_t x,y;
            x = (ssize_t) (kernel->x*2-kernel->width+1);
            y = (ssize_t) (kernel->y*2-kernel->height+1);
            kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
            kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
          }
          angle = fmod(angle+270.0, 360.0);     /* angle reduced 90 degrees */
          kernel->angle = fmod(kernel->angle+90.0, 360.0);
        }
      else
        perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
    }
  if ( 135.0 < angle && angle <= 225.0 )
    {
      /* For a 180 degree rotation - also known as a reflection
       * This is actually a very very common operation!
       * Basically all that is needed is a reversal of the kernel data!
       * And a reflection of the origin
       */
      double
        t;

      register double
        *k;

      size_t
        i, j;

      k=kernel->values;
      for ( i=0, j=kernel->width*kernel->height-1;  i<j;  i++, j--)
        t=k[i],  k[i]=k[j],  k[j]=t;

      kernel->x = (ssize_t) kernel->width  - kernel->x - 1;
      kernel->y = (ssize_t) kernel->height - kernel->y - 1;
      angle = fmod(angle-180.0, 360.0);  /* angle+180 degrees */
      kernel->angle = fmod(kernel->angle+180.0, 360.0);
    }
  /* At this point angle should at least between -45 (315) and +45 degrees
   * In the future some form of non-orthogonal angled rotates could be
   * performed here, possibly with a linear kernel restriction.
   */
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S c a l e G e o m e t r y K e r n e l I n f o                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleGeometryKernelInfo() takes a geometry argument string, typically
%  provided as a   "-set option:convolve:scale {geometry}"   user setting,
%  and modifies the kernel according to the parsed arguments of that setting.
%
%  The first argument (and any normalization flags) are passed to
%  ScaleKernelInfo() to scale/normalize the kernel.  The second argument
%  is then passed to UnityAddKernelInfo() to add a scaled unity kernel
%  into the scaled/normalized kernel.
%
%  The format of the ScaleGeometryKernelInfo method is:
%
%      void ScaleGeometryKernelInfo(KernelInfo *kernel,
%        const char *geometry)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel to modify
%
%    o geometry:
%             The geometry string to parse, typically from the user provided
%             "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
     const char *geometry)
{
  GeometryFlags
    flags;

  GeometryInfo
    args;

  SetGeometryInfo(&args);
  flags = (GeometryFlags) ParseGeometry(geometry, &args);

#if 0
  /* For Debugging Geometry Input */
  (void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
       flags, args.rho, args.sigma, args.xi, args.psi );
#endif

  if ( (flags & PercentValue) != 0 )      /* Handle Percentage flag*/
    args.rho *= 0.01,  args.sigma *= 0.01;

  if ( (flags & RhoValue) == 0 )          /* Set Defaults for missing args */
    args.rho = 1.0;
  if ( (flags & SigmaValue) == 0 )
    args.sigma = 0.0;

  /* Scale/Normalize the input kernel (rho = scaling factor, flags may
  ** request normalization -- see ScaleKernelInfo() below) */
  ScaleKernelInfo(kernel, args.rho, flags);

  /* Add Unity Kernel, for blending with original (sigma = unity amount) */
  if ( (flags & SigmaValue) != 0 )
    UnityAddKernelInfo(kernel, args.sigma);

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S c a l e K e r n e l I n f o                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleKernelInfo() scales the given kernel list by the given amount, with or
%  without normalization of the sum of the kernel values (as per given flags).
%
%  By default (no flags given) the values within the kernel is scaled
%  directly using given scaling factor without change.
%
%  If either of the two 'normalize_flags' are given the kernel will first be
%  normalized and then further scaled by the scaling factor value given.
%
%  Kernel normalization ('normalize_flags' given) is designed to ensure that
%  any use of the kernel scaling factor with 'Convolve' or 'Correlate'
%  morphology methods will fall into -1.0 to +1.0 range.  Note that for
%  non-HDRI versions of IM this may cause images to have any negative results
%  clipped, unless some 'bias' is used.
%
%  More specifically.  Kernels which only contain positive values (such as a
%  'Gaussian' kernel) will be scaled so that those values sum to +1.0,
%  ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
%  For Kernels that contain some negative values, (such as 'Sharpen' kernels)
%  the kernel will be scaled by the absolute of the sum of kernel values, so
%  that it will generally fall within the +/- 1.0 range.
%
%  For kernels whose values sum to zero, (such as 'Laplacian' kernels) kernel
%  will be scaled by just the sum of the positive values, so that its output
%  range will again fall into the  +/- 1.0 range.
%
%  For special kernels designed for locating shapes using 'Correlate', (often
%  only containing +1 and -1 values, representing foreground/background
%  matching) a special normalization method is provided to scale the positive
%  values separately to those of the negative values, so the kernel will be
%  forced to become a zero-sum kernel better suited to such searches.
%
%  WARNING: Correct normalization of the kernel assumes that the '*_range'
%  attributes within the kernel structure have been correctly set during the
%  kernels creation.
%
%  NOTE: The values used for 'normalize_flags' have been selected specifically
%  to match the use of geometry options, so that '!' means NormalizeValue, '^'
%  means CorrelateNormalizeValue.  All other GeometryFlags values are ignored.
%
%  The format of the ScaleKernelInfo method is:
%
%      void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
%               const MagickStatusType normalize_flags )
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o scaling_factor:
%             multiply all values (after normalization) by this factor if not
%             zero.  If the kernel is normalized regardless of any flags.
%
%    o normalize_flags:
%             GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue, % and/or PercentValue % */ MagickExport void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,const GeometryFlags normalize_flags) { register ssize_t i; register double pos_scale, neg_scale; /* do the other kernels in a multi-kernel list first */ if ( kernel->next != (KernelInfo *) NULL) ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags); /* Normalization of Kernel */ pos_scale = 1.0; if ( (normalize_flags&NormalizeValue) != 0 ) { if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon ) /* non-zero-summing kernel (generally positive) */ pos_scale = fabs(kernel->positive_range + kernel->negative_range); else /* zero-summing kernel */ pos_scale = kernel->positive_range; } /* Force kernel into a normalized zero-summing kernel */ if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) { pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon ) ? kernel->positive_range : 1.0; neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon ) ? -kernel->negative_range : 1.0; } else neg_scale = pos_scale; /* finialize scaling_factor for positive and negative components */ pos_scale = scaling_factor/pos_scale; neg_scale = scaling_factor/neg_scale; for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++) if ( ! IsNaN(kernel->values[i]) ) kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale; /* convolution output range */ kernel->positive_range *= pos_scale; kernel->negative_range *= neg_scale; /* maximum and minimum values in kernel */ kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale; kernel->minimum *= (kernel->minimum >= 0.0) ? 
pos_scale : neg_scale; /* swap kernel settings if user's scaling factor is negative */ if ( scaling_factor < MagickEpsilon ) { double t; t = kernel->positive_range; kernel->positive_range = kernel->negative_range; kernel->negative_range = t; t = kernel->maximum; kernel->maximum = kernel->minimum; kernel->minimum = 1; } return; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h o w K e r n e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShowKernelInfo() outputs the details of the given kernel defination to % standard error, generally due to a users 'showkernel' option request. % % The format of the ShowKernel method is: % % void ShowKernelInfo(const KernelInfo *kernel) % % A description of each parameter follows: % % o kernel: the Morphology/Convolution kernel % */ MagickExport void ShowKernelInfo(const KernelInfo *kernel) { const KernelInfo *k; size_t c, i, u, v; for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) { (void) FormatLocaleFile(stderr, "Kernel"); if ( kernel->next != (KernelInfo *) NULL ) (void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c ); (void) FormatLocaleFile(stderr, " \"%s", CommandOptionToMnemonic(MagickKernelOptions, k->type) ); if ( fabs(k->angle) >= MagickEpsilon ) (void) FormatLocaleFile(stderr, "@%lg", k->angle); (void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long) k->width,(unsigned long) k->height,(long) k->x,(long) k->y); (void) FormatLocaleFile(stderr, " with values from %.*lg to %.*lg\n", GetMagickPrecision(), k->minimum, GetMagickPrecision(), k->maximum); (void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg", GetMagickPrecision(), k->negative_range, GetMagickPrecision(), k->positive_range); if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon ) (void) FormatLocaleFile(stderr, " (Zero-Summing)\n"); else if ( 
             fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon )
      (void) FormatLocaleFile(stderr, " (Normalized)\n");
    else
      (void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
          GetMagickPrecision(), k->positive_range+k->negative_range);
    /* print the kernel values row by row; 'nan' marks shape holes */
    for (i=v=0; v < k->height; v++) {
      (void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v );
      for (u=0; u < k->width; u++, i++)
        if ( IsNaN(k->values[i]) )
          (void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
        else
          (void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
              GetMagickPrecision(), k->values[i]);
      (void) FormatLocaleFile(stderr,"\n");
    }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     U n i t y A d d K e r n e l I n f o                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
%  to the given pre-scaled and normalized Kernel.  This in effect adds that
%  amount of the original image into the resulting convolution kernel.  This
%  value is usually provided by the user as a percentage value in the
%  'convolve:scale' setting.
%
%  The resulting effect is to convert the defined kernels into blended
%  soft-blurs, unsharp kernels or into sharpening kernels.
%
%  The format of the UnityAdditionKernelInfo method is:
%
%      void UnityAdditionKernelInfo(KernelInfo *kernel, const double scale )
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
%    o scale:
%             scaling factor for the unity kernel to be added to
%             the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
  const double scale)
{
  /* do the other kernels in a multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    UnityAddKernelInfo(kernel->next, scale);

  /* Add the scaled unity kernel to the existing kernel's origin cell */
  kernel->values[kernel->x+kernel->y*kernel->width] += scale;
  CalcKernelMetaData(kernel);  /* recalculate the meta-data */

  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     Z e r o K e r n e l N a n s                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZeroKernelNans() replaces any special 'nan' value that may be present in
%  the kernel with a zero value.  This is typically done when the kernel will
%  be used in special hardware (GPU) convolution processors, to simply
%  matters.
%
%  The format of the ZeroKernelNans method is:
%
%      void ZeroKernelNans (KernelInfo *kernel)
%
%  A description of each parameter follows:
%
%    o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ZeroKernelNans(KernelInfo *kernel)
{
  register size_t
    i;

  /* do the other kernels in a multi-kernel list first */
  if ( kernel->next != (KernelInfo *) NULL)
    ZeroKernelNans(kernel->next);

  for (i=0; i < (kernel->width*kernel->height); i++)
    if ( IsNaN(kernel->values[i]) )
      kernel->values[i] = 0.0;

  return;
}
/* ==== openmp_kernels.c ==== */
#include "pcg_basic.h"
#include "openmp_kernels.h"

#include <stdio.h>
#include <stdlib.h>  /* BUGFIX: rand() and exit() were used without a
                        declaration (implicit declaration is invalid C99+) */

/* Vector safety length hint for the 'omp simd' pragmas below.
 * NOTE(review): omp_get_thread_num() is used below without a visible
 * #include <omp.h>; presumably openmp_kernels.h provides it -- confirm. */
#define SIMD 8

/* Full scatter-gather: target[ti[i]] = source[si[i]] for i in [0, n). */
void sg_omp(
            sgData_t* restrict target,
            sgIdx_t*  restrict ti,
            sgData_t* restrict source,
            sgIdx_t*  restrict si,
            size_t n)
{
#pragma omp parallel for simd safelen(SIMD)
#pragma prefervector
    for(long i = 0; i < n; i++){
        target[ti[i]] = source[si[i]];
    }
}

/* Scatter: target[ti[i]] = source[i] (source read linearly). */
void scatter_omp(
            sgData_t* restrict target,
            sgIdx_t*  restrict ti,
            sgData_t* restrict source,
            sgIdx_t*  restrict si,
            size_t n)
{
#pragma omp parallel for simd safelen(SIMD)
#pragma prefervector
    for(long i = 0; i < n; i++){
        target[ti[i]] = source[i];
    }
}

/* Gather: target[i] = source[si[i]] (target written linearly). */
void gather_omp(
            sgData_t* restrict target,
            sgIdx_t*  restrict ti,
            sgData_t* restrict source,
            sgIdx_t*  restrict si,
            size_t n)
{
    //Users may want to set a specific safelen value like 32
#pragma omp parallel for simd safelen(SIMD)
#pragma prefervector
    for(long i = 0; i < n; i++){
        target[i] = source[si[i]];
    }
}

int get_ind(void)
{
    return 1;
}

/* Patterned gather: for each of n windows spaced 'delta' apart in source,
 * gather pat_len elements through index pattern 'pat' into target.
 * BUGFIX: the original advanced the shared 'source' pointer inside an
 * 'omp parallel for simd' loop ('source += delta'), a data race that also
 * made the window position depend on thread scheduling.  The window base
 * is now computed from the loop index, which is the serial-intent
 * semantics and is safe in parallel. */
void gather(
            sgData_t* restrict target,
            sgData_t* restrict source,
            sgIdx_t*  const restrict pat,
            size_t pat_len,
            size_t delta,
            size_t n)
{
#pragma omp parallel for simd safelen(SIMD)
#pragma prefervector
    for (size_t i = 0; i < n; i++) {
        sgData_t * restrict sl = source + i * delta;  /* window base */
#pragma loop_info est_trips(8)
#pragma loop_info prefetch
        for (size_t j = 0; j < pat_len; j++) {
            target[i*pat_len+j] = sl[pat[j]];
        }
    }
}

/* Gather into small per-thread buffers: thread t repeatedly overwrites
 * target[t] (target_len pattern-slots) from source windows i*delta. */
void gather_smallbuf(
            sgData_t** restrict target,
            sgData_t* const restrict source,
            sgIdx_t*  const restrict pat,
            size_t pat_len,
            size_t delta,
            size_t n,
            size_t target_len)
{
#ifdef __GNUC__
#pragma omp parallel
#else
#pragma omp parallel shared(pat)
#endif
    {
        int t = omp_get_thread_num();
#ifdef __CRAYC__
#pragma concurrent
#endif
#ifdef __INTEL_COMPILER
#pragma ivdep
#endif
#pragma omp for
        for (size_t i = 0; i < n; i++) {
            sgData_t *sl = source + delta * i;
            sgData_t *tl = target[t] + pat_len*(i%target_len);
#ifdef __CRAYC__
#pragma concurrent
#endif
#if defined __CRAYC__ || defined __INTEL_COMPILER
#pragma vector always,unaligned
#endif
            for (size_t j = 0; j < pat_len; j++) {
                tl[j] = sl[pat[j]];
            }
        }
    }
}

/* As gather_smallbuf, but each iteration gathers from a random window.
 * NOTE(review): rand() has shared hidden state and is not guaranteed
 * thread-safe inside the parallel region; the pcg-based
 * gather_smallbuf_random() below is the reproducible alternative. */
void gather_smallbuf_rdm(
            sgData_t** restrict target,
            sgData_t* const restrict source,
            sgIdx_t*  const restrict pat,
            size_t pat_len,
            size_t delta,
            size_t n,
            size_t target_len)
{
#ifdef __GNUC__
#pragma omp parallel
#else
#pragma omp parallel shared(pat)
#endif
    {
        int t = omp_get_thread_num();
#ifdef __CRAYC__
#pragma concurrent
#endif
#ifdef __INTEL_COMPILER
#pragma ivdep
#endif
#pragma omp for
        for (size_t i = 0; i < n; i++) {
            sgData_t *sl = source + rand()%((n-1)*delta);
            sgData_t *tl = target[t] + pat_len*(i%target_len);
#ifdef __CRAYC__
#pragma concurrent
#endif
#if defined __CRAYC__ || defined __INTEL_COMPILER
#pragma vector always,unaligned
#endif
            for (size_t j = 0; j < pat_len; j++) {
                tl[j] = sl[pat[j]];
            }
        }
    }
}

/* Scatter from small per-thread buffers into target windows i*delta. */
void scatter_smallbuf(
            sgData_t*  restrict target,
            sgData_t** const restrict source,
            sgIdx_t*   const restrict pat,
            size_t pat_len,
            size_t delta,
            size_t n,
            size_t source_len)
{
#ifdef __GNUC__
#pragma omp parallel
#else
#pragma omp parallel shared(pat)
#endif
    {
        int t = omp_get_thread_num();
#ifdef __CRAYC__
#pragma concurrent
#endif
#ifdef __INTEL_COMPILER
#pragma ivdep
#endif
#pragma omp for
        for (size_t i = 0; i < n; i++) {
            sgData_t *tl = target + delta * i;
            sgData_t *sl = source[t] + pat_len*(i%source_len);
#ifdef __CRAYC__
#pragma concurrent
#endif
#if defined __CRAYC__ || defined __INTEL_COMPILER
#pragma vector always,unaligned
#endif
            for (size_t j = 0; j < pat_len; j++) {
                tl[pat[j]] = sl[j];
            }
        }
    }
}

/* As gather_smallbuf, but window index is drawn from a per-thread PCG
 * stream seeded with (initstate, thread id) -- deterministic per thread. */
void gather_smallbuf_random(
            sgData_t** restrict target,
            sgData_t* const restrict source,
            sgIdx_t*  const restrict pat,
            size_t pat_len,
            size_t delta,
            size_t n,
            size_t target_len,
            long initstate)
{
#ifdef __GNUC__
#pragma omp parallel
#else
#pragma omp parallel shared(pat)
#endif
    {
        int t = omp_get_thread_num();
        pcg32_random_t rng;
        pcg32_srandom_r(&rng, initstate, t);
#ifdef __CRAYC__
#pragma concurrent
#endif
#pragma omp for
        for (size_t i = 0; i < n; i++) {
            uint32_t r = pcg32_boundedrand_r(&rng, (uint32_t)n);
            sgData_t *sl = source + delta * r;
            sgData_t *tl = target[t] + pat_len*(i%target_len);
#ifdef __CRAYC__
#pragma concurrent
#pragma vector always,unaligned
#endif
            for (size_t j = 0; j < pat_len; j++) {
                tl[j] = sl[pat[j]];
            }
        }
    }
}

/* As scatter_smallbuf, but target window index is PCG-random (bounded by
 * n, which must fit in 32 bits for the bounded generator). */
void scatter_smallbuf_random(
            sgData_t*  restrict target,
            sgData_t** const restrict source,
            sgIdx_t*   const restrict pat,
            size_t pat_len,
            size_t delta,
            size_t n,
            size_t source_len,
            long initstate)
{
    if (n > 1ll<<32) {printf("n too big for rng, exiting.\n"); exit(1);}
#ifdef __GNUC__
#pragma omp parallel
#else
#pragma omp parallel shared(pat)
#endif
    {
        int t = omp_get_thread_num();
        pcg32_random_t rng;
        pcg32_srandom_r(&rng, initstate, t);
#ifdef __CRAYC__
#pragma concurrent
#endif
#pragma omp for
        for (size_t i = 0; i < n; i++) {
            uint32_t r = pcg32_boundedrand_r(&rng, (uint32_t)n);
            sgData_t *tl = target + delta * r;
            sgData_t *sl = source[t] + pat_len*(i%source_len);
#ifdef __CRAYC__
#pragma concurrent
#pragma vector always,unaligned
#endif
            for (size_t j = 0; j < pat_len; j++) {
                tl[pat[j]] = sl[j];
            }
        }
    }
}

/* Gather with a cycle of delta_len different deltas between windows. */
void gather_smallbuf_multidelta(
            sgData_t** restrict target,
            sgData_t*  restrict source,
            sgIdx_t*   const restrict pat,
            size_t pat_len,
            size_t *delta,
            size_t n,
            size_t target_len,
            size_t delta_len)
{
#ifdef __GNUC__
#pragma omp parallel
#else
#pragma omp parallel shared(pat)
#endif
    {
        int t = omp_get_thread_num();
#ifdef __CRAYC__
#pragma concurrent
#endif
        //target_len is in multiples of pat_len
#pragma omp for
        for (size_t i = 0; i < n; i++) {
            /* delta[] holds cumulative offsets within one cycle;
               delta[delta_len-1] is the full-cycle stride */
            sgData_t *sl = source + (i/delta_len)*delta[delta_len-1] +
                           delta[i%delta_len] - delta[0];
            sgData_t *tl = target[t] + pat_len*(i%target_len);
#ifdef __CRAYC__
#pragma concurrent
#pragma vector always,unaligned
#endif
            for (size_t j = 0; j < pat_len; j++) {
                tl[j] = sl[pat[j]];
            }
        }
    }
}

/* Patterned scatter: read source linearly, write pat_len elements through
 * 'pat' into n target windows spaced 'delta' apart.
 * BUGFIX: the original both advanced 'source += delta' (a race under the
 * parallel pragma, and a double-advance on top of the i*pat_len indexing)
 * and never moved the target window.  As the mirror of gather(), the
 * 'delta' stride belongs to the scatter (target) side. */
void scatter(
            sgData_t* restrict target,
            sgData_t* restrict source,
            sgIdx_t*  const restrict pat,
            size_t pat_len,
            size_t delta,
            size_t n)
{
#pragma omp parallel for simd safelen(SIMD)
#pragma prefervector
    for (size_t i = 0; i < n; i++) {
        sgData_t * restrict tl = target + i * delta;  /* window base */
#pragma loop_info est_trips(8)
#pragma loop_info prefetch
        for (size_t j = 0; j < pat_len; j++) {
            tl[pat[j]] = source[i*pat_len+j];
        }
    }
}

/* Serial gather with a wrapped target buffer (target_wrap slots). */
void gather_stride_os(
            sgData_t* restrict target,
            sgData_t* restrict source,
            sgIdx_t*  restrict pat,
            size_t pat_len,
            size_t delta,
            size_t n,
            size_t target_wrap)
{
    for (size_t i = 0; i < n; i++) {
        for (size_t j = 0; j < pat_len; j++) {
            target[(i%target_wrap)*pat_len+j] = source[pat[j]];
        }
        source += delta;
    }
}

/* Serial gather, fixed pattern length of 8. */
void gather_stride8(
            sgData_t* restrict target,
            sgData_t* restrict source,
            sgIdx_t*  restrict pat,
            size_t delta,
            size_t n)
{
    for (size_t i = 0; i < n; i++) {
        for (size_t j = 0; j < 8; j++) {
            target[i*8+j] = source[pat[j]];
        }
        source += delta;
    }
}

/* Serial gather, fixed pattern length of 16 ('stride' is unused but kept
 * for interface compatibility). */
void gather_stride16(
            sgData_t* restrict target,
            sgData_t* restrict source,
            sgIdx_t*  restrict pat,
            size_t stride,
            size_t delta,
            size_t n)
{
    for (size_t i = 0; i < n; i++) {
        for (size_t j = 0; j < 16; j++) {
            target[i*16+j] = source[pat[j]];
        }
        source += delta;
    }
}

/* Accumulating variants: as the kernels above but with '+=' . */
void sg_accum_omp(
            sgData_t* restrict target,
            sgIdx_t*  restrict ti,
            sgData_t* restrict source,
            sgIdx_t*  restrict si,
            size_t n)
{
#pragma omp parallel for schedule(runtime)
    for(long i = 0; i < n; i++){
        target[ti[i]] += source[si[i]];
    }
}

void scatter_accum_omp(
            sgData_t* restrict target,
            sgIdx_t*  restrict ti,
            sgData_t* restrict source,
            sgIdx_t*  restrict si,
            size_t n)
{
#pragma omp parallel for schedule(runtime)
    for(long i = 0; i < n; i++){
        target[ti[i]] += source[i];
    }
}

void gather_accum_omp(
            sgData_t* restrict target,
            sgIdx_t*  restrict ti,
            sgData_t* restrict source,
            sgIdx_t*  restrict si,
            size_t n)
{
#pragma omp parallel for schedule(runtime)
    for(long i = 0; i < n; i++){
        target[i] += source[si[i]];
    }
}
/* ==== GB_binop__bclr_uint32.c ==== */
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): machine-generated kernel file specializing the generic
// GB_binop templates for the BCLR (bit-clear) operator on uint32_t; any
// changes belong in the Generator/ sources, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__bclr_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_08__bclr_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_02__bclr_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_04__bclr_uint32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__bclr_uint32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__bclr_uint32)
// C+=b function (dense accum):     GB (_Cdense_accumb__bclr_uint32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__bclr_uint32)
// C=scalar+B                       GB (_bind1st__bclr_uint32)
// C=scalar+B'                      GB (_bind1st_tran__bclr_uint32)
// C=A+scalar                       GB (_bind2nd__bclr_uint32)
// C=A'+scalar                      GB (_bind2nd_tran__bclr_uint32)

// C type:   uint32_t
// A type:   uint32_t
// A pattern? 0
// B type:   uint32_t
// B pattern? 0

// BinaryOp: cij = GB_BITCLR (aij, bij, uint32_t, 32)

// The macros below are consumed by the template .c files included from the
// function bodies further down; they bind the generic template code to the
// concrete types and operator of this specialization.

#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_BITCLR (x, y, uint32_t, 32) ;

// true if the binop must be flipped
// (BITCLR is not commutative, so flipped application must be explicit)
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_BCLR || GxB_NO_UINT32 || GxB_NO_BCLR_UINT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (disabled stub: dense ewise3 with accumulation is not defined for BITCLR)
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The #include below expands to the computational loops, specialized by the
// GB_* macros defined above in this file.
void GB (_Cdense_ewise3_noaccum__bclr_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__bclr_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__bclr_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the inner block already returned above;
    // harmless generator artifact.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// (disabled stub: colscale is not defined for BITCLR)
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// (disabled stub: rowscale is not defined for BITCLR)
GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__bclr_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    uint32_t alpha_scalar ;
    uint32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // eWiseUnion supplies alpha/beta fill scalars for unmatched entries
        alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((uint32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__bclr_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__bclr_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__bclr_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__bclr_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__bclr_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t   x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        uint32_t bij = GBX (Bx, p, false) ;
        Cx [p] = GB_BITCLR (x, bij, uint32_t, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__bclr_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t   y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        uint32_t aij = GBX (Ax, p, false) ;
        Cx [p] = GB_BITCLR (aij, y, uint32_t, 32) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    uint32_t aij = GBX (Ax, pA, false) ;            \
    Cx [pC] = GB_BITCLR (x, aij, uint32_t, 32) ;    \
}

GrB_Info GB (_bind1st_tran__bclr_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code after the template include
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                           \
{                                                   \
    uint32_t aij = GBX (Ax, pA, false) ;            \
    Cx [pC] = GB_BITCLR (aij, y, uint32_t, 32) ;    \
}

GrB_Info GB (_bind2nd_tran__bclr_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif