source
stringlengths
3
92
c
stringlengths
26
2.25M
render.h
#ifndef RENDER_H #define RENDER_H #include <stdio.h> #include <omp.h> #include <algorithm> #include <ctime> #include "bitmap.h" #include "model.h" class Render; class FrameBuffer { public: uint32_t *fb_; int w_, h_; std::vector<float *> z_buffers; // hierarchical z_buffer std::vector<int> z_buffer_w; // width of each z_buffer std::vector<int> z_buffer_area; float *z_buffer0; int w0; int levels; // 3, 2, 1, ... FrameBuffer(int w, int h) : w_(w), h_(h) { if (w < h) { std::cout << "screen width < height \n"; return; } fb_ = new uint32_t[w * h]; int block_size = 1; // lowest level int level = 0; while (block_size < w) { block_size *= 2; level++; } z_buffers.resize(level); z_buffer_w.resize(level); z_buffer_area.resize(level); levels = level - 1; for (int i = levels; i >= 0; --i) // level = 3, size=8, 7*7 , first = 4, 2 1 { block_size /= 2; int z_buffer_width = (w - 1) / block_size + 1; int z_buffer_height = (h - 1) / block_size + 1; z_buffer_w[i] = z_buffer_width; z_buffer_area[i] = z_buffer_width * z_buffer_height; z_buffers[i] = new float[z_buffer_width * z_buffer_height]; } w0 = z_buffer_w[0]; z_buffer0 = z_buffers[0]; } ~FrameBuffer() { if (fb_) { delete[] fb_; fb_ = NULL; } for (int i = 0; i <= levels; ++i) { if (z_buffers[i]) delete[] z_buffers[i]; } } inline void fill(uint32_t color) { for (int i = levels; i >= 0; --i) { for (int j = 0; j < z_buffer_area[i]; ++j) z_buffers[i][j] = FLT_MAX; } for (int i = 0; i < w_ * h_; ++i) fb_[i] = color; } // 0 <= x1 < x2, 0 <= y1 < y2 inline bool visiable_box(int x1, int y1, int x2, int y2, float z) { int i = 0; while (x2 - x1 > 1 || y2 - y1 > 1) { x1 >>= 1; x2 >>= 1; y1 >>= 1; y2 >>= 1; i++; } return z < z_buffers[i][z_buffer_w[i] * y1 + x1] || z < z_buffers[i][z_buffer_w[i] * y1 + x2] || z < z_buffers[i][z_buffer_w[i] * y2 + x1] || z < z_buffers[i][z_buffer_w[i] * y2 + x2]; } // y1 < y2 inline bool visiable_scanline(int x, int y1, int y2, float z) { int i = 0; while (y2 - y1 > 1) { x >>= 1; y1 >>= 1; y2 >>= 1; i++; } 
return z < z_buffers[i][z_buffer_w[i] * y1 + x] || z < z_buffers[i][z_buffer_w[i] * y2 + x]; } inline bool visiable_pixel_hierarchical(int x, int y, float z) { return z < z_buffer0[w0 * y + x]; } inline void set_pixel_hierarchical(int x, int y, float z, uint32_t color) { fb_[y * w_ + x] = color; z_buffer0[w0 * y + x] = z; float *zb_curr = z_buffer0; int w_curr = w0; for (int i = 1; i <= levels; ++i) { x &= (~1); y &= (~1); int idx = w_curr * y + x; float z00 = zb_curr[idx]; float z10 = zb_curr[idx + 1]; x >>= 1; y >>= 1; float z01 = zb_curr[idx + w_curr]; float z11 = zb_curr[idx + w_curr + 1]; if (z00 < z01) z00 = z01; if (z10 < z11) z10 = z11; if (z00 < z10) z00 = z10; zb_curr = z_buffers[i]; w_curr = z_buffer_w[i]; float &z_curr = zb_curr[w_curr * y + x]; if (z00 < z_curr) z_curr = z00; else return; } } inline float get_z_hierarchical(int i, int x, int y) { return z_buffers[i][z_buffer_w[i] * y + x]; } inline void set_pixel(int x, int y, float z, uint32_t color) { int idx = y * w0 + x; // #pragma omp critical { if (z < z_buffer0[idx]) { z_buffer0[idx] = z; fb_[idx] = color; } } } inline bool visiable(int x, int y, float z) { return z < z_buffer0[y * w_ + x]; } }; struct Vertex2D { float x; float y; float z; Vec2f uv; int norm; bool operator<(const Vertex2D &a) const { return x < a.x; } std::ostream &operator<<(std::ostream &os) { os << "x:" << x << " y:" << y << " z:" << z << " norm_ID:" << norm; return os; } }; struct Face2D { Vertex2D v1, v2, v3; Bitmap *diffuse_map; std::vector<Vec3f> *norms; }; struct FaceID { float z1; int level, pz1, pz2, pz3, pz4; // hierarchical z_buffer pointers Face2D *f; bool operator<(const FaceID &a) const { return z1 < a.z1; } }; struct Obj { Model *model; Mat4x4f coordinate; float scale = 1; Obj(Model *m_, Mat4x4f pose_, float s_) : model(m_), coordinate(pose_), scale(s_) {} }; class RenderObj { public: int w_, h_; // size of screen Mat4x4f *camera; float *camera_scale; Mat4x4f *obj_coordinate; float *obj_scale; int X1, Y1, X2, Y2; 
//bounding box of the whole obj float Z1, Z2; Model *model; std::vector<int> z_buffer_w; std::vector<Vec3f> norms_; // transformed std::vector<Face2D> faces_; // clipped faces std::vector<FaceID> face_ids; RenderObj(Render *render, Obj *obj); void clip_faces() { // initial state faces_.clear(); face_ids.clear(); X1 = w_ - 1; X2 = 0; Y1 = h_ - 1; Y2 = 0; Z1 = FLT_MAX; Z2 = -1; Mat4x4f transform = transformm_invert(*camera) * (*obj_coordinate); // 转换到相机空间. Mat3x3f rotate_ = transformm_rotate(transform) * (*obj_scale); // 提取旋转矩阵. Vec3f move_ = transformm_move(transform); // 提取位移. float scale_c = *camera_scale; // 全局放大. int mx = w_ / 2; // 屏幕中心. int my = h_ / 2; // some ponters std::vector<Vec3f> *p_norms = &norms_; std::vector<Vec2f> &uvs = model->_uv; Bitmap *p_diffusemap = model->_diffusemap; // 顶点法向量转换. for (int i = 0; i < norms_.size(); ++i) { norms_[i] = rotate_ * (model->_norms[i]); } // 片元组装. for (int i = 0; i < model->_faces.size(); ++i) { Vector<3, Vec3i> &faceInt = model->_faces[i]; // 顶点索引/纹理坐标索引/顶点法向量索引. Vec3i p1 = faceInt[0]; Vec3i p2 = faceInt[1]; Vec3i p3 = faceInt[2]; // 顶点坐标转换. Vec3f v31 = rotate_ * (model->_verts[p1.x]) + move_; Vec3f v32 = rotate_ * (model->_verts[p2.x]) + move_; Vec3f v33 = rotate_ * (model->_verts[p3.x]) + move_; float z1 = v31.z / scale_c; float z2 = v32.z / scale_c; float z3 = v33.z / scale_c; // 视锥剔除1. if (z1 < 0.001 || z2 < 0.001 || z3 < 0.001) continue; // 透视投影. float x1f = v31.x / z1 + mx; float x2f = v32.x / z2 + mx; float x3f = v33.x / z3 + mx; float y1f = v31.y / z1 + my; float y2f = v32.y / z2 + my; float y3f = v33.y / z3 + my; // 四舍五入. int x1 = x1f + 0.5; int x2 = x2f + 0.5; int x3 = x3f + 0.5; int y1 = y1f + 0.5; int y2 = y2f + 0.5; int y3 = y3f + 0.5; // bounding box: (x1, y1, z1) (x2, y2, z2) sort3(x1, x3, x2); sort3(y1, y3, y2); // 视锥剔除2. if ((x2 < 0) | (x1 >= w_) | (y2 < 0) | (y1 >= h_)) continue; // 背面剔除. 
// if ((x2f - x1f) * (y3f - y2f) - (y2f - y1f) * (x3f - x2f) >= 0) // continue; Face2D ff = {{x1f, y1f, z1, uvs[p1.y], p1.z}, {x2f, y2f, z2, uvs[p2.y], p2.z}, {x3f, y3f, z3, uvs[p3.y], p3.z}, p_diffusemap, p_norms}; sort3(ff.v1, ff.v2, ff.v3); // for bresenham faces_.push_back(ff); // push 之后. sort3(z1, z3, z2); // hierarchical z_buffer. x1 = between(0, w_ - 1, x1); x2 = between(0, w_ - 1, x2); y1 = between(0, h_ - 1, y1); y2 = between(0, h_ - 1, y2); // 更新obj bounding box; X1 = min(X1, x1); X2 = max(X2, x2); Y1 = min(Y1, y1); Y2 = max(Y2, y2); Z1 = min(Z1, z1); Z2 = max(Z2, z2); int level = 0; while (x2 - x1 > 1 || y2 - y1 > 1) { x1 >>= 1; x2 >>= 1; y1 >>= 1; y2 >>= 1; level++; } int s = z_buffer_w[level]; face_ids.push_back({z1, level, s * y1 + x1, s * y1 + x2, s * y2 + x1, s * y2 + x2, &faces_.back()}); } std::sort(face_ids.begin(), face_ids.end()); } }; class Render { public: Mat4x4f camera = matrix_set_identity(); // camera pose float camera_scale = 1; // scale of screen FrameBuffer fb; // frame buffer std::vector<FrameBuffer *> fbs; std::vector<RenderObj *> obj_renders; // all objs int n_threads; // performance counter clock_t timer; int visiable_objs; int visiable_triangles; int visiable_scanlines; int visiable_pixels; Render(int w, int h) : fb(FrameBuffer(w, h)) { n_threads = omp_get_max_threads() - 2; for (int i = 0; i < n_threads; ++i) { fbs.push_back(new FrameBuffer(w, h)); } } ~Render() { for (auto i : obj_renders) delete i; obj_renders.clear(); } void set_camera(Mat4x4f c, float scale) { camera = c; camera_scale = scale; } void move_camera_x(float dis) { camera.m[0][3] += camera.m[0][0] * dis; camera.m[1][3] += camera.m[1][0] * dis; camera.m[2][3] += camera.m[2][0] * dis; } void move_camera_y(float dis) { camera.m[0][3] += camera.m[0][1] * dis; camera.m[1][3] += camera.m[1][1] * dis; camera.m[2][3] += camera.m[2][1] * dis; } void move_camera_z(float dis) { camera.m[0][3] += camera.m[0][2] * dis; camera.m[1][3] += camera.m[1][2] * dis; camera.m[2][3] += 
camera.m[2][2] * dis; } void rotate_camera_left(float theta) { camera = camera * matrix_set_rotate(camera.m[0][1], camera.m[1][1], camera.m[2][1], theta); } void rotate_camera_up(float theta) { camera = camera * matrix_set_rotate(camera.m[0][0], camera.m[1][0], camera.m[2][0], theta); } void scale_camera(float scale) { float s = camera_scale * scale; camera_scale = between(0.0001f, 10000.0f, s); } void add_obj(Obj *obj) { obj_renders.push_back(new RenderObj(this, obj)); } double get_time_ms() { double ret = (double)(clock() - timer) * 1000.0 / CLOCKS_PER_SEC; timer = clock(); return ret; } uint32_t *get_framebuffer() { return fb.fb_; } void render(uint32_t color) { std::cout << "=================================== new frame =====\n"; timer = clock(); visiable_objs = 0; visiable_triangles = 0; visiable_scanlines = 0; visiable_pixels = 0; int N = obj_renders.size(); int n_faces = 0; int n_obj = (N - 1) / n_threads + 1; #pragma omp parallel for for (int i = 0; i < N; ++i) { obj_renders[i]->clip_faces(); } for (auto i : obj_renders) n_faces += i->faces_.size(); std::sort(std::begin(obj_renders), std::end(obj_renders), [](RenderObj *a, RenderObj *b) -> bool { return a->X1 < b->X1; }); std::cout << "time clip_faces = " << get_time_ms() << " ms\n"; omp_set_num_threads(n_threads); #pragma omp parallel { int thread_id = omp_get_thread_num(); FrameBuffer *fb_ = fbs[thread_id]; fb_->fill(color); int n_start = thread_id * n_obj; int n_end = min(N, n_start + n_obj); if (n_end > n_start) std::sort(std::begin(obj_renders) + n_start, std::begin(obj_renders) + n_end - 1, [](RenderObj *a, RenderObj *b) -> bool { return a->Z1 < b->Z1; }); for (int i = n_start; i < n_end; ++i) { RenderObj *c_ = obj_renders[i]; if (c_->Z2 < 0 || !fb_->visiable_box(c_->X1, c_->Y1, c_->X2, c_->Y2, c_->Z1)) continue; for (auto f : c_->face_ids) Draw_triangle(f, fb_); } } std::cout << "time Draw = " << get_time_ms() << " ms" << std::endl; #pragma omp parallel for //num_threads(6) for (int i = 0; i < fb.w_ 
* fb.h_; ++i) { float min_z = FLT_MAX; uint32_t min_color = color; for (int j = 0; j < n_threads; ++j) { float curr_z = fbs[j]->z_buffer0[i]; uint32_t curr_color = fbs[j]->fb_[i]; if (curr_z < min_z) { min_z = curr_z; min_color = curr_color; } } fb.fb_[i] = min_color; } std::cout << "time Merge = " << get_time_ms() << " ms\n"; std::cout << ">> faces:" << n_faces << "\t|obj:" << visiable_objs << "\t|tiangle:" << visiable_triangles << "\t|scanline:" << visiable_scanlines << "\t|pixel:" << visiable_pixels << std::endl; } struct Face2D_Coeff { float ax, ay, ak, bx, by, bk, cx, cy, ck; float dx, dy; }; inline void Draw_triangle(FaceID &face_id, FrameBuffer *fb_) { // 片元剔除. float *zb_ = fb_->z_buffers[face_id.level]; float min_z = face_id.z1; if ((min_z < zb_[face_id.pz1] || min_z < zb_[face_id.pz2] || min_z < zb_[face_id.pz3] || min_z < zb_[face_id.pz4])) { Face2D face = *face_id.f; float x1f = face.v1.x; float x2f = face.v2.x; float x3f = face.v3.x; float y1f = face.v1.y; float y2f = face.v2.y; float y3f = face.v3.y; float z2 = face.v2.z; float z3 = face.v3.z; int x1 = x1f + 0.5; int x2 = x2f + 0.5; int x3 = x3f + 0.5; int y1 = y1f + 0.5; int y2 = y2f + 0.5; int y3 = y3f + 0.5; int c = (y3 - y1) * (x2 - x1) - (y2 - y1) * (x3 - x1); // up, down, line // visiable_triangles += 1; if (c == 0) return; float coeff1 = (y2f - y3f) * (x1f - x3f) + (x3f - x2f) * (y1f - y3f); float dz23 = (z2 - z3) / coeff1; float dz12 = (face.v1.z - z2) / coeff1; float cz11 = coeff1 * face.v1.z; float cz12 = coeff1 * z2; float cz13 = coeff1 * z3; Face2D_Coeff f = {(y2f - y3f) / cz11, (x3f - x2f) / cz11, (x2f * y3f - x3f * y2f) / cz11, (y3f - y1f) / cz12, (x1f - x3f) / cz12, (x3f * y1f - x1f * y3f) / cz12, (y1f - y2f) / cz13, (x2f - x1f) / cz13, (x1f * y2f - x2f * y1f) / cz13, (y2f - y1f) * dz23 + (y2f - y3f) * dz12, (x1f - x2f) * dz23 + (x3f - x2f) * dz12}; if (c < 0) // up { Bresenham l1(x1, y1, x3, y3, false); Bresenham l2(x1, y1, x2, y2, true); Bresenham l3(x2, y2, x3, y3, true); for (int i = 
x1; i < x2; ++i) Draw_scanline(i, l1.step(), l2.step(), f, face, fb_); for (int i = x2; i < x3; ++i) Draw_scanline(i, l1.step(), l3.step(), f, face, fb_); if (x2 == x3) Draw_scanline(x3, y3, y2, f, face, fb_); else Draw_scanline(x3, max(y3, l1.step()), min(y3, l3.step()), f, face, fb_); } else // down { Bresenham l1(x1, y1, x3, y3, true); Bresenham l2(x1, y1, x2, y2, false); Bresenham l3(x2, y2, x3, y3, false); int i = x1; for (; i < x2; ++i) Draw_scanline(i, l2.step(), l1.step(), f, face, fb_); for (; i < x3; ++i) Draw_scanline(i, l3.step(), l1.step(), f, face, fb_); if (x2 == x3) Draw_scanline(x3, y2, y3, f, face, fb_); else Draw_scanline(x3, max(y3, l3.step()), min(y3, l1.step()), f, face, fb_); } } } // y1 <= y2 inline void Draw_scanline(int x, int y1, int y2, Face2D_Coeff &f, Face2D &face, FrameBuffer *fb_) { if (x < 0 || x >= fb.w_ || y2 < 0 || y1 >= fb.h_) return; // visiable_scanlines += 1; y1 = between(0, fb.h_ - 1, y1); y2 = between(0, fb.h_ - 1, y2); float z1 = (x - face.v1.x) * f.dx + (y1 - face.v1.y) * f.dy + face.v1.z; for (int y = y1; y <= y2; ++y) { float z_ = (y - y1) * f.dy + z1; // 像素剔除. 
if (!fb_->visiable_pixel_hierarchical(x, y, z_)) continue; // visiable_pixels += 1; float frac1 = f.ax * x + f.ay * y + f.ak; float frac2 = f.bx * x + f.by * y + f.bk; float frac3 = f.cx * x + f.cy * y + f.ck; Vec2f &uv1 = face.v1.uv; Vec2f &uv2 = face.v2.uv; Vec2f &uv3 = face.v3.uv; float uv_x = (frac1 * uv1.x + frac2 * uv2.x + frac3 * uv3.x) * z_; float uv_y = (frac1 * uv1.y + frac2 * uv2.y + frac3 * uv3.y) * z_; fb_->set_pixel_hierarchical(x, y, z_, face.diffuse_map->Sample2D_easy(uv_x, uv_y)); } } struct Bresenham { uint32_t ret_; int dx; int dy; int D; int y_step; int y, ret; bool flip = true; Bresenham(int x0, int y0, int x1, int y1, bool UP) : dx(x1 - x0), dy(y1 - y0), y(y0), ret(y0) { int mask = (dy > -1); y_step = (mask << 1) - 1; // if k < 0, only change the y direction mask = dy >> 31; dy = (dy + mask) ^ mask; // dy = abs(dy) flip = dx < dy; if (flip) std::swap(dx, dy); // flip D = -dx; // error term dy *= 2; dx *= 2; // if (up && y_step = 1 || down && y_step = -1), y-y_step // ret_ = (UP ^ (y_step > 0)) ? 0xffffffff : 0; mask = UP ^ (y_step > 0); ret_ = -mask; } inline int step() { ret = y; while (flip) { y = y + y_step; D = D + dy; if (D > 0) { D = D - dx; return ret_ & ret | (~ret_) & (y - y_step); } } D = D + dy; int mask = D > 0; mask = -mask; y = y + (mask & y_step); D = D - (mask & dx); return ret; } }; }; RenderObj::RenderObj(Render *render, Obj *obj) : w_(render->fb.w_), h_(render->fb.h_), z_buffer_w(render->fb.z_buffer_w) { camera = &(render->camera); camera_scale = &(render->camera_scale); model = obj->model; obj_coordinate = &(obj->coordinate); obj_scale = &(obj->scale); norms_.resize(model->_norms.size()); faces_.reserve(model->_faces.size()); face_ids.reserve(model->_faces.size()); } #endif
DRACC_OMP_025_MxV_Partially_Missing_Enter_Data_yes.c
/* Matrix Vector multiplication with partially Matrix missing on Accelerator.
   Using the target enter data construct.

   NOTE: this file is a DRACC benchmark case with a DELIBERATE defect (the
   "_yes" in the filename marks the bug as present). The matrix b holds C*C
   elements, but the `target enter data` below maps only b[0:C] to the device,
   so the offloaded kernel reads C*C-C unmapped elements of b. The exit
   directive then releases b[0:C*C], mismatching the enter mapping. Do NOT
   "fix" the map clauses: detecting this inconsistency is the purpose of the
   test case. */
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>

/* Matrix/vector dimension: b is C x C, a and c are length C. */
#define C 512

int *a; /* input vector, length C                */
int *b; /* input matrix, C*C elements, row-major */
int *c; /* output vector, length C               */

/* Fill b with 1s, a with 1s and c with 0s, so each correct row dot product
   equals exactly C. */
int init(){
  for(int i=0; i<C; i++){
    for(int j=0; j<C; j++){
      b[j+i*C]=1;
    }
    a[i]=1;
    c[i]=0;
  }
  return 0;
}

/* Offloaded c = b * a. BUG (intentional): only b[0:C] is mapped `to`,
   although the kernel indexes b up to b[C*C-1]. */
int Mult(){
  #pragma omp target enter data map(to:a[0:C],b[0:C],c[0:C]) device(0)
  #pragma omp target device(0)
  {
    #pragma omp teams distribute parallel for
    for(int i=0; i<C; i++){
      for(int j=0; j<C; j++){
        c[i]+=b[j+i*C]*a[j];
      }
    }
  }
  /* Release size for b intentionally disagrees with the enter mapping above. */
  #pragma omp target exit data map(from:c[0:C]) map(release:a[0:C],b[0:C*C]) device(0)
  return 0;
}

/* Report whether any c[i] differs from the expected value C, i.e. whether
   the partial mapping was observable on this platform. */
int check(){
  bool test = false;
  for(int i=0; i<C; i++){
    if(c[i]!=C){
      test = true;
    }
  }
  printf("Memory Access Issue visible: %s\n",test ? "true" : "false");
  return 0;
}

int main(){
  a = malloc(C*sizeof(int));
  b = malloc(C*C*sizeof(int));
  c = malloc(C*sizeof(int));
  init();
  Mult();
  check();
  free(a);
  free(b);
  free(c);
  return 0;
}
data.c
// SPDX-License-Identifier: BSD-2-Clause /* Copyright 1999-2016 Bernard Parent Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <src/data.h> #include <model/_model.h> #include <cycle/_cycle.h> #include <unistd.h> #define dt_steady 1.0e99 #define DATATYPE_BINARY 1 #define DATATYPE_ASCII 2 #ifdef _3DL #define SUBZONE_DESIRED_WIDTH 40 #else #define SUBZONE_DESIRED_WIDTH 40 #endif #define MIN_NUMSUBZONE_PER_THREAD 3 long _ai_mpidatafile(gl_t *gl, long i, long j, long k) { long ii; ii=(i-gl->domain_all.is); if2DL( ii=ii*(gl->domain_all.je-gl->domain_all.js+1)+(j-gl->domain_all.js); if3DL(ii=ii*(gl->domain_all.ke-gl->domain_all.ks+1)+(k-gl->domain_all.ks);) ) return(ii); } void find_NODEVALID_on_domain_all(np_t *np, gl_t *gl, int TYPELEVEL, bool *NODEVALID){ long i,j,k; #ifdef DISTMPI int rank,thisrank; int THISNODEVALID; #endif for_ijk(gl->domain_lim_all,is,js,ks,ie,je,ke){ NODEVALID[_ai_all(gl,i,j,k)]=FALSE; } #ifdef DISTMPI MPI_Comm_rank(MPI_COMM_WORLD, &rank); for_ijk(gl->domain_all,is,js,ks,ie,je,ke){ if (j==gl->domain_all.js && k==gl->domain_all.ks) MPI_Barrier(MPI_COMM_WORLD); thisrank=_node_rank(gl, i, j, k); if (thisrank==rank) THISNODEVALID=(int)(is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL)); MPI_Bcast(&THISNODEVALID,1,MPI_INT,thisrank,MPI_COMM_WORLD); assert(THISNODEVALID==TRUE || THISNODEVALID==FALSE); NODEVALID[_ai_all(gl,i,j,k)]=(bool)THISNODEVALID; } MPI_Barrier(MPI_COMM_WORLD); #else for_ijk(gl->domain_all,is,js,ks,ie,je,ke){ NODEVALID[_ai_all(gl,i,j,k)]=is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL); } #endif } void read_data_file_binary_ascii(char *filename, np_t *np, gl_t *gl, long level, int DATATYPE){ FILE *datafile; char data_format_str[100]; long i,j,k,flux,cnt,tmp1,tmp2,tmp3; double CFLmem; #ifdef _RESTIME_STORAGE_TRAPEZOIDAL flux_t Res; bool NORES=FALSE; long NOREScount=0; #endif #ifndef UNSTEADY double tmp_double; #endif bool FORMAT010; bool *NODEVALID; flux_t U; #ifdef EMFIELD double Lcmem; fluxemfield_t Uemfield; #endif #ifdef DISTMPI int rank,numproc; MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &numproc); MPI_Barrier(MPI_COMM_WORLD); 
#endif NODEVALID=(bool *)malloc((gl->domain_lim_all.ie-gl->domain_lim_all.is+1) #ifdef _2DL *(gl->domain_lim_all.je-gl->domain_lim_all.js+1) #endif #ifdef _3DL *(gl->domain_lim_all.ke-gl->domain_lim_all.ks+1) #endif *sizeof(bool)); CFLmem=gl->CFL; #ifdef EMFIELD Lcmem=gl->Lc; #endif datafile = fopen(filename, "r"); if (datafile==NULL) fatal_error("Having problems opening datafile %s.",filename); for (cnt=0; cnt<16; cnt++) { if (fscanf(datafile,"%c",&(data_format_str[cnt]))!=1) fatal_error("Problem with fscanf in read_data_file_binary()."); } data_format_str[16]=EOS; wfprintf(stdout,"Reading data file %s ",filename); if (level!=0) wfprintf(stdout,"to time level minus %ld ",level); FORMAT010=FALSE; switch (DATATYPE){ case DATATYPE_BINARY: if (strcmp("WARPBINFORMAT010",data_format_str)==0) { wfprintf(stdout,"in CFDWARP binary format 010.."); FORMAT010=TRUE; } break; case DATATYPE_ASCII: if (strcmp("WARPASCFORMAT010",data_format_str)==0) { wfprintf(stdout,"in CFDWARP ASCII format 010.."); FORMAT010=TRUE; } break; } if (FORMAT010) { if (level==0) { if (fscanf(datafile," windowis=%ld windowie=%ld iter=%ld effiter_U=%lg effiter_R=%lg CFL=%lg", &(gl->window.is),&(gl->window.ie), &(gl->iter),&(gl->effiter_U),&(gl->effiter_R),&(gl->CFL))!=6) fatal_error("Problem with fscanf in read_data_file_binary()."); if (fscanf(datafile," nd=%ld ns=%ld nf=%ld", &tmp1,&tmp2, &tmp3)!=3) fatal_error("Problem with fscanf in read_data_file_binary()."); if (tmp1!=nd) fatal_error("Data file has %ld dimensions but CFDWARP is compiled with %ld dimensions.",tmp1,nd); if (tmp2!=ns) fatal_error("Data file has %ld species but CFDWARP is compiled with %ld species.",tmp2,ns); if (tmp3!=nf) fatal_error("Data file has %ld fluxes but CFDWARP is compiled with %ld fluxes.",tmp3,nf); if (fscanf(datafile," is=%ld ie=%ld", &tmp1,&tmp2)!=2) fatal_error("Problem with fscanf in read_data_file_binary()."); if ((tmp2-tmp1)!=(gl->domain_all.ie-gl->domain_all.is)) fatal_error("Data file has %ld grid lines along i but 
the control file specifies %ld grid lines.",tmp2-tmp1,(gl->domain_all.ie-gl->domain_all.is)); #ifdef _2DL if (fscanf(datafile," js=%ld je=%ld", &tmp1,&tmp2)!=2) fatal_error("Problem with fscanf in read_data_file_binary()."); if ((tmp2-tmp1)!=(gl->domain_all.je-gl->domain_all.js)) fatal_error("Data file has %ld grid lines along j but the control file specifies %ld grid lines.",tmp2-tmp1,(gl->domain_all.je-gl->domain_all.js)); #endif #ifdef _3DL if (fscanf(datafile," ks=%ld ke=%ld", &tmp1,&tmp2)!=2) fatal_error("Problem with fscanf in read_data_file_binary()."); if ((tmp2-tmp1)!=(gl->domain_all.ke-gl->domain_all.js)) fatal_error("Data file has %ld grid lines along k but the control file specifies %ld grid lines.",tmp2-tmp1,(gl->domain_all.ke-gl->domain_all.ks)); #endif #if defined(UNSTEADY) if (fscanf(datafile," time=%lg",&(gl->time))!=1) fatal_error("Problem reading time variable within fscanf in read_data_file_binary()."); #else if (fscanf(datafile," time=%lg",&tmp_double)!=1) fatal_error("Problem reading time variable within fscanf in read_data_file_binary()."); #endif #ifdef UNSTEADY if (fscanf(datafile," dt=%lg",&(gl->dt))!=1) fatal_error("Problem reading dt variable within fscanf in read_data_file_binary()."); #else if (fscanf(datafile," dt=%lg",&tmp_double)!=1) fatal_error("Problem reading dt variable within fscanf in read_data_file_binary()."); #endif #ifdef EMFIELD if (fscanf(datafile," Lc=%lg effiter_U_emfield=%lg effiter_R_emfield=%lg",&(gl->Lc),&(gl->effiter_U_emfield),&(gl->effiter_R_emfield))!=3) fatal_error("Problem reading EMFIELD variables within fscanf in read_data_file_binary()."); #endif if (fscanf(datafile,"%*[^\n]")!=0) fatal_error("Problem with fscanf in read_data_file_binary()."); } else { if (fscanf(datafile," %*[^\n]")!=0) fatal_error("Problem with fscanf in read_data_file_binary()."); } fgetc(datafile); } if (!FORMAT010){ fatal_error("Data file format invalid."); } wfprintf(stdout,"fluid."); find_NODEVALID_on_domain_all(np, gl, 
TYPELEVEL_FLUID, NODEVALID); wfprintf(stdout,"."); for_ijk(gl->domain_all,is,js,ks,ie,je,ke){ #ifdef DISTMPI if (rank==0) { #endif if (NODEVALID[_ai_all(gl,i,j,k)]) { switch (DATATYPE){ case DATATYPE_BINARY: if (fread(U, sizeof(flux_t), 1, datafile)!=1) fatal_error("Could not read all data properly."); break; case DATATYPE_ASCII: for (flux=0; flux<nf; flux++){ if (fscanf(datafile,"%lg%*[^\n]",&(U[flux]))!=1) fatal_error("Could not read all data properly."); } break; default: fatal_error("DATATYPE must be either DATATYPE_ASCII or DATATYPE_BINARY."); } } #ifdef DISTMPI } MPI_Bcast_Node(&U, nf, MPI_DOUBLE, 0, MPI_COMM_WORLD, i, j, k, gl); if (j==gl->domain_all.js && k==gl->domain_all.ks) MPI_Barrier(MPI_COMM_WORLD); #endif if (is_node_in_zone(i,j,k,gl->domain_lim)) { for (flux=0; flux<nf; flux++){ if (level==0) np[_ai(gl,i,j,k)].bs->U[flux]=U[flux]; #ifdef UNSTEADY if (level==1) np[_ai(gl,i,j,k)].bs->Um1[flux]=U[flux]; #if _RESTIME_BW > 2 if (level==2) np[_ai(gl,i,j,k)].bs->Um2[flux]=U[flux]; #endif #if _RESTIME_BW > 3 if (level==3) np[_ai(gl,i,j,k)].bs->Um3[flux]=U[flux]; #endif #endif } np[_ai(gl,i,j,k)].INIT_FLUID=TRUE; } } #ifdef EMFIELD wfprintf(stdout,"emfield."); find_NODEVALID_on_domain_all(np, gl, TYPELEVEL_EMFIELD, NODEVALID); wfprintf(stdout,"."); for_ijk(gl->domain_all,is,js,ks,ie,je,ke){ #ifdef DISTMPI if (rank==0) { #endif if (NODEVALID[_ai_all(gl,i,j,k)]) { switch (DATATYPE){ case DATATYPE_BINARY: if (fread(Uemfield, sizeof(fluxemfield_t), 1, datafile)!=1) fatal_error("Could not read all data properly."); break; case DATATYPE_ASCII: for (flux=0; flux<nfe; flux++){ if (fscanf(datafile,"%lg%*[^\n]",&(Uemfield[flux]))!=1) fatal_error("Could not read all data properly."); } break; default: fatal_error("DATATYPE must be either DATATYPE_ASCII or DATATYPE_BINARY."); } } #ifdef DISTMPI } MPI_Bcast_Node(&Uemfield, nfe, MPI_DOUBLE, 0, MPI_COMM_WORLD, i, j, k, gl); if (j==gl->domain_all.js && k==gl->domain_all.ks) MPI_Barrier(MPI_COMM_WORLD); #endif if 
(is_node_in_zone(i,j,k,gl->domain_lim)) { for (flux=0; flux<nfe; flux++) { if (level==0) np[_ai(gl,i,j,k)].bs->Uemfield[flux]=Uemfield[flux]; #ifdef UNSTEADY if (level==1) np[_ai(gl,i,j,k)].bs->Uemfieldm1[flux]=Uemfield[flux]; #endif } np[_ai(gl,i,j,k)].INIT_EMFIELD=TRUE; } } #endif #ifdef _RESTIME_STORAGE_TRAPEZOIDAL wfprintf(stdout,"trap."); find_NODEVALID_on_domain_all(np, gl, TYPELEVEL_FLUID, NODEVALID); wfprintf(stdout,"."); for_ijk(gl->domain_all,is,js,ks,ie,je,ke){ #ifdef DISTMPI if (rank==0) { #endif if (NODEVALID[_ai_all(gl,i,j,k)]) { switch (DATATYPE){ case DATATYPE_BINARY: if (fread(Res, sizeof(flux_t), 1, datafile)!=1){ NORES=TRUE; NOREScount++; } break; case DATATYPE_ASCII: for (flux=0; flux<nf; flux++){ if (fscanf(datafile,"%lg%*[^\n]",&(Res[flux]))!=1){ NORES=TRUE; NOREScount++; } } break; default: fatal_error("DATATYPE must be either DATATYPE_ASCII or DATATYPE_BINARY."); } } #ifdef DISTMPI } MPI_Bcast(&NORES, 1, MPI_C_BOOL, 0, MPI_COMM_WORLD); if (!NORES) MPI_Bcast_Node(&Res, nf, MPI_DOUBLE, 0, MPI_COMM_WORLD, i, j, k, gl); if (j==gl->domain_all.js && k==gl->domain_all.ks) MPI_Barrier(MPI_COMM_WORLD); #endif if (is_node_in_zone(i,j,k,gl->domain_lim)) { for (flux=0; flux<nf; flux++){ if (!NORES) np[_ai(gl,i,j,k)].bs->trapezoidalm1[flux]=Res[flux]; else if (NORES) np[_ai(gl,i,j,k)].bs->trapezoidalm1[flux]=0.0; } } NORES=FALSE; } if(NOREScount>0) wfprintf(stdout,"WARNING: The residual at the previous time step could not be found within the data file %s. 
The residual has been set to zero..",filename); #endif fclose(datafile); wfprintf(stdout,"done;\n"); if (level!=0) gl->CFL=CFLmem; #ifdef EMFIELD if (level!=0) gl->Lc=Lcmem; #endif #ifdef DISTMPI MPI_Barrier(MPI_COMM_WORLD); if (rank!=0) { gl->effiter_U=0.0; gl->effiter_R=0.0; #ifdef EMFIELD gl->effiter_U_emfield=0.0; gl->effiter_R_emfield=0.0; #endif } #endif free(NODEVALID); } void read_data_file_mpi(char *filename, np_t *np, gl_t *gl, long level){ FILE *datafile; char data_format_str[100]; long i,j,k,flux,cnt,tmp1,tmp2,tmp3; double CFLmem; #ifdef _RESTIME_STORAGE_TRAPEZOIDAL flux_t Res; bool NORES=FALSE; long NOREScount=0; #endif #ifndef UNSTEADY double tmp_double; #endif bool FORMAT010; flux_t U; #ifdef EMFIELD double Lcmem; fluxemfield_t Uemfield; #endif #ifdef DISTMPI int rank,numproc,fd; long offset=0; zone_t domain; MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &numproc); MPI_Barrier(MPI_COMM_WORLD); #endif CFLmem=gl->CFL; #ifdef EMFIELD Lcmem=gl->Lc; #endif datafile = fopen(filename, "r"); if (datafile==NULL) fatal_error("Having problems opening datafile %s.",filename); for (cnt=0; cnt<16; cnt++) { if (fscanf(datafile,"%c",&(data_format_str[cnt]))!=1) fatal_error("Problem with fscanf in read_data_file_mpi()."); } data_format_str[16]=EOS; wfprintf(stdout,"Reading data file %s ",filename); if (level!=0) wfprintf(stdout,"to time level minus %ld ",level); FORMAT010=FALSE; if (strcmp("WARPMPBFORMAT010",data_format_str)==0) { wfprintf(stdout,"in CFDWARP binary (MPI) format 010.."); FORMAT010=TRUE; } if (FORMAT010) { if (level==0) { if (fscanf(datafile," windowis=%ld windowie=%ld iter=%ld effiter_U=%lg effiter_R=%lg CFL=%lg", &(gl->window.is),&(gl->window.ie), &(gl->iter),&(gl->effiter_U),&(gl->effiter_R),&(gl->CFL))!=6) fatal_error("Problem with fscanf in read_data_file_mpi()."); if (fscanf(datafile," nd=%ld ns=%ld nf=%ld", &tmp1,&tmp2, &tmp3)!=3) fatal_error("Problem with fscanf in read_data_file_mpi()."); if (tmp1!=nd) fatal_error("Data 
file has %ld dimensions but CFDWARP is compiled with %ld dimensions.",tmp1,nd); if (tmp2!=ns) fatal_error("Data file has %ld species but CFDWARP is compiled with %ld species.",tmp2,ns); if (tmp3!=nf) fatal_error("Data file has %ld fluxes but CFDWARP is compiled with %ld fluxes.",tmp3,nf); if (fscanf(datafile," is=%ld ie=%ld", &tmp1,&tmp2)!=2) fatal_error("Problem with fscanf in read_data_file_mpi()."); if ((tmp2-tmp1)!=(gl->domain_all.ie-gl->domain_all.is)) fatal_error("Data file has %ld grid lines along i but the control file specifies %ld grid lines.",tmp2-tmp1,(gl->domain_all.ie-gl->domain_all.is)); #ifdef _2DL if (fscanf(datafile," js=%ld je=%ld", &tmp1,&tmp2)!=2) fatal_error("Problem with fscanf in read_data_file_mpi()."); if ((tmp2-tmp1)!=(gl->domain_all.je-gl->domain_all.js)) fatal_error("Data file has %ld grid lines along j but the control file specifies %ld grid lines.",tmp2-tmp1,(gl->domain_all.je-gl->domain_all.js)); #endif #ifdef _3DL if (fscanf(datafile," ks=%ld ke=%ld", &tmp1,&tmp2)!=2) fatal_error("Problem with fscanf in read_data_file_mpi()."); if ((tmp2-tmp1)!=(gl->domain_all.ke-gl->domain_all.js)) fatal_error("Data file has %ld grid lines along k but the control file specifies %ld grid lines.",tmp2-tmp1,(gl->domain_all.ke-gl->domain_all.ks)); #endif #if defined(UNSTEADY) if (fscanf(datafile," time=%lg",&(gl->time))!=1) fatal_error("Problem reading time variable within fscanf in read_data_file_mpi()."); #else if (fscanf(datafile," time=%lg",&tmp_double)!=1) fatal_error("Problem reading time variable within fscanf in read_data_file_mpi()."); #endif #ifdef UNSTEADY if (fscanf(datafile," dt=%lg",&(gl->dt))!=1) fatal_error("Problem reading dt variable within fscanf in read_data_file_mpi()."); #else if (fscanf(datafile," dt=%lg",&tmp_double)!=1) fatal_error("Problem reading dt variable within fscanf in read_data_file_mpi()."); #endif #ifdef EMFIELD if (fscanf(datafile," Lc=%lg effiter_U_emfield=%lg 
effiter_R_emfield=%lg",&(gl->Lc),&(gl->effiter_U_emfield),&(gl->effiter_R_emfield))!=3) fatal_error("Problem reading EMFIELD variables within fscanf in read_data_file_mpi()."); #endif if (fscanf(datafile,"%*[^\n]")!=0) fatal_error("Problem with fscanf in read_data_file_mpi()."); } else { if (fscanf(datafile," %*[^\n]")!=0) fatal_error("Problem with fscanf in read_data_file_mpi()."); } fgetc(datafile); } if (!FORMAT010){ fatal_error("Data file format invalid."); } #ifdef DISTMPI fd=fileno(datafile); if(level==0) offset=ftell(datafile); domain=_zone_intersection(gl->domain_all,gl->domain_lim); #endif wfprintf(stdout,"fluid."); wfprintf(stdout,"."); #ifdef DISTMPI for_ijk(domain,is,js,ks,ie,je,ke){ if(pread(fd,U,sizeof(flux_t),_ai_mpidatafile(gl,i,j,k)*sizeof(flux_t)+offset)==-1) fatal_error("Could not read all data properly."); for (flux=0; flux<nf; flux++){ if (level==0) np[_ai(gl,i,j,k)].bs->U[flux]=U[flux]; #ifdef UNSTEADY if (level==1) np[_ai(gl,i,j,k)].bs->Um1[flux]=U[flux]; #if _RESTIME_BW > 2 if (level==2) np[_ai(gl,i,j,k)].bs->Um2[flux]=U[flux]; #endif #if _RESTIME_BW > 3 if (level==3) np[_ai(gl,i,j,k)].bs->Um3[flux]=U[flux]; #endif #endif } np[_ai(gl,i,j,k)].INIT_FLUID=TRUE; } offset+=((gl->domain_all.ie-gl->domain_all.is+1)*(gl->domain_all.je-gl->domain_all.js+1)*(gl->domain_all.ke-gl->domain_all.ks+1)*sizeof(flux_t)); #else for_ijk(gl->domain_all,is,js,ks,ie,je,ke){ if (fread(U, sizeof(flux_t), 1, datafile)!=1) fatal_error("Could not read all data properly."); for (flux=0; flux<nf; flux++){ if (level==0) np[_ai(gl,i,j,k)].bs->U[flux]=U[flux]; #ifdef UNSTEADY if (level==1) np[_ai(gl,i,j,k)].bs->Um1[flux]=U[flux]; #if _RESTIME_BW > 2 if (level==2) np[_ai(gl,i,j,k)].bs->Um2[flux]=U[flux]; #endif #if _RESTIME_BW > 3 if (level==3) np[_ai(gl,i,j,k)].bs->Um3[flux]=U[flux]; #endif #endif } np[_ai(gl,i,j,k)].INIT_FLUID=TRUE; } #endif #ifdef EMFIELD wfprintf(stdout,"emfield."); wfprintf(stdout,"."); #ifdef DISTMPI for_ijk(domain,is,js,ks,ie,je,ke){ 
if(pread(fd,Uemfield,sizeof(fluxemfield_t),_ai_mpidatafile(gl,i,j,k)*sizeof(fluxemfield_t)+offset)==-1) fatal_error("Could not read all data properly."); for (flux=0; flux<nfe; flux++) { if (level==0) np[_ai(gl,i,j,k)].bs->Uemfield[flux]=Uemfield[flux]; #ifdef UNSTEADY if (level==1) np[_ai(gl,i,j,k)].bs->Uemfieldm1[flux]=Uemfield[flux]; #endif } np[_ai(gl,i,j,k)].INIT_EMFIELD=TRUE; } offset+=((gl->domain_all.ie-gl->domain_all.is+1)*(gl->domain_all.je-gl->domain_all.js+1)*(gl->domain_all.ke-gl->domain_all.ks+1)*sizeof(fluxemfield_t)); #else for_ijk(gl->domain_all,is,js,ks,ie,je,ke){ if (fread(Uemfield, sizeof(fluxemfield_t), 1, datafile)!=1) fatal_error("Could not read all data properly."); for (flux=0; flux<nfe; flux++) { if (level==0) np[_ai(gl,i,j,k)].bs->Uemfield[flux]=Uemfield[flux]; #ifdef UNSTEADY if (level==1) np[_ai(gl,i,j,k)].bs->Uemfieldm1[flux]=Uemfield[flux]; #endif } np[_ai(gl,i,j,k)].INIT_EMFIELD=TRUE; } #endif #endif #ifdef _RESTIME_STORAGE_TRAPEZOIDAL wfprintf(stdout,"trap."); wfprintf(stdout,"."); #ifdef DISTMPI for_ijk(domain,is,js,ks,ie,je,ke){ if(pread(fd,Res,sizeof(flux_t),_ai_mpidatafile(gl,i,j,k)*sizeof(flux_t)+offset)==-1){ printf("failed to read value at %ld,%ld,%ld;",i,j,k); NORES=TRUE; NOREScount++; } for (flux=0; flux<nf; flux++) { if (!NORES) np[_ai(gl,i,j,k)].bs->trapezoidalm1[flux]=Res[flux]; else if (NORES) np[_ai(gl,i,j,k)].bs->trapezoidalm1[flux]=0.0; } NORES=FALSE; } #else for_ijk(gl->domain_all,is,js,ks,ie,je,ke){ if (fread(Res, sizeof(flux_t), 1, datafile)!=1){ NORES=TRUE; NOREScount++; } for (flux=0; flux<nf; flux++) { if (!NORES) np[_ai(gl,i,j,k)].bs->trapezoidalm1[flux]=Res[flux]; else if (NORES) np[_ai(gl,i,j,k)].bs->trapezoidalm1[flux]=0.0; } NORES=FALSE; } #endif if(NOREScount>0) wfprintf(stdout,"WARNING: The residual at the previous time step could not be found within the data file %s. 
The residual has been set to zero..",filename);
#endif
  /* done reading: close the data file and, when reading an older time level
     (level!=0), restore the CFL (and Lc) values saved before the read so the
     old file's values do not overwrite the current ones */
  fclose(datafile);
  wfprintf(stdout,"done;\n");
  if (level!=0) gl->CFL=CFLmem;
#ifdef EMFIELD
  if (level!=0) gl->Lc=Lcmem;
#endif
#ifdef DISTMPI
  MPI_Barrier(MPI_COMM_WORLD);
  /* only rank 0 keeps the effective-iteration counters read from the file;
     zeroing them on the other ranks keeps a later MPI_SUM reduction (see the
     write routines below) from multiplying them by the number of processes */
  if (rank!=0) {
    gl->effiter_U=0.0;
    gl->effiter_R=0.0;
#ifdef EMFIELD
    gl->effiter_U_emfield=0.0;
    gl->effiter_R_emfield=0.0;
#endif
  }
#endif
}


/* Write the solution to filename in CFDWARP format: binary
   ("WARPBINFORMAT010") when DATATYPE==DATATYPE_BINARY, ASCII
   ("WARPASCFORMAT010") when DATATYPE==DATATYPE_ASCII; any other DATATYPE is a
   fatal error.  Writes an ASCII header (window, iteration counters, CFL, grid
   extents, time, dt, and EMFIELD counters when compiled in) followed by the
   fluid fluxes U of the valid nodes, then — when compiled in — the emfield
   fluxes and the trapezoidal residual storage.  With DISTMPI, each rank sends
   its subdomain's nodes to rank 0, which assembles them in fluxtmp and
   performs the output. */
void write_data_file_binary_ascii(char *filename, np_t *np, gl_t *gl, int DATATYPE){
  FILE *datafile;
  long i,j,k;
  flux_t *fluxtmp;   /* whole-domain copy of the fluxes, indexed with _ai_all() */
  bool *NODEVALID;   /* per-node flag: only valid nodes are written to file */
#ifdef EMFIELD
  double effiter_U_emfield,effiter_R_emfield;
#endif
  long flux;
  double effiter_U,effiter_R;
#ifdef DISTMPI
  zone_t domain;
  flux_t U;
  int rank,proc,numproc;
  MPI_Status MPI_Status1;
#endif
#ifdef EMFIELD
  /* the emfield fluxes reuse the flux_t-sized buffers below, so nf>=nfe is
     required for the stores not to overflow */
  assert(nf>=nfe);
#endif
  fluxtmp=(flux_t *)malloc((gl->domain_lim_all.ie-gl->domain_lim_all.is+1)
#ifdef _2DL
                           *(gl->domain_lim_all.je-gl->domain_lim_all.js+1)
#endif
#ifdef _3DL
                           *(gl->domain_lim_all.ke-gl->domain_lim_all.ks+1)
#endif
                           *sizeof(flux_t));
  NODEVALID=(bool *)malloc((gl->domain_lim_all.ie-gl->domain_lim_all.is+1)
#ifdef _2DL
                           *(gl->domain_lim_all.je-gl->domain_lim_all.js+1)
#endif
#ifdef _3DL
                           *(gl->domain_lim_all.ke-gl->domain_lim_all.ks+1)
#endif
                           *sizeof(bool));
  effiter_U=gl->effiter_U;
  effiter_R=gl->effiter_R;
#ifdef EMFIELD
  effiter_U_emfield=gl->effiter_U_emfield;
  effiter_R_emfield=gl->effiter_R_emfield;
#endif
#ifdef DISTMPI
  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &numproc);
  /* sum the per-rank effective-iteration counters so the header holds the
     global totals */
  MPI_Allreduce(&gl->effiter_U, &effiter_U, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(&gl->effiter_R, &effiter_R, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
#ifdef EMFIELD
  MPI_Allreduce(&gl->effiter_U_emfield, &effiter_U_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(&gl->effiter_R_emfield, &effiter_R_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
#endif
#endif
  datafile = wfopen(filename, "w");
  wfprintf(stdout,"Writing to CFDWARP ");
  switch (DATATYPE){
    case DATATYPE_BINARY:
      wfprintf(stdout,"binary");
      wfprintf(datafile,"WARPBINFORMAT010");
    break;
    case DATATYPE_ASCII:
      wfprintf(stdout,"ASCII");
      wfprintf(datafile,"WARPASCFORMAT010");
    break;
    default:
      fatal_error("DATATYPE must be either DATATYPE_BINARY or DATATYPE_ASCII.");
  }
  wfprintf(stdout," data file %s..",filename);
  /* ASCII header; field order must match what the corresponding read routine
     parses with fscanf */
  wfprintf(datafile," windowis=%ld windowie=%ld iter=%ld effiter_U=%E effiter_R=%E CFL=%E",
           gl->window.is,gl->window.ie,gl->iter,effiter_U,effiter_R,gl->CFL);
  wfprintf(datafile," nd=%ld ns=%ld nf=%ld",nd,ns,nf);
  wfprintf(datafile," is=%ld ie=%ld",gl->domain_all.is,gl->domain_all.ie);
#ifdef _2DL
  wfprintf(datafile," js=%ld je=%ld",gl->domain_all.js,gl->domain_all.je);
#endif
#ifdef _3DL
  wfprintf(datafile," ks=%ld ke=%ld",gl->domain_all.ks,gl->domain_all.ke);
#endif
#if defined(UNSTEADY)
  wfprintf(datafile," time=%E",gl->time);
#else
  wfprintf(datafile," time=%E",0.0);
#endif
#ifdef UNSTEADY
  wfprintf(datafile," dt=%E",gl->dt);
#else
  wfprintf(datafile," dt=%E",dt_steady);
#endif
#ifdef EMFIELD
  wfprintf(datafile," Lc=%E effiter_U_emfield=%E effiter_R_emfield=%E",gl->Lc,effiter_U_emfield,effiter_R_emfield);
#endif
  wfprintf(datafile,"\n");
  find_NODEVALID_on_domain_all(np, gl, TYPELEVEL_FLUID, NODEVALID);
#ifdef DISTMPI
  /* gather the fluid fluxes: each rank in turn copies (and, if not rank 0,
     sends) its subdomain's U; rank 0 stores everything in fluxtmp */
  for (proc=0; proc<numproc; proc++){
    domain=_domain_from_rank(proc,gl);
    for_ijk(domain,is,js,ks,ie,je,ke){
      if (proc==rank){
        for (flux=0; flux<nf; flux++) U[flux]=np[_ai(gl,i,j,k)].bs->U[flux];
        if (proc!=0) {
          MPI_Send(U,nf,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
        }
      }
      if (rank==0 && proc!=0) {
        MPI_Recv(U,nf,MPI_DOUBLE,proc,0,MPI_COMM_WORLD,&MPI_Status1);
      }
      for (flux=0; flux<nf; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=U[flux];
    }
    MPI_Barrier(MPI_COMM_WORLD);
  }
#else
  for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
    for (flux=0; flux<nf; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=np[_ai(gl,i,j,k)].bs->U[flux];
  }
#endif
  /* write the fluid fluxes of the valid nodes only */
  for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
    if (NODEVALID[_ai_all(gl,i,j,k)]) {
      switch (DATATYPE){
        case DATATYPE_BINARY:
          wfwrite(fluxtmp[_ai_all(gl,i,j,k)], sizeof(flux_t), 1, datafile);
        break;
        case DATATYPE_ASCII:
          for (flux=0; flux<nf; flux++) wfprintf(datafile, "%18.16E\n",fluxtmp[_ai_all(gl,i,j,k)][flux]);
        break;
        default:
          fatal_error("DATATYPE must be either DATATYPE_ASCII or DATATYPE_BINARY.");
      }
    }
  }
#ifdef EMFIELD
  /* same gather/write sequence for the nfe emfield fluxes (nf>=nfe asserted
     above, so the flux_t buffers are large enough) */
  find_NODEVALID_on_domain_all(np, gl, TYPELEVEL_EMFIELD, NODEVALID);
#ifdef DISTMPI
  for (proc=0; proc<numproc; proc++){
    domain=_domain_from_rank(proc,gl);
    for_ijk(domain,is,js,ks,ie,je,ke){
      if (proc==rank){
        for (flux=0; flux<nfe; flux++) U[flux]=np[_ai(gl,i,j,k)].bs->Uemfield[flux];
        if (proc!=0) {
          MPI_Send(U,nfe,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
        }
      }
      if (rank==0 && proc!=0) {
        MPI_Recv(U,nfe,MPI_DOUBLE,proc,0,MPI_COMM_WORLD,&MPI_Status1);
      }
      for (flux=0; flux<nfe; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=U[flux];
    }
    MPI_Barrier(MPI_COMM_WORLD);
  }
#else
  for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
    for (flux=0; flux<nfe; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=np[_ai(gl,i,j,k)].bs->Uemfield[flux];
  }
#endif
  for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
    if (NODEVALID[_ai_all(gl,i,j,k)]) {
      switch (DATATYPE){
        case DATATYPE_BINARY:
          wfwrite(fluxtmp[_ai_all(gl,i,j,k)], sizeof(fluxemfield_t), 1, datafile);
        break;
        case DATATYPE_ASCII:
          for (flux=0; flux<nfe; flux++) wfprintf(datafile, "%18.16E\n",fluxtmp[_ai_all(gl,i,j,k)][flux]);
        break;
        default:
          fatal_error("DATATYPE must be either DATATYPE_ASCII or DATATYPE_BINARY.");
      }
    }
  }
#endif
#ifdef _RESTIME_STORAGE_TRAPEZOIDAL
  /* same gather/write sequence for the trapezoidal residual of the previous
     time step */
  find_NODEVALID_on_domain_all(np, gl, TYPELEVEL_FLUID, NODEVALID);
#ifdef DISTMPI
  for (proc=0; proc<numproc; proc++){
    domain=_domain_from_rank(proc,gl);
    for_ijk(domain,is,js,ks,ie,je,ke){
      if (proc==rank){
        for (flux=0; flux<nf; flux++) U[flux]=np[_ai(gl,i,j,k)].bs->trapezoidalm1[flux];
        if (proc!=0) {
          MPI_Send(U,nf,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
        }
      }
      if (rank==0 && proc!=0) {
        MPI_Recv(U,nf,MPI_DOUBLE,proc,0,MPI_COMM_WORLD,&MPI_Status1);
      }
      for (flux=0; flux<nf; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=U[flux];
    }
    MPI_Barrier(MPI_COMM_WORLD);
  }
#else
  for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
    for (flux=0; flux<nf; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=np[_ai(gl,i,j,k)].bs->trapezoidalm1[flux];
  }
#endif
  for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
    if (NODEVALID[_ai_all(gl,i,j,k)]) {
      switch (DATATYPE){
        case DATATYPE_BINARY:
          wfwrite(fluxtmp[_ai_all(gl,i,j,k)], sizeof(flux_t), 1, datafile);
        break;
        case DATATYPE_ASCII:
          for (flux=0; flux<nf; flux++) wfprintf(datafile, "%18.16E\n",fluxtmp[_ai_all(gl,i,j,k)][flux]);
        break;
        default:
          fatal_error("DATATYPE must be either DATATYPE_ASCII or DATATYPE_BINARY.");
      }
    }
  }
#endif
#ifdef DISTMPI
  MPI_Barrier(MPI_COMM_WORLD);
#endif
  wfclose(datafile);
  wfprintf(stdout,"done.\n");
  free(fluxtmp);
  free(NODEVALID);
}


/* Write the solution to filename in CFDWARP binary-MPI format
   ("WARPMPBFORMAT010").  Unlike write_data_file_binary_ascii(), with DISTMPI
   every rank writes its own subdomain's nodes directly into the shared file
   with pwrite() at offsets computed by _ai_mpidatafile(), so no gather to
   rank 0 is needed. */
void write_data_file_mpi(char *filename, np_t *np, gl_t *gl){
  FILE *datafile;
  long i,j,k;
  flux_t *fluxtmp;
#ifdef EMFIELD
  double effiter_U_emfield,effiter_R_emfield;
#endif
  long flux;
  double effiter_U,effiter_R;
#ifdef DISTMPI
  zone_t domain;
  int rank,fd;
#endif
#ifdef DISTMPI
  long offset=0;   /* file offset of the start of the current data section */
#endif
#ifdef EMFIELD
  /* emfield fluxes reuse the flux_t-sized buffer below */
  assert(nf>=nfe);
#endif
  fluxtmp=(flux_t *)malloc((gl->domain_lim_all.ie-gl->domain_lim_all.is+1)
#ifdef _2DL
                           *(gl->domain_lim_all.je-gl->domain_lim_all.js+1)
#endif
#ifdef _3DL
                           *(gl->domain_lim_all.ke-gl->domain_lim_all.ks+1)
#endif
                           *sizeof(flux_t));
  effiter_U=gl->effiter_U;
  effiter_R=gl->effiter_R;
#ifdef EMFIELD
  effiter_U_emfield=gl->effiter_U_emfield;
  effiter_R_emfield=gl->effiter_R_emfield;
#endif
#ifdef DISTMPI
  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  /* global totals of the effective-iteration counters for the header */
  MPI_Allreduce(&gl->effiter_U, &effiter_U, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(&gl->effiter_R, &effiter_R, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
#ifdef EMFIELD
  MPI_Allreduce(&gl->effiter_U_emfield, &effiter_U_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(&gl->effiter_R_emfield, &effiter_R_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
#endif
#endif
  datafile = wfopen(filename, "w");
  wfprintf(stdout,"Writing to CFDWARP binary (MPI)");
  /* ASCII header; same layout as the other formats but with the
     "WARPMPBFORMAT010" magic parsed by read_data_file_mpi() */
  wfprintf(datafile,"WARPMPBFORMAT010");
  wfprintf(stdout," data file %s..",filename);
  wfprintf(datafile," windowis=%ld windowie=%ld iter=%ld effiter_U=%E effiter_R=%E CFL=%E",
           gl->window.is,gl->window.ie,gl->iter,effiter_U,effiter_R,gl->CFL);
  wfprintf(datafile," nd=%ld ns=%ld nf=%ld",nd,ns,nf);
  wfprintf(datafile," is=%ld ie=%ld",gl->domain_all.is,gl->domain_all.ie);
#ifdef _2DL
  wfprintf(datafile," js=%ld je=%ld",gl->domain_all.js,gl->domain_all.je);
#endif
#ifdef _3DL
  wfprintf(datafile," ks=%ld ke=%ld",gl->domain_all.ks,gl->domain_all.ke);
#endif
#if defined(UNSTEADY)
  wfprintf(datafile," time=%E",gl->time);
#else
  wfprintf(datafile," time=%E",0.0);
#endif
#ifdef UNSTEADY
  wfprintf(datafile," dt=%E",gl->dt);
#else
  wfprintf(datafile," dt=%E",dt_steady);
#endif
#ifdef EMFIELD
  wfprintf(datafile," Lc=%E effiter_U_emfield=%E effiter_R_emfield=%E",gl->Lc,effiter_U_emfield,effiter_R_emfield);
#endif
  wfprintf(datafile,"\n");
#ifdef DISTMPI
  /* rank 0 measures the header length and broadcasts it so every rank starts
     its pwrite()s right after the header */
  if(rank==0) {
    fseek(datafile,0,SEEK_END);
    offset=ftell(datafile);
  }
  MPI_Bcast(&offset,1,MPI_LONG,0,MPI_COMM_WORLD);
#endif
#ifdef DISTMPI
  /* reopen in read/write mode so each rank can pwrite() its own nodes; the
     retry loop covers the reopen racing with other ranks — TODO confirm */
  wfclose(datafile);
  do {datafile = fopen(filename, "rb+");} while(datafile==NULL);
  domain=_domain_from_rank(rank,gl);
#else
  for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
    for (flux=0; flux<nf; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=np[_ai(gl,i,j,k)].bs->U[flux];
  }
#endif
#ifdef DISTMPI
  /* fluid fluxes: each rank writes its subdomain at the node's global file
     position; offset then advances past the whole fluid section */
  fd=fileno(datafile);
  for_ijk(domain,is,js,ks,ie,je,ke){
    for (flux=0; flux<nf; flux++) fluxtmp[_ai(gl,i,j,k)][flux]=np[_ai(gl,i,j,k)].bs->U[flux];
    pwrite(fd,fluxtmp[_ai(gl,i,j,k)],sizeof(flux_t),_ai_mpidatafile(gl,i,j,k)*sizeof(flux_t)+offset);
  }
  offset+=((gl->domain_all.ie-gl->domain_all.is+1)*(gl->domain_all.je-gl->domain_all.js+1)*(gl->domain_all.ke-gl->domain_all.ks+1)*sizeof(flux_t));
#else
  for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
    wfwrite(fluxtmp[_ai_all(gl,i,j,k)], sizeof(flux_t), 1, datafile);
  }
#endif
#ifdef EMFIELD
  /* emfield fluxes: same scheme with fluxemfield_t records */
#ifndef DISTMPI
  for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
    for (flux=0; flux<nfe; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=np[_ai(gl,i,j,k)].bs->Uemfield[flux];
  }
#endif
#ifdef DISTMPI
  for_ijk(domain,is,js,ks,ie,je,ke){
    for (flux=0; flux<nfe; flux++) fluxtmp[_ai(gl,i,j,k)][flux]=np[_ai(gl,i,j,k)].bs->Uemfield[flux];
    pwrite(fd,fluxtmp[_ai(gl,i,j,k)],sizeof(fluxemfield_t),_ai_mpidatafile(gl,i,j,k)*sizeof(fluxemfield_t)+offset);
  }
  offset+=((gl->domain_all.ie-gl->domain_all.is+1)*(gl->domain_all.je-gl->domain_all.js+1)*(gl->domain_all.ke-gl->domain_all.ks+1)*sizeof(fluxemfield_t));
#else
  for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
    wfwrite(fluxtmp[_ai_all(gl,i,j,k)], sizeof(fluxemfield_t), 1, datafile);
  }
#endif
#endif
#ifdef _RESTIME_STORAGE_TRAPEZOIDAL
  /* trapezoidal residual of the previous time step: same scheme */
#ifndef DISTMPI
  for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
    for (flux=0; flux<nf; flux++) fluxtmp[_ai_all(gl,i,j,k)][flux]=np[_ai(gl,i,j,k)].bs->trapezoidalm1[flux];
  }
#endif
#ifdef DISTMPI
  for_ijk(domain,is,js,ks,ie,je,ke){
    for (flux=0; flux<nf; flux++) fluxtmp[_ai(gl,i,j,k)][flux]=np[_ai(gl,i,j,k)].bs->trapezoidalm1[flux];
    pwrite(fd,fluxtmp[_ai(gl,i,j,k)],sizeof(flux_t),_ai_mpidatafile(gl,i,j,k)*sizeof(flux_t)+offset);
  }
#else
  for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
    wfwrite(fluxtmp[_ai_all(gl,i,j,k)], sizeof(flux_t), 1, datafile);
  }
#endif
#endif
#ifdef DISTMPI
  fclose(datafile);
#else
  wfclose(datafile);
#endif
  wfprintf(stdout,"done.\n");
  free(fluxtmp);
}


/* Rotate the backup files (output -> output.wbak -> output.wbak2; rank 0 only
   under DISTMPI) and then write the solution to gl->output_filename in the
   format selected by the gl->OUTPUTASCII / OUTPUTINTERPOLATION /
   OUTPUTBINARYMPI flags, defaulting to plain binary. */
void write_data_file(np_t *np, gl_t *gl){
  char *tmp_filename,*tmp_filename2;
#ifdef DISTMPI
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
  /* +10 chars leaves room for the ".wbak"/".wbak2" suffixes plus the NUL */
  tmp_filename=(char *)malloc((strlen(gl->output_filename)+10)*sizeof(char));
  tmp_filename2=(char *)malloc((strlen(gl->output_filename)+10)*sizeof(char));
  strcpy(tmp_filename,gl->output_filename);
  SOAP_strins(".wbak",&tmp_filename,strlen(tmp_filename));
  strcpy(tmp_filename2,gl->output_filename);
  SOAP_strins(".wbak2",&tmp_filename2,strlen(tmp_filename2));
#ifdef DISTMPI
  if (rank==0) {
#endif
    rename(tmp_filename,tmp_filename2);
    rename(gl->output_filename,tmp_filename);
#ifdef DISTMPI
  }
#endif
  if (gl->OUTPUTASCII) {
    write_data_file_binary_ascii(gl->output_filename, np, gl, DATATYPE_ASCII);
  } else if (gl->OUTPUTINTERPOLATION){
    write_data_file_interpolation(gl->output_filename, np, gl);
  } else if (gl->OUTPUTBINARYMPI){
    write_data_file_mpi(gl->output_filename, np, gl);
  } else {
    write_data_file_binary_ascii(gl->output_filename, np, gl, DATATYPE_BINARY);
  }
  free(tmp_filename);
  free(tmp_filename2);
}


/* Read the initial solution as directed by input: dispatch on the
   input.ASCII / INTERPOLATION / BINARYMPI flags (level 0 = current time
   level), then, for unsteady runs, seed each older time level (Um1, Um2, Um3,
   and Uemfieldm1) from the next-newer one before optionally overwriting it
   from the matching input.name_mN file. */
void read_data_file(input_t input, np_t *np, gl_t *gl){
#ifdef UNSTEADY
  long i,j,k;
  long flux;
#endif
  long spec;
  gl->nsinit=ns;
  for (spec=0; spec<ns; spec++) gl->initspecies[spec]=spec;
  if (input.READDATAFILE) {
    if (input.ASCII) {
      read_data_file_binary_ascii(input.name, np, gl, 0, DATATYPE_ASCII);
    } else if (input.INTERPOLATION){
      read_data_file_interpolation(input.name, np, gl);
    } else if (input.BINARYMPI) {
      read_data_file_mpi(input.name, np, gl, 0);
    } else {
      read_data_file_binary_ascii(input.name, np, gl, 0, DATATYPE_BINARY);
    }
    gl->iter=max(gl->iter,1);
    gl->INIT_FLUID_READ=TRUE;
    gl->INIT_EMFIELD_READ=TRUE;
  }
#ifdef UNSTEADY
  /* default the previous time level to the current one; an explicit m1 file,
     if provided, overwrites it below */
  for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
    for (flux=0; flux<nf; flux++){
      (np)[_ai(gl,i,j,k)].bs->Um1[flux]=(np)[_ai(gl,i,j,k)].bs->U[flux];
    }
#ifdef EMFIELD
    for (flux=0; flux<nfe; flux++){
      (np)[_ai(gl,i,j,k)].bs->Uemfieldm1[flux]=(np)[_ai(gl,i,j,k)].bs->Uemfield[flux];
    }
#endif
  }
  if (input.MM1) read_data_file_mpi(input.name_m1, np, gl, 1);
  if (input.M1) read_data_file_binary_ascii(input.name_m1, np, gl, 1, DATATYPE_BINARY);
#if _RESTIME_BW > 2
  for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
    for (flux=0; flux<nf; flux++){
      (np)[_ai(gl,i,j,k)].bs->Um2[flux]=(np)[_ai(gl,i,j,k)].bs->Um1[flux];
    }
  }
  if (input.MM2) read_data_file_mpi(input.name_m2, np, gl, 2);
  if (input.M2) read_data_file_binary_ascii(input.name_m2, np, gl, 2, DATATYPE_BINARY);
#endif
#if _RESTIME_BW > 3
  for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){
    for (flux=0; flux<nf; flux++){
      (np)[_ai(gl,i,j,k)].bs->Um3[flux]=(np)[_ai(gl,i,j,k)].bs->Um2[flux];
    }
  }
  if (input.MM3) read_data_file_mpi(input.name_m3, np, gl, 3);
  if
     (input.M3) read_data_file_binary_ascii(input.name_m3, np, gl, 3, DATATYPE_BINARY);
#endif
#endif
}


/* Compute the interpolation weight of node l with respect to the data-file
   point at x_file.  The weight is zero when the node lies outside the
   radiusmax2 sphere around x_file; otherwise the node offset is expressed in
   the local (dx1,dx2,dx3) metrics of the data point via an analytical matrix
   inversion, its 3-norm is taken, and the weight decays with that normalized
   distance down to a 1e-16 floor. */
void find_interpolation_weight(np_t *np, gl_t *gl, long l, dim_t x_file, dim_t dx1_file,
#ifdef _2DL
                               dim_t dx2_file,
#endif
#ifdef _3DL
                               dim_t dx3_file,
#endif
                               double radiusmax2, double *thisweight){
  double distance;
  EXM_mat_t mat1,mat2,mat3,mat1inv;
  long dim;
  distance=0.0;
  for (dim=0; dim<nd; dim++) distance+=sqr(_x(np[l],dim)-x_file[dim]);
  *thisweight=0.0;
  if (distance<radiusmax2) {
    /* mat1 columns are the local grid-metric vectors at the data point */
    EXM_init_matrix(&mat1, nd, nd);
    for (dim=0; dim<nd; dim++){
      mat1.cont[EXM_aim(mat1.glm,dim,0)]=dx1_file[dim];
#ifdef _2DL
      mat1.cont[EXM_aim(mat1.glm,dim,1)]=dx2_file[dim];
#endif
#ifdef _3DL
      mat1.cont[EXM_aim(mat1.glm,dim,2)]=dx3_file[dim];
#endif
    }
    EXM_init_matrix(&mat1inv, nd, nd);
    EXM_invert_matrix_analytical(mat1, &mat1inv);
    EXM_init_matrix(&mat2, nd, 1);
    for (dim=0; dim<nd; dim++){
      mat2.cont[EXM_aim(mat2.glm,dim,0)]=_x(np[l],dim)-x_file[dim];
    }
    /* mat3 = mat1^-1 * (x_node - x_file): offset in local metric units */
    EXM_init_matrix(&mat3, nd, 1);
    EXM_multiply_matrices(mat1inv, mat2, &mat3);
    *thisweight=0.0;
    //for (dim=0; dim<nd; dim++) thisweight=max(thisweight,fabs(mat3.cont[EXM_aim(mat3.glm,dim,0)]));
    for (dim=0; dim<nd; dim++) *thisweight+=fabs(pow(fabs(mat3.cont[EXM_aim(mat3.glm,dim,0)]),3.0));
    *thisweight=fabs(pow(*thisweight,1.0/3.0));
    /* decreasing function of the normalized distance, floored at 1e-16 */
    *thisweight=max(1e-16,max(0.0003-(*thisweight)*0.00001,1.0-(*thisweight)));
    EXM_free_matrix(&mat1);
    EXM_free_matrix(&mat1inv);
    EXM_free_matrix(&mat2);
    EXM_free_matrix(&mat3);
  }
}


/* Return TRUE if any valid node of zone lies within sqrt(radiusmax2) of
   x_file; on success *i,*j,*k hold the indices of the first such node
   (scanned in i-outer order).  NOTE(review): in 2D builds the _3DL block is
   compiled out so *k is never assigned here — presumably _ai() ignores it in
   2D; confirm. */
bool is_interpolation_occurring_in_zone(np_t *np, gl_t *gl, int TYPELEVEL, dim_t x_file, double radiusmax2, zone_t zone, long *i, long *j, long *k){
  long dim;
  double distance;
  bool FOUND;
  FOUND=FALSE;
  // fprintf(stderr,"zone.is=%ld zone.ie=%ld\n",zone.is,zone.ie);
  //if (zone.ie>gl->domain_all.ie) fatal_error("problem here..");
  //if (zone.is<gl->domain_all.is) fatal_error("problem here..");
  *i=zone.is-1;
  do {
    (*i)++;
    *j=zone.js-1;
    do {
      (*j)++;
#ifdef _3DL
      *k=zone.ks-1;
      do {
        (*k)++;
#endif
        if (is_node_valid(np[_ai(gl,*i,*j,*k)],TYPELEVEL)){
          distance=0.0;
          for (dim=0; dim<nd; dim++) distance+=sqr(_x(np[_ai(gl,*i,*j,*k)],dim)-x_file[dim]);
          if (distance<radiusmax2) {
            //if (zone.is==zone.ie) fprintf(stderr,"[[%E %E %ld,%ld,%ld %E %E]]\n",distance,radiusmax2,*i,*j,*k,_x(np[_ai(gl,*i,*j,*k)],0),x_file[0]);
            FOUND=TRUE;
          }
        }
#ifdef _3DL
      } while(!FOUND && *k<zone.ke);
#endif
    } while(!FOUND && *j<zone.je);
  } while(!FOUND && *i<zone.ie);
  return(FOUND);
}


/* Shrink/grow *zone along i so that it tightly brackets the i-stations where
   the data point at x_file can interpolate onto valid nodes.  First checks
   the given zone; if nothing is found there, single-i-station probes are
   moved outward (right of zone->ie, then left of zone->is) until a hit.  The
   found station is then extended right and left one station at a time while
   interpolation keeps occurring.  Returns FALSE if no station within the
   effective domain interpolates. */
bool find_interpolation_zone(np_t *np, gl_t *gl, int TYPELEVEL, dim_t x_file, double radiusmax2, zone_t *zone){
  long i,j,k,offset,imem;
  bool FOUNDWITHIN,FOUNDLEFT,FOUNDRIGHT,FOUND;
  zone_t istationzone,domain_eff;
  domain_eff=_zone_intersection(gl->domain_all,gl->domain_lim);
  FOUNDWITHIN=FALSE;
  FOUNDLEFT=FALSE;
  FOUNDRIGHT=FALSE;
  istationzone=domain_eff;
  if (is_interpolation_occurring_in_zone(np,gl,TYPELEVEL,x_file,radiusmax2,*zone,&i,&j,&k)){
    /* hit inside the supplied zone: start from the i-station of the hit */
    istationzone.is=i;
    istationzone.ie=i;
    FOUNDWITHIN=TRUE;
  } else {
    /* probe outward, one i-station at a time, right then left */
    offset=0;
    do {
      offset++;
      if (zone->ie+offset<=domain_eff.ie) {
        istationzone.ie=zone->ie+offset;
        istationzone.is=zone->ie+offset;
        if (is_interpolation_occurring_in_zone(np,gl,TYPELEVEL,x_file,radiusmax2,istationzone,&i,&j,&k)){
          FOUNDLEFT=TRUE;
        }
      }
      if (zone->is-offset>=domain_eff.is && !FOUNDLEFT) {
        istationzone.ie=zone->is-offset;
        istationzone.is=zone->is-offset;
        if (is_interpolation_occurring_in_zone(np,gl,TYPELEVEL,x_file,radiusmax2,istationzone,&i,&j,&k)){
          FOUNDRIGHT=TRUE;
        }
      }
    } while (!FOUNDLEFT && !FOUNDRIGHT && offset<=(domain_eff.ie-domain_eff.is+1) );
  }
  if (FOUNDRIGHT || FOUNDLEFT || FOUNDWITHIN){
    imem=istationzone.is;
    /* extend the right edge while interpolation keeps occurring */
    if (FOUNDRIGHT){
      zone->ie=imem;
    } else {
      FOUND=TRUE;
      while (istationzone.ie<domain_eff.ie && FOUND) {
        istationzone.ie++;
        istationzone.is++;
        FOUND=is_interpolation_occurring_in_zone(np,gl,TYPELEVEL,x_file,radiusmax2,istationzone,&i,&j,&k);
      }
      if (!FOUND) zone->ie=istationzone.ie-1;
      else zone->ie=domain_eff.ie;
    }
    /* extend the left edge while interpolation keeps occurring */
    if (FOUNDLEFT){
      zone->is=imem;
    } else {
      istationzone.is=imem;
      istationzone.ie=imem;
      FOUND=TRUE;
      while (istationzone.is>domain_eff.is && FOUND) {
        istationzone.is--;
        istationzone.ie--;
        FOUND=is_interpolation_occurring_in_zone(np,gl,TYPELEVEL,x_file,radiusmax2,istationzone,&i,&j,&k);
      }
      if (!FOUND) zone->is=istationzone.is+1;
      else zone->is=domain_eff.is;
    }
  }
  return(FOUNDRIGHT || FOUNDLEFT || FOUNDWITHIN);
}


/* Return TRUE if the data point at x_file lies inside the bounding box
   [xmin,xmax] extended by sqrt(radiusmax2) on every side. */
bool is_data_point_in_domain(dim_t x_file, dim_t xmin, dim_t xmax, double radiusmax2){
  bool INDOMAIN;
  long dim;
  INDOMAIN=TRUE;
  for (dim=0; dim<nd; dim++) {
    if (INDOMAIN) {
      if (x_file[dim]<xmin[dim] && sqr(x_file[dim]-xmin[dim])>radiusmax2) INDOMAIN=FALSE;
    }
  }
  for (dim=0; dim<nd; dim++) {
    if (INDOMAIN) {
      if (x_file[dim]>xmax[dim] && sqr(x_file[dim]-xmax[dim])>radiusmax2) INDOMAIN=FALSE;
    }
  }
  return(INDOMAIN);
}


/* Read a "WARPINTFORMAT001" interpolation data file and initialize the nodes
   of the current (possibly different) grid by distance-weighted interpolation
   of the file's data points: fluid properties first, then (when compiled in)
   the emfield properties. */
void read_data_file_interpolation(char *filename, np_t *np, gl_t *gl){
  FILE *datafile;
  char data_format_str[100];
  long i,j,k,l_file,cnt,dim,cntzone;
  long numsubzone, numflux_read,numspec_read,numdim_read,numnodes;
  double tmp_dt,tmp_time;
  double *weight,*radiusmax2_file,thisweight;   /* accumulated weight per node */
  zone_t *subzone;
  dim_t *xmin,*xmax;     /* per-subzone bounding boxes of the valid nodes */
  initvar_t *initvar;
  initvar_t *initvar_file;
  zone_t zone;
  long numsubzone_desired;
#ifdef EMFIELD
  initvar_emfield_t *initvar_emfield;
  initvar_emfield_t *initvar_emfield_file;
#endif
  bool FORMAT001;
  dim_t *dx1_file,*x_file;
#ifdef _2DL
  dim_t *dx2_file;
#endif
#ifdef _3DL
  dim_t *dx3_file;
#endif
  int cnterror;
#ifdef OPENMPTHREADS
  omp_lock_t *nodelock;   /* one lock per node to serialize weight updates */
#endif
#ifdef DISTMPI
  int rank,numproc;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &numproc);
  MPI_Barrier(MPI_COMM_WORLD);
#endif
  weight=(double *)malloc(sizeof(double)*(gl->domain_lim.ie+4)
#ifdef _2DL
                          *(gl->domain_lim.je+4)
#endif
#ifdef _3DL
                          *(gl->domain_lim.ke+4)
#endif
                         );
#ifdef OPENMPTHREADS
  /* NOTE(review): this allocation is sized with sizeof(double) but stores
     omp_lock_t elements — it under-allocates if
     sizeof(omp_lock_t)>sizeof(double); should presumably be
     sizeof(omp_lock_t). Confirm and fix. */
  nodelock=(omp_lock_t *)malloc(sizeof(double)*(gl->domain_lim.ie+4)
#ifdef _2DL
                                *(gl->domain_lim.je+4)
#endif
#ifdef _3DL
                                *(gl->domain_lim.ke+4)
#endif
                               );
#endif
  datafile = fopen(filename, "r");
  if (datafile==NULL) fatal_error("Having problems opening interpolation datafile %s.",filename);
  /* first do the fluid
properties */ initvar=(initvar_t *)malloc(sizeof(initvar_t)*(gl->domain_lim.ie+4) #ifdef _2DL *(gl->domain_lim.je+4) #endif #ifdef _3DL *(gl->domain_lim.ke+4) #endif ); for (cnt=0; cnt<16; cnt++){ if (fscanf(datafile,"%c",&(data_format_str[cnt]))!=1) { fatal_error("Problem with fscanf in read_data_file_interpolation()."); } } data_format_str[16]=EOS; wfprintf(stdout,"Reading interpolation data file %s ",filename); FORMAT001=FALSE; if (strcmp("WARPINTFORMAT001",data_format_str)==0) { wfprintf(stdout,"in CFDWARP format 001.."); FORMAT001=TRUE; } if (FORMAT001) { if (fscanf(datafile," numnodes=%ld nf=%ld nd=%ld ns=%ld windowis=%ld windowie=%ld iter=%ld effiter_U=%lg effiter_R=%lg CFL=%lg time=%lg dt=%lg%*[^\n]", &numnodes,&numflux_read,&numdim_read,&numspec_read,&(gl->window.is),&(gl->window.ie), &(gl->iter),&(gl->effiter_U),&(gl->effiter_R),&(gl->CFL),&(tmp_time),&tmp_dt)!=12) fatal_error("Problem reading interpolation data file."); #ifdef UNSTEADY gl->time=tmp_time; gl->dt=tmp_dt; #endif fgetc(datafile); if (numdim_read!=nd) fatal_error("Number of dimensions read (%ld) does not equal current number of dimensions (%ld).",numdim_read,nd); if (numspec_read!=ns) fatal_error("Number of species read (%ld) does not equal current number of species (%ld).",numspec_read,ns); if (numflux_read!=nf) fatal_error("Number of fluxes read (%ld) does not equal current number of fluxes (%ld).",numflux_read,nf); } else { fatal_error("Interpolation file format unknown."); } /* read data and store in ram */ initvar_file=(initvar_t *)malloc(numnodes*sizeof(initvar_t)); x_file=(dim_t *)malloc(numnodes*sizeof(dim_t)); dx1_file=(dim_t *)malloc(numnodes*sizeof(dim_t)); #ifdef _2DL dx2_file=(dim_t *)malloc(numnodes*sizeof(dim_t)); #endif #ifdef _3DL dx3_file=(dim_t *)malloc(numnodes*sizeof(dim_t)); #endif radiusmax2_file=(double *)malloc(numnodes*sizeof(double)); for (l_file=0; l_file<numnodes; l_file++){ cnterror=0; if (fread(initvar_file[l_file], sizeof(initvar_t), 1, datafile)!=1) 
cnterror++; if (fread(x_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++; if (fread(dx1_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++; #ifdef _2DL if (fread(dx2_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++; #endif #ifdef _3DL if (fread(dx3_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++; #endif if (cnterror>0) fatal_error("Could not read all data properly."); radiusmax2_file[l_file]=0.0e0; for (dim=0; dim<nd; dim++) radiusmax2_file[l_file]+=sqr(fabs(dx1_file[l_file][dim]) #ifdef _2DL +fabs(dx2_file[l_file][dim]) #endif #ifdef _3DL +fabs(dx3_file[l_file][dim]) #endif ); radiusmax2_file[l_file]*=1.1; } for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ weight[_ai(gl,i,j,k)]=0.0e0; #ifdef OPENMPTHREADS omp_init_lock(&(nodelock[_ai(gl,i,j,k)])); #endif for (cnt=0; cnt<numinitvar; cnt++) (initvar[_ai(gl,i,j,k)])[cnt]=0.0; } zone=_zone_intersection(gl->domain_all,gl->domain_lim); subzone=(zone_t *)malloc(sizeof(zone_t)); find_subzones_in_zone_given_zonelength(SUBZONE_DESIRED_WIDTH, zone, &numsubzone, &subzone); #ifdef OPENMPTHREADS numsubzone_desired=MIN_NUMSUBZONE_PER_THREAD*omp_get_max_threads(); #else numsubzone_desired=MIN_NUMSUBZONE_PER_THREAD; #endif if (numsubzone<numsubzone_desired) find_subzones_in_zone_given_numsubzone(zone, numsubzone_desired, &numsubzone, &subzone); xmin=(dim_t *)malloc(numsubzone*sizeof(dim_t)); xmax=(dim_t *)malloc(numsubzone*sizeof(dim_t)); for (cntzone=0; cntzone<numsubzone; cntzone++){ for (dim=0; dim<nd; dim++){ xmin[cntzone][dim]=1e99; xmax[cntzone][dim]=-1e99; } for_ijk(subzone[cntzone],is,js,ks,ie,je,ke){ if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)){ for (dim=0; dim<nd; dim++){ xmin[cntzone][dim]=min(xmin[cntzone][dim],_x(np[_ai(gl,i,j,k)],dim)); xmax[cntzone][dim]=max(xmax[cntzone][dim],_x(np[_ai(gl,i,j,k)],dim)); } } } } #ifdef DISTMPI MPI_Barrier(MPI_COMM_WORLD); wfprintf(stdout,"Fluid/%ld",numsubzone*numproc); #else wfprintf(stdout,"Fluid/%ld",numsubzone); #endif #if defined(OPENMPTHREADS) 
#pragma omp parallel for private(l_file,cntzone,dim,zone,i,j,k,cnt,thisweight) schedule(dynamic) #endif for (cntzone=0; cntzone<numsubzone; cntzone++){ for (l_file=0; l_file<numnodes; l_file++){ if (is_data_point_in_domain(x_file[l_file],xmin[cntzone],xmax[cntzone],radiusmax2_file[l_file])){ zone=subzone[cntzone]; if (find_interpolation_zone(np,gl,TYPELEVEL_FLUID,x_file[l_file],radiusmax2_file[l_file],&zone)){ for_jik(zone,is,js,ks,ie,je,ke){ if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)){ find_interpolation_weight(np,gl,_ai(gl,i,j,k),x_file[l_file],dx1_file[l_file], #ifdef _2DL dx2_file[l_file], #endif #ifdef _3DL dx3_file[l_file], #endif radiusmax2_file[l_file],&thisweight); #ifdef OPENMPTHREADS omp_set_lock(&(nodelock[_ai(gl,i,j,k)])); #endif if (thisweight>1e-99) { weight[_ai(gl,i,j,k)]+=thisweight; for (cnt=0; cnt<numinitvar; cnt++) initvar[_ai(gl,i,j,k)][cnt]+=thisweight*initvar_file[l_file][cnt]; } #ifdef OPENMPTHREADS omp_unset_lock(&(nodelock[_ai(gl,i,j,k)])); #endif } } } } } fprintf(stdout,"."); fflush(stdout); // if (mod(cntzone,numsubzone/100+1)==0) wfprintf(stdout,"."); } #ifdef OPENMPTHREADS #pragma omp parallel for private(i,j,k,cnt) schedule(static) #endif for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ if (weight[_ai(gl,i,j,k)]>1e-99 && is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_FLUID)) { for (cnt=0; cnt<numinitvar; cnt++) initvar[_ai(gl,i,j,k)][cnt]=initvar[_ai(gl,i,j,k)][cnt]/weight[_ai(gl,i,j,k)]; init_node_fluid(np,_ai(gl,i,j,k), gl, defaultinitvartypefluid, initvar[_ai(gl,i,j,k)]); np[_ai(gl,i,j,k)].INIT_FLUID=TRUE; } } free(initvar); free(initvar_file); /* second do the emfield properties */ #ifdef EMFIELD initvar_emfield=(initvar_emfield_t *)malloc(sizeof(initvar_emfield_t)*(gl->domain_lim.ie+4) #ifdef _2DL *(gl->domain_lim.je+4) #endif #ifdef _3DL *(gl->domain_lim.ke+4) #endif ); for (cnt=0; cnt<16; cnt++){ if (fscanf(datafile,"%c",&(data_format_str[cnt]))!=1){ fatal_error("Problem with fscanf in emfield part of 
read_data_file_interpolation()."); } } data_format_str[16]=EOS; FORMAT001=FALSE; if (strcmp("WARPINTFORMAT001",data_format_str)==0) { FORMAT001=TRUE; } if (FORMAT001) { if (fscanf(datafile," numnodes_emfield=%ld nfe=%ld nd=%ld Lc=%lg effiter_U_emfield=%lg effiter_R_emfield=%lg%*[^\n]", &numnodes,&numflux_read,&numdim_read,&(gl->Lc),&(gl->effiter_U_emfield),&(gl->effiter_R_emfield))!=6){ fatal_error("Problem reading emfield preambule in interpolating file."); } fgetc(datafile); if (numdim_read!=nd) fatal_error("Number of dimensions read (%ld) does not equal current number of dimensions (%ld).",numdim_read,nd); if (numflux_read!=nfe) fatal_error("Number of fluxes read (%ld) does not equal current number of emfield fluxes (%ld).",numflux_read,nfe); gl->Lc=1.0e0; } else { fatal_error("Interpolation file format unknown for EMfield variables."); } /* read data and store in ram */ initvar_emfield_file=(initvar_emfield_t *)malloc(numnodes*sizeof(initvar_emfield_t)); x_file=(dim_t *)realloc(x_file,numnodes*sizeof(dim_t)); dx1_file=(dim_t *)realloc(dx1_file,numnodes*sizeof(dim_t)); #ifdef _2DL dx2_file=(dim_t *)realloc(dx2_file,numnodes*sizeof(dim_t)); #endif #ifdef _3DL dx3_file=(dim_t *)realloc(dx3_file,numnodes*sizeof(dim_t)); #endif radiusmax2_file=(double *)realloc(radiusmax2_file,numnodes*sizeof(double)); for (l_file=0; l_file<numnodes; l_file++){ cnterror=0; if (fread(initvar_emfield_file[l_file], sizeof(initvar_emfield_t), 1, datafile)!=1) cnterror++; if (fread(x_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++; if (fread(dx1_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++; #ifdef _2DL if (fread(dx2_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++; #endif #ifdef _3DL if (fread(dx3_file[l_file], sizeof(dim_t), 1, datafile)!=1) cnterror++; #endif if (cnterror>0) fatal_error("Could not read all data properly."); radiusmax2_file[l_file]=0.0e0; for (dim=0; dim<nd; dim++) radiusmax2_file[l_file]+=sqr(fabs(dx1_file[l_file][dim]) #ifdef _2DL 
+fabs(dx2_file[l_file][dim]) #endif #ifdef _3DL +fabs(dx3_file[l_file][dim]) #endif ); radiusmax2_file[l_file]*=1.1; } for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ weight[_ai(gl,i,j,k)]=0.0e0; for (cnt=0; cnt<numinitvar_emfield; cnt++) (initvar_emfield[_ai(gl,i,j,k)])[cnt]=0.0; } for (cntzone=0; cntzone<numsubzone; cntzone++){ for (dim=0; dim<nd; dim++){ xmin[cntzone][dim]=1e99; xmax[cntzone][dim]=-1e99; } for_ijk(subzone[cntzone],is,js,ks,ie,je,ke){ if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)){ for (dim=0; dim<nd; dim++){ xmin[cntzone][dim]=min(xmin[cntzone][dim],_x(np[_ai(gl,i,j,k)],dim)); xmax[cntzone][dim]=max(xmax[cntzone][dim],_x(np[_ai(gl,i,j,k)],dim)); } } } } #ifdef DISTMPI MPI_Barrier(MPI_COMM_WORLD); wfprintf(stdout,"EMfield/%ld",numsubzone*numproc); #else wfprintf(stdout,"EMfield/%ld",numsubzone); #endif #if defined(OPENMPTHREADS) //&& !defined(DISTMPI) #pragma omp parallel for private(l_file,cntzone,cnt,thisweight,dim,zone,i,j,k) schedule(dynamic) #endif for (cntzone=0; cntzone<numsubzone; cntzone++){ for (l_file=0; l_file<numnodes; l_file++){ if (is_data_point_in_domain(x_file[l_file],xmin[cntzone],xmax[cntzone],radiusmax2_file[l_file])){ zone=subzone[cntzone]; if (find_interpolation_zone(np,gl,TYPELEVEL_EMFIELD,x_file[l_file],radiusmax2_file[l_file],&zone)){ for_jik(zone,is,js,ks,ie,je,ke){ if (is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)){ find_interpolation_weight(np,gl,_ai(gl,i,j,k),x_file[l_file],dx1_file[l_file], #ifdef _2DL dx2_file[l_file], #endif #ifdef _3DL dx3_file[l_file], #endif radiusmax2_file[l_file],&thisweight); #ifdef OPENMPTHREADS omp_set_lock(&(nodelock[_ai(gl,i,j,k)])); #endif if (thisweight>1e-99) { weight[_ai(gl,i,j,k)]+=thisweight; for (cnt=0; cnt<numinitvar_emfield; cnt++) initvar_emfield[_ai(gl,i,j,k)][cnt]+=thisweight*initvar_emfield_file[l_file][cnt]; } #ifdef OPENMPTHREADS omp_unset_lock(&(nodelock[_ai(gl,i,j,k)])); #endif } } } } } // if (mod(cntzone,numsubzone/100+1)==0) wfprintf(stdout,"."); 
fprintf(stdout,"."); fflush(stdout); } #ifdef OPENMPTHREADS #pragma omp parallel for private(i,j,k,cnt) schedule(static) #endif for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ if (weight[_ai(gl,i,j,k)]>1e-99 && is_node_valid(np[_ai(gl,i,j,k)],TYPELEVEL_EMFIELD)) { for (cnt=0; cnt<numinitvar_emfield; cnt++) initvar_emfield[_ai(gl,i,j,k)][cnt]=initvar_emfield[_ai(gl,i,j,k)][cnt]/weight[_ai(gl,i,j,k)]; init_node_emfield(np[_ai(gl,i,j,k)], gl, defaultinitvartypeemfield, initvar_emfield[_ai(gl,i,j,k)]); np[_ai(gl,i,j,k)].INIT_EMFIELD=TRUE; } } free(initvar_emfield); free(initvar_emfield_file); #endif //EMFIELD free(subzone); free(xmin); free(xmax); fclose(datafile); free(weight); #ifdef OPENMPTHREADS for_ijk(gl->domain_lim,is,js,ks,ie,je,ke){ omp_destroy_lock(&(nodelock[_ai(gl,i,j,k)])); } free(nodelock); #endif #ifdef DISTMPI MPI_Barrier(MPI_COMM_WORLD); if (rank!=0) { gl->effiter_U=0.0; gl->effiter_R=0.0; #ifdef EMFIELD gl->effiter_U_emfield=0.0; gl->effiter_R_emfield=0.0; #endif } #endif wfprintf(stdout,"done;\n"); free(x_file); free(dx1_file); #ifdef _2DL free(dx2_file); #endif #ifdef _3DL free(dx3_file); #endif free(radiusmax2_file); } void write_data_file_interpolation(char *filename, np_t *np, gl_t *gl){ FILE *datafile; long i,j,k,cnt; dim_t dx1,x; #ifdef _2DL dim_t dx2; #endif #ifdef _3DL dim_t dx3; #endif double tmp_time, tmp_dt; long numnodes,dim; int TYPELEVEL,pass,passmax; bool *NODEVALID; initvar_t initvar; double effiter_U,effiter_R; #ifdef EMFIELD double effiter_U_emfield,effiter_R_emfield; initvar_emfield_t initvar_emfield; #endif #ifdef DISTMPI int rank; MPI_Status MPI_Status1; #endif /* nodes may be suspended. Hence, ensure that appropriate nodes are resumed. 
*/
  /* suspended nodes hold no usable state: bring every node in the domain back
     before sampling it for the interpolation file */
  resume_nodes_in_zone(np,gl,gl->domain);
  /* one validity flag per node of the *global* (all-rank) limit domain */
  NODEVALID=(bool *)malloc(sizeof(bool)*(gl->domain_lim_all.ie-gl->domain_lim_all.is+1)
#ifdef _2DL
                                       *(gl->domain_lim_all.je-gl->domain_lim_all.js+1)
#endif
#ifdef _3DL
                                       *(gl->domain_lim_all.ke-gl->domain_lim_all.ks+1)
#endif
                          );
  /* start from the local effective-iteration counters ... */
  effiter_U=gl->effiter_U;
  effiter_R=gl->effiter_R;
#ifdef EMFIELD
  effiter_U_emfield=gl->effiter_U_emfield;
  effiter_R_emfield=gl->effiter_R_emfield;
#endif
#ifdef DISTMPI
  /* ... and, when distributed, sum them over all ranks so the file header
     reflects the whole computation */
  MPI_Barrier(MPI_COMM_WORLD);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Allreduce(&gl->effiter_U, &effiter_U, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(&gl->effiter_R, &effiter_R, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
#ifdef EMFIELD
  MPI_Allreduce(&gl->effiter_U_emfield, &effiter_U_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
  MPI_Allreduce(&gl->effiter_R_emfield, &effiter_R_emfield, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
#endif
#endif
  datafile = wfopen(filename, "w");
  wfprintf(stdout,"Writing to CFDWARP interpolation data file %s..",filename);
  /* pass 1 writes the fluid records; pass 2 (EMFIELD builds only) the emfield records */
#ifdef EMFIELD
  passmax=2;
#else
  passmax=1;
#endif
  for (pass=1; pass<=passmax; pass++){
    if (pass==1){
      TYPELEVEL=TYPELEVEL_FLUID;
    } else {
#ifdef EMFIELD
      TYPELEVEL=TYPELEVEL_EMFIELD;
#endif
    }
#ifdef DISTMPI
    MPI_Barrier(MPI_COMM_WORLD);
#endif
    /* mark which nodes of the global domain are valid at this type level,
       then count them so the header can announce how many records follow */
    find_NODEVALID_on_domain_all(np, gl, TYPELEVEL, NODEVALID);
    numnodes=0;
    for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
      if (NODEVALID[_ai_all(gl,i,j,k)]) {
        numnodes++;
      }
    }
    /* text preamble (format tag + counters); the per-node payload below is binary */
    if (pass==1){
#ifdef UNSTEADY
      tmp_time=gl->time;
      tmp_dt=gl->dt;
#else
      tmp_time=0.0;
      tmp_dt=dt_steady;
#endif
      wfprintf(datafile,"WARPINTFORMAT001 numnodes=%ld nf=%ld nd=%ld ns=%ld windowis=%ld windowie=%ld iter=%ld effiter_U=%E effiter_R=%E CFL=%E time=%E dt=%E\n",numnodes,nf,nd,
               ns,gl->window.is,gl->window.ie,gl->iter,effiter_U,effiter_R,gl->CFL,tmp_time,tmp_dt);
    } else {
#ifdef EMFIELD
      wfprintf(datafile,"WARPINTFORMAT001 numnodes_emfield=%ld nfe=%ld nd=%ld Lc=%E effiter_U_emfield=%E effiter_R_emfield=%E\n",numnodes,nfe,nd,gl->Lc,effiter_U_emfield,effiter_R_emfield);
#endif
    }
    for_ijk(gl->domain_all,is,js,ks,ie,je,ke){
      /* gather initvar for this node; under DISTMPI the owning rank computes it
         and ships it to rank 0, which performs all file output */
#ifdef DISTMPI
      if (pass==1){
        if (_node_rank(gl,i,j,k)==rank) {
          if (NODEVALID[_ai_all(gl,i,j,k)]) {
            find_default_initvar(np, gl, _ai(gl,i,j,k), initvar);
          } else {
            for (cnt=0; cnt<numinitvar; cnt++) initvar[cnt]=0.0;
          }
          if (rank!=0) {
            MPI_Ssend(initvar,numinitvar,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
          }
        }
        if (rank==0 && _node_rank(gl,i,j,k)!=0){
          MPI_Recv(initvar,numinitvar,MPI_DOUBLE,_node_rank(gl,i,j,k),0,MPI_COMM_WORLD,&MPI_Status1);
        }
      } else {
#ifdef EMFIELD
        if (_node_rank(gl,i,j,k)==rank) {
          if (NODEVALID[_ai_all(gl,i,j,k)]) {
            find_default_initvar_emfield(np, gl, _ai(gl,i,j,k),initvar_emfield);
          } else {
            for (cnt=0; cnt<numinitvar_emfield; cnt++) initvar_emfield[cnt]=0.0;
          }
          if (rank!=0) {
            MPI_Ssend(initvar_emfield,numinitvar_emfield,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
          }
        }
        if (rank==0 && _node_rank(gl,i,j,k)!=0){
          MPI_Recv(initvar_emfield,numinitvar_emfield,MPI_DOUBLE,_node_rank(gl,i,j,k),0,MPI_COMM_WORLD,&MPI_Status1);
        }
#endif
      }
#else
      /* serial build: compute initvar in place */
      if (pass==1){
        if (NODEVALID[_ai_all(gl,i,j,k)]) {
          find_default_initvar(np, gl, _ai(gl,i,j,k), initvar);
        } else {
          for (cnt=0; cnt<numinitvar; cnt++) initvar[cnt]=0.0;
        }
      } else {
#ifdef EMFIELD
        if (NODEVALID[_ai_all(gl,i,j,k)]) {
          find_default_initvar_emfield(np, gl, _ai(gl,i,j,k), initvar_emfield);
        } else {
          for (cnt=0; cnt<numinitvar_emfield; cnt++) initvar_emfield[cnt]=0.0;
        }
#endif
      }
#endif
      if (NODEVALID[_ai_all(gl,i,j,k)]) {
#ifdef DISTMPI
        if (_node_rank(gl,i,j,k)==rank) {
#endif
          /* node position, plus per-dimension grid spacings dx1/dx2/dx3:
             centered difference when both neighbours along an axis are valid,
             otherwise one-sided toward whichever neighbour exists */
          for (dim=0; dim<nd; dim++) x[dim]=_x(np[_ai(gl,i,j,k)],dim);
          for (dim=0; dim<nd; dim++){
            if ((i<gl->domain_all.ie && NODEVALID[_ai_all(gl,i+1,j,k)])
             && (i>gl->domain_all.is && NODEVALID[_ai_all(gl,i-1,j,k)])) {
              dx1[dim]=0.5*(np[_ai(gl,i+1,j,k)].bs->x[dim]-np[_ai(gl,i-1,j,k)].bs->x[dim]);
            } else {
              if (i<gl->domain_all.ie && NODEVALID[_ai_all(gl,i+1,j,k)]) {
                dx1[dim]=(np[_ai(gl,i+1,j,k)].bs->x[dim]-np[_ai(gl,i,j,k)].bs->x[dim]);
              } else {
                if (i>gl->domain_all.is && NODEVALID[_ai_all(gl,i-1,j,k)]) {
                  dx1[dim]=(np[_ai(gl,i,j,k)].bs->x[dim]-np[_ai(gl,i-1,j,k)].bs->x[dim]);
                } else {
                  fatal_error("Couldn't find adjacent valid node along i needed for interpolation.");
                }
              }
            }
#ifdef _2DL
            if ((j<gl->domain_all.je && NODEVALID[_ai_all(gl,i,j+1,k)])
             && (j>gl->domain_all.js && NODEVALID[_ai_all(gl,i,j-1,k)])) {
              dx2[dim]=0.5*(np[_ai(gl,i,j+1,k)].bs->x[dim]-np[_ai(gl,i,j-1,k)].bs->x[dim]);
            } else {
              if (j<gl->domain_all.je && NODEVALID[_ai_all(gl,i,j+1,k)]) {
                dx2[dim]=(np[_ai(gl,i,j+1,k)].bs->x[dim]-np[_ai(gl,i,j,k)].bs->x[dim]);
              } else {
                if (j>gl->domain_all.js && NODEVALID[_ai_all(gl,i,j-1,k)]) {
                  dx2[dim]=(np[_ai(gl,i,j,k)].bs->x[dim]-np[_ai(gl,i,j-1,k)].bs->x[dim]);
                } else {
                  fatal_error("Couldn't find adjacent valid node along j needed for interpolation.");
                }
              }
            }
#endif
#ifdef _3DL
            if ((k<gl->domain_all.ke && NODEVALID[_ai_all(gl,i,j,k+1)])
             && (k>gl->domain_all.ks && NODEVALID[_ai_all(gl,i,j,k-1)])) {
              dx3[dim]=0.5*(np[_ai(gl,i,j,k+1)].bs->x[dim]-np[_ai(gl,i,j,k-1)].bs->x[dim]);
            } else {
              if (k<gl->domain_all.ke && NODEVALID[_ai_all(gl,i,j,k+1)]) {
                dx3[dim]=(np[_ai(gl,i,j,k+1)].bs->x[dim]-np[_ai(gl,i,j,k)].bs->x[dim]);
              } else {
                if (k>gl->domain_all.ks && NODEVALID[_ai_all(gl,i,j,k-1)]) {
                  dx3[dim]=(np[_ai(gl,i,j,k)].bs->x[dim]-np[_ai(gl,i,j,k-1)].bs->x[dim]);
                } else {
                  fatal_error("Couldn't find adjacent valid node along k needed for interpolation.");
                }
              }
            }
#endif
          }
#ifdef DISTMPI
        }
        /* forward x and the spacings to rank 0 for writing */
        if (rank!=0 && _node_rank(gl,i,j,k)==rank)
          MPI_Ssend(x,nd,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
        if (rank==0 && _node_rank(gl,i,j,k)!=0)
          MPI_Recv(x,nd,MPI_DOUBLE,_node_rank(gl,i,j,k),0,MPI_COMM_WORLD,&MPI_Status1);
        if (rank!=0 && _node_rank(gl,i,j,k)==rank)
          MPI_Ssend(dx1,nd,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
        if (rank==0 && _node_rank(gl,i,j,k)!=0)
          MPI_Recv(dx1,nd,MPI_DOUBLE,_node_rank(gl,i,j,k),0,MPI_COMM_WORLD,&MPI_Status1);
#ifdef _2DL
        if (rank!=0 && _node_rank(gl,i,j,k)==rank)
          MPI_Ssend(dx2,nd,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
        if (rank==0 && _node_rank(gl,i,j,k)!=0)
          MPI_Recv(dx2,nd,MPI_DOUBLE,_node_rank(gl,i,j,k),0,MPI_COMM_WORLD,&MPI_Status1);
#endif
#ifdef _3DL
        if (rank!=0 && _node_rank(gl,i,j,k)==rank)
          MPI_Ssend(dx3,nd,MPI_DOUBLE,0,0,MPI_COMM_WORLD);
        if (rank==0 && _node_rank(gl,i,j,k)!=0)
          MPI_Recv(dx3,nd,MPI_DOUBLE,_node_rank(gl,i,j,k),0,MPI_COMM_WORLD,&MPI_Status1);
#endif
#endif
        /* binary record per valid node: initvar block, then x, dx1[, dx2][, dx3];
           the reader relies on exactly this order and these sizes */
        if (pass==1) {
          wfwrite(initvar, sizeof(initvar_t), 1, datafile);
        } else {
#ifdef EMFIELD
          wfwrite(initvar_emfield, sizeof(initvar_emfield_t), 1, datafile);
#endif
        }
        wfwrite(x, sizeof(dim_t), 1, datafile);
        wfwrite(dx1, sizeof(dim_t), 1, datafile);
#ifdef _2DL
        wfwrite(dx2, sizeof(dim_t), 1, datafile);
#endif
#ifdef _3DL
        wfwrite(dx3, sizeof(dim_t), 1, datafile);
#endif
      } /* end if nodevalid */
    } /* for_ijk over domain_all */
  } /* pass loop */
#ifdef DISTMPI
  MPI_Barrier(MPI_COMM_WORLD);
#endif
  wfclose(datafile);
  wfprintf(stdout,"done.\n");
  free(NODEVALID);
}
GB_unaryop__ainv_uint32_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint32_uint16
// op(A') function:  GB_tran__ainv_uint32_uint16

// C type:   uint32_t
// A type:   uint16_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = -aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: additive inverse.  The operand is already cast to uint32_t,
// so for this unsigned instance the negation is well-defined modular
// arithmetic (wraps mod 2^32).
#define GB_OP(z, x) \
    z = -x ;

// casting: widen the uint16_t entry to the uint32_t output type first
#define GB_CASTING(z, x) \
    uint32_t z = (uint32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    /* aij = Ax [pA] */                         \
    GB_GETA (aij, Ax, pA) ;                     \
    /* Cx [pC] = op (cast (aij)) */             \
    GB_CASTING (x, aij) ;                       \
    GB_OP (GB_CX (pC), x) ;                     \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Dense element-wise kernel: Cx [p] = -(uint32_t) Ax [p] for all anz entries,
// statically scheduled across nthreads OpenMP threads.
GrB_Info GB_unop__ainv_uint32_uint16
(
    uint32_t *restrict Cx,          // output array, anz entries
    const uint16_t *restrict Ax,    // input array, anz entries
    int64_t anz,                    // number of entries to process
    int nthreads                    // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unaryop_transpose.c, which is specialized by the
// macros above; this wrapper only selects phase 2 and supplies the types.
GrB_Info GB_tran__ainv_uint32_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
cpotrs.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zpotrs.c, normal z -> c, Fri Sep 28 17:38:02 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * @ingroup plasma_potrs
 *
 *  Solves a system of linear equations A * X = B with a Hermitian positive
 *  definite complex matrix A using the Cholesky factorization
 *  A = U^H*U or A = L*L^H computed by plasma_cpotrf.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] n
 *          The order of the matrix A. n >= 0.
 *
 * @param[in] nrhs
 *          The number of right hand sides, i.e., the number of
 *          columns of the matrix B. nrhs >= 0.
 *
 * @param[in,out] pA
 *          The triangular factor U or L from the Cholesky
 *          factorization A = U^H*U or A = L*L^H, computed by
 *          plasma_cpotrf.
 *          Remark: If out-of-place layout translation is used, the
 *          matrix A can be considered as input, however if inplace
 *          layout translation is enabled, the content of A will be
 *          reordered for computation and restored before exiting the
 *          function.
 *
 * @param[in] lda
 *          The leading dimension of the array A. lda >= max(1,n).
 *
 * @param[in,out] pB
 *          On entry, the n-by-nrhs right hand side matrix B.
 *          On exit, if return value = 0, the n-by-nrhs solution matrix X.
 *
 * @param[in] ldb
 *          The leading dimension of the array B. ldb >= max(1,n).
 *
 *******************************************************************************
 *
 * @retval PlasmaSuccess successful exit
 * @retval < 0 if -i, the i-th argument had an illegal value
 *
 *******************************************************************************
 *
 * @sa plasma_omp_cpotrs
 * @sa plasma_cpotrs
 * @sa plasma_dpotrs
 * @sa plasma_spotrs
 * @sa plasma_cpotrf
 *
 ******************************************************************************/
int plasma_cpotrs(plasma_enum_t uplo,
                  int n, int nrhs,
                  plasma_complex32_t *pA, int lda,
                  plasma_complex32_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -3;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -5;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -7;
    }

    // quick return
    if (imax(n, nrhs) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_trsm(plasma, PlasmaComplexFloat, n, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize tile matrix descriptors.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence.
    // BUGFIX: the return values of plasma_sequence_init() and
    // plasma_request_init() were previously ignored; on failure the
    // descriptors would have leaked and the solve would have proceeded
    // with an uninitialized sequence/request.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_request_init() failed");
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_cge2desc(pB, ldb, B, &sequence, &request);

        // Call the tile async function.
        plasma_omp_cpotrs(uplo, A, B, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_cdesc2ge(B, pB, ldb, &sequence, &request);
    }
    // implicit synchronization

    // Free matrices A and B in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * @ingroup plasma_potrs
 *
 *  Solves a system of linear equations using previously
 *  computed Cholesky factorization.
 *  Non-blocking tile version of plasma_cpotrs().
 *  May return before the computation is finished.
 *  Operates on matrices stored by tiles.
 *  All matrices are passed through descriptors.
 *  All dimensions are taken from the descriptors.
 *  Allows for pipelining of operations at runtime.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of A is stored;
 *          - PlasmaLower: Lower triangle of A is stored.
 *
 * @param[in] A
 *          The triangular factor U or L from the Cholesky factorization
 *          A = U^H*U or A = L*L^H, computed by plasma_cpotrf.
 *
 * @param[in,out] B
 *          On entry, the n-by-nrhs right hand side matrix B.
 *          On exit, if return value = 0, the n-by-nrhs solution matrix X.
 *
 * @param[in] sequence
 *          Identifies the sequence of function calls that this call belongs to
 *          (for completion checks and exception handling purposes).  Check
 *          the sequence->status for errors.
 *
 * @param[out] request
 *          Identifies this function call (for exception handling purposes).
 *
 * @retval void
 *          Errors are returned by setting sequence->status and
 *          request->status to error values.  The sequence->status and
 *          request->status should never be set to PlasmaSuccess (the
 *          initial values) since another async call may be setting a
 *          failure value at the same time.
 *
 *******************************************************************************
 *
 * @sa plasma_cpotrs
 * @sa plasma_omp_cpotrs
 * @sa plasma_omp_dpotrs
 * @sa plasma_omp_spotrs
 * @sa plasma_omp_cpotrf
 *
 ******************************************************************************/
void plasma_omp_cpotrs(plasma_enum_t uplo,
                       plasma_desc_t A,
                       plasma_desc_t B,
                       plasma_sequence_t *sequence,
                       plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if ((uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_error("invalid A");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_error("invalid B");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.n == 0 || B.n == 0)
        return;

    // Call the parallel functions.
    // Solve the first triangular system (U^H or L), then the second
    // (U or L^H), each as a parallel left-sided TRSM on the tiles.
    plasma_pctrsm(PlasmaLeft, uplo,
                  uplo == PlasmaUpper ? PlasmaConjTrans : PlasmaNoTrans,
                  PlasmaNonUnit,
                  1.0, A,
                       B,
                  sequence, request);

    plasma_pctrsm(PlasmaLeft, uplo,
                  uplo == PlasmaUpper ? PlasmaNoTrans : PlasmaConjTrans,
                  PlasmaNonUnit,
                  1.0, A,
                       B,
                  sequence, request);
}
matmult.c
/****************************************************************************** * OpenMp Example - Matrix Multiply - C Version * Demonstrates a matrix multiply using OpenMP. * * Modified from here: * https://computing.llnl.gov/tutorials/openMP/samples/C/omp_mm.c * * For PAPI_FP_INS, the exclusive count for the event: * for (null) [OpenMP location: file:matmult.c ] * should be 2E+06 / Number of Threads ******************************************************************************/ #include <stdio.h> #include <stdlib.h> #include "matmult_initialize.h" #ifdef TAU_MPI int provided; #include <mpi.h> /* NOTE: MPI is just used to spawn multiple copies of the kernel to different ranks. This is not a parallel implementation */ #endif /* TAU_MPI */ #ifdef PTHREADS #include <pthread.h> #include <unistd.h> #include <errno.h> /*** NOTE THE ATTR INITIALIZER HERE! ***/ pthread_mutex_t mutexsum; #endif /* PTHREADS */ #define APP_USE_INLINE_MULTIPLY 1 #ifndef MATRIX_SIZE #define MATRIX_SIZE 512 #endif #define NRA MATRIX_SIZE /* number of rows in matrix A */ #define NCA MATRIX_SIZE /* number of columns in matrix A */ #define NCB MATRIX_SIZE /* number of columns in matrix B */ double** allocateMatrix(int rows, int cols) { int i; double **matrix = (double**)malloc((sizeof(double*)) * rows); for (i=0; i<rows; i++) { matrix[i] = (double*)malloc((sizeof(double)) * cols); } return matrix; } void freeMatrix(double** matrix, int rows, int cols) { int i; for (i=0; i<rows; i++) { free(matrix[i]); } free(matrix); } double multiply(double a, double b) { return a * b; } #ifdef TAU_OPENMP // cols_a and rows_b are the same value void compute_nested(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i,j,k; double tmp = 0.0; /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ #pragma omp parallel for private(i,j,k) shared(a,b,c) for (i=0; i<rows_a; i++) { { for (k=0; k<cols_a; k++) { for(j=0; 
j<cols_b; j++) { #ifdef APP_USE_INLINE_MULTIPLY c[i][j] += multiply(a[i][k], b[k][j]); #else tmp = a[i][k]; tmp = tmp * b[k][j]; c[i][j] += tmp; #endif } } } } } #endif // cols_a and rows_b are the same value void compute(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i,j,k; #pragma omp parallel private(i,j,k) shared(a,b,c) { /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ #pragma omp for nowait for (i=0; i<rows_a; i++) { for(j=0; j<cols_b; j++) { for (k=0; k<cols_a; k++) { #ifdef APP_USE_INLINE_MULTIPLY c[i][j] += multiply(a[i][k], b[k][j]); #else /* APP_USE_INLINE_MULTIPLY */ c[i][j] += a[i][k] * b[k][j]; #endif /* APP_USE_INLINE_MULTIPLY */ } } } } /*** End of parallel region ***/ } void compute_interchange(double **a, double **b, double **c, int rows_a, int cols_a, int cols_b) { int i,j,k; #pragma omp parallel private(i,j,k) shared(a,b,c) { /*** Do matrix multiply sharing iterations on outer loop ***/ /*** Display who does which iterations for demonstration purposes ***/ #pragma omp for nowait for (i=0; i<rows_a; i++) { for (k=0; k<cols_a; k++) { for(j=0; j<cols_b; j++) { #ifdef APP_USE_INLINE_MULTIPLY c[i][j] += multiply(a[i][k], b[k][j]); #else /* APP_USE_INLINE_MULTIPLY */ c[i][j] += a[i][k] * b[k][j]; #endif /* APP_USE_INLINE_MULTIPLY */ } } } } /*** End of parallel region ***/ } double do_work(void) { double **a, /* matrix A to be multiplied */ **b, /* matrix B to be multiplied */ **c; /* result matrix C */ a = allocateMatrix(NRA, NCA); b = allocateMatrix(NCA, NCB); c = allocateMatrix(NRA, NCB); /*** Spawn a parallel region explicitly scoping all variables ***/ initialize(a, NRA, NCA); initialize(b, NCA, NCB); initialize(c, NRA, NCB); compute(a, b, c, NRA, NCA, NCB); #if defined(TAU_OPENMP) //if (omp_get_nested()) { compute_nested(a, b, c, NRA, NCA, NCB); //} #endif #ifdef TAU_MPI if (provided == MPI_THREAD_MULTIPLE) { printf("provided is 
MPI_THREAD_MULTIPLE\n"); } else if (provided == MPI_THREAD_FUNNELED) { printf("provided is MPI_THREAD_FUNNELED\n"); } #endif /* TAU_MPI */ compute_interchange(a, b, c, NRA, NCA, NCB); double result = c[0][1]; freeMatrix(a, NRA, NCA); freeMatrix(b, NCA, NCB); freeMatrix(c, NCA, NCB); return result; } #ifdef PTHREADS int busy_sleep() { int i, sum = 0; for (i = 0 ; i < 100000000 ; i++) { sum = sum+i; } return sum; } void * threaded_func(void *data) { int rc; int sum = 0; // compute do_work(); #ifdef APP_DO_LOCK_TEST // test locking - sampling should catch this if ((rc = pthread_mutex_lock(&mutexsum)) != 0) { errno = rc; perror("thread lock error"); exit(1); } fprintf(stderr,"Thread 'sleeping'...\n"); fflush(stderr); sum += busy_sleep(); fprintf(stderr,"Thread 'awake'...\n"); fflush(stderr); if ((rc = pthread_mutex_unlock(&mutexsum)) != 0) { errno = rc; perror("thread unlock error"); exit(1); } pthread_exit((void*) 0); //return NULL; #endif // APP_DO_LOCK_TEST } #endif // PTHREADS int main (int argc, char *argv[]) { #ifdef PTHREADS int ret; pthread_attr_t attr; pthread_t tid1, tid2, tid3; pthread_mutexattr_t Attr; pthread_mutexattr_init(&Attr); pthread_mutexattr_settype(&Attr, PTHREAD_MUTEX_ERRORCHECK); if (pthread_mutex_init(&mutexsum, &Attr)) { printf("Error while using pthread_mutex_init\n"); } #endif /* PTHREADS */ #ifdef TAU_MPI int rc = MPI_SUCCESS; #if defined(PTHREADS) rc = MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided); printf("MPI_Init_thread: provided = %d, MPI_THREAD_MULTIPLE=%d\n", provided, MPI_THREAD_MULTIPLE); #elif defined(TAU_OPENMP) rc = MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided); printf("MPI_Init_thread: provided = %d, MPI_THREAD_FUNNELED=%d\n", provided, MPI_THREAD_FUNNELED); #else rc = MPI_Init(&argc, &argv); #endif /* THREADS */ if (rc != MPI_SUCCESS) { char *errorstring; int length = 0; MPI_Error_string(rc, errorstring, &length); printf("Error: MPI_Init failed, rc = %d\n%s\n", rc, errorstring); exit(1); } #endif 
/* TAU_MPI */ #ifdef PTHREADS if (ret = pthread_create(&tid1, NULL, threaded_func, NULL) ) { printf("Error: pthread_create (1) fails ret = %d\n", ret); exit(1); } if (ret = pthread_create(&tid2, NULL, threaded_func, NULL) ) { printf("Error: pthread_create (2) fails ret = %d\n", ret); exit(1); } if (ret = pthread_create(&tid3, NULL, threaded_func, NULL) ) { printf("Error: pthread_create (3) fails ret = %d\n", ret); exit(1); } #endif /* PTHREADS */ /* On thread 0: */ int i; //for (i = 0 ; i < 100 ; i++) { do_work(); //} #ifdef PTHREADS if (ret = pthread_join(tid1, NULL) ) { printf("Error: pthread_join (1) fails ret = %d\n", ret); exit(1); } if (ret = pthread_join(tid2, NULL) ) { printf("Error: pthread_join (2) fails ret = %d\n", ret); exit(1); } if (ret = pthread_join(tid3, NULL) ) { printf("Error: pthread_join (3) fails ret = %d\n", ret); exit(1); } pthread_mutex_destroy(&mutexsum); #endif /* PTHREADS */ #ifdef TAU_MPI MPI_Finalize(); #endif /* TAU_MPI */ printf ("Done.\n"); return 0; }
GB_binop__gt_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__gt_int32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__gt_int32) // A.*B function (eWiseMult): GB (_AemultB_03__gt_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_int32) // A*D function (colscale): GB (_AxD__gt_int32) // D*A function (rowscale): GB (_DxB__gt_int32) // C+=B function (dense accum): GB (_Cdense_accumB__gt_int32) // C+=b function (dense accum): GB (_Cdense_accumb__gt_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_int32) // C=scalar+B GB (_bind1st__gt_int32) // C=scalar+B' GB (_bind1st_tran__gt_int32) // C=A+scalar GB (_bind2nd__gt_int32) // C=A'+scalar GB (_bind2nd_tran__gt_int32) // C type: bool // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define 
GB_GETA(aij,Ax,pA) \ int32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ int32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x > y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GT || GxB_NO_INT32 || GxB_NO_GT_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__gt_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__gt_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__gt_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__gt_int32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, 
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__gt_int32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__gt_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__gt_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const 
int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__gt_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__gt_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__gt_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__gt_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 
; p < anz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__gt_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB (_bind1st_tran__gt_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = Ax [pA] ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB (_bind2nd_tran__gt_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
trmv_x_dia_u_hi.c
#include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <stdlib.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/**
 * @brief y := beta*y + alpha*A*x for a unit-diagonal, upper-triangular sparse
 *        matrix A stored in DIA format.
 *
 * Only strictly-upper diagonals (distance > 0) are read from A->values; the
 * implicit unit diagonal contributes alpha*x[i] directly. Each OpenMP thread
 * accumulates into its own tmp[] buffer to avoid write races, and the
 * per-thread partials are reduced into y afterwards.
 *
 * @return ALPHA_SPARSE_STATUS_INVALID_VALUE if A is not square,
 *         ALPHA_SPARSE_STATUS_ALLOC_FAILED on allocation failure,
 *         ALPHA_SPARSE_STATUS_SUCCESS otherwise.
 */
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha,
                                      const ALPHA_SPMAT_DIA* A,
                                      const ALPHA_Number* x,
                                      const ALPHA_Number beta,
                                      ALPHA_Number* y)
{
    const ALPHA_INT m = A->rows;
    const ALPHA_INT n = A->cols;
    // A triangular matrix-vector product is only defined for square A.
    if (m != n)
        return ALPHA_SPARSE_STATUS_INVALID_VALUE;

    const ALPHA_INT thread_num = alpha_get_thread_num();

    // Per-thread accumulation buffers, zero-initialized.
    // FIX: the original never checked these allocations and paired malloc()
    // with alpha_free(); allocations are now checked and released with free()
    // to match malloc(). calloc replaces the malloc+memset pair.
    ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
    if (tmp == NULL)
        return ALPHA_SPARSE_STATUS_ALLOC_FAILED;
    for (ALPHA_INT i = 0; i < thread_num; ++i)
    {
        tmp[i] = (ALPHA_Number*)calloc(m, sizeof(ALPHA_Number));
        if (tmp[i] == NULL)
        {
            for (ALPHA_INT j = 0; j < i; ++j)
                free(tmp[j]);
            free(tmp);
            return ALPHA_SPARSE_STATUS_ALLOC_FAILED;
        }
    }

    const ALPHA_INT diags = A->ndiag;

#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < diags; ++i)
    {
        const ALPHA_INT threadId = alpha_get_thread_id();
        const ALPHA_INT dis = A->distance[i];
        // Strictly-upper diagonals only; the main diagonal is implicit (unit).
        if (dis > 0)
        {
            const ALPHA_INT row_start = 0;
            const ALPHA_INT col_start = dis;
            const ALPHA_INT nnz = m - dis;
            // Offset of diagonal i inside A->values (lval = leading dimension).
            const ALPHA_INT start = i * A->lval;
            for (ALPHA_INT j = 0; j < nnz; ++j)
            {
                ALPHA_Number v;
                alpha_mul(v, alpha, A->values[start + j]);
                alpha_madde(tmp[threadId][row_start + j], v, x[col_start + j]);
            }
        }
    }

#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
    for (ALPHA_INT i = 0; i < m; ++i)
    {
        alpha_mul(y[i], beta, y[i]);      // y[i]  = beta * y[i]
        alpha_madde(y[i], alpha, x[i]);   // y[i] += alpha * x[i]  (unit diagonal)
        for (ALPHA_INT j = 0; j < thread_num; ++j)
        {
            alpha_add(y[i], y[i], tmp[j][i]);  // reduce per-thread partials
        }
    }

    // Serial cleanup: parallelizing a handful of free() calls buys nothing.
    for (ALPHA_INT i = 0; i < thread_num; ++i)
        free(tmp[i]);
    free(tmp);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}

/**
 * @brief Public entry point; dispatches to the OpenMP implementation.
 */
alphasparse_status_t ONAME(const ALPHA_Number alpha,
                           const ALPHA_SPMAT_DIA* A,
                           const ALPHA_Number* x,
                           const ALPHA_Number beta,
                           ALPHA_Number* y)
{
    return ONAME_omp(alpha, A, x, beta, y);
}
linear_master_slave_constraint.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Aditya Ghantasala // #if !defined(LINEAR_MASTER_SLAVE_CONSTRAINT_H) #define LINEAR_MASTER_SLAVE_CONSTRAINT_H // System includes // External includes // Project includes #include "includes/define.h" #include "includes/master_slave_constraint.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class LinearMasterSlaveConstraint * @ingroup KratosCore * @brief This class allows to add a master-slave constraint which is of the form * SlaveDofVector = T * MasterDofVector + ConstantVector. * * or * * SlaveDof = weight * MasterDof + Constant * @details The data T and ConstantVector (or the equivalent scalars) are not stored in the base class, since they can be eventually evaluated runtime. 
* @author Aditya Ghantasala */ class LinearMasterSlaveConstraint : public MasterSlaveConstraint { public: ///@name Type Definitions ///@{ /// The definition of the base class, we take the rest of the definitions from the base class typedef MasterSlaveConstraint BaseType; /// The index type definition typedef BaseType::IndexType IndexType; /// The DoF type definition typedef BaseType::DofType DofType; /// The DoF pointer vector type definition typedef BaseType::DofPointerVectorType DofPointerVectorType; /// The node type definition typedef BaseType::NodeType NodeType; /// The equation Id vector type definition typedef BaseType::EquationIdVectorType EquationIdVectorType; /// The matrix type definition typedef BaseType::MatrixType MatrixType; /// The vector type definition typedef BaseType::VectorType VectorType; /// The variable type definition (double) typedef BaseType::VariableType VariableType; /// The component variable type definition typedef BaseType::VariableComponentType VariableComponentType; /// Pointer definition of DataValueContainer KRATOS_CLASS_POINTER_DEFINITION(LinearMasterSlaveConstraint); ///@} ///@name Enum's ///@{ ///@} ///@name Life Cycle ///@{ /** * @brief The default constructor * @param IndexType The Id of the new created constraint */ explicit LinearMasterSlaveConstraint(IndexType Id = 0) : BaseType(Id) { } /** * @brief Constructor by passing a vector of Master and slave dofs and corresponding Matrix and constant vector * @param IndexType The Id of the new created constraint * @param rMasterDofsVector The vector containing the DoF of the master side * @param rSlaveDofsVector The vector containing the DoF of the slave side * @param rRelationMatrix The relation matrix between the master/slave DoF * @param rConstantVector The vector containing the additional kinematic relationship */ LinearMasterSlaveConstraint( IndexType Id, DofPointerVectorType& rMasterDofsVector, DofPointerVectorType& rSlaveDofsVector, const MatrixType& rRelationMatrix, const 
VectorType& rConstantVector ) : BaseType(Id), mSlaveDofsVector(rSlaveDofsVector), mMasterDofsVector(rMasterDofsVector), mRelationMatrix(rRelationMatrix), mConstantVector(rConstantVector) { } /** * @brief Constructor by passing a single Master and slave dofs and corresponding weight and constant for a variable component * @param IndexType The Id of the new created constraint * @param rMasterNode The node of master side * @param rMasterVariable The variable of the master DoF * @param rSlaveNode The node of slave side * @param rSlaveVariable The variable of the slave DoF * @param Weight The relation between the master/slave DoF * @param Constant The additional kinematic relationship */ LinearMasterSlaveConstraint( IndexType Id, NodeType& rMasterNode, const VariableType& rMasterVariable, NodeType& rSlaveNode, const VariableType& rSlaveVariable, const double Weight, const double Constant ) : MasterSlaveConstraint(Id) { // Resizing the memeber variables mRelationMatrix.resize(1,1,false); mConstantVector.resize(1,false); // Obtaining the dofs from the variables mSlaveDofsVector.push_back(rSlaveNode.pGetDof(rSlaveVariable)); mMasterDofsVector.push_back(rMasterNode.pGetDof(rMasterVariable)); mRelationMatrix(0,0) = Weight; mConstantVector(0) = Constant; // Setting the slave flag on the node rSlaveNode.Set(SLAVE); } /** * @brief Constructor by passing a single Master and slave dofs and corresponding weight and constant for a variable component * @param IndexType The Id of the new created constraint * @param rMasterNode The node of master side * @param rMasterVariable The variable of the master DoF * @param rSlaveNode The node of slave side * @param rSlaveVariable The variable of the slave DoF * @param Weight The relation between the master/slave DoF * @param Constant The additional kinematic relationship */ LinearMasterSlaveConstraint( IndexType Id, NodeType& rMasterNode, const VariableComponentType& rMasterVariable, NodeType& rSlaveNode, const VariableComponentType& 
rSlaveVariable, const double Weight, const double Constant ) : MasterSlaveConstraint(Id) { // Resizing the memeber variables mRelationMatrix.resize(1,1,false); mConstantVector.resize(1,false); // Obtaining the dofs from the variables mSlaveDofsVector.push_back(rSlaveNode.pGetDof(rSlaveVariable)); mMasterDofsVector.push_back(rMasterNode.pGetDof(rMasterVariable)); mRelationMatrix(0,0) = Weight; mConstantVector(0) = Constant; // Setting the slave flag on the node rSlaveNode.Set(SLAVE); } /// Destructor. ~LinearMasterSlaveConstraint() override { } /// Copy Constructor LinearMasterSlaveConstraint(const LinearMasterSlaveConstraint& rOther) : BaseType(rOther), mSlaveDofsVector(rOther.mSlaveDofsVector), mMasterDofsVector(rOther.mMasterDofsVector), mRelationMatrix(rOther.mRelationMatrix), mConstantVector(rOther.mConstantVector) { } /// Assignment operator LinearMasterSlaveConstraint& operator=(const LinearMasterSlaveConstraint& rOther) { BaseType::operator=( rOther ); mSlaveDofsVector = rOther.mSlaveDofsVector; mMasterDofsVector = rOther.mMasterDofsVector; mRelationMatrix = rOther.mRelationMatrix; mConstantVector = rOther.mConstantVector; return *this; } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ /** * @brief Create method by passing a single Master and slave dofs and corresponding weight and constant for a variable component * @param IndexType The Id of the new created constraint * @param rMasterDofsVector The DoFs of master side * @param rSlaveDofsVector The DoFs of master side * @param rRelationMatrix The relation matrix between the master/slave DoF * @param rConstantVector The vector containing the additional kinematic relationship * @return A Pointer to the new constraint */ MasterSlaveConstraint::Pointer Create( IndexType Id, DofPointerVectorType& rMasterDofsVector, DofPointerVectorType& rSlaveDofsVector, const MatrixType& rRelationMatrix, const VectorType& rConstantVector ) const override { KRATOS_TRY return 
Kratos::make_shared<LinearMasterSlaveConstraint>(Id, rMasterDofsVector, rSlaveDofsVector, rRelationMatrix, rConstantVector); KRATOS_CATCH(""); } /** * @brief Create method by passing a single Master and slave dofs and corresponding weight and constant for a variable component * @param IndexType The Id of the new created constraint * @param rMasterNode The node of master side * @param rMasterVariable The variable of the master DoF * @param rSlaveNode The node of slave side * @param rSlaveVariable The variable of the slave DoF * @param Weight The relation between the master/slave DoF * @param Constant The additional kinematic relationship * @return A Pointer to the new constraint */ MasterSlaveConstraint::Pointer Create( IndexType Id, NodeType& rMasterNode, const VariableType& rMasterVariable, NodeType& rSlaveNode, const VariableType& rSlaveVariable, const double Weight, const double Constant ) const override { KRATOS_TRY return Kratos::make_shared<LinearMasterSlaveConstraint>(Id, rMasterNode, rMasterVariable, rSlaveNode, rSlaveVariable, Weight, Constant); KRATOS_CATCH(""); } /** * @brief Create method by passing a single Master and slave dofs and corresponding weight and constant for a variable component * @param IndexType The Id of the new created constraint * @param rMasterNode The node of master side * @param rMasterVariable The variable of the master DoF * @param rSlaveNode The node of slave side * @param rSlaveVariable The variable of the slave DoF * @param Weight The relation between the master/slave DoF * @param Constant The additional kinematic relationship * @return A Pointer to the new constraint */ MasterSlaveConstraint::Pointer Create( IndexType Id, NodeType& rMasterNode, const VariableComponentType& rMasterVariable, NodeType& rSlaveNode, const VariableComponentType& rSlaveVariable, const double Weight, const double Constant ) const override { KRATOS_TRY return Kratos::make_shared<LinearMasterSlaveConstraint>(Id, rMasterNode, rMasterVariable, rSlaveNode, 
rSlaveVariable, Weight, Constant); KRATOS_CATCH(""); } /** * @brief It creates a new constraint pointer and clones the previous constraint data * @param NewId the ID of the new constraint * @return a Pointer to the new constraint */ MasterSlaveConstraint::Pointer Clone (IndexType NewId) const override { KRATOS_TRY MasterSlaveConstraint::Pointer p_new_const = Kratos::make_shared<LinearMasterSlaveConstraint>(*this); p_new_const->SetId(NewId); p_new_const->SetData(this->GetData()); p_new_const->Set(Flags(*this)); return p_new_const; KRATOS_CATCH(""); } /** * @brief Determines the constrant's slvae and master list of DOFs * @param rSlaveDofsVector The list of slave DOFs * @param rMasterDofsVector The list of slave DOFs * @param rCurrentProcessInfo The current process info instance */ void GetDofList( DofPointerVectorType& rSlaveDofsVector, DofPointerVectorType& rMasterDofsVector, const ProcessInfo& rCurrentProcessInfo ) const override { rSlaveDofsVector = mSlaveDofsVector; rMasterDofsVector = mMasterDofsVector; } /** * @brief Determines the constrant's slave and master list of DOFs * @param rSlaveDofsVector The list of slave DOFs * @param rMasterDofsVector The list of slave DOFs * @param rCurrentProcessInfo The current process info instance */ void SetDofList( const DofPointerVectorType& rSlaveDofsVector, const DofPointerVectorType& rMasterDofsVector, const ProcessInfo& rCurrentProcessInfo ) override { mSlaveDofsVector = rSlaveDofsVector; mMasterDofsVector = rMasterDofsVector; } /** * @brief This determines the master equation IDs connected to this constraint * @param rSlaveEquationIds The vector of slave equation ids. * @param rMasterEquationIds The vector of master equation ids. 
* @param rCurrentProcessInfo The current process info instance */ void EquationIdVector( EquationIdVectorType& rSlaveEquationIds, EquationIdVectorType& rMasterEquationIds, const ProcessInfo& rCurrentProcessInfo ) const override { if (rSlaveEquationIds.size() != mSlaveDofsVector.size()) rSlaveEquationIds.resize(mSlaveDofsVector.size()); if (rMasterEquationIds.size() != mMasterDofsVector.size()) rMasterEquationIds.resize(mMasterDofsVector.size()); for(IndexType i=0; i<rSlaveEquationIds.size(); ++i) rSlaveEquationIds[i] = mSlaveDofsVector[i]->EquationId(); for(IndexType i=0; i<rMasterEquationIds.size(); ++i) rMasterEquationIds[i] = mMasterDofsVector[i]->EquationId(); } /** * @brief This method returns the slave dof vector * @return The vector containing the slave dofs */ const DofPointerVectorType& GetSlaveDofsVector() const override { return mSlaveDofsVector; } /** * @brief This method returns the slave dof vector * @return The vector containing the slave dofs */ void SetSlaveDofsVector(const DofPointerVectorType& rSlaveDofsVector) override { mSlaveDofsVector = rSlaveDofsVector; } /** * @brief This method returns the slave dof vector * @return The vector containing the slave dofs */ const DofPointerVectorType& GetMasterDofsVector() const override { return mMasterDofsVector; } /** * @brief This method returns the slave dof vector * @return The vector containing the slave dofs */ void SetMasterDofsVector(const DofPointerVectorType& rMasterDofsVector) override { mMasterDofsVector = rMasterDofsVector; } /** * @brief This method resets the values of the slave dofs * @param rCurrentProcessInfo the current process info instance */ void ResetSlaveDofs(const ProcessInfo& rCurrentProcessInfo) override { for (IndexType i = 0; i < mSlaveDofsVector.size(); ++i) { #pragma omp atomic mSlaveDofsVector[i]->GetSolutionStepValue() *= 0.0; } } /** * @brief This method directly applies the master/slave relationship * @param rCurrentProcessInfo the current process info instance */ void 
Apply(const ProcessInfo& rCurrentProcessInfo) override { // Saving the master dofs values Vector master_dofs_values(mMasterDofsVector.size()); for (IndexType i = 0; i < mMasterDofsVector.size(); ++i) { master_dofs_values[i] = mMasterDofsVector[i]->GetSolutionStepValue(); } // Apply the constraint to the slave dofs for (IndexType i = 0; i < mRelationMatrix.size1(); ++i) { double aux = mConstantVector[i]; for(IndexType j = 0; j < mRelationMatrix.size2(); ++j) { aux += mRelationMatrix(i,j) * master_dofs_values[j]; } #pragma omp atomic mSlaveDofsVector[i]->GetSolutionStepValue() += aux; } } /** * @brief This method allows to set the Local System in case is not computed on tunning time (internal variable) * @param rRelationMatrix the matrix which relates the master and slave degree of freedom * @param rConstant The constant vector (one entry for each slave) * @param rCurrentProcessInfo The current process info instance */ void SetLocalSystem( const MatrixType& rRelationMatrix, const VectorType& rConstantVector, const ProcessInfo& rCurrentProcessInfo ) override { if (mRelationMatrix.size1() != rRelationMatrix.size1() || mRelationMatrix.size2() != rRelationMatrix.size2()) mRelationMatrix.resize(rRelationMatrix.size1(), rRelationMatrix.size2(), false); noalias(mRelationMatrix) = rRelationMatrix; if (mConstantVector.size() != rConstantVector.size()) mConstantVector.resize(rConstantVector.size(), false); noalias(mConstantVector) = rConstantVector; } /** * @brief This is called during the assembling process in order * @details To calculate the relation between the master and slave. 
* @param rRelationMatrix the matrix which relates the master and slave degree of freedom * @param rConstant The constant vector (one entry for each slave) * @param rCurrentProcessInfo the current process info instance */ void CalculateLocalSystem( MatrixType& rRelationMatrix, VectorType& rConstantVector, const ProcessInfo& rCurrentProcessInfo ) const override { if (rRelationMatrix.size1() != mRelationMatrix.size1() || rRelationMatrix.size2() != mRelationMatrix.size2()) rRelationMatrix.resize(mRelationMatrix.size1(), mRelationMatrix.size2(), false); noalias(rRelationMatrix) = mRelationMatrix; if (rConstantVector.size() != mConstantVector.size()) rConstantVector.resize(mConstantVector.size(), false); noalias(rConstantVector) = mConstantVector; } ///@} ///@name Input and output ///@{ /** * @brief Returns the string containing a detailed description of this object. * @return the string with informations */ std::string GetInfo() const override { return "Linear User Provided Master Slave Constraint class !"; } /** * @brief This method prints the current Constraint Id * @param rOStream The buffer where the information is given */ void PrintInfo(std::ostream &rOStream) const override { rOStream << " LinearMasterSlaveConstraint Id : " << this->Id() << std::endl; rOStream << " Number of Slaves : " << mSlaveDofsVector.size() << std::endl; rOStream << " Number of Masters : " << mMasterDofsVector.size() << std::endl; } ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ DofPointerVectorType mSlaveDofsVector; /// The DoFs of slave side DofPointerVectorType mMasterDofsVector; /// The DoFs of master side MatrixType mRelationMatrix; /// The relation matrix between the master/slave DoF VectorType mConstantVector; /// The vector containing the additional kinematic relationship ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected 
Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ///@name Serialization ///@{ friend class Serializer; void save(Serializer &rSerializer) const override { KRATOS_SERIALIZE_SAVE_BASE_CLASS(rSerializer, MasterSlaveConstraint); rSerializer.save("SlaveDofVec", mSlaveDofsVector); rSerializer.save("MasterDofVec", mMasterDofsVector); rSerializer.save("RelationMat", mRelationMatrix); rSerializer.save("ConstantVec", mConstantVector); } void load(Serializer &rSerializer) override { KRATOS_SERIALIZE_LOAD_BASE_CLASS(rSerializer, MasterSlaveConstraint); rSerializer.load("SlaveDofVec", mSlaveDofsVector); rSerializer.load("MasterDofVec", mMasterDofsVector); rSerializer.load("RelationMat", mRelationMatrix); rSerializer.load("ConstantVec", mConstantVector); } }; ///@name Input/Output funcitons ///@{ /// input stream function inline std::istream& operator>>(std::istream& rIStream, LinearMasterSlaveConstraint& rThis); /// output stream function inline std::ostream& operator<<(std::ostream& rOStream, const LinearMasterSlaveConstraint& rThis) { rThis.PrintInfo(rOStream); rOStream << std::endl; return rOStream; } ///@} } // namespace Kratos #endif // USER_PROVIDED_LINEAR_MASTER_SLAVE_CONSTRAINT_H
GB_concat_sparse.c
//------------------------------------------------------------------------------
// GB_concat_sparse: concatenate an array of matrices into a sparse matrix
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Free the temporary tiles S [k] (transposed / de-bitmapped copies of the
// input tiles), the S and Work arrays, and the ek-slicing workspace.
// Safe to invoke when S is NULL (nothing allocated yet).
#define GB_FREE_WORKSPACE                       \
    if (S != NULL)                              \
    {                                           \
        for (int64_t k = 0 ; k < m * n ; k++)   \
        {                                       \
            GB_Matrix_free (&(S [k])) ;         \
        }                                       \
    }                                           \
    GB_FREE_WORK (&S, S_size) ;                 \
    GB_FREE_WORK (&Work, Work_size) ;           \
    GB_WERK_POP (A_ek_slicing, int64_t) ;

// On error: free all workspace and also the partially-built result C.
#define GB_FREE_ALL         \
{                           \
    GB_FREE_WORKSPACE ;     \
    GB_phbix_free (C) ;     \
}

#include "GB_concat.h"

// GB_concat_sparse builds C from the m-by-n row-major array of tiles, with C
// constructed in sparse form.  Two passes are made over the tiles: the first
// counts the entries in each vector of each tile (into Work), and after a
// cumulative sum the second pass copies each tile's entries into C at the
// offsets recorded in Work.

GrB_Info GB_concat_sparse           // concatenate into a sparse matrix
(
    GrB_Matrix C,                   // input/output matrix for results
    const bool C_iso,               // if true, construct C as iso
    const GB_void *cscalar,         // iso value of C, if C is iso
    const int64_t cnz,              // # of entries in C
    const GrB_Matrix *Tiles,        // 2D row-major array of size m-by-n,
    const GrB_Index m,
    const GrB_Index n,
    const int64_t *restrict Tile_rows,  // size m+1
    const int64_t *restrict Tile_cols,  // size n+1
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // allocate C as a sparse matrix
    //--------------------------------------------------------------------------

    GrB_Info info ;
    GrB_Matrix A = NULL ;
    ASSERT_MATRIX_OK (C, "C input to concat sparse", GB0) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    int64_t *Work = NULL ;
    size_t Work_size = 0 ;
    GrB_Matrix *S = NULL ;
    size_t S_size = 0 ;

    // save the type, dimensions, and settings of C before its content is freed
    GrB_Type ctype = C->type ;
    int64_t cvlen = C->vlen ;
    int64_t cvdim = C->vdim ;
    bool csc = C->is_csc ;
    size_t csize = ctype->size ;
    GB_Type_code ccode = ctype->code ;
    float hyper_switch = C->hyper_switch ;
    float bitmap_switch = C->bitmap_switch ;
    int sparsity_control = C->sparsity_control ;

    GB_phbix_free (C) ;
    // set C->iso = C_iso   OK
    GB_OK (GB_new_bix (&C, // existing header
        ctype, cvlen, cvdim, GB_Ap_malloc, csc, GxB_SPARSE, false,
        hyper_switch, cvdim, cnz, true, C_iso, Context)) ;
    C->bitmap_switch = bitmap_switch ;
    C->sparsity_control = sparsity_control ;
    int64_t *restrict Cp = C->p ;
    int64_t *restrict Ci = C->i ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;

    if (C_iso)
    {
        // a single shared value for all entries of the iso-valued C
        memcpy (C->x, cscalar, csize) ;
    }

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    // "outer" iterates over tiles along the vector dimension of C, and
    // "inner" across it, so that Work is indexed by (inner, vector of C).
    int64_t nouter = csc ? n : m ;
    int64_t ninner = csc ? m : n ;

    Work = GB_CALLOC_WORK (ninner * cvdim, int64_t, &Work_size) ;
    S = GB_CALLOC_WORK (m * n, GrB_Matrix, &S_size) ;
    if (S == NULL || Work == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // count entries in each vector of each tile
    //--------------------------------------------------------------------------

    for (int64_t outer = 0 ; outer < nouter ; outer++)
    {
        for (int64_t inner = 0 ; inner < ninner ; inner++)
        {

            //------------------------------------------------------------------
            // get the tile A; transpose and typecast, if needed
            //------------------------------------------------------------------

            A = csc ? GB_TILE (Tiles, inner, outer)
                    : GB_TILE (Tiles, outer, inner) ;
            GrB_Matrix T = NULL ;
            ASSERT_MATRIX_OK (A, "A tile for concat sparse", GB0) ;
            if (csc != A->is_csc)
            {
                // T = (ctype) A', not in-place, using a dynamic header
                GB_OK (GB_new (&T, // auto sparsity, new header
                    A->type, A->vdim, A->vlen, GB_Ap_null, csc,
                    GxB_AUTO_SPARSITY, -1, 1, Context)) ;
                // save T in array S so it is freed on exit / error
                if (csc)
                {
                    GB_TILE (S, inner, outer) = T ;
                }
                else
                {
                    GB_TILE (S, outer, inner) = T ;
                }
                GB_OK (GB_transpose_cast (T, ctype, csc, A, false, Context)) ;
                A = T ;
                GB_MATRIX_WAIT (A) ;
                ASSERT_MATRIX_OK (A, "T=A' for concat sparse", GB0) ;
            }
            ASSERT (C->is_csc == A->is_csc) ;
            ASSERT (!GB_ANY_PENDING_WORK (A)) ;

            //------------------------------------------------------------------
            // ensure the tile is not bitmap
            //------------------------------------------------------------------

            if (GB_IS_BITMAP (A))
            {
                if (T == NULL)
                {
                    // copy A into T
                    // set T->iso = A->iso  OK: no burble needed
                    GB_OK (GB_dup_worker (&T, A->iso, A, true, NULL, Context)) ;
                    // save T in array S so it is freed on exit / error
                    if (csc)
                    {
                        GB_TILE (S, inner, outer) = T ;
                    }
                    else
                    {
                        GB_TILE (S, outer, inner) = T ;
                    }
                    ASSERT_MATRIX_OK (T, "T=dup(A) for concat sparse", GB0) ;
                }
                // convert T from bitmap to sparse
                GB_OK (GB_convert_bitmap_to_sparse (T, Context)) ;
                ASSERT_MATRIX_OK (T, "T bitmap to sparse, concat sparse", GB0) ;
                A = T ;
            }
            ASSERT (!GB_IS_BITMAP (A)) ;

            //------------------------------------------------------------------
            // log the # of entries in each vector of the tile A
            //------------------------------------------------------------------

            const int64_t anvec = A->nvec ;
            const int64_t avlen = A->vlen ;
            // W points to this tile's slot in Work, offset by the first
            // vector of C that the tile occupies
            int64_t cvstart = csc ? Tile_cols [outer] : Tile_rows [outer] ;
            int64_t *restrict W = Work + inner * cvdim + cvstart ;
            int nth = GB_nthreads (anvec, chunk, nthreads_max) ;
            if (GB_IS_FULL (A))
            {
                // A is full
                int64_t j ;
                #pragma omp parallel for num_threads(nth) schedule(static)
                for (j = 0 ; j < anvec ; j++)
                {
                    // W [j] = # of entries in A(:,j), which is just avlen
                    W [j] = avlen ;
                }
            }
            else
            {
                // A is sparse or hyper
                int64_t k ;
                int64_t *restrict Ah = A->h ;
                int64_t *restrict Ap = A->p ;
                #pragma omp parallel for num_threads(nth) schedule(static)
                for (k = 0 ; k < anvec ; k++)
                {
                    // W [j] = # of entries in A(:,j), the kth column of A
                    int64_t j = GBH (Ah, k) ;
                    W [j] = Ap [k+1] - Ap [k] ;
                }
            }
        }
    }

    //--------------------------------------------------------------------------
    // cumulative sum of entries in each tile
    //--------------------------------------------------------------------------

    // First, for each vector k of C, turn the per-tile counts in Work into
    // running offsets down the column of tiles, leaving the column total in
    // Cp [k].  Then Cp is itself cumsum'd, and those vector start positions
    // are added back into Work so that Work holds absolute offsets into Ci/Cx.
    int nth = GB_nthreads (ninner*cvdim, chunk, nthreads_max) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nth) schedule(static)
    for (k = 0 ; k < cvdim ; k++)
    {
        int64_t s = 0 ;
        for (int64_t inner = 0 ; inner < ninner ; inner++)
        {
            int64_t p = inner * cvdim + k ;
            int64_t c = Work [p] ;
            Work [p] = s ;
            s += c ;
        }
        // total number of entries in C(:,k)
        Cp [k] = s ;
    }
    GB_cumsum (Cp, cvdim, &(C->nvec_nonempty), nthreads_max, Context) ;
    #pragma omp parallel for num_threads(nth) schedule(static)
    for (k = 0 ; k < cvdim ; k++)
    {
        int64_t pC = Cp [k] ;
        for (int64_t inner = 0 ; inner < ninner ; inner++)
        {
            int64_t p = inner * cvdim + k ;
            Work [p] += pC ;
        }
    }

    //--------------------------------------------------------------------------
    // concatenate all matrices into C
    //--------------------------------------------------------------------------

    for (int64_t outer = 0 ; outer < nouter ; outer++)
    {
        for (int64_t inner = 0 ; inner < ninner ; inner++)
        {

            //------------------------------------------------------------------
            // get the tile A, either the temporary matrix T or the original A
            //------------------------------------------------------------------

            A = csc ? GB_TILE (S, inner, outer)
                    : GB_TILE (S, outer, inner) ;
            if (A == NULL)
            {
                // no converted copy was made in pass 1; use the input tile
                A = csc ? GB_TILE (Tiles, inner, outer)
                        : GB_TILE (Tiles, outer, inner) ;
            }
            ASSERT_MATRIX_OK (A, "A tile again, concat sparse", GB0) ;
            ASSERT (!GB_IS_BITMAP (A)) ;
            ASSERT (C->is_csc == A->is_csc) ;
            ASSERT (!GB_ANY_PENDING_WORK (A)) ;
            GB_Type_code acode = A->type->code ;

            //------------------------------------------------------------------
            // determine where to place the tile in C
            //------------------------------------------------------------------

            // The tile A appears in vectors cvstart:cvend-1 of C, and indices
            // cistart:ciend-1.

            int64_t cvstart, cvend, cistart, ciend ;
            if (csc)
            {
                // C and A are held by column
                // Tiles is row-major and accessed in column order
                cvstart = Tile_cols [outer] ;
                cvend   = Tile_cols [outer+1] ;
                cistart = Tile_rows [inner] ;
                ciend   = Tile_rows [inner+1] ;
            }
            else
            {
                // C and A are held by row
                // Tiles is row-major and accessed in row order
                cvstart = Tile_rows [outer] ;
                cvend   = Tile_rows [outer+1] ;
                cistart = Tile_cols [inner] ;
                ciend   = Tile_cols [inner+1] ;
            }

            // get the workspace pointer array W for this tile
            int64_t *restrict W = Work + inner * cvdim + cvstart ;

            //------------------------------------------------------------------
            // slice the tile
            //------------------------------------------------------------------

            int64_t avdim = cvend - cvstart ;
            int64_t avlen = ciend - cistart ;
            ASSERT (avdim == A->vdim) ;
            ASSERT (avlen == A->vlen) ;
            int A_nthreads, A_ntasks ;
            const int64_t *restrict Ap = A->p ;
            const int64_t *restrict Ah = A->h ;
            const int64_t *restrict Ai = A->i ;
            const bool A_iso = A->iso ;
            GB_SLICE_MATRIX (A, 1, chunk) ;

            //------------------------------------------------------------------
            // copy the tile A into C
            //------------------------------------------------------------------

            bool done = false ;

            if (C_iso)
            {

                //--------------------------------------------------------------
                // C and A are iso
                //--------------------------------------------------------------

                // no values need to be copied; the template moves indices only
                #define GB_ISO_CONCAT
                #define GB_COPY(pC,pA,A_iso) ;
                #include "GB_concat_sparse_template.c"

            }
            else
            {

                #ifndef GBCOMPACT

                    if (ccode == acode)
                    {
                        // no typecasting needed
                        switch (csize)
                        {
                            #undef  GB_COPY
                            #define GB_COPY(pC,pA,A_iso)            \
                                Cx [pC] = GBX (Ax, pA, A_iso) ;

                            case GB_1BYTE : // uint8, int8, bool, or 1-byte user
                                #define GB_CTYPE uint8_t
                                #include "GB_concat_sparse_template.c"
                                break ;

                            case GB_2BYTE : // uint16, int16, or 2-byte user
                                #define GB_CTYPE uint16_t
                                #include "GB_concat_sparse_template.c"
                                break ;

                            case GB_4BYTE : // uint32, int32, float, or 4-byte user
                                #define GB_CTYPE uint32_t
                                #include "GB_concat_sparse_template.c"
                                break ;

                            case GB_8BYTE : // uint64, int64, double, float complex,
                                            // or 8-byte user defined
                                #define GB_CTYPE uint64_t
                                #include "GB_concat_sparse_template.c"
                                break ;

                            case GB_16BYTE : // double complex or 16-byte user
                                #define GB_CTYPE GB_blob16
                                #include "GB_concat_sparse_template.c"
                                break ;

                            default:;
                        }
                    }
                #endif
            }

            if (!done)
            {
                // with typecasting or user-defined types
                GB_cast_function cast_A_to_C = GB_cast_factory (ccode, acode) ;
                size_t asize = A->type->size ;
                #define GB_CTYPE GB_void
                #undef  GB_COPY
                #define GB_COPY(pC,pA,A_iso)                        \
                    cast_A_to_C (Cx + (pC)*csize,                   \
                        Ax + (A_iso ? 0:(pA)*asize), asize) ;
                #include "GB_concat_sparse_template.c"
            }

            GB_WERK_POP (A_ek_slicing, int64_t) ;
        }
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    C->magic = GB_MAGIC ;
    ASSERT_MATRIX_OK (C, "C from concat sparse", GB0) ;
    return (GrB_SUCCESS) ;
}
GB_unop__one_fp32_fp32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__one_fp32_fp32
// op(A') function:  GB_unop_tran__one_fp32_fp32

// C type:   float
// A type:   float
// cast:     ;
// unaryop:  cij = 1

#define GB_ATYPE \
    float

#define GB_CTYPE \
    float

// aij = Ax [pA]
// (ONE never reads aij, so the "get" is a no-op for this operator)
#define GB_GETA(aij,Ax,pA) \
    ;

#define GB_CX(p) Cx [p]

// unary operator: z = ONE(x) == 1, regardless of x
#define GB_OP(z, x) \
    z = 1 ;

// casting (no-op: A and C are both float)
#define GB_CAST(z, aij) \
    ; ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    ; ;                    \
    /* Cx [pC] = op (cast (aij)) */ \
    ; ;                    \
    Cx [pC] = 1 ;          \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ONE || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Sets every (present) entry of Cx to 1.0f.  Ax is never read by this
// operator; it appears in the signature for uniformity with the other
// generated unary-op kernels.
GrB_Info GB_unop_apply__one_fp32_fp32
(
    float *Cx,                  // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *GB_RESTRICT Ab,   // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // the bare ";" statements are the expansions of GB_GETA and
            // GB_CAST, which are no-ops for this operator
            ; ;
            ; ;
            Cx [p] = 1 ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            ; ;
            ; ;
            Cx [p] = 1 ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel itself lives in GB_unop_transpose.c; the macros
// defined above tailor that template to the ONE/fp32 operator.
GrB_Info GB_unop_tran__one_fp32_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Workspaces,
    const int64_t *GB_RESTRICT A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
mxnet_op.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 *  Copyright (c) 2017 by Contributors
 * \file mxnet_op.h
 * \brief Shared kernel-launch helpers, type-switch macros, and shape/index
 *        utilities used by MXNet operator implementations on CPU and GPU.
 * \author Junyuan Xie
 */
#ifndef MXNET_OPERATOR_MXNET_OP_H_
#define MXNET_OPERATOR_MXNET_OP_H_

#include <dmlc/omp.h>
#include <mxnet/base.h>
#include <mxnet/engine.h>
#include <mxnet/op_attr_types.h>
#include <algorithm>
#include "./operator_tune.h"
#include "../engine/openmp.h"

#ifdef __CUDACC__
#include "../common/cuda_utils.h"
#endif  // __CUDACC__

namespace mxnet {
namespace op {
namespace mxnet_op {
using namespace mshadow;

// PI lives in __constant__ memory when compiled for device code, and as an
// ordinary namespace-scope constant on the host.
#ifdef __CUDA_ARCH__
__constant__ const float PI = 3.14159265358979323846;
#else
const float PI = 3.14159265358979323846;
using std::isnan;
#endif

// Recommended number of threads for a kernel of N iterations on device xpu;
// specialized below for cpu and (under CUDA) gpu.
template<typename xpu>
int get_num_threads(const int N);

#ifdef __CUDACC__
// Grid-stride loop over n items: each CUDA thread handles indices
// i, i + blockDim.x*gridDim.x, ... so any grid size covers all n.
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
      i < (n); \
      i += blockDim.x * gridDim.x)

inline cudaDeviceProp cuda_get_device_prop() {
  int device;
  CUDA_CALL(cudaGetDevice(&device));
  cudaDeviceProp deviceProp;
  CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device));
  return deviceProp;
}

/*!
 * \brief Get the number of blocks for cuda kernel given N
 */
inline int cuda_get_num_blocks(const int N) {
  using namespace mshadow::cuda;
  return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
}

template<>
inline int get_num_threads<gpu>(const int N) {
  using namespace mshadow::cuda;
  return kBaseThreadNum * cuda_get_num_blocks(N);
}
#endif  // __CUDACC__

// On CPU the thread count comes from the engine's OpenMP policy and does not
// depend on N.
template<>
inline int get_num_threads<cpu>(const int N) {
  return engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
}

/*! \brief operator request type switch
 *  Note: kWriteInplace is deliberately folded into kWriteTo. */
#define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...)  \
  switch (req) {                                    \
  case kNullOp:                                     \
    break;                                          \
  case kWriteInplace:                               \
  case kWriteTo:                                    \
    {                                               \
      const OpReqType ReqType = kWriteTo;           \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  case kAddTo:                                      \
    {                                               \
      const OpReqType ReqType = kAddTo;             \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  default:                                          \
    break;                                          \
  }

/*! \brief operator request type switch
 *  Unlike MXNET_ASSIGN_REQ_SWITCH, the body also runs for kNullOp. */
#define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...)    \
  switch (req) {                                    \
  case kNullOp:                                     \
    {                                               \
      const OpReqType ReqType = kNullOp;            \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  case kWriteInplace:                               \
  case kWriteTo:                                    \
    {                                               \
      const OpReqType ReqType = kWriteTo;           \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  case kAddTo:                                      \
    {                                               \
      const OpReqType ReqType = kAddTo;             \
      {__VA_ARGS__}                                 \
    }                                               \
    break;                                          \
  default:                                          \
    break;                                          \
  }

// Instantiate the body with a compile-time constant ndim (1..5).
// NOTE(review): the ndim == 0 branch runs nothing, and the fatal message
// below lacks a space between the number and "too large" — confirm intended.
#define MXNET_NDIM_SWITCH(NDim, ndim, ...)         \
  if (NDim == 0) {                                 \
  } else if (NDim == 1) {                          \
    const int ndim = 1;                            \
    {__VA_ARGS__}                                  \
  } else if (NDim == 2) {                          \
    const int ndim = 2;                            \
    {__VA_ARGS__}                                  \
  } else if (NDim == 3) {                          \
    const int ndim = 3;                            \
    {__VA_ARGS__}                                  \
  } else if (NDim == 4) {                          \
    const int ndim = 4;                            \
    {__VA_ARGS__}                                  \
  } else if (NDim == 5) {                          \
    const int ndim = 5;                            \
    {__VA_ARGS__}                                  \
  } else {                                         \
    LOG(FATAL) << "ndim=" << NDim << "too large "; \
  }

// Type switch that rejects int8/uint8 inputs with a fatal error.
#define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...)        \
  switch (type) {                                          \
  case mshadow::kFloat32:                                  \
    {                                                      \
      typedef float DType;                                 \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kFloat64:                                  \
    {                                                      \
      typedef double DType;                                \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kFloat16:                                  \
    {                                                      \
      typedef mshadow::half::half_t DType;                 \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kUint8:                                    \
    LOG(FATAL) << "This operation does not "               \
                  "support int8 or uint8";                 \
    break;                                                 \
  case mshadow::kInt8:                                     \
    LOG(FATAL) << "This operation does not "               \
                  "support int8 or uint8";                 \
    break;                                                 \
  case mshadow::kInt32:                                    \
    {                                                      \
      typedef int32_t DType;                               \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kInt64:                                    \
    {                                                      \
      typedef int64_t DType;                               \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  default:                                                 \
    LOG(FATAL) << "Unknown type enum " << type;            \
  }

// Type switch that rejects float16 inputs with a fatal error.
#define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...)     \
  switch (type) {                                          \
  case mshadow::kFloat32:                                  \
    {                                                      \
      typedef float DType;                                 \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kFloat64:                                  \
    {                                                      \
      typedef double DType;                                \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kFloat16:                                  \
    LOG(FATAL) << "This operation does not "               \
                  "support float16";                       \
    break;                                                 \
  case mshadow::kUint8:                                    \
    {                                                      \
      typedef uint8_t DType;                               \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kInt8:                                     \
    {                                                      \
      typedef int8_t DType;                                \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kInt32:                                    \
    {                                                      \
      typedef int32_t DType;                               \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kInt64:                                    \
    {                                                      \
      typedef int64_t DType;                               \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  default:                                                 \
    LOG(FATAL) << "Unknown type enum " << type;            \
  }

// Switch providing both the data type DType and a wider accumulation type
// AType; only floating-point inputs are accepted.
#define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\
  switch (type) {                                          \
  case mshadow::kFloat32:                                  \
    {                                                      \
      typedef float DType;                                 \
      typedef double AType;                                \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kFloat64:                                  \
    {                                                      \
      typedef double DType;                                \
      typedef double AType;                                \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kFloat16:                                  \
    {                                                      \
      typedef mshadow::half::half_t DType;                 \
      typedef float AType;                                 \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kUint8:                                    \
    {                                                      \
      typedef uint8_t DType;                               \
      typedef uint8_t AType;                               \
      LOG(FATAL) << "This operation only support "         \
                    "floating point types not uint8";      \
    }                                                      \
    break;                                                 \
  case mshadow::kInt8:                                     \
    {                                                      \
      typedef int8_t DType;                                \
      typedef int8_t AType;                                \
      LOG(FATAL) << "This operation only support "         \
                    "floating point types not int8";       \
    }                                                      \
    break;                                                 \
  case mshadow::kInt32:                                    \
    {                                                      \
      typedef int32_t DType;                               \
      typedef int32_t AType;                               \
      LOG(FATAL) << "This operation only support "         \
                    "floating point types, not int32";     \
    }                                                      \
    break;                                                 \
  case mshadow::kInt64:                                    \
    {                                                      \
      typedef int64_t DType;                               \
      typedef int64_t AType;                               \
      LOG(FATAL) << "This operation only support "         \
                    "floating point types, not int64";     \
    }                                                      \
    break;                                                 \
  default:                                                 \
    LOG(FATAL) << "Unknown type enum " << type;            \
  }

// Like MXNET_REAL_ACC_TYPE_SWITCH but integer types are accepted with wider
// integer accumulators.
// NOTE(review): the kUint8/kInt8/kInt32/kInt64 cases define the typedefs but
// never expand {__VA_ARGS__}, so the body silently does not run for integer
// inputs — confirm whether this is intentional.
#define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\
  switch (type) {                                          \
  case mshadow::kFloat32:                                  \
    {                                                      \
      typedef float DType;                                 \
      typedef double AType;                                \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kFloat64:                                  \
    {                                                      \
      typedef double DType;                                \
      typedef double AType;                                \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kFloat16:                                  \
    {                                                      \
      typedef mshadow::half::half_t DType;                 \
      typedef float AType;                                 \
      {__VA_ARGS__}                                        \
    }                                                      \
    break;                                                 \
  case mshadow::kUint8:                                    \
    {                                                      \
      typedef uint8_t DType;                               \
      typedef uint32_t AType;                              \
    }                                                      \
    break;                                                 \
  case mshadow::kInt8:                                     \
    {                                                      \
      typedef int8_t DType;                                \
      typedef int32_t AType;                               \
    }                                                      \
    break;                                                 \
  case mshadow::kInt32:                                    \
    {                                                      \
      typedef int32_t DType;                               \
      typedef int64_t AType;                               \
    }                                                      \
    break;                                                 \
  case mshadow::kInt64:                                    \
    {                                                      \
      typedef int64_t DType;                               \
      typedef int64_t AType;                               \
    }                                                      \
    break;                                                 \
  default:                                                 \
    LOG(FATAL) << "Unknown type enum " << type;            \
  }

/*!
 * \brief assign the val to out according
 * to request in Kernel::Launch
 * \param out the data to be assigned
 * \param req the assignment request
 * \param val the value to be assigned to out
 * \tparam OType output type
 * \tparam VType value type
 */
#define KERNEL_ASSIGN(out, req, val)  \
  {                                   \
    switch (req) {                    \
      case kNullOp:                   \
        break;                        \
      case kWriteTo:                  \
      case kWriteInplace:             \
        (out) = (val);                \
        break;                        \
      case kAddTo:                    \
        (out) += (val);               \
        break;                        \
      default:                        \
        break;                        \
    }                                 \
  }

#define MXNET_ADD_ALL_TYPES \
  .add_enum("float32", mshadow::kFloat32) \
  .add_enum("float64", mshadow::kFloat64) \
  .add_enum("float16", mshadow::kFloat16) \
  .add_enum("uint8", mshadow::kUint8) \
  .add_enum("int8", mshadow::kInt8) \
  .add_enum("int32", mshadow::kInt32) \
  .add_enum("int64", mshadow::kInt64)

/* \brief Compute flattened index given coordinates and shape.
 * The (shape[i] > coord[i]) factor zeroes out coordinates on broadcast
 * (size-1) axes, so ravel also works for broadcast indexing. */
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
  }
  return ret;
}

/* Compute coordinates from flattened index given shape */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret[i] = j - tmp*shape[i];
    j = tmp;
  }
  return ret;
}

/* Compute dot product of two vector */
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret += coord[i] * stride[i];
  }
  return ret;
}

/* Combining unravel and dot: maps a flat index in `shape` space to an offset
 * under `stride` without materializing the coordinate vector. */
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
  const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret += (j - tmp*shape[i])*stride[i];
    j = tmp;
  }
  return ret;
}

/* Calculate stride of each dim from shape; size-1 axes get stride 0 so the
 * resulting strides broadcast over those axes. */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}

/* Increment coordinates and modify index (carry-propagating ++ over coord) */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx, const Shape<ndim>& stride) {
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}

/* Increment coordinates and modify index (two indices updated in lockstep) */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx1, const Shape<ndim>& stride1,
                         index_t* idx2, const Shape<ndim>& stride2) {
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}

/*!
 * \brief Simple copy data from one blob to another
 * \param to Destination blob
 * \param from Source blob
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
    } else {
      // dtypes differ: element-wise cast while copying
      MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) =
          mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}

/*! \brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad
   * \param a - output grad
   * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
   * \return input grad
   */
  template<typename DType, typename ...Args>
  MSHADOW_XINLINE static DType Map(DType a, Args... args) {
    return DType(a * GRAD_OP::Map(args...));
  }
};

/*! \brief Binary op backward gradient OP wrapper (tuned) */
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
  using backward_grad<GRAD_OP>::Map;
};

/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */
template<typename OP, int req>
struct op_with_req {
  typedef OP Operation;

  /*! \brief input is one tensor */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is tensor and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  /*! \brief input is tensor and two scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in,
                                  const DType value_1, const DType value_2) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
  }

  /*! \brief No inputs (ie fill to constant value) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }

  /*! \brief input is single scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }

  /*! \brief inputs are two tensors and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1, const DType *input_2, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }

  /*! \brief inputs are three tensors (ie backward grad with binary grad function) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1, const DType *input_2,
                                  const DType *input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }
};

template<typename OP, typename xpu>
struct Kernel;

/*!
 * \brief CPU Kernel launcher
 * \tparam OP Operator to launch
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      // serial fallback avoids OpenMP overhead for a single thread
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
   * for irregular workloads such as spmv.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
    if (omp_threads < 2) {
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int64_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename PRIMITIVE_OP, typename DType, typename ...Args>
  static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    // UseOMP() consults tuning data to decide whether parallelism pays off
    if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
      N, static_cast<size_t>(omp_threads))) {
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }

  /*!
   * \brief Launch custom-tuned kernel where each thread is set to
   *        operate on a contiguous partition
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
   */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      OP::Map(0, N, args...);
    } else {
      // each OP::Map call receives (start, length) of a contiguous chunk
      const auto length = (N + omp_threads - 1) / omp_threads;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
        OP::Map(i, i + length > N ? N - i : length, args...);
      }
    }
#else
    OP::Map(0, N, args...);
#endif
  }

  /*!
   * \brief Launch a tunable OP with implicitly-supplied data type
   * \tparam DType Data type
   * \tparam T OP type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    LaunchTuned<T, DType>(s, N, dest, args...);
    return true;
  }

  /*!
   * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
   * \tparam DType Data type
   * \tparam T Wrapper type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
    return true;
  }
};

#ifdef __CUDACC__
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}

template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, 1, args...);
  }
}

template<typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch GPU kernel */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
  }

  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
  }
};
#endif  // __CUDACC__

/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template<int val>
struct set_to_int : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch()) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};

/*!
 * \brief Special-case kernel shortcut for setting to zero and one
 */
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;
}  // namespace mxnet_op
}  // namespace op
}  // namespace mxnet

#endif  // MXNET_OPERATOR_MXNET_OP_H_
core_dtrssq.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/core_blas/core_ztrssq.c, normal z -> d, Fri Sep 28 17:38:24 2018
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "plasma_internal.h"
#include "core_lapack.h"

#include <math.h>

/******************************************************************************/
// Fold one value into a scaled sum of squares, LAPACK dlassq style:
// on entry and exit, the running sum of squares equals scale^2 * sumsq.
// Scaling by the largest magnitude seen so far avoids overflow/underflow
// when squaring.
// This computation also shows up in plasma_core_dsyssq() and can be factored out.
// LAPACK does real and imag components separately in dlassq.
static inline void ssq(double value, double *scale, double *sumsq)
{
    double absa = fabs(value);
    if (absa != 0.0) { // != propagates nan
        if (*scale < absa) {
            // New maximum: rescale the accumulated sum to the new scale.
            *sumsq = 1.0 + *sumsq*((*scale/absa)*(*scale/absa));
            *scale = absa;
        }
        else {
            *sumsq = *sumsq + ((absa/(*scale))*(absa/(*scale)));
        }
    }
}

/******************************************************************************/
// Accumulate the scaled sum of squares of an m-by-n trapezoidal matrix A
// (upper or lower per uplo; diag selects whether the diagonal is read from A
// or assumed to be 1.0). On exit, scale^2 * sumsq has been updated with the
// sum of squares of the referenced entries. scale/sumsq must be initialized
// by the caller (e.g. scale = 0, sumsq = 1).
__attribute__((weak))
void plasma_core_dtrssq(plasma_enum_t uplo, plasma_enum_t diag,
                        int m, int n,
                        const double *A, int lda,
                        double *scale, double *sumsq)
{
    if (uplo == PlasmaUpper) {
        if (diag == PlasmaNonUnit) {
            // Column j touches rows 0 .. min(j, m-1).
            for (int j = 0; j < n; j++) {
                ssq(A[lda*j], scale, sumsq);
                for (int i = 1; i < imin(j+1, m); i++) {
                    ssq(A[lda*j+i], scale, sumsq);
                }
            }
        }
        else { // PlasmaUnit
            // Columns intersecting the diagonal: count an implicit 1.0 and
            // the strictly-upper entries above it.
            int j;
            for (j = 0; j < imin(n, m); j++) {
                ssq(1.0, scale, sumsq);
                for (int i = 0; i < j; i++) {
                    ssq(A[lda*j+i], scale, sumsq);
                }
            }
            // Columns right of the diagonal block: full height, but row 0 is
            // still folded via the first ssq call.
            for (; j < n; j++) {
                ssq(A[lda*j], scale, sumsq);
                for (int i = 1; i < m; i++) {
                    ssq(A[lda*j+i], scale, sumsq);
                }
            }
        }
    }
    else { // PlasmaLower
        if (diag == PlasmaNonUnit) {
            // Column j touches rows j .. m-1.
            for (int j = 0; j < imin(n, m); j++) {
                ssq(A[lda*j+j], scale, sumsq);
                for (int i = j+1; i < m; i++) {
                    ssq(A[lda*j+i], scale, sumsq);
                }
            }
        }
        else { // PlasmaUnit
            // Implicit unit diagonal plus strictly-lower entries.
            for (int j = 0; j < imin(n, m); j++) {
                ssq(1.0, scale, sumsq);
                for (int i = j+1; i < m; i++) {
                    ssq(A[lda*j+i], scale, sumsq);
                }
            }
        }
    }
}
/******************************************************************************/
// OpenMP task wrapper around plasma_core_dtrssq(): initializes the
// scale/sumsq accumulators (scale=0, sumsq=1 is the identity for the
// scaled-sum-of-squares representation) and folds in the tile A.
// Errors are propagated via sequence->status: the task becomes a no-op once
// a prior task has failed.
void plasma_core_omp_dtrssq(plasma_enum_t uplo, plasma_enum_t diag,
                            int m, int n,
                            const double *A, int lda,
                            double *scale, double *sumsq,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    // NOTE(review): scale and sumsq are written as single scalars here, yet the
    // depend clauses declare array sections of length n — presumably they point
    // into per-tile arrays sized by the caller; verify against the callers.
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:scale[0:n]) \
                     depend(out:sumsq[0:n])
    {
        if (sequence->status == PlasmaSuccess) {
            *scale = 0.0;
            *sumsq = 1.0;
            plasma_core_dtrssq(uplo, diag, m, n, A, lda, scale, sumsq);
        }
    }
}
parallel_iter.c
#include<stdio.h> #include<stdlib.h> #include<time.h> #include<omp.h> #include<math.h> #include<string.h> #define constant 6.28318530718 #define CLK CLOCK_MONOTONIC struct timespec diff(struct timespec start, struct timespec end){ struct timespec temp; if((end.tv_nsec-start.tv_nsec)<0){ temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else{ temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; } typedef struct { unsigned char gs; } PPMPixelGS; typedef struct { int x, y; PPMPixelGS *data; } PPMImageGS; typedef struct{ double real; double imag; } Complex; #define RGB_COMPONENT_COLOR 255 void writePPMGS(const char *filename, PPMImageGS *img); static PPMImageGS *readPPMGS(const char *filename); /*-----------------------------------convert to image complex arrays------------------------------*/ Complex** convert(PPMImageGS *im, int p) { int rows = im->x; int cols = im->y; int i,j,idx; Complex **arr = (Complex **)malloc(rows * sizeof(Complex *)); for (i=0; i<rows; i++) arr[i] = (Complex *)malloc(cols * sizeof(Complex )); # pragma omp parallel \ shared ( arr,im, rows,cols ) \ private ( j,idx ) num_threads(p) # pragma omp for nowait for(i=0;i<rows;i++) { for(j=0; j<cols; j++) { idx = cols*i + j; PPMPixelGS *temp = im->data + idx; arr[i][j].real=(double)temp->gs; arr[i][j].imag=0.0; } } return arr; } /*------------------------------look up table-----------------------------------------------*/ void twiddle( int n, double w[], int p ) { double arg; double aw; int i; int n2; const double pi = constant/2; n2 = n / 2; aw = 2.0 * pi / ( ( double ) n ); # pragma omp parallel \ shared ( aw, n, w ) \ private ( arg, i ) num_threads(p) # pragma omp for nowait for ( i = 0; i < n2; i++ ) { arg = aw * ( ( double ) i ); w[i*2+0] = cos ( arg ); w[i*2+1] = sin ( arg ); } return; } /*-------------------------------------FFT--------------------------------*/ void FFT(double *x, double *y, int n, double 
w[], int p) { int m,i,j,k,i2; double tx,ty; m=0; i= n; /* m = logN calculation*/ while(i>0) { i/=2; m++; } m-=1; /* Do the bit reversal */ i2 = n >> 1; j = 0; for (i=0;i<n -1;i++) { if (i < j) { tx = x[i]; ty = y[i]; x[i] = x[j]; y[i] = y[j]; x[j] = tx; y[j] = ty; } k = i2; while (k <= j) { j -= k; k >>= 1; } j += k; } /* FFT computation */ int mj, term_i, mi, j2, count2; double u1, u2, t1, t2; mj = 1; //stride of j for(k=0;k<m;k++) { mi = 2*mj; //stride of i term_i = n/mi; #pragma omp parallel \ shared(x,y,mj,mi,n,term_i) \ private(count2,j2,j,t1,t2,u1,u2) num_threads(p) #pragma omp for nowait for(i=0; i<term_i; i++) { count2=0; for(j=i*mi;count2<mj;j++, count2++) { j%=(n-1); j2 = (j+ mj); int twiddle_index = count2*n/mi; u1 = w[twiddle_index*2+0]; u2 = -w[twiddle_index*2+1]; t1 = u1 * x[j2] - u2 * y[j2]; t2 = u1 * y[j2] + u2 * x[j2]; x[j2] = x[j] - t1; y[j2] = y[j] - t2; x[j] += t1; y[j] += t2; } } mj = mj*2; } } /*-----------------------------------2D FFT------------------------------*/ void FFT_2D(Complex **comp_in,int rows, int cols, double *w, int p) { int i,j; for(i=0;i<rows;i++) { double x[rows]; double y[rows]; for(j=0; j<cols; j++) { x[j]=comp_in[i][j].real; y[j]=comp_in[i][j].imag; } FFT(x,y,cols,w,p); for(j=0; j<cols; j++) { comp_in[i][j].real=x[j]; comp_in[i][j].imag=y[j] ; } } } /*-----------------------------------calculate the transpose------------------------------*/ Complex ** transpose(int N,Complex **comp_in,int p){ int blockrow, blockcolumn, i = 0, j = 0; int blocksize; blocksize = 16; Complex **arr = (Complex **)malloc(N * sizeof(Complex *)); for (i=0; i<N; i++) arr[i] = (Complex *)malloc(N * sizeof(Complex )); for (blockrow = 0; blockrow < N; blockrow += blocksize) { #pragma omp parallel \ shared(comp_in,arr,blockrow) \ private(i,j) num_threads(p) #pragma omp for nowait for (blockcolumn = 0; blockcolumn < N; blockcolumn += blocksize) { for (i = blockrow; i < blockrow + blocksize; i++) { for (j = blockcolumn; j < blockcolumn + blocksize; j++) 
{ arr[i][j] = comp_in[j][i]; } } } } return arr; } /*-----------------------------------convert complex arrays to image------------------------------*/ PPMImageGS * convert_comp_img(Complex **comp_in,int rows,int cols, int p) { int i,j; PPMImageGS *im2 = (PPMImageGS *) malloc(sizeof(PPMImageGS)); im2->x = rows; im2->y = cols; im2->data = (PPMPixelGS *) malloc(rows*cols*sizeof(PPMPixelGS)); double temp ; int idx; # pragma omp parallel \ shared (im2, rows,cols ) \ private ( j,idx,temp ) num_threads(p) # pragma omp for nowait for(i=0;i<rows;i++) { for(j=0; j<cols; j++) { idx = cols*i + j; temp = sqrt(comp_in[i][j].real*comp_in[i][j].real + comp_in[i][j].imag*comp_in[i][j].imag); PPMPixelGS *temp2 = im2->data + idx; temp2->gs = floor(temp); } } return im2; } /*-----------------------------------main function------------------------------*/ int main(int argc, char* argv[]) { struct timespec start_e2e, end_e2e, start_alg, end_alg, e2e, alg; clock_gettime(CLK, &start_e2e); int n = atoi(argv[1]); int p = atoi(argv[2]); //int run_id = atoi(argv[3]); char filename[30]; strcpy(filename,"input/"); strcat(filename,"gs_"); strcat(filename,argv[1]); strcat(filename,".ppm"); char *problem_name = "FFT"; char *approach_name = "iterative"; PPMImageGS *image,*transformed_img; Complex **comp_in; image = readPPMGS(filename); int rows= image->x; int cols = image->y; double* w = ( double * ) malloc (rows* sizeof ( double ) ); clock_gettime(CLK, &start_alg); comp_in = convert(image, p); twiddle(rows,w,p); FFT_2D(comp_in, rows, cols,w,p); comp_in = transpose(rows,comp_in,p); FFT_2D(comp_in,rows,cols,w,p); comp_in = transpose(cols,comp_in,p); transformed_img = convert_comp_img(comp_in,rows,cols,p); clock_gettime(CLK, &end_alg); char out_file[30] ; strcpy(out_file,"output/"); strcat(out_file,"gs_"); strcat(out_file,argv[1]); strcat(out_file,"fft_parallel.ppm"); writePPMGS(out_file,transformed_img); free(w); clock_gettime(CLK, &end_e2e); e2e = diff(start_e2e, end_e2e); alg = diff(start_alg, 
end_alg); //printf("%d,%d,%ld,%ld,%ld,%ld\n", n, p, e2e.tv_sec, e2e.tv_nsec, alg.tv_sec, alg.tv_nsec); printf("%s,%s,%d,%d,%ld,%ld,%ld,%ld\n",problem_name, approach_name, n, p, e2e.tv_sec, e2e.tv_nsec, alg.tv_sec, alg.tv_nsec); return 0; } /*-----------------------------------convert to image array to ppm------------------------------*/ void writePPMGS(const char *filename, PPMImageGS *img) { FILE *fp; //open file for output fp = fopen(filename, "wb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } //write the header file //image format fprintf(fp, "P5\n"); //image size fprintf(fp, "%d %d\n",img->x,img->y); // rgb component depth fprintf(fp, "%d\n",RGB_COMPONENT_COLOR); // pixel data fwrite(img->data, img->x, img->y, fp); fclose(fp); } /*-----------------------------------convert image(ppm) to array------------------------------*/ static PPMImageGS *readPPMGS(const char *filename) { char buff[16]; PPMImageGS *img; FILE *fp; int c, rgb_comp_color; //open PPM file for reading fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } //read image format if (!fgets(buff, sizeof(buff), fp)) { perror(filename); exit(1); } //check the image format if (buff[0] != 'P' || buff[1] != '5') { fprintf(stderr, "Invalid image format (must be 'P5')\n"); exit(1); } //alloc memory form image img = (PPMImageGS *)malloc(sizeof(PPMImageGS)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } //check for comments c = getc(fp); while (c == '#') { while (getc(fp) != '\n') ; c = getc(fp); } ungetc(c, fp); //read image size information if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) { fprintf(stderr, "Invalid image size (error loading '%s')\n", filename); exit(1); } //read rgb component if (fscanf(fp, "%d", &rgb_comp_color) != 1) { fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename); exit(1); } //check rgb component depth if (rgb_comp_color!= RGB_COMPONENT_COLOR) { 
fprintf(stderr, "'%s' does not have 8-bits components\n", filename); exit(1); } while (fgetc(fp) != '\n') ; //memory allocation for pixel data img->data = (PPMPixelGS*)malloc(img->x * img->y * sizeof(PPMPixelGS)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } //read pixel data from file if (fread(img->data, img->x, img->y, fp) != img->y) { fprintf(stderr, "Error loading image '%s'\n", filename); exit(1); } fclose(fp); return img; }
core_dsyrk_blasfeo.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from core_blas/core_zsyrk.c, normal z -> d, Thu Aug 8 17:24:59 2019
 *
 **/

#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"

#include "blasfeo_d_aux.h"

/***************************************************************************//**
 *
 * @ingroup core_syrk
 *
 *  Performs one of the symmetric rank k operations
 *
 *    \f[ C = \alpha A \times A^T + \beta C, \f]
 *    or
 *    \f[ C = \alpha A^T \times A + \beta C, \f]
 *
 *  where alpha and beta are scalars, C is an n-by-n symmetric
 *  matrix, and A is an n-by-k matrix in the first case and a k-by-n
 *  matrix in the second case.
 *
 *******************************************************************************
 *
 * @param[in] uplo
 *          - PlasmaUpper: Upper triangle of C is stored;
 *          - PlasmaLower: Lower triangle of C is stored.
 *
 * @param[in] trans
 *          - PlasmaNoTrans: \f[ C = \alpha A \times A^T + \beta C; \f]
 *          - PlasmaTrans:   \f[ C = \alpha A^T \times A + \beta C. \f]
 *
 * @param[in] n
 *          The order of the matrix C. n >= 0.
 *
 * @param[in] k
 *          If trans = PlasmaNoTrans, number of columns of the A matrix;
 *          if trans = PlasmaTrans, number of rows of the A matrix.
 *
 * @param[in] alpha
 *          The scalar alpha.
 *
 * @param[in] A
 *          A is an lda-by-ka matrix.
 *          If trans = PlasmaNoTrans, ka = k;
 *          if trans = PlasmaTrans,   ka = n.
 *
 * @param[in] lda
 *          The leading dimension of the array A.
 *          If trans = PlasmaNoTrans, lda >= max(1, n);
 *          if trans = PlasmaTrans,   lda >= max(1, k).
 *
 * @param[in] beta
 *          The scalar beta.
 *
 * @param[in,out] C
 *          C is an ldc-by-n matrix.
 *          On exit, the uplo part of the matrix is overwritten
 *          by the uplo part of the updated matrix.
 *
 * @param[in] ldc
 *          The leading dimension of the array C. ldc >= max(1, n).
 *
 ******************************************************************************/
__attribute__((weak))
void plasma_core_dsyrk_blasfeo(plasma_enum_t uplo, plasma_enum_t trans,
                               int n, int k,
                               double alpha, struct blasfeo_dmat *sA, int ai, int aj,
                               double beta, struct blasfeo_dmat *sC, int ci, int cj)
{
    // NOTE(review): uplo and trans are accepted for interface compatibility
    // with the cblas version (kept commented out below) but are NOT forwarded:
    // blasfeo_dsyrk_ln is called unconditionally. Confirm all callers use the
    // lower / no-transpose combination.
    // cblas_dsyrk(CblasColMajor,
    //             (CBLAS_UPLO)uplo, (CBLAS_TRANSPOSE)trans,
    //             n, k,
    //             (alpha), A, lda,
    //             (beta), C, ldc);
    blasfeo_dsyrk_ln(n, k, alpha, sA, ai, aj, sA, ai, aj, beta, sC, ci, cj, sC, ci, cj);
}

/******************************************************************************/
// OpenMP task wrapper: records dependences on the raw blasfeo buffers and
// defers the rank-k update. Errors propagate via sequence->status.
void plasma_core_omp_dsyrk_blasfeo(
    plasma_enum_t uplo, plasma_enum_t trans,
    int n, int k,
    double alpha, struct blasfeo_dmat *sA, int ai, int aj,
    double beta, struct blasfeo_dmat *sC, int ci, int cj,
    plasma_sequence_t *sequence, plasma_request_t *request)
{
    // ak = number of columns of A actually touched, per the trans convention.
    int ak;
    if (trans == PlasmaNoTrans)
        ak = k;
    else
        ak = n;

    // Shallow copies of the matrix descriptors: task-local variables are
    // firstprivate by default, so the task sees valid descriptors even after
    // this function returns.
    struct blasfeo_dmat sA2, sC2;
    sA2 = *sA;
    sC2 = *sC;

    // Dependences are declared on the underlying data buffers (pA), sized by
    // the panel-major leading dimension cn.
    double *A = sA->pA;
    int sda = sA->cn;
    double *C = sC->pA;
    int sdc = sC->cn;

    // #pragma omp task depend(in:A[0:lda*ak]) \
    //                  depend(inout:C[0:ldc*n])
    #pragma omp task depend(in:A[0:sda*ak]) \
                     depend(inout:C[0:sdc*n])
    {
        if (sequence->status == PlasmaSuccess)
            plasma_core_dsyrk_blasfeo(uplo, trans,
                                      n, k,
                                      alpha, &sA2, ai, aj,
                                      beta, &sC2, ci, cj);
    }
}
GB_binop__eq_fc32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): per the line above, any fixes belong in the code generator,
// not in this file; review notes below are flagged but not applied to code.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__eq_fc32)
// A.*B function (eWiseMult):       GB (_AemultB)
// NOTE(review): the entry above presumably means GB (_AemultB_01__eq_fc32)
// (defined below); the generator appears to have dropped the suffix.
// A.*B function (eWiseMult):       GB (_AemultB_02__eq_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_03__eq_fc32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__eq_fc32)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((node))
// NOTE(review): "(node)" above and in the disabled rowscale stub below looks
// like a typo for "(none)"; harmless since that code is under #if 0.
// C+=B function (dense accum):     GB (_Cdense_accumB__eq_fc32)
// C+=b function (dense accum):     GB (_Cdense_accumb__eq_fc32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__eq_fc32)
// C=scalar+B                       GB (_bind1st__eq_fc32)
// C=scalar+B'                      GB (_bind1st_tran__eq_fc32)
// C=A+scalar                       GB (_bind2nd__eq_fc32)
// C=A'+scalar                      GB (_bind2nd_tran__eq_fc32)

// C type:   bool
// A type:   GxB_FC32_t
// B,b type: GxB_FC32_t
// BinaryOp: cij = GB_FC32_eq (aij, bij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_BTYPE \
    GxB_FC32_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    GxB_FC32_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = (crealf (Ax [pA]) != 0) || (cimagf (Ax [pA]) != 0)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = (crealf (Bx [pB]) != 0) || (cimagf (Bx [pB]) != 0)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y, i, j) \
    z = GB_FC32_eq (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EQ || GxB_NO_FC32 || GxB_NO_EQ_FC32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__eq_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__eq_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // accumulation is disabled for this operator (template excluded by #if 0)
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__eq_fc32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // accumulation is disabled for this operator (template excluded by #if 0)
    #if 0
    {
        // get the scalar b for C += b, of type GxB_FC32_t
        GxB_FC32_t bwork = (*((GxB_FC32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

// NOTE(review): "(node)" below is presumably a generator typo for "(none)";
// the stub is compiled out, so it has no effect.
GrB_Info GB ((node))
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__eq_fc32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__eq_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant. For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__eq_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_fc32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__eq_fc32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    GxB_FC32_t x = (*((GxB_FC32_t *) x_input)) ;
    GxB_FC32_t *Bx = (GxB_FC32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present per the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        GxB_FC32_t bij = Bx [p] ;
        Cx [p] = GB_FC32_eq (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__eq_fc32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    GxB_FC32_t *Ax = (GxB_FC32_t *) Ax_input ;
    GxB_FC32_t y = (*((GxB_FC32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present per the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        GxB_FC32_t aij = Ax [p] ;
        Cx [p] = GB_FC32_eq (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = Ax [pA] ;                  \
    Cx [pC] = GB_FC32_eq (x, aij) ;             \
}

GrB_Info GB (_bind1st_tran__eq_fc32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef GB_ATYPE
    #define GB_ATYPE \
        GxB_FC32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t x = (*((const GxB_FC32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later use of the macro
    #undef GB_ATYPE
    #define GB_ATYPE \
        GxB_FC32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    GxB_FC32_t aij = Ax [pA] ;                  \
    Cx [pC] = GB_FC32_eq (aij, y) ;             \
}

GrB_Info GB (_bind2nd_tran__eq_fc32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GxB_FC32_t y = (*((const GxB_FC32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
quantized_conv2d.h
/* Copyright 2018 The Blueoil Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=============================================================================*/
#ifndef DLK_FUNC_QUANTIZED_CONV2D_H_INCLUDED
#define DLK_FUNC_QUANTIZED_CONV2D_H_INCLUDED

#include <vector>
#include <memory>
#include <stdexcept>

#include "global.h"
#include "tensor_view.h"
#include "tensor_convert.h"
#include "operators.h"
#include "time_measurement.h"
#include "func/impl/quantized_conv2d_tiling.h"
#include "func/impl/quantized_conv2d_kn2row.h"
#include "func/impl/quantized_conv2d_accelerator.h"

#ifdef _OPENMP
#include <omp.h>
#endif

// Dispatches a binary/quantized convolution to one of three backends selected
// at compile time: the FPGA accelerator (RUN_ON_FPGA), the CPU tiling kernel
// (USE_NEON / USE_AVX), or the portable kn2row kernel (fallback).  The packed
// input is first converted into the backend's preferred layout inside
// p.device_input_buf; the raw accumulator output lands in p.device_output_buf.
template <typename T_input, MemoryLayout layout_input,
          typename T_kernel, MemoryLayout layout_kernel>
void QuantizedConv2D(
    const TensorView<QuantizedPacked<T_input>, layout_input>& input,
    const TensorView<QuantizedPacked<T_kernel>, layout_kernel>& kernel,
    binary_convolution_parameters p) {
  Measurement::Start("QuantizedConv2D");

  constexpr T_UINT TilingInTypeBitWidth = dlk::impl::tiling_input_elem_t::BitCount;
  T_UINT kh = p.normal_conv_params.kernel_height;
  T_UINT kw = p.normal_conv_params.kernel_width;
  T_UINT padding = p.normal_conv_params.padding;
  T_UINT ih = p.normal_conv_params.input_height;
  T_UINT iw = p.normal_conv_params.input_width;
  T_UINT ic = p.normal_conv_params.input_channels;
  T_UINT oc = p.normal_conv_params.output_channels;
  // maximum per-element activation value for an n-bit quantization
  T_UINT maxa = (1 << p.n_bit) - 1;
  auto size = oc * ih * iw;
  // Lazy fallback allocation of the accumulator buffer.
  // NOTE(review): `p` is passed BY VALUE here, so this pointer is stored only
  // in the local copy — if the caller relied on this allocation the caller's
  // p.device_output_buf would still be null and the buffer would leak.
  // Presumably the runtime normally preallocates this buffer; confirm.
  if (p.device_output_buf == nullptr)
    p.device_output_buf = new BIN_CONV_OUTPUT[size]();
  assert(kh == kw); // kernel rectangle must be square
  assert(kh % 2 == 1); // kernel size must be odd
  assert(1 <= kh && kh <= 5); // Only 1x1, 3x3, 5x5 are supported
  assert(ic * kh * kw * maxa <= std::numeric_limits<BIN_CONV_OUTPUT>::max()); // overflow check

#ifdef RUN_ON_FPGA
  // FPGA path: repack into the TCA accelerator layout, then hand off.
  dlk::impl::tca_input_t::tensor_info_t<std::size_t> shape = {
    (ic + QUANTIZED_PACKED::BitCount - 1) / QUANTIZED_PACKED::BitCount,
    ih,
    iw,
    p.bin_input_bitwidth,
    QUANTIZED_PACKED::BitCount
  };
  dlk::impl::tca_input_t tmp((QUANTIZED_PACKED*)p.device_input_buf, shape);
  convert_tensor(input, tmp);
  dlk::impl::TCAConv2d(tmp, kernel, p);
#elif defined USE_NEON || defined USE_AVX
  // SIMD path: tiling kernel with its own packed-element width.
  dlk::impl::tiling_input_t::tensor_info_t<std::size_t> shape = {
    ic / TilingInTypeBitWidth,
    ih,
    iw,
    p.bin_input_bitwidth,
    TilingInTypeBitWidth
  };
  dlk::impl::tiling_input_t tmp(reinterpret_cast<dlk::impl::tiling_input_elem_t*>(p.device_input_buf), shape);
  convert_tensor(input, tmp);
  dlk::impl::QuantizedConv2DTiling(tmp, kernel, p);
#else
  // Portable fallback: kn2row layout.
  dlk::impl::kn2row_input_t::tensor_info_t<std::size_t> shape = {
    ih,
    iw,
    ic / QUANTIZED_PACKED::BitCount,
    p.bin_input_bitwidth,
    QUANTIZED_PACKED::BitCount
  };
  dlk::impl::kn2row_input_t tmp(reinterpret_cast<QUANTIZED_PACKED*>(p.device_input_buf), shape);
  convert_tensor(input, tmp);
  dlk::impl::QuantizedConv2DKn2Row(tmp, kernel, p);
#endif

  Measurement::Stop();
}

// Runs the quantized convolution, then converts the integer accumulator
// output to float NHWC, applying a single scalar scaling factor.  The
// accumulator buffer is laid out in channel blocks of b=32 over the HW area.
template <typename T_input, MemoryLayout layout_input,
          typename T_kernel, MemoryLayout layout_kernel>
void func_QuantizedConv2D(
    const TensorView<QuantizedPacked<T_input>, layout_input>& input,
    const TensorView<QuantizedPacked<T_kernel>, layout_kernel>& kernel,
    const TensorView<T_FLOAT, MemoryLayout::NHWC>& output,
    const T_FLOAT scaling_factor,
    const binary_convolution_parameters& p) {
  QuantizedConv2D(input, kernel, p);

  Measurement::Start("QuantizedConv2D_ApplyScalingFactor");

  // NOTE(review): out_elems is computed but never used in this overload.
  unsigned out_elems = p.normal_conv_params.output_height
    * p.normal_conv_params.output_width
    * p.normal_conv_params.output_channels;

  // temporary: (2^n - 1) * (max - min)
  const T_FLOAT post_qtz_factor = 2.0f / 3.0f;
  const T_FLOAT coeff = scaling_factor * post_qtz_factor;
  size_t b = 32;                       // channel block width of the device buffer
  auto &ncp(p.normal_conv_params);
  auto true_out_channels = output.get_shape()[3];
  auto channel_blocks = true_out_channels / b;
  size_t area = ncp.output_height * ncp.output_width;
  auto out_buf = reinterpret_cast<VOLATILE_IF_FPGA BIN_CONV_OUTPUT*>(p.device_output_buf);
#pragma omp parallel for
  for (size_t hw = 0; hw < area; ++hw) {
    size_t out_index = hw * true_out_channels;
    // full channel blocks
    for (size_t s = 0; s < channel_blocks; ++s)
      for (size_t d = 0; d < b; ++d)
        output.data()[out_index++] = coeff * out_buf[hw * b + s * (area * b) + d];
    // remaining channels of a partial final block
    for (size_t d = 0; d < true_out_channels - channel_blocks*b; ++d)
      output.data()[out_index++] = coeff * out_buf[hw * b + channel_blocks * (area * b) + d];
  }

  Measurement::Stop();
}

// Same as above, but with one scaling factor per output channel.
template <typename T_input, MemoryLayout layout_input,
          typename T_kernel, MemoryLayout layout_kernel>
void func_QuantizedConv2D(
    const TensorView<QuantizedPacked<T_input>, layout_input>& input,
    const TensorView<QuantizedPacked<T_kernel>, layout_kernel>& kernel,
    const TensorView<T_FLOAT, MemoryLayout::NHWC>& output,
    T_FLOAT scaling_factor[],
    binary_convolution_parameters p) {
  QuantizedConv2D(input, kernel, p);

  // NOTE(review): out_elems and out_channels are computed but never used here.
  unsigned out_elems = p.normal_conv_params.output_height
    * p.normal_conv_params.output_width;
  unsigned out_channels = p.normal_conv_params.output_channels;

  size_t b = 32;                       // channel block width of the device buffer
  auto& ncp(p.normal_conv_params);
  auto true_out_channels = output.get_shape()[3];
  auto channel_blocks = true_out_channels / b;

  // temporary: (2^n - 1) * (max - min)
  T_FLOAT post_qtz_factor = 2.0 / 3.0;
  Measurement::Start("QuantizedConv2D_ApplyScalingFactor");
  size_t area = ncp.output_height * ncp.output_width;
  auto out_buf = reinterpret_cast<VOLATILE_IF_FPGA BIN_CONV_OUTPUT*>(p.device_output_buf);
#pragma omp parallel for
  for (size_t hw = 0; hw < area; ++hw) {
    size_t out_index = hw * true_out_channels;
    // full channel blocks, per-channel scale
    for (size_t s = 0; s < channel_blocks; ++s)
      for (size_t d = 0; d < b; ++d)
        output.data()[out_index++] = (scaling_factor[s*b + d] * post_qtz_factor)
          * out_buf[hw * b + s * (area * b) + d];
    // remaining channels of a partial final block
    for (size_t d = 0; d < true_out_channels - channel_blocks*b; ++d)
      output.data()[out_index++] = (scaling_factor[channel_blocks*b + d] * post_qtz_factor)
        * out_buf[hw * b + channel_blocks * (area * b) + d];
  }

  Measurement::Stop();
}

// Threshold-skipping variant: the backend already produced packed quantized
// output, so this only copies device_output_buf into the output tensor
// (chunked across threads when OpenMP is available).
template <typename T_input, MemoryLayout layout_input,
          typename T_kernel, MemoryLayout layout_kernel>
void func_QuantizedConv2DWithThreshold(
    const TensorView<QuantizedPacked<T_input>, layout_input>& input,
    const TensorView<QuantizedPacked<T_kernel>, layout_kernel>& kernel,
    const TensorView<QUANTIZED_PACKED, MemoryLayout::ChHWBCl>& output,
    const T_FLOAT scaling_factor,
    const binary_convolution_parameters& p) {
  QuantizedConv2D(input, kernel, p);

  unsigned out_elems = p.normal_conv_params.output_height
    * p.normal_conv_params.output_width
    * p.normal_conv_params.output_channels;
  // packed size: one bit per element per bit-plane
  const auto bytes = out_elems / 8 * p.n_bit;

  Measurement::Start("Memcpy");

#ifdef _OPENMP
  const int num_blocks = bytes / sizeof(QUANTIZED_PACKED);
  const int num_threads = omp_get_max_threads();
  // split the copy into one contiguous chunk per thread
  const int chunk_size = (num_blocks + num_threads - 1) / num_threads;
#pragma omp parallel for
  for (int i = 0; i < num_blocks; i += chunk_size) {
    memcpy(output.data() + i, (QUANTIZED_PACKED*)(p.device_output_buf) + i,
           std::min(chunk_size, num_blocks - i) * sizeof(QUANTIZED_PACKED));
  }
#else
  memcpy(output.data(), (void*)p.device_output_buf, bytes);
#endif

  Measurement::Stop();
}

// Threshold-skipping variant with float output: unpacks the n_bit bit-planes
// produced by the backend back into linear floats scaled to [0, max_value].
template <typename T_input, MemoryLayout layout_input,
          typename T_kernel, MemoryLayout layout_kernel>
void func_QuantizedConv2DWithThreshold(
    const TensorView<QuantizedPacked<T_input>, layout_input>& input,
    const TensorView<QuantizedPacked<T_kernel>, layout_kernel>& kernel,
    const TensorView<T_FLOAT, MemoryLayout::NHWC>& output,
    const T_FLOAT scaling_factor,
    const binary_convolution_parameters& p) {
  QuantizedConv2D(input, kernel, p);

  Measurement::Start("linear_to_float");

  // full-scale value of the n-bit code
  T_FLOAT n = (1 << p.n_bit) - 1;
  const auto& np = p.normal_conv_params;
  const auto out_height = np.output_height;
  const auto out_width = np.output_width;
  const auto out_channels = np.output_channels;
  const auto true_out_channels = output.get_shape()[3];
  auto out_buf = reinterpret_cast<VOLATILE_IF_FPGA QUANTIZED_PACKED::base_t*>(p.device_output_buf);
  for (unsigned r = 0; r < out_height; ++r) {
    for (unsigned c = 0; c < out_width; ++c) {
      for (unsigned d = 0; d < true_out_channels; ++d) {
        // base index of the n_bit bit-plane words for this (r, c).
        // NOTE(review): assumes each packed word covers all channels at one
        // spatial position (channel d selects a bit within the word) — confirm
        // against the backend's packed layout.
        const auto i = r * out_width * p.n_bit + c * p.n_bit;
        QUANTIZED_PACKED::base_t bits = 0;
        // gather channel d's bit from each bit-plane
        for (unsigned digit = 0; digit < p.n_bit; ++digit) {
          bits |= ((out_buf[i + digit] >> d) & 1) << digit;
        }
        T_FLOAT tmp = (T_FLOAT)bits;
        tmp = tmp / n;
        output(0, r, c, d) = tmp * p.max_value;
      }
    }
  }

  Measurement::Stop();
}

// Array-scaling wrapper: the packed-output path ignores scaling, so only
// the first factor is forwarded.
template <typename T_input, MemoryLayout layout_input,
          typename T_kernel, MemoryLayout layout_kernel>
void func_QuantizedConv2DWithThreshold(
    const TensorView<QuantizedPacked<T_input>, layout_input>& input,
    const TensorView<QuantizedPacked<T_kernel>, layout_kernel>& kernel,
    const TensorView<QUANTIZED_PACKED, MemoryLayout::ChHWBCl>& output,
    const T_FLOAT scaling_factor[],
    const binary_convolution_parameters& p) {
  func_QuantizedConv2DWithThreshold(input, kernel, output, scaling_factor[0], p);
}

// Array-scaling wrapper for the float-output path; forwards the first factor.
template <typename T_input, MemoryLayout layout_input,
          typename T_kernel, MemoryLayout layout_kernel>
void func_QuantizedConv2DWithThreshold(
    const TensorView<T_input, layout_input>& input,
    const TensorView<T_kernel, layout_kernel>& kernel,
    const TensorView<T_FLOAT, MemoryLayout::NHWC>& output,
    T_FLOAT scaling_factor[],
    binary_convolution_parameters p) {
  func_QuantizedConv2DWithThreshold(input, kernel, output, scaling_factor[0], p);
}

#endif // DLK_FUNC_QUANTIZED_CONV2D_H_INCLUDED
GB_unop__identity_uint32_uint8.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_uint32_uint8)
// op(A') function:  GB (_unop_tran__identity_uint32_uint8)

// C type:   uint32_t
// A type:   uint8_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint32_t z = (uint32_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    uint8_t aij = Ax [pA] ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    uint32_t z = (uint32_t) aij ;   \
    Cx [pC] = z ;                   \
}

// true if operator is the identity op with no typecasting
// (0 here: uint8 -> uint32 requires a cast, so memcpy cannot be used)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_uint32_uint8)
(
    uint32_t *Cx,               // Cx and Ax may be aliased
    const uint8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // A is full/sparse: every entry is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint8_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint8_t aij = Ax [p] ;
            uint32_t z = (uint32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_uint32_uint8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_concat_sparse.c
//------------------------------------------------------------------------------
// GB_concat_sparse: concatenate an array of matrices into a sparse matrix
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

#define GB_FREE_WORKSPACE                       \
    if (S != NULL)                              \
    {                                           \
        for (int64_t k = 0 ; k < m * n ; k++)   \
        {                                       \
            GB_Matrix_free (&(S [k])) ;         \
        }                                       \
    }                                           \
    GB_FREE_WORK (&S, S_size) ;                 \
    GB_FREE_WORK (&Work, Work_size) ;           \
    GB_WERK_POP (A_ek_slicing, int64_t) ;

#define GB_FREE_ALL             \
{                               \
    GB_FREE_WORKSPACE ;         \
    GB_phbix_free (C) ;         \
}

#include "GB_concat.h"

GrB_Info GB_concat_sparse           // concatenate into a sparse matrix
(
    GrB_Matrix C,                   // input/output matrix for results
    const bool C_iso,               // if true, construct C as iso
    const GB_void *cscalar,         // iso value of C, if C is iso
    const int64_t cnz,              // # of entries in C
    const GrB_Matrix *Tiles,        // 2D row-major array of size m-by-n,
    const GrB_Index m,
    const GrB_Index n,
    const int64_t *restrict Tile_rows,  // size m+1
    const int64_t *restrict Tile_cols,  // size n+1
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // allocate C as a sparse matrix
    //--------------------------------------------------------------------------

    GrB_Info info ;
    GrB_Matrix A = NULL ;
    ASSERT_MATRIX_OK (C, "C input to concat sparse", GB0) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    int64_t *Work = NULL ;
    size_t Work_size = 0 ;
    GrB_Matrix *S = NULL ;
    size_t S_size = 0 ;

    // save C's properties before its old content is freed
    GrB_Type ctype = C->type ;
    int64_t cvlen = C->vlen ;
    int64_t cvdim = C->vdim ;
    bool csc = C->is_csc ;
    size_t csize = ctype->size ;
    GB_Type_code ccode = ctype->code ;
    float hyper_switch = C->hyper_switch ;
    float bitmap_switch = C->bitmap_switch ;
    int sparsity_control = C->sparsity_control ;
    bool static_header = C->static_header ;
    GB_phbix_free (C) ;
    // set C->iso = C_iso   OK
    GB_OK (GB_new_bix (&C, static_header,   // prior static or dynamic header
        ctype, cvlen, cvdim, GB_Ap_malloc, csc, GxB_SPARSE, false,
        hyper_switch, cvdim, cnz, true, C_iso, Context)) ;
    C->bitmap_switch = bitmap_switch ;
    C->sparsity_control = sparsity_control ;
    int64_t *restrict Cp = C->p ;
    int64_t *restrict Ci = C->i ;
    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    if (C_iso)
    {
        // a single iso value serves every entry of C
        memcpy (C->x, cscalar, csize) ;
    }

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    // nouter/ninner: tile-grid extents along C's vector/index dimensions
    int64_t nouter = csc ? n : m ;
    int64_t ninner = csc ? m : n ;
    // Work [inner*cvdim + j]: entry counts (later: insertion offsets) per tile row
    Work = GB_CALLOC_WORK (ninner * cvdim, int64_t, &Work_size) ;
    // S [k]: temporary transposed/converted copies of tiles, freed at the end
    S = GB_CALLOC_WORK (m * n, GrB_Matrix, &S_size) ;
    if (S == NULL || Work == NULL)
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // count entries in each vector of each tile
    //--------------------------------------------------------------------------

    for (int64_t outer = 0 ; outer < nouter ; outer++)
    {
        for (int64_t inner = 0 ; inner < ninner ; inner++)
        {

            //------------------------------------------------------------------
            // get the tile A; transpose and typecast, if needed
            //------------------------------------------------------------------

            A = csc ? GB_TILE (Tiles, inner, outer)
                    : GB_TILE (Tiles, outer, inner) ;
            GrB_Matrix T = NULL ;
            ASSERT_MATRIX_OK (A, "A tile for concat sparse", GB0) ;
            if (csc != A->is_csc)
            {
                // T = (ctype) A', not in-place, using a dynamic header
                GB_OK (GB_new (&T, false,   // auto sparsity, new header
                    A->type, A->vdim, A->vlen, GB_Ap_null, csc,
                    GxB_AUTO_SPARSITY, -1, 1, Context)) ;
                // save T in array S
                if (csc)
                {
                    GB_TILE (S, inner, outer) = T ;
                }
                else
                {
                    GB_TILE (S, outer, inner) = T ;
                }
                GB_OK (GB_transpose_cast (T, ctype, csc, A, false, Context)) ;
                A = T ;
                GB_MATRIX_WAIT (A) ;
                ASSERT_MATRIX_OK (A, "T=A' for concat sparse", GB0) ;
            }
            ASSERT (C->is_csc == A->is_csc) ;
            ASSERT (!GB_ANY_PENDING_WORK (A)) ;

            //------------------------------------------------------------------
            // ensure the tile is not bitmap
            //------------------------------------------------------------------

            if (GB_IS_BITMAP (A))
            {
                if (T == NULL)
                {
                    // copy A into T
                    // set T->iso = A->iso     OK: no burble needed
                    GB_OK (GB_dup_worker (&T, A->iso, A, true, NULL, Context)) ;
                    // save T in array S
                    if (csc)
                    {
                        GB_TILE (S, inner, outer) = T ;
                    }
                    else
                    {
                        GB_TILE (S, outer, inner) = T ;
                    }
                    ASSERT_MATRIX_OK (T, "T=dup(A) for concat sparse", GB0) ;
                }
                // convert T from bitmap to sparse
                GB_OK (GB_convert_bitmap_to_sparse (T, Context)) ;
                ASSERT_MATRIX_OK (T, "T bitmap to sparse, concat sparse", GB0) ;
                A = T ;
            }
            ASSERT (!GB_IS_BITMAP (A)) ;

            //------------------------------------------------------------------
            // log the # of entries in each vector of the tile A
            //------------------------------------------------------------------

            const int64_t anvec = A->nvec ;
            const int64_t avlen = A->vlen ;
            int64_t cvstart = csc ? Tile_cols [outer] : Tile_rows [outer] ;
            // W [j] accumulates the count for vector cvstart+j of C
            int64_t *restrict W = Work + inner * cvdim + cvstart ;
            int nth = GB_nthreads (anvec, chunk, nthreads_max) ;
            if (GB_IS_FULL (A))
            {
                // A is full
                int64_t j ;
                #pragma omp parallel for num_threads(nth) schedule(static)
                for (j = 0 ; j < anvec ; j++)
                {
                    // W [j] = # of entries in A(:,j), which is just avlen
                    W [j] = avlen ;
                }
            }
            else
            {
                // A is sparse or hyper
                int64_t k ;
                int64_t *restrict Ah = A->h ;
                int64_t *restrict Ap = A->p ;
                #pragma omp parallel for num_threads(nth) schedule(static)
                for (k = 0 ; k < anvec ; k++)
                {
                    // W [j] = # of entries in A(:,j), the kth column of A
                    int64_t j = GBH (Ah, k) ;
                    W [j] = Ap [k+1] - Ap [k] ;
                }
            }
        }
    }

    //--------------------------------------------------------------------------
    // cumulative sum of entries in each tile
    //--------------------------------------------------------------------------

    // Two passes: first turn per-tile counts into offsets within each vector
    // of C (and total counts in Cp), then shift by the vector's global start.
    int nth = GB_nthreads (ninner*cvdim, chunk, nthreads_max) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nth) schedule(static)
    for (k = 0 ; k < cvdim ; k++)
    {
        int64_t s = 0 ;
        for (int64_t inner = 0 ; inner < ninner ; inner++)
        {
            int64_t p = inner * cvdim + k ;
            int64_t c = Work [p] ;
            Work [p] = s ;
            s += c ;
        }
        // total number of entries in C(:,k)
        Cp [k] = s ;
    }
    GB_cumsum (Cp, cvdim, &(C->nvec_nonempty), nthreads_max, Context) ;
    #pragma omp parallel for num_threads(nth) schedule(static)
    for (k = 0 ; k < cvdim ; k++)
    {
        int64_t pC = Cp [k] ;
        for (int64_t inner = 0 ; inner < ninner ; inner++)
        {
            int64_t p = inner * cvdim + k ;
            Work [p] += pC ;
        }
    }

    //--------------------------------------------------------------------------
    // concatenate all matrices into C
    //--------------------------------------------------------------------------

    for (int64_t outer = 0 ; outer < nouter ; outer++)
    {
        for (int64_t inner = 0 ; inner < ninner ; inner++)
        {

            //------------------------------------------------------------------
            // get the tile A, either the temporary matrix T or the original A
            //------------------------------------------------------------------

            A = csc ? GB_TILE (S, inner, outer)
                    : GB_TILE (S, outer, inner) ;
            if (A == NULL)
            {
                A = csc ? GB_TILE (Tiles, inner, outer)
                        : GB_TILE (Tiles, outer, inner) ;
            }
            ASSERT_MATRIX_OK (A, "A tile again, concat sparse", GB0) ;
            ASSERT (!GB_IS_BITMAP (A)) ;
            ASSERT (C->is_csc == A->is_csc) ;
            ASSERT (!GB_ANY_PENDING_WORK (A)) ;
            GB_Type_code acode = A->type->code ;

            //------------------------------------------------------------------
            // determine where to place the tile in C
            //------------------------------------------------------------------

            // The tile A appears in vectors cvstart:cvend-1 of C, and indices
            // cistart:ciend-1.

            int64_t cvstart, cvend, cistart, ciend ;
            if (csc)
            {
                // C and A are held by column
                // Tiles is row-major and accessed in column order
                cvstart = Tile_cols [outer] ;
                cvend   = Tile_cols [outer+1] ;
                cistart = Tile_rows [inner] ;
                ciend   = Tile_rows [inner+1] ;
            }
            else
            {
                // C and A are held by row
                // Tiles is row-major and accessed in row order
                cvstart = Tile_rows [outer] ;
                cvend   = Tile_rows [outer+1] ;
                cistart = Tile_cols [inner] ;
                ciend   = Tile_cols [inner+1] ;
            }

            // get the workspace pointer array W for this tile
            int64_t *restrict W = Work + inner * cvdim + cvstart ;

            //------------------------------------------------------------------
            // slice the tile
            //------------------------------------------------------------------

            int64_t avdim = cvend - cvstart ;
            int64_t avlen = ciend - cistart ;
            ASSERT (avdim == A->vdim) ;
            ASSERT (avlen == A->vlen) ;
            int A_nthreads, A_ntasks ;
            const int64_t *restrict Ap = A->p ;
            const int64_t *restrict Ah = A->h ;
            const int64_t *restrict Ai = A->i ;
            const bool A_iso = A->iso ;
            GB_SLICE_MATRIX (A, 1, chunk) ;

            //------------------------------------------------------------------
            // copy the tile A into C
            //------------------------------------------------------------------

            bool done = false ;

            if (C_iso)
            {

                //--------------------------------------------------------------
                // C and A are iso
                //--------------------------------------------------------------

                // pattern only: the iso value was set once above
                #define GB_ISO_CONCAT
                #define GB_COPY(pC,pA,A_iso) ;
                #include "GB_concat_sparse_template.c"

            }
            else
            {

                #ifndef GBCOMPACT
                if (ccode == acode)
                {
                    // no typecasting needed
                    switch (csize)
                    {
                        #undef  GB_COPY
                        #define GB_COPY(pC,pA,A_iso)                \
                            Cx [pC] = GBX (Ax, pA, A_iso) ;

                        case GB_1BYTE : // uint8, int8, bool, or 1-byte user
                            #define GB_CTYPE uint8_t
                            #include "GB_concat_sparse_template.c"
                            break ;

                        case GB_2BYTE : // uint16, int16, or 2-byte user
                            #define GB_CTYPE uint16_t
                            #include "GB_concat_sparse_template.c"
                            break ;

                        case GB_4BYTE : // uint32, int32, float, or 4-byte user
                            #define GB_CTYPE uint32_t
                            #include "GB_concat_sparse_template.c"
                            break ;

                        case GB_8BYTE : // uint64, int64, double, float complex,
                                        // or 8-byte user defined
                            #define GB_CTYPE uint64_t
                            #include "GB_concat_sparse_template.c"
                            break ;

                        case GB_16BYTE : // double complex or 16-byte user
                            #define GB_CTYPE GB_blob16
                            #include "GB_concat_sparse_template.c"
                            break ;

                        default:;
                    }
                }
                #endif
            }

            if (!done)
            {
                // with typecasting or user-defined types
                GB_cast_function cast_A_to_C = GB_cast_factory (ccode, acode) ;
                size_t asize = A->type->size ;
                #define GB_CTYPE GB_void
                #undef  GB_COPY
                #define GB_COPY(pC,pA,A_iso)                        \
                    cast_A_to_C (Cx + (pC)*csize,                   \
                        Ax + (A_iso ? 0:(pA)*asize), asize) ;
                #include "GB_concat_sparse_template.c"
            }

            GB_WERK_POP (A_ek_slicing, int64_t) ;
        }
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORKSPACE ;
    C->magic = GB_MAGIC ;
    ASSERT_MATRIX_OK (C, "C from concat sparse", GB0) ;
    return (GrB_SUCCESS) ;
}
GB_dense_subassign_25_template.c
//------------------------------------------------------------------------------
// GB_dense_subassign_25_template: C<M> = A where C is empty and A is dense
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// C<M> = A where C starts as empty, M is structural, and A is dense.  The
// pattern of C is an exact copy of M.  A is full, dense, or bitmap.
// M is sparse or hypersparse, and C is constructed with the same pattern as M.

// NOTE(review): this is a template fragment (a bare block, not a function);
// C, M, A, M_ek_slicing, M_ntasks and M_nthreads are supplied by the file
// that #includes it.

{

    //--------------------------------------------------------------------------
    // get C, M, and A
    //--------------------------------------------------------------------------

    ASSERT (GB_sparsity (M) == GB_sparsity (C)) ;
    GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ;
    int64_t *restrict Ci = C->i ;

    ASSERT (GB_IS_SPARSE (M) || GB_IS_HYPERSPARSE (M)) ;
    ASSERT (GB_JUMBLED_OK (M)) ;
    const int64_t *restrict Mp = M->p ;
    const int64_t *restrict Mh = M->h ;
    const int64_t *restrict Mi = M->i ;
    const int64_t mvlen = M->vlen ;

    const bool A_is_bitmap = GB_IS_BITMAP (A) ;
    const GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ;
    const int8_t *restrict Ab = A->b ;
    const int64_t avlen = A->vlen ;

    // the ek-slicing workspace is three arrays of length M_ntasks
    const int64_t *restrict kfirst_Mslice = M_ek_slicing ;
    const int64_t *restrict klast_Mslice  = M_ek_slicing + M_ntasks ;
    const int64_t *restrict pstart_Mslice = M_ek_slicing + M_ntasks * 2 ;

    //--------------------------------------------------------------------------
    // C<M> = A
    //--------------------------------------------------------------------------

    if (A_is_bitmap)
    {

        //----------------------------------------------------------------------
        // A is bitmap, so zombies can be created in C
        //----------------------------------------------------------------------

        int64_t nzombies = 0 ;
        int tid ;
        #pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1) \
            reduction(+:nzombies)
        for (tid = 0 ; tid < M_ntasks ; tid++)
        {

            // if kfirst > klast then task tid does no work at all
            int64_t kfirst = kfirst_Mslice [tid] ;
            int64_t klast  = klast_Mslice  [tid] ;
            int64_t task_nzombies = 0 ;

            //------------------------------------------------------------------
            // C<M(:,kfirst:klast)> = A(:,kfirst:klast)
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // find the part of M(:,k) to be operated on by this task
                //--------------------------------------------------------------

                int64_t j = GBH (Mh, k) ;
                int64_t pM_start, pM_end ;
                GB_get_pA (&pM_start, &pM_end, tid, k,
                    kfirst, klast, pstart_Mslice, Mp, mvlen) ;

                //--------------------------------------------------------------
                // C<M(:,j)> = A(:,j)
                //--------------------------------------------------------------

                // M is hypersparse or sparse.  C is the same as M.
                // pA points to the start of A(:,j) since A is dense
                int64_t pA = j * avlen ;

                for (int64_t pM = pM_start ; pM < pM_end ; pM++)
                {
                    int64_t i = Mi [pM] ;
                    int64_t p = pA + i ;
                    if (Ab [p])
                    {
                        // C(i,j) = A(i,j)
                        GB_COPY_A_TO_C (Cx, pM, Ax, p) ;    // Cx [pM] = Ax [p]
                    }
                    else
                    {
                        // C(i,j) becomes a zombie
                        task_nzombies++ ;
                        Ci [pM] = GB_FLIP (i) ;
                    }
                }
            }
            nzombies += task_nzombies ;
        }

        C->nzombies = nzombies ;

    }
    else
    {

        //----------------------------------------------------------------------
        // A is full, so no zombies will appear in C
        //----------------------------------------------------------------------

        int tid ;
        #pragma omp parallel for num_threads(M_nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < M_ntasks ; tid++)
        {

            // if kfirst > klast then task tid does no work at all
            int64_t kfirst = kfirst_Mslice [tid] ;
            int64_t klast  = klast_Mslice  [tid] ;

            //------------------------------------------------------------------
            // C<M(:,kfirst:klast)> = A(:,kfirst:klast)
            //------------------------------------------------------------------

            for (int64_t k = kfirst ; k <= klast ; k++)
            {

                //--------------------------------------------------------------
                // find the part of M(:,k) to be operated on by this task
                //--------------------------------------------------------------

                int64_t j = GBH (Mh, k) ;
                int64_t pM_start, pM_end ;
                GB_get_pA (&pM_start, &pM_end, tid, k,
                    kfirst, klast, pstart_Mslice, Mp, mvlen) ;

                //--------------------------------------------------------------
                // C<M(:,j)> = A(:,j)
                //--------------------------------------------------------------

                // M is hypersparse or sparse.  C is the same as M.
                // pA points to the start of A(:,j) since A is dense
                int64_t pA = j * avlen ;

                GB_PRAGMA_SIMD_VECTORIZE
                for (int64_t pM = pM_start ; pM < pM_end ; pM++)
                {
                    int64_t p = pA + GBI (Mi, pM, mvlen) ;
                    GB_COPY_A_TO_C (Cx, pM, Ax, p) ;        // Cx [pM] = Ax [p]
                }
            }
        }
    }
}
main.c
/************************************************************** The program reads a BMP image file and creates a new image that is the negative or desaturated of the input file. **************************************************************/ // To compile: gcc -c qdbmp.c && gcc -c -fopenmp main.c && gcc -fopenmp qdbmp.o main.o -o main && ./main #include "qdbmp.h" #include <stdio.h> #include <omp.h> typedef enum {desaturate, negative} ImgProcessing ; /* Creates a negative image of the input bitmap file */ int main() { const char* inFile = "okanagan.bmp"; const char* outFile = "okanagan_processed.bmp"; const ImgProcessing processingType = desaturate; //or negative UCHAR r, g, b; UINT width, height; UINT x, y; BMP* bmp; /* Read an image file */ bmp = BMP_ReadFile(inFile); BMP_CHECK_ERROR(stdout, -1); /* Get image's dimensions */ width = BMP_GetWidth(bmp); height = BMP_GetHeight(bmp); /* Input number of threads */ int n_threads; printf("Input number of threads:\n"); scanf("%d", &n_threads); if (height%n_threads != 0) { fprintf(stderr, "warning: Recommend number of threads divisible by image height.\n"); } int seg_lenth = height / n_threads; double t = omp_get_wtime(); omp_set_nested(1); /* Iterate through all the image's pixels */ #pragma omp parallel for num_threads(n_threads) private(r,g,b,x,y) for (x = 0; x < width; ++x) { #pragma omp parallel for num_threads(n_threads) private(r,g,b,x,y) for (y = 0; y < height; ++y) { /* Get pixel's RGB values */ BMP_GetPixelRGB(bmp, x, y, &r, &g, &b); /* Write new RGB values */ if(processingType == negative) BMP_SetPixelRGB(bmp, x, y, 255 - r, 255 - g, 255 - b); else if(processingType == desaturate){ UCHAR gray = r * 0.3 + g * 0.59 + b * 0.11; BMP_SetPixelRGB(bmp, x, y, gray, gray, gray); } } } /* calculate and print processing time*/ t = 1000 * (omp_get_wtime() - t); printf("Finished image processing in %.1f ms.", t); /* Save result */ BMP_WriteFile(bmp, outFile); BMP_CHECK_ERROR(stdout, -2); /* Free all memory allocated for the 
image */ BMP_Free(bmp); return 0; } /* num_threads no_nest_exe_time nest_exe_time 2 265.7ms 267.4ms 4 242.2ms 993.3ms 8 211.7ms 451.4ms 16 153.1ms 912.5ms 24 151.5ms 1444.5ms Nested solution is not better as it adds overheads. */
dis-ok.c
#include <omp.h> #include <stdio.h> /* for(i=4;i<100;i++){ S1: a[i] = b[i-2] + 1; S2: c[i] = b[i-1] + f[i]; S3: b[i] = a[i-1] + 2; S4: d[i] = d[i+1] + b[i-1]; } */ #define Iter 10000 int a[Iter],b[Iter],c[Iter],d[Iter],f[Iter]; int a1[Iter],b1[Iter],c1[Iter],d1[Iter],f1[Iter]; int main() { int i; int old_d[Iter]; // duplicating array-d to avoid // anti-dependency for(i=0;i<Iter;i++) a[i]=b[i]=c[i]=d[i]=f[i]=old_d[i]=i; for(i=0;i<Iter;i++) a1[i]=b1[i]=c1[i]=d1[i]=f1[i]=i; for(i=4;i<Iter;i++){ a1[i] = b1[i-2] + 1; c1[i] = b1[i-1] + f1[i]; b1[i] = a1[i-1] + 2; d1[i] = d1[i+1] + b1[i-1]; } /* The sequencial Loop below containing Statement S1 and S3 */ for(i=4;i<Iter;i++){ a[i] = b[i-2] + 1; // S1 b[i] = a[i-1] + 2; // S3 } /* The Parallel Loop below only containing both Statement S2 and S4*/ #pragma omp parallel for shared(c,b,f,d,old_d) private(i) for(i=4;i<Iter;i++) { c[i] = b[i-1] + f[i] ; // S2 d[i] = old_d[i+1] + b[i-1] ; // S4 } /* The Parallel Loop below only containing Statement S4 */ //#pragma omp parallel for shared(d,b) private(i) // for(i=4;i<Iter;i++) // d[i] = old_d[i+1] + b[i-1] ; // S4 for(i=4;i<Iter;i++) { if ( a[i]!=a1[i]) printf("a[%d] = %d , a1[%d] = %d\n",i,a[i],i,a1[i]); if ( b[i]!=b1[i]) printf("b[%d] = %d , b1[%d] = %d\n",i,b[i],i,b1[i]); if ( c[i]!=c1[i]) printf("c[%d] = %d , c1[%d] = %d\n",i,c[i],i,c1[i]); if ( d[i]!=d1[i]) printf("d[%d] = %d , d1[%d] = %d\n",i,d[i],i,d1[i]); } return 0; }
nanopore_hdp.c
//
//  nanopore_hdp.c
//
//  Created by Jordan Eizenga on 1/8/16.
//
//  Maps k-mers over an arbitrary (sorted) alphabet onto Dirichlet-process IDs
//  and wires them into a HierarchicalDirichletProcess (hdp_math_utils.h)
//  under several alternative tree topologies: flat, multiset, middle-2-nt,
//  character-group multiset, and purine composition.  Also handles training
//  from signal-alignment TSV files and (de)serialization of the whole model.

// column indices into the alignment TSV, in 0-based index
#define ALIGNMENT_KMER_COL 9
#define ALIGNMENT_STRAND_COL 4
#define ALIGNMENT_SIGNAL_COL 13
#define NUM_ALIGNMENT_COLS 15
// layout of one k-mer entry in a minION model file row
#define MODEL_ROW_HEADER_LENGTH 0
#define MODEL_MEAN_ENTRY 0
#define MODEL_NOISE_ENTRY 1
#define MODEL_ENTRY_LENGTH 5

#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
#include "pairwiseAligner.h"
#include "hdp_math_utils.h"

// Wrap an already-built HDP in a NanoporeHDP.  The alphabet is copied and
// sorted (ascending char order) so that kmer_id() is deterministic regardless
// of the caller's ordering.  Takes ownership of `hdp`; copies `alphabet`.
NanoporeHDP* package_nanopore_hdp(HierarchicalDirichletProcess* hdp, const char* alphabet, int64_t alphabet_size,
                                  int64_t kmer_length) {
    NanoporeHDP* nhdp = (NanoporeHDP*) malloc(sizeof(NanoporeHDP));

    // copy and sort alphabet (selection sort; alphabets are tiny)
    char* internal_alphabet = (char*) malloc(sizeof(char) * (alphabet_size + 1));
    for (int64_t i = 0; i < alphabet_size; i++) {
        internal_alphabet[i] = alphabet[i];
    }

    int64_t min_idx;
    char temp;
    for (int64_t i = 0; i < alphabet_size; i++) {
        min_idx = i;
        for (int64_t j = i + 1; j < alphabet_size; j++) {
            if (internal_alphabet[j] < internal_alphabet[min_idx]) {
                min_idx = j;
            }
        }
        temp = internal_alphabet[i];
        internal_alphabet[i] = internal_alphabet[min_idx];
        internal_alphabet[min_idx] = temp;
    }

    // NOTE(review): this duplicate check scans the caller's UNSORTED alphabet,
    // so duplicates are only caught when adjacent in the input; checking the
    // just-sorted internal_alphabet looks intended — confirm.
    for (int64_t i = 1; i < alphabet_size; i++) {
        if (alphabet[i - 1] == alphabet[i]) {
            fprintf(stderr, "Characters of alphabet must be distinct.\n");
            exit(EXIT_FAILURE);
        }
    }

    internal_alphabet[alphabet_size] = '\0';

    nhdp->hdp = hdp;
    nhdp->alphabet = internal_alphabet;
    nhdp->alphabet_size = alphabet_size;
    nhdp->kmer_length = kmer_length;
    // note: destroying the HDP housed in the NHDP will destroy the DistributionMetricMemo
    nhdp->distr_metric_memos = stSet_construct2(&free);

    return nhdp;
}

// Free the NanoporeHDP, its owned HDP, memo set, and alphabet copy.
void destroy_nanopore_hdp(NanoporeHDP* nhdp) {
    destroy_hier_dir_proc(nhdp->hdp);
    stSet_destruct(nhdp->distr_metric_memos);
    free(nhdp->alphabet);
    free(nhdp);
}

// Simple accessors.
int64_t get_nanopore_hdp_kmer_length(NanoporeHDP* nhdp) {
    return nhdp->kmer_length;
}

int64_t get_nanopore_hdp_alphabet_size(NanoporeHDP* nhdp) {
    return nhdp->alphabet_size;
}

// Returns a freshly malloc'd NUL-terminated copy of the internal (sorted)
// alphabet; caller frees.
char* get_nanopore_hdp_alphabet(NanoporeHDP* nhdp) {
    char* alphabet = nhdp->alphabet;
    int64_t alphabet_size = nhdp->alphabet_size;
    char* copy = (char*) malloc(sizeof(char) * (alphabet_size + 1));
    for (int64_t i = 0; i < alphabet_size; i++) {
        copy[i] = alphabet[i];
    }
    copy[alphabet_size] = '\0';
    return copy;
}

// wrappers around the underlying HDP sampling/finalization API
void execute_nhdp_gibbs_sampling(NanoporeHDP* nhdp, int64_t num_samples, int64_t burn_in, int64_t thinning,
                                 bool verbose) {
    execute_gibbs_sampling(nhdp->hdp, num_samples, burn_in, thinning, verbose);
}

void execute_nhdp_gibbs_sampling_with_snapshots(NanoporeHDP* nhdp, int64_t num_samples, int64_t burn_in,
                                                int64_t thinning,
                                                void (*snapshot_func)(HierarchicalDirichletProcess*, void*),
                                                void* snapshot_func_args, bool verbose) {
    execute_gibbs_sampling_with_snapshots(nhdp->hdp, num_samples, burn_in, thinning, snapshot_func,
                                          snapshot_func_args, verbose);
}

void finalize_nhdp_distributions(NanoporeHDP* nhdp) {
    finalize_distributions(nhdp->hdp);
}

// Estimate normal-inverse-gamma prior parameters (mu, nu, alpha, beta) from a
// minION model file via MLE over the per-kmer (level_mean, noise_stdv) table.
// Aborts on a malformed header or transitions line.
void normal_inverse_gamma_params_from_minION(const char* model_filepath, double* mu_out, double* nu_out,
                                             double* alpha_out, double* beta_out) {
    // model format:
    // stateNumber \t alphabetSize \t alphabet \t kmerSize
    // [level_mean, level_stdv, noise_mean, noise_stdv, noise_lambda]
    FILE* model_file = fopen(model_filepath, "r");

    char* line = stFile_getLineFromFile(model_file);
    stList* tokens = stString_split(line);
    if (stList_length(tokens) != 4) {
        st_errAbort("normal_inverse_gamma_params_from_minION: Model format has changed invalid model"
                            "found here %s\n", model_filepath);
    }
    free(line);
    stList_destruct(tokens);

    // ignore transitions line
    line = stFile_getLineFromFile(model_file);
    tokens = stString_split(line);
    if (stList_length(tokens) != 10) {
        st_errnoAbort("More than 3-state hmm transitions parameters found\n");
    }
    // NOTE(review): the transitions `line` and `tokens` above are never freed
    // before being reassigned below — small one-shot leak; confirm intent.

    line = stFile_getLineFromFile(model_file);
    tokens = stString_split(line);
    int64_t table_length = (stList_length(tokens) - MODEL_ROW_HEADER_LENGTH) / MODEL_ENTRY_LENGTH;

    double* means = (double*) malloc(sizeof(double) * table_length);
    double* precisions = (double*) malloc(sizeof(double) * table_length);
    int64_t mean_offset = MODEL_ROW_HEADER_LENGTH + MODEL_MEAN_ENTRY;   // 1
    int64_t noise_offset = MODEL_ROW_HEADER_LENGTH + MODEL_NOISE_ENTRY; // 2
    char* mean_str;
    char* noise_str;
    double noise;
    for (int i = 0; i < table_length; i++) {
        mean_str = (char*) stList_get(tokens, mean_offset + i * MODEL_ENTRY_LENGTH);
        sscanf(mean_str, "%lf", &(means[i]));
        noise_str = (char*) stList_get(tokens, noise_offset + i * MODEL_ENTRY_LENGTH);
        sscanf(noise_str, "%lf", &noise);
        // store precision = 1 / variance of the noise
        precisions[i] = 1.0 / (noise * noise);
    }
    free(line);
    stList_destruct(tokens);

    mle_normal_inverse_gamma_params(means, precisions, table_length, mu_out, nu_out, alpha_out, beta_out);

    free(means);
    free(precisions);
    fclose(model_file);
}

// fixed concentration parameters 'gamma' for each depth
HierarchicalDirichletProcess* minION_hdp(int64_t num_dps, int64_t depth, double* gamma,
                                         double sampling_grid_start, double sampling_grid_stop,
                                         int64_t sampling_grid_length, const char* model_filepath) {
    double mu, nu, alpha, beta;
    normal_inverse_gamma_params_from_minION(model_filepath, &mu, &nu, &alpha, &beta);
    return new_hier_dir_proc(num_dps, depth, gamma, sampling_grid_start, sampling_grid_stop,
                             sampling_grid_length, mu, nu, alpha, beta);
}

// Gamma distribution prior on the concentration parameters 'gamma'
// must designate vector of 'alpha' and 'beta' parameters of distribution for each depth
HierarchicalDirichletProcess* minION_hdp_2(int64_t num_dps, int64_t depth, double* gamma_alpha,
                                           double* gamma_beta, double sampling_grid_start,
                                           double sampling_grid_stop, int64_t sampling_grid_length,
                                           const char* model_filepath) {
    double mu, nu, alpha, beta;
    normal_inverse_gamma_params_from_minION(model_filepath, &mu, &nu, &alpha, &beta);
    return new_hier_dir_proc_2(num_dps, depth, gamma_alpha, gamma_beta, sampling_grid_start,
                               sampling_grid_stop, sampling_grid_length, mu, nu, alpha, beta);
}

// Load all rows from an alignment file (no strand filtering).
void update_nhdp_from_alignment(NanoporeHDP* nhdp, const char* alignment_filepath, bool has_header) {
    update_nhdp_from_alignment_with_filter(nhdp, alignment_filepath, has_header, NULL);
}

// Parse the alignment TSV, keep rows whose strand column equals strand_filter
// (or all rows when strand_filter is NULL), convert each row's k-mer to a DP
// id and its signal to a double, then RESET the HDP's data and hand it the
// collected (signal, dp_id) arrays (ownership transfers to the HDP).
void update_nhdp_from_alignment_with_filter(NanoporeHDP* nhdp, const char* alignment_filepath,
                                            bool has_header, const char* strand_filter) {
    stList* signal_list = stList_construct3(0, &free);
    stList* dp_id_list = stList_construct3(0, &free);

    FILE* align_file = fopen(alignment_filepath, "r");
    if (align_file == NULL) {
        fprintf(stderr, "Alignment %s file does not exist.\n", alignment_filepath);
        exit(EXIT_FAILURE);
    }

    stList* tokens;
    int64_t line_length;
    char* kmer;
    char* strand;
    char* signal_str;
    int64_t* dp_id_ptr;
    double* signal_ptr;
    bool warned = false;     // warn at most once about unexpected column count
    int proceed = 0;         // 0 means "keep this row" (strcmp convention)
    char* line = stFile_getLineFromFile(align_file);
    if (has_header) {
        // NOTE(review): the header line fetched first is leaked here
        // (reassigned without free) — confirm.
        line = stFile_getLineFromFile(align_file);
    }
    while (line != NULL) {
        tokens = stString_split(line);
        line_length = stList_length(tokens);
        if (!warned) {
            if (line_length != NUM_ALIGNMENT_COLS) {
                fprintf(stderr, "Input format has changed from design period, HDP may receive incorrect data.\n");
                warned = true;
            }
        }
        strand = (char*) stList_get(tokens, ALIGNMENT_STRAND_COL);
        if (strand_filter != NULL) {
            proceed = strcmp(strand, strand_filter);
        }
        if (proceed == 0) {
            signal_str = (char*) stList_get(tokens, ALIGNMENT_SIGNAL_COL);
            kmer = (char*) stList_get(tokens, ALIGNMENT_KMER_COL);
            signal_ptr = (double*) malloc(sizeof(double));
            dp_id_ptr = (int64_t*) malloc(sizeof(int64_t));
            sscanf(signal_str, "%lf", signal_ptr);
            *dp_id_ptr = kmer_id(kmer, nhdp->alphabet, nhdp->alphabet_size, nhdp->kmer_length);
            stList_append(signal_list, signal_ptr);
            stList_append(dp_id_list, dp_id_ptr);
        }
        stList_destruct(tokens);
        free(line);
        line = stFile_getLineFromFile(align_file);
    }
    fclose(align_file);

    int64_t data_length;
    double* signal = stList_toDoublePtr(signal_list, &data_length);
    int64_t* dp_ids = stList_toIntPtr(dp_id_list, &data_length);
    stList_destruct(signal_list);
    stList_destruct(dp_id_list);

    reset_hdp_data(nhdp->hdp);
    pass_data_to_hdp(nhdp->hdp, signal, dp_ids, data_length);
}

// n^k by repeated multiplication (no overflow check; small n, k expected)
int64_t power(int64_t n, int64_t k) {
    int64_t num = 1;
    for (int64_t i = 0; i < k; i++) {
        num *= n;
    }
    return num;
}

// ((n k)) — number of multisets of size k from n symbols = C(n+k-1, k)
int64_t multiset_number(int64_t n, int64_t k) {
    int64_t num = 1;
    for (int64_t m = n + k - 1; m >= n; m--) {
        num *= m;
    }
    for (int64_t m = k; m >= 2; m--) {
        num /= m;
    }
    return num;
}

// Decode a word id into base-alphabet_size digits, most significant first.
// Returns a malloc'd array of word_length entries; caller frees.
int64_t* get_word(int64_t word_id, int64_t alphabet_size, int64_t word_length) {
    int64_t* word = (int64_t*) malloc(sizeof(int64_t) * word_length);
    int64_t id_remainder = word_id;
    for (int64_t i = 0; i < word_length; i++) {
        word[word_length - i - 1] = id_remainder % alphabet_size;
        id_remainder /= alphabet_size;
    }
    return word;
}

// Word digits sorted ascending — the canonical multiset representation.
int64_t* get_word_multiset(int64_t word_id, int64_t alphabet_size, int64_t word_length) {
    int64_t* multiset = get_word(word_id, alphabet_size, word_length);

    // selection sort 'cause whatever
    int64_t min_idx;
    int64_t temp;
    for (int64_t i = 0; i < word_length; i++) {
        min_idx = i;
        for (int64_t j = i + 1; j < word_length; j++) {
            if (multiset[j] < multiset[min_idx]) {
                min_idx = j;
            }
        }
        temp = multiset[i];
        multiset[i] = multiset[min_idx];
        multiset[min_idx] = temp;
    }

    return multiset;
}

// Recursive rank of a SORTED multiset among all multisets over
// [alphabet_min, alphabet_size); exits if a symbol is out of range.
int64_t multiset_id_internal(int64_t* tail, int64_t tail_length, int64_t alphabet_min, int64_t alphabet_size) {
    int64_t head = tail[0];
    if (tail_length == 1) {
        return head - alphabet_min;
    }
    int64_t step = 0;
    for (int64_t i = alphabet_min; i < alphabet_size; i++) {
        if (head > i) {
            // skip all multisets whose first symbol is i
            step += multiset_number(alphabet_size - i, tail_length - 1);
        }
        else {
            return step + multiset_id_internal(&(tail[1]), tail_length - 1, i, alphabet_size);
        }
    }
    fprintf(stderr, "Character outside alphabet included in multiset\n");
    exit(EXIT_FAILURE);
}

// Rank of a sorted multiset over the full alphabet [0, alphabet_size).
int64_t multiset_id(int64_t* multiset, int64_t length, int64_t alphabet_size) {
    return multiset_id_internal(multiset, length, 0, alphabet_size);
}

// word id -> id of its canonical multiset
int64_t word_id_to_multiset_id(int64_t word_id, int64_t alphabet_size, int64_t word_length) {
    int64_t* multiset = get_word_multiset(word_id, alphabet_size, word_length);
    int64_t id = multiset_id(multiset, word_length, alphabet_size);
    free(multiset);
    return id;
}

// Encode digit array -> integer id (inverse of get_word).
int64_t word_id(int64_t* word, int64_t alphabet_size, int64_t word_length) {
    int64_t id = 0;
    int64_t step = 1;
    for (int64_t i = word_length - 1; i >= 0; i--) {
        id += step * word[i];
        step *= alphabet_size;
    }
    return id;
}

// Translate a k-mer string into digit indices via linear alphabet search;
// exits with a diagnostic if a character is not in the alphabet.
int64_t* kmer_to_word(char* kmer, char* alphabet, int64_t alphabet_size, int64_t kmer_length) {
    int64_t* word = (int64_t*) malloc(sizeof(int64_t) * kmer_length);
    for (int64_t i = 0; i < kmer_length; i++) {
        int64_t j = 0;
        while (kmer[i] != alphabet[j]) {
            j++;
            if (j == alphabet_size) {
                fprintf(stderr, "[signalAlign] - ERROR: K-mer contains character outside alphabet. "
                        "Got offending kmer is: %s. alphabet is %s kmer length %"PRId64"\n",
                        kmer, alphabet, kmer_length);
                exit(EXIT_FAILURE);
            }
        }
        word[i] = j;
    }
    return word;
}

// k-mer string -> integer id under the given (sorted) alphabet
int64_t kmer_id(char* kmer, char* alphabet, int64_t alphabet_size, int64_t kmer_length) {
    int64_t* word = kmer_to_word(kmer, alphabet, alphabet_size, kmer_length);
    int64_t id = word_id(word, alphabet_size, kmer_length);
    free(word);
    return id;
}

// convenience: standard DNA alphabet
int64_t standard_kmer_id(char* kmer, int64_t kmer_length) {
    return kmer_id(kmer, "ACGT", 4, kmer_length);
}

// k-mer id under this NanoporeHDP's own alphabet/length
int64_t nhdp_kmer_id(NanoporeHDP* nhdp, char* kmer) {
    return kmer_id(kmer, nhdp->alphabet, nhdp->alphabet_size, nhdp->kmer_length);
}

// Density of signal value *x under the k-mer's DP; LOG_ZERO for a NULL k-mer.
double get_nanopore_kmer_density(NanoporeHDP* nhdp, void *kmer, void *x) {
    if (kmer == NULL) {
        return LOG_ZERO;
    } else {
        double u = *(double *)x;
        //return dir_proc_density(nhdp->hdp, *(double *) x, nhdp_kmer_id(nhdp, (char *)kmer));
        return dir_proc_density(nhdp->hdp, u, nhdp_kmer_id(nhdp, (char *)kmer));
    }
}

// Distance between two k-mers' distributions under a memoized metric.
double get_kmer_distr_distance(NanoporeDistributionMetricMemo* memo, char* kmer_1, char* kmer_2) {
    NanoporeHDP* nhdp = memo->nhdp;
    return get_dir_proc_distance(memo->memo, nhdp_kmer_id(nhdp, kmer_1), nhdp_kmer_id(nhdp, kmer_2));
}

// Pair a DistributionMetricMemo with its NanoporeHDP (memo lifetime is owned
// by the HDP — see note in package_nanopore_hdp).
NanoporeDistributionMetricMemo* package_nanopore_metric_memo(NanoporeHDP* nhdp, DistributionMetricMemo* memo) {
    NanoporeDistributionMetricMemo* nanopore_memo =
            (NanoporeDistributionMetricMemo*) malloc(sizeof(NanoporeDistributionMetricMemo));
    nanopore_memo->nhdp = nhdp;
    nanopore_memo->memo = memo;
    return nanopore_memo;
}

// memo constructors for the four supported distribution metrics
NanoporeDistributionMetricMemo* new_nhdp_kl_divergence_memo(NanoporeHDP* nhdp) {
    return package_nanopore_metric_memo(nhdp, new_kl_divergence_memo(nhdp->hdp));
}

NanoporeDistributionMetricMemo* new_nhdp_hellinger_distance_memo(NanoporeHDP* nhdp) {
    return package_nanopore_metric_memo(nhdp, new_hellinger_distance_memo(nhdp->hdp));
}

NanoporeDistributionMetricMemo* new_nhdp_l2_distance_memo(NanoporeHDP* nhdp) {
    return package_nanopore_metric_memo(nhdp, new_l2_distance_memo(nhdp->hdp));
}

NanoporeDistributionMetricMemo* new_nhdp_shannon_jensen_distance_memo(NanoporeHDP* nhdp) {
    return package_nanopore_metric_memo(nhdp, new_shannon_jensen_distance_memo(nhdp->hdp));
}

// cross-model comparisons: k-mer distribution in one NHDP vs another
double compare_nhdp_distrs_kl_divergence(NanoporeHDP* nhdp_1, char* kmer_1,
                                         NanoporeHDP* nhdp_2, char* kmer_2) {
    return compare_hdp_distrs_kl_divergence(nhdp_1->hdp, nhdp_kmer_id(nhdp_1, kmer_1),
                                            nhdp_2->hdp, nhdp_kmer_id(nhdp_2, kmer_2));
}

double compare_nhdp_distrs_l2_distance(NanoporeHDP* nhdp_1, char* kmer_1,
                                       NanoporeHDP* nhdp_2, char* kmer_2) {
    return compare_hdp_distrs_l2_distance(nhdp_1->hdp, nhdp_kmer_id(nhdp_1, kmer_1),
                                          nhdp_2->hdp, nhdp_kmer_id(nhdp_2, kmer_2));
}

double compare_nhdp_distrs_shannon_jensen_distance(NanoporeHDP* nhdp_1, char* kmer_1,
                                                   NanoporeHDP* nhdp_2, char* kmer_2) {
    return compare_hdp_distrs_shannon_jensen_distance(nhdp_1->hdp, nhdp_kmer_id(nhdp_1, kmer_1),
                                                      nhdp_2->hdp, nhdp_kmer_id(nhdp_2, kmer_2));
}

double compare_nhdp_distrs_hellinger_distance(NanoporeHDP* nhdp_1, char* kmer_1,
                                              NanoporeHDP* nhdp_2, char* kmer_2) {
    return compare_hdp_distrs_hellinger_distance(nhdp_1->hdp, nhdp_kmer_id(nhdp_1, kmer_1),
                                                 nhdp_2->hdp, nhdp_kmer_id(nhdp_2, kmer_2));
}

// first two moments of a k-mer's signal distribution
double kmer_distr_expected_val(NanoporeHDP* nhdp, char* kmer) {
    return dir_proc_expected_val(nhdp->hdp, nhdp_kmer_id(nhdp, kmer));
}

double kmer_distr_variance(NanoporeHDP* nhdp, char* kmer) {
    return dir_proc_variance(nhdp->hdp, nhdp_kmer_id(nhdp, kmer));
}

// --- flat topology: every k-mer leaf hangs off one base DP -----------------

int64_t flat_hdp_num_dps(int64_t alphabet_size, int64_t kmer_length) {
    int64_t num_leaves = power(alphabet_size, kmer_length);
    return num_leaves + 1;
}

// point every leaf at the single base DP (id = last_dp_id)
void flat_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t alphabet_size, int64_t kmer_length) {
    int64_t last_dp_id = power(alphabet_size, kmer_length);
    for (int64_t id = 0; id < last_dp_id; id++) {
        set_dir_proc_parent(hdp, id, last_dp_id);
    }
}

NanoporeHDP* flat_hdp_model(const char* alphabet, int64_t alphabet_size, int64_t kmer_length,
                            double base_gamma, double leaf_gamma,
                            double sampling_grid_start, double sampling_grid_stop,
                            int64_t sampling_grid_length, const char* model_filepath) {
    double* gamma_params = (double*) malloc(sizeof(double) * 2);
    gamma_params[0] = base_gamma;
    gamma_params[1] = leaf_gamma;

    int64_t num_dps = flat_hdp_num_dps(alphabet_size, kmer_length);

    HierarchicalDirichletProcess* hdp = minION_hdp(num_dps, 2, gamma_params, sampling_grid_start,
                                                  sampling_grid_stop, sampling_grid_length,
                                                  model_filepath);

    flat_hdp_model_internal(hdp, alphabet_size, kmer_length);

    finalize_hdp_structure(hdp);

    NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);

    return nhdp;
}

// same topology, but gamma drawn from a Gamma(alpha, beta) prior per depth
NanoporeHDP* flat_hdp_model_2(const char* alphabet, int64_t alphabet_size, int64_t kmer_length,
                              double base_gamma_alpha, double base_gamma_beta,
                              double leaf_gamma_alpha, double leaf_gamma_beta,
                              double sampling_grid_start, double sampling_grid_stop,
                              int64_t sampling_grid_length, const char* model_filepath) {
    double* gamma_alpha = (double*) malloc(sizeof(double) * 2);
    gamma_alpha[0] = base_gamma_alpha;
    gamma_alpha[1] = leaf_gamma_alpha;

    double* gamma_beta = (double*) malloc(sizeof(double) * 2);
    gamma_beta[0] = base_gamma_beta;
    gamma_beta[1] = leaf_gamma_beta;

    int64_t num_dps = flat_hdp_num_dps(alphabet_size, kmer_length);

    HierarchicalDirichletProcess* hdp = minION_hdp_2(num_dps, 2, gamma_alpha, gamma_beta,
                                                     sampling_grid_start, sampling_grid_stop,
                                                     sampling_grid_length, model_filepath);

    flat_hdp_model_internal(hdp, alphabet_size, kmer_length);

    finalize_hdp_structure(hdp);

    NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);

    return nhdp;
}

// --- multiset topology: k-mer -> its character multiset -> base -----------

int64_t multiset_hdp_num_dps(int64_t alphabet_size, int64_t kmer_length) {
    int64_t num_leaves = power(alphabet_size, kmer_length);
    int64_t num_middle_dps = multiset_number(alphabet_size, kmer_length);
    return num_leaves + num_middle_dps + 1;
}

void multiset_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t alphabet_size, int64_t kmer_length) {
    int64_t num_leaves = power(alphabet_size, kmer_length);
    int64_t num_middle_dps = multiset_number(alphabet_size, kmer_length);

    // set kmer parents to multisets
    int64_t multiset_id;
    for (int64_t kmer_id = 0; kmer_id < num_leaves; kmer_id++) {
        multiset_id = word_id_to_multiset_id(kmer_id, alphabet_size, kmer_length);
        set_dir_proc_parent(hdp, kmer_id, num_leaves + multiset_id);
    }

    // set multiset parents to base dp
    int64_t last_dp_id = num_leaves + num_middle_dps;
    for (int64_t middle_dp_id = num_leaves; middle_dp_id < last_dp_id; middle_dp_id++) {
        set_dir_proc_parent(hdp, middle_dp_id, last_dp_id);
    }
}

NanoporeHDP* multiset_hdp_model(const char* alphabet, int64_t alphabet_size, int64_t kmer_length,
                                double base_gamma, double middle_gamma, double leaf_gamma,
                                double sampling_grid_start, double sampling_grid_stop,
                                int64_t sampling_grid_length, const char* model_filepath) {
    double* gamma_params = (double*) malloc(sizeof(double) * 3);
    gamma_params[0] = base_gamma;
    gamma_params[1] = middle_gamma;
    gamma_params[2] = leaf_gamma;

    int64_t num_dps = multiset_hdp_num_dps(alphabet_size, kmer_length);

    HierarchicalDirichletProcess* hdp = minION_hdp(num_dps, 3, gamma_params, sampling_grid_start,
                                                  sampling_grid_stop, sampling_grid_length,
                                                  model_filepath);

    multiset_hdp_model_internal(hdp, alphabet_size, kmer_length);

    finalize_hdp_structure(hdp);

    NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);

    return nhdp;
}

NanoporeHDP* multiset_hdp_model_2(const char* alphabet, int64_t alphabet_size, int64_t kmer_length,
                                  double base_gamma_alpha, double base_gamma_beta,
                                  double middle_gamma_alpha, double middle_gamma_beta,
                                  double leaf_gamma_alpha, double leaf_gamma_beta,
                                  double sampling_grid_start, double sampling_grid_stop,
                                  int64_t sampling_grid_length, const char* model_filepath) {
    double* gamma_alpha = (double*) malloc(sizeof(double) * 3);
    gamma_alpha[0] = base_gamma_alpha;
    gamma_alpha[1] = middle_gamma_alpha;
    gamma_alpha[2] = leaf_gamma_alpha;

    double* gamma_beta = (double*) malloc(sizeof(double) * 3);
    gamma_beta[0] = base_gamma_beta;
    gamma_beta[1] = middle_gamma_beta;
    gamma_beta[2] = leaf_gamma_beta;

    int64_t num_dps = multiset_hdp_num_dps(alphabet_size, kmer_length);

    HierarchicalDirichletProcess* hdp = minION_hdp_2(num_dps, 3, gamma_alpha, gamma_beta,
                                                     sampling_grid_start, sampling_grid_stop,
                                                     sampling_grid_length, model_filepath);

    multiset_hdp_model_internal(hdp, alphabet_size, kmer_length);

    finalize_hdp_structure(hdp);

    NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);

    return nhdp;
}

// --- middle-2-nucleotides topology ----------------------------------------

int64_t middle_2_nts_hdp_num_dps(int64_t alphabet_size, int64_t kmer_length) {
    if (kmer_length <= 2) {
        fprintf(stderr, "k-mer is not long enough for middle 2 nucleotides HDP\n");
        exit(EXIT_FAILURE);
    }
    return power(alphabet_size, kmer_length) + power(alphabet_size, 2) + 1;
}

// id of the (kmer_length/2 - 1, kmer_length/2) character pair of a k-mer
int64_t kmer_id_to_middle_nts_id(int64_t kmer_id, int64_t alphabet_size, int64_t kmer_length) {
    int64_t* kmer = get_word(kmer_id, alphabet_size, kmer_length);
    int64_t id = alphabet_size * kmer[kmer_length / 2 - 1] + kmer[kmer_length / 2];
    free(kmer);
    return id;
}

void middle_2_nts_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t alphabet_size,
                                     int64_t kmer_length) {
    int64_t num_leaves = power(alphabet_size, kmer_length);
    int64_t num_middle_dps = power(alphabet_size, 2);

    // each leaf's parent is the DP for its middle nucleotide pair
    int64_t middle_dp_id;
    for (int64_t kmer_id = 0; kmer_id < num_leaves; kmer_id++) {
        middle_dp_id = kmer_id_to_middle_nts_id(kmer_id, alphabet_size, kmer_length);
        set_dir_proc_parent(hdp, kmer_id, middle_dp_id + num_leaves);
    }

    int64_t last_dp_id = num_leaves + num_middle_dps;
    for (int64_t id = num_leaves; id < last_dp_id; id++) {
        set_dir_proc_parent(hdp, id, last_dp_id);
    }
}

NanoporeHDP* middle_2_nts_hdp_model(const char* alphabet, int64_t alphabet_size, int64_t kmer_length,
                                    double base_gamma, double middle_gamma, double leaf_gamma,
                                    double sampling_grid_start, double sampling_grid_stop,
                                    int64_t sampling_grid_length, const char* model_filepath) {
    if (kmer_length % 2 != 0) {
        fprintf(stderr, "Warning: middle two nucleotides of odd length kmer is ambiguous. Resolving arbitrarily.\n");
    }

    double* gamma_params = (double*) malloc(sizeof(double) * 3);
    gamma_params[0] = base_gamma;
    gamma_params[1] = middle_gamma;
    gamma_params[2] = leaf_gamma;

    int64_t num_dps = middle_2_nts_hdp_num_dps(alphabet_size, kmer_length);

    HierarchicalDirichletProcess* hdp = minION_hdp(num_dps, 3, gamma_params, sampling_grid_start,
                                                  sampling_grid_stop, sampling_grid_length,
                                                  model_filepath);

    middle_2_nts_hdp_model_internal(hdp, alphabet_size, kmer_length);

    finalize_hdp_structure(hdp);

    NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);

    return nhdp;
}

// --- character-group multiset topology ------------------------------------

// Map each character of the word to its group, sort, and rank the resulting
// multiset over `num_groups` symbols.
int64_t word_id_to_group_multiset_id(int64_t word_id, int64_t* char_groups, int64_t alphabet_size,
                                     int64_t word_length, int64_t num_groups) {
    int64_t* word = get_word(word_id, alphabet_size, word_length);
    for (int64_t i = 0; i < word_length; i++) {
        word[i] = char_groups[word[i]];
    }

    // selection sort into canonical (ascending) multiset order
    int64_t min_idx;
    int64_t temp;
    for (int64_t i = 0; i < word_length; i++) {
        min_idx = i;
        for (int64_t j = i + 1; j < word_length; j++) {
            if (word[j] < word[min_idx]) {
                min_idx = j;
            }
        }
        temp = word[i];
        word[i] = word[min_idx];
        word[min_idx] = temp;
    }

    int64_t id = multiset_id(word, word_length, num_groups);
    free(word);
    return id;
}

int64_t group_multiset_hdp_num_dps(int64_t alphabet_size, int64_t* char_groups, int64_t kmer_length) {
    // num_groups = 1 + max group label (groups are 0-based, consecutive)
    int64_t num_groups = 0;
    for (int64_t i = 0; i < alphabet_size; i++) {
        if (char_groups[i] + 1 > num_groups) {
            num_groups = char_groups[i] + 1;
        }
    }
    int64_t num_leaves = power(alphabet_size, kmer_length);
    int64_t num_middle_dps = multiset_number(num_groups, kmer_length);
    return num_leaves + num_middle_dps + 1;
}

void group_multiset_hdp_model_internal(HierarchicalDirichletProcess* hdp, int64_t* char_groups,
                                       int64_t alphabet_size, int64_t kmer_length) {
    int64_t num_groups = 0;
    for (int64_t i = 0; i < alphabet_size; i++) {
        if (char_groups[i] + 1 > num_groups) {
            num_groups = char_groups[i] + 1;
        }
    }

    int64_t num_leaves = power(alphabet_size, kmer_length);
    int64_t num_middle_dps = multiset_number(num_groups, kmer_length);

    // set kmer parents to multisets
    int64_t multiset_id;
    for (int64_t kmer_id = 0; kmer_id < num_leaves; kmer_id++) {
        multiset_id = word_id_to_group_multiset_id(kmer_id, char_groups, alphabet_size, kmer_length,
                                                   num_groups);
        set_dir_proc_parent(hdp, kmer_id, num_leaves + multiset_id);
    }

    // set multiset parents to base dp
    int64_t last_dp_id = num_leaves + num_middle_dps;
    for (int64_t middle_dp_id = num_leaves; middle_dp_id < last_dp_id; middle_dp_id++) {
        set_dir_proc_parent(hdp, middle_dp_id, last_dp_id);
    }
}

// Abort unless group labels are non-negative and consecutively numbered from 0.
void confirm_valid_groupings(int64_t* char_groups, int64_t alphabet_size) {
    for (int64_t i = 0; i < alphabet_size; i++) {
        if (char_groups[i] < 0) {
            fprintf(stderr, "Group numbers must be non-negative.\n");
            exit(EXIT_FAILURE);
        }
    }

    int64_t num_groups = 0;
    for (int64_t i = 0; i < alphabet_size; i++) {
        if (char_groups[i] + 1 > num_groups) {
            num_groups = char_groups[i] + 1;
        }
    }

    for (int64_t i = 0; i < num_groups; i++) {
        bool found_group = false;
        for (int64_t j = 0; j < alphabet_size; j++) {
            if (char_groups[j] == i) {
                found_group = true;
                break;
            }
        }
        if (!found_group) {
            fprintf(stderr, "Groups must be consecutively numbered starting with 0.\n");
            exit(EXIT_FAILURE);
        }
    }
}

// Return char_groups permuted to match the alphabetically-sorted alphabet
// (the ordering package_nanopore_hdp uses internally); caller frees.
int64_t* alphabet_sort_groups(const char* alphabet, int64_t* char_groups, int64_t alphabet_size) {
    char* aux_alphabet = (char*) malloc(sizeof(char) * alphabet_size);
    int64_t* sorted_char_groups = (int64_t*) malloc(sizeof(int64_t) * alphabet_size);
    for (int64_t i = 0; i < alphabet_size; i++) {
        aux_alphabet[i] = alphabet[i];
        sorted_char_groups[i] = char_groups[i];
    }

    // selection sort the alphabet, carrying the group labels along
    int64_t temp_group;
    char temp_char;
    int64_t min_idx;
    for (int64_t i = 0; i < alphabet_size; i++) {
        min_idx = i;
        for (int64_t j = i + 1; j < alphabet_size; j++) {
            if (aux_alphabet[j] < aux_alphabet[min_idx]) {
                min_idx = j;
            }
        }
        temp_char = aux_alphabet[i];
        aux_alphabet[i] = aux_alphabet[min_idx];
        aux_alphabet[min_idx] = temp_char;

        temp_group = sorted_char_groups[i];
        sorted_char_groups[i] = sorted_char_groups[min_idx];
        sorted_char_groups[min_idx] = temp_group;
    }

    free(aux_alphabet);
    return sorted_char_groups;
}

// assumes char_groups are 0-based and consecutively numbered
NanoporeHDP* group_multiset_hdp_model(const char* alphabet, int64_t* char_groups, int64_t alphabet_size,
                                      int64_t kmer_length, double base_gamma, double middle_gamma,
                                      double leaf_gamma, double sampling_grid_start,
                                      double sampling_grid_stop, int64_t sampling_grid_length,
                                      const char* model_filepath) {
    confirm_valid_groupings(char_groups, alphabet_size);

    double* gamma_params = (double*) malloc(sizeof(double) * 3);
    gamma_params[0] = base_gamma;
    gamma_params[1] = middle_gamma;
    gamma_params[2] = leaf_gamma;

    int64_t num_dps = group_multiset_hdp_num_dps(alphabet_size, char_groups, kmer_length);

    HierarchicalDirichletProcess* hdp = minION_hdp(num_dps, 3, gamma_params, sampling_grid_start,
                                                   sampling_grid_stop, sampling_grid_length,
                                                   model_filepath);

    int64_t* sorted_char_groups = alphabet_sort_groups(alphabet, char_groups, alphabet_size);
    group_multiset_hdp_model_internal(hdp, sorted_char_groups, alphabet_size, kmer_length);
    free(sorted_char_groups);

    finalize_hdp_structure(hdp);

    NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);

    return nhdp;
}

// assumes char_groups are 0-based and consecutively numbered
NanoporeHDP* group_multiset_hdp_model_2(const char* alphabet, int64_t* char_groups, int64_t alphabet_size,
                                        int64_t kmer_length, double base_gamma_alpha,
                                        double base_gamma_beta, double middle_gamma_alpha,
                                        double middle_gamma_beta, double leaf_gamma_alpha,
                                        double leaf_gamma_beta, double sampling_grid_start,
                                        double sampling_grid_stop, int64_t sampling_grid_length,
                                        const char* model_filepath) {
    confirm_valid_groupings(char_groups, alphabet_size);

    double *gamma_alpha = (double *) malloc(sizeof(double) * 3);
    gamma_alpha[0] = base_gamma_alpha;
    gamma_alpha[1] = middle_gamma_alpha;
    gamma_alpha[2] = leaf_gamma_alpha;

    double *gamma_beta = (double *) malloc(sizeof(double) * 3);
    gamma_beta[0] = base_gamma_beta;
    gamma_beta[1] = middle_gamma_beta;
    gamma_beta[2] = leaf_gamma_beta;

    int64_t num_dps = group_multiset_hdp_num_dps(alphabet_size, char_groups, kmer_length);

    HierarchicalDirichletProcess *hdp = minION_hdp_2(num_dps, 3, gamma_alpha, gamma_beta,
                                                     sampling_grid_start, sampling_grid_stop,
                                                     sampling_grid_length, model_filepath);

    int64_t *sorted_char_groups = alphabet_sort_groups(alphabet, char_groups, alphabet_size);
    group_multiset_hdp_model_internal(hdp, sorted_char_groups, alphabet_size, kmer_length);
    free(sorted_char_groups);

    finalize_hdp_structure(hdp);

    NanoporeHDP *nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);

    return nhdp;
}

NanoporeHDP* middle_2_nts_hdp_model_2(const char* alphabet, int64_t alphabet_size, int64_t kmer_length,
                                      double base_gamma_alpha, double base_gamma_beta,
                                      double middle_gamma_alpha, double middle_gamma_beta,
                                      double leaf_gamma_alpha, double leaf_gamma_beta,
                                      double sampling_grid_start, double sampling_grid_stop,
                                      int64_t sampling_grid_length, const char* model_filepath) {
    if (kmer_length % 2 != 0) {
        fprintf(stderr, "Warning: middle 2 nucleotides of odd length kmer is ambiguous. Resolving arbitrarily.\n");
    }

    double* gamma_alpha = (double*) malloc(sizeof(double) * 3);
    gamma_alpha[0] = base_gamma_alpha;
    gamma_alpha[1] = middle_gamma_alpha;
    gamma_alpha[2] = leaf_gamma_alpha;

    double* gamma_beta = (double*) malloc(sizeof(double) * 3);
    gamma_beta[0] = base_gamma_beta;
    gamma_beta[1] = middle_gamma_beta;
    gamma_beta[2] = leaf_gamma_beta;

    int64_t num_dps = middle_2_nts_hdp_num_dps(alphabet_size, kmer_length);

    HierarchicalDirichletProcess* hdp = minION_hdp_2(num_dps, 3, gamma_alpha, gamma_beta,
                                                     sampling_grid_start, sampling_grid_stop,
                                                     sampling_grid_length, model_filepath);

    middle_2_nts_hdp_model_internal(hdp, alphabet_size, kmer_length);

    finalize_hdp_structure(hdp);

    NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);

    return nhdp;
}

// --- purine-composition topology: middle level = number of purines --------

int64_t purine_composition_hdp_num_dps(int64_t num_purines, int64_t num_pyrimidines, int64_t kmer_length) {
    int64_t num_leaves = power(num_purines + num_pyrimidines, kmer_length);
    int64_t num_middle_dps = kmer_length + 1; // 0..kmer_length purines possible
    return num_leaves + num_middle_dps + 1;
}

void purine_composition_hdp_model_internal(HierarchicalDirichletProcess* hdp, bool* purine_alphabet,
                                           int64_t alphabet_size, int64_t kmer_length) {
    int64_t num_leaves = power(alphabet_size, kmer_length);
    int64_t num_middle_dps = kmer_length + 1;

    // set kmer parents to purine multisets (parent index = purine count)
    int64_t num_purines;
    int64_t* word;
    for (int64_t kmer_id = 0; kmer_id < num_leaves; kmer_id++) {
        word = get_word(kmer_id, alphabet_size, kmer_length);

        num_purines = 0;
        for (int64_t i = 0; i < kmer_length; i++) {
            if (purine_alphabet[word[i]]) {
                num_purines++;
            }
        }
        free(word);

        set_dir_proc_parent(hdp, kmer_id, num_leaves + num_purines);
    }

    // set purine set parents to base dp
    int64_t last_dp_id = num_leaves + num_middle_dps;
    for (int64_t middle_dp_id = num_leaves; middle_dp_id < last_dp_id; middle_dp_id++) {
        set_dir_proc_parent(hdp, middle_dp_id, last_dp_id);
    }
}

NanoporeHDP* purine_composition_hdp_model(char* purine_alphabet, int64_t num_purines,
                                          char* pyrimidine_alphabet, int64_t num_pyrimidines,
                                          int64_t kmer_length, double base_gamma, double middle_gamma,
                                          double leaf_gamma, double sampling_grid_start,
                                          double sampling_grid_stop, int64_t sampling_grid_length,
                                          const char* model_filepath) {
    double* gamma_params = (double*) malloc(sizeof(double) * 3);
    gamma_params[0] = base_gamma;
    gamma_params[1] = middle_gamma;
    gamma_params[2] = leaf_gamma;

    int64_t num_dps = purine_composition_hdp_num_dps(num_purines, num_pyrimidines, kmer_length);

    HierarchicalDirichletProcess* hdp = minION_hdp(num_dps, 3, gamma_params, sampling_grid_start,
                                                   sampling_grid_stop, sampling_grid_length,
                                                   model_filepath);

    int64_t alphabet_size = num_purines + num_pyrimidines;
    char* alphabet = (char*) malloc(sizeof(char) * alphabet_size);
    for (int64_t i = 0; i < num_purines; i++) {
        alphabet[i] = purine_alphabet[i];
    }
    for (int64_t i = 0; i < num_pyrimidines; i++) {
        alphabet[i + num_purines] = pyrimidine_alphabet[i];
    }

    NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);

    // get back the alphabet in the internal ordering
    free(alphabet);
    alphabet = get_nanopore_hdp_alphabet(nhdp);
    bool* purines = (bool*) malloc(sizeof(bool) * alphabet_size);
    // NOTE(review): this loop runs to num_purines, but the purines[] array is
    // alphabet_size long and is indexed up to alphabet_size-1 by
    // purine_composition_hdp_model_internal; the _2 variant below loops to
    // alphabet_size — this bound looks like a bug (uninitialized tail).
    for (int64_t i = 0; i < num_purines; i++) {
        purines[i] = false;
        for (int64_t j = 0; j < num_purines; j++) {
            if (alphabet[i] == purine_alphabet[j]) {
                purines[i] = true;
                break;
            }
        }
    }
    free(alphabet);

    purine_composition_hdp_model_internal(hdp, purines, alphabet_size, kmer_length);

    free(purines);

    finalize_hdp_structure(hdp);

    return nhdp;
}

NanoporeHDP* purine_composition_hdp_model_2(char* purine_alphabet, int64_t num_purines,
                                            char* pyrimidine_alphabet, int64_t num_pyrimidines,
                                            int64_t kmer_length, double base_gamma_alpha,
                                            double base_gamma_beta, double middle_gamma_alpha,
                                            double middle_gamma_beta, double leaf_gamma_alpha,
                                            double leaf_gamma_beta, double sampling_grid_start,
                                            double sampling_grid_stop, int64_t sampling_grid_length,
                                            const char* model_filepath) {
    double* gamma_alpha = (double*) malloc(sizeof(double) * 3);
    gamma_alpha[0] = base_gamma_alpha;
    gamma_alpha[1] = middle_gamma_alpha;
    gamma_alpha[2] = leaf_gamma_alpha;

    double* gamma_beta = (double*) malloc(sizeof(double) * 3);
    gamma_beta[0] = base_gamma_beta;
    gamma_beta[1] = middle_gamma_beta;
    gamma_beta[2] = leaf_gamma_beta;

    int64_t num_dps = purine_composition_hdp_num_dps(num_purines, num_pyrimidines, kmer_length);

    HierarchicalDirichletProcess* hdp = minION_hdp_2(num_dps, 3, gamma_alpha, gamma_beta,
                                                     sampling_grid_start, sampling_grid_stop,
                                                     sampling_grid_length, model_filepath);

    int64_t alphabet_size = num_purines + num_pyrimidines;
    char* alphabet = (char*) malloc(sizeof(char) * alphabet_size);
    for (int64_t i = 0; i < num_purines; i++) {
        alphabet[i] = purine_alphabet[i];
    }
    for (int64_t i = 0; i < num_pyrimidines; i++) {
        alphabet[i + num_purines] = pyrimidine_alphabet[i];
    }

    NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);

    // get back the alphabet in the internal ordering
    free(alphabet);
    alphabet = get_nanopore_hdp_alphabet(nhdp);
    bool* purines = (bool*) malloc(sizeof(bool) * alphabet_size);
    for (int64_t i = 0; i < alphabet_size; i++) {
        purines[i] = false;
        for (int64_t j = 0; j < num_purines; j++) {
            if (alphabet[i] == purine_alphabet[j]) {
                purines[i] = true;
                break;
            }
        }
    }
    free(alphabet);

    purine_composition_hdp_model_internal(hdp, purines, alphabet_size, kmer_length);

    free(purines);

    finalize_hdp_structure(hdp);

    return nhdp;
}

// --- (de)serialization -----------------------------------------------------

// Text header (alphabet_size, alphabet, kmer_length), then the HDP's own
// serialization format.
void serialize_nhdp(NanoporeHDP* nhdp, const char* filepath) {
    FILE* out = fopen(filepath, "w");

    fprintf(out, "%"PRId64"\n", nhdp->alphabet_size);
    fprintf(out, "%s\n", nhdp->alphabet);
    fprintf(out, "%"PRId64"\n", nhdp->kmer_length);

    serialize_hdp(nhdp->hdp, out);

    fclose(out);
}

// Inverse of serialize_nhdp; returns a newly packaged NanoporeHDP.
NanoporeHDP* deserialize_nhdp(const char* filepath) {
    FILE* in = fopen(filepath, "r");

    char* line = stFile_getLineFromFile(in);
    int64_t alphabet_size;
    sscanf(line, "%"SCNd64, &alphabet_size);
    free(line);

    line = stFile_getLineFromFile(in);
    // NOTE(review): buffer holds alphabet_size bytes but "%s" writes
    // alphabet_size characters PLUS a NUL terminator — one-byte overflow;
    // allocating alphabet_size + 1 looks required.
    char* alphabet = (char*) malloc(sizeof(char) * alphabet_size);
    sscanf(line, "%s", alphabet);
    free(line);

    line = stFile_getLineFromFile(in);
    int64_t kmer_length;
    sscanf(line, "%"SCNd64, &kmer_length);
    free(line);

    HierarchicalDirichletProcess* hdp = deserialize_hdp(in);

    fclose(in);

    NanoporeHDP* nhdp = package_nanopore_hdp(hdp, alphabet, alphabet_size, kmer_length);
    free(alphabet);

    return nhdp;
}

// --- hyperparameter validation helpers -------------------------------------

// Abort unless all six Gamma-prior hyperparameters for a 3-level HDP are set.
static void nanoporeHdp_checkThreeLevelPriorParameters(double baseGammaAlpha, double baseGammaBeta,
                                                       double middleGammaAlpha, double middleGammaBeta,
                                                       double leafGammaAlpha, double leafGammaBeta) {
    if ((baseGammaAlpha == NULL_HYPERPARAMETER) || (baseGammaBeta == NULL_HYPERPARAMETER) ||
        (middleGammaAlpha == NULL_HYPERPARAMETER) || (middleGammaBeta == NULL_HYPERPARAMETER) ||
        (leafGammaAlpha == NULL_HYPERPARAMETER) || (leafGammaBeta == NULL_HYPERPARAMETER)) {
        st_errAbort("loadNanoporeHdpFromScratch: You need to provide a alphas and betas for the base, middle, "
                            "and the leaf distributions for the prior for this NanoporeHdp");
    }
}

// Abort unless all three fixed gammas for a 3-level HDP are set.
static void nanoporeHdp_checkThreeLevelFixedParameters(double baseGamma, double middleGamma,
                                                       double leafGamma) {
    if ((baseGamma == NULL_HYPERPARAMETER) || (leafGamma == NULL_HYPERPARAMETER) ||
        (middleGamma == NULL_HYPERPARAMETER)) {
        st_errAbort("loadNanoporeHdpFromScratch: You need to provide a base gamma, middle gamma, and leaf gamma "
                            "for this NanoporeHdpType\n");
    }
}

// Abort unless all four Gamma-prior hyperparameters for a 2-level HDP are set.
static void nanoporeHdp_checkTwoLevelPriorParameters(double baseGammaAlpha, double baseGammaBeta,
                                                     double leafGammaAlpha, double leafGammaBeta) {
    if ((baseGammaAlpha == NULL_HYPERPARAMETER) || (baseGammaBeta == NULL_HYPERPARAMETER) ||
        (leafGammaAlpha == NULL_HYPERPARAMETER) || (leafGammaBeta == NULL_HYPERPARAMETER)) {
        st_errAbort("loadNanoporeHdpFromScratch: You need to provide a alphas and betas for the base and the leaf"
                            "distributions for the prior for this NanoporeHdp");
    }
}

// (definition continues beyond this excerpt)
static NanoporeHDP *loadNanoporeHdpFromScratch(NanoporeHdpType nHdpType, const char
*modelFile, int64_t kmerLength, double baseGamma, double middleGamma, double leafGamma, double baseGammaAlpha, double baseGammaBeta, double middleGammaAlpha, double middleGammaBeta, double leafGammaAlpha, double leafGammaBeta, double samplingGridStart, double samplingGridEnd, int64_t samplingGridLength) { if (nHdpType == singleLevelFixed) { if ((baseGamma == NULL_HYPERPARAMETER) || (leafGamma == NULL_HYPERPARAMETER)) { st_errAbort("loadNanoporeHdpFromScratch: You need to provide a base gamma and leaf gamma " "for this NanoporeHdpType\n"); } NanoporeHDP *nHdp = flat_hdp_model(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength, baseGamma, leafGamma, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == singleLevelPrior) { nanoporeHdp_checkTwoLevelPriorParameters(baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta); NanoporeHDP *nHdp = flat_hdp_model_2(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength, baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == singleLevelPrior2) { nanoporeHdp_checkTwoLevelPriorParameters(baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta); NanoporeHDP *nHdp = flat_hdp_model_2(METHYL_CYTOSINE_ALPHA, SYMBOL_NUMBER, kmerLength, baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == singleLevelPriorEcoli) { nanoporeHdp_checkTwoLevelPriorParameters(baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta); NanoporeHDP *nHdp = flat_hdp_model_2(METHYL_CYTOSINE_ADENOSINE_ALPHA, SYMBOL_NUMBER_METHYL_CA, kmerLength, baseGammaAlpha, baseGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == multisetFixed) { 
nanoporeHdp_checkThreeLevelFixedParameters(baseGamma, middleGamma, leafGamma); NanoporeHDP *nHdp = multiset_hdp_model(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength, baseGamma, middleGamma, leafGamma, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == multisetPrior) { nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta); NanoporeHDP *nHdp = multiset_hdp_model_2(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength, baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == multisetPrior2) { nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta); NanoporeHDP *nHdp = multiset_hdp_model_2(METHYL_CYTOSINE_ALPHA, SYMBOL_NUMBER, kmerLength, baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == multisetPriorEcoli) { nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta); NanoporeHDP *nHdp = multiset_hdp_model_2(METHYL_CYTOSINE_ADENOSINE_ALPHA, SYMBOL_NUMBER_METHYL_CA, kmerLength, baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == compFixed) { nanoporeHdp_checkThreeLevelFixedParameters(baseGamma, middleGamma, leafGamma); NanoporeHDP *nHdp = purine_composition_hdp_model(PURINES, 2, PYRIMIDINES, 4, kmerLength, baseGamma, middleGamma, leafGamma, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return 
nHdp; } if (nHdpType == compPrior) { nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta); NanoporeHDP *nHdp = purine_composition_hdp_model_2(PURINES, 2, PYRIMIDINES, 4, kmerLength, baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == middleNtsFixed) { nanoporeHdp_checkThreeLevelFixedParameters(baseGamma, middleGamma, leafGamma); NanoporeHDP *nHdp = middle_2_nts_hdp_model(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength, baseGamma, middleGamma, leafGamma, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == middleNtsPrior) { nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta); NanoporeHDP *nHdp = middle_2_nts_hdp_model_2(METHYL_HYDROXY_CYTOSINE_ALPHA, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength, baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == groupMultisetFixed) { nanoporeHdp_checkThreeLevelFixedParameters(baseGamma, middleGamma, leafGamma); // ACEGOT // {0, 1, 1, 2, 1, 3} int64_t groups[6] = {0, 1, 1, 2, 1, 3}; NanoporeHDP *nHdp = group_multiset_hdp_model(METHYL_HYDROXY_CYTOSINE_ALPHA, groups, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength, baseGamma, middleGamma, leafGamma, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } if (nHdpType == groupMultisetPrior) { nanoporeHdp_checkThreeLevelPriorParameters(baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta); // ACEGOT // {0, 1, 1, 2, 1, 3} int64_t groups[6] = {0, 1, 1, 2, 1, 3}; NanoporeHDP *nHdp = 
group_multiset_hdp_model_2(METHYL_HYDROXY_CYTOSINE_ALPHA, groups, SYMBOL_NUMBER_EPIGENETIC_C, kmerLength, baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength, modelFile); return nHdp; } else { fprintf(stderr, "loadNanoporeHdpFromScratch: - error making HDP from scratch\n"); exit(EXIT_FAILURE); } } void nanoporeHdp_buildNanoporeHdpFromAlignment(NanoporeHdpType type, int64_t kmerLength, const char *templateModelFile, const char* complementModelFile, const char *alignments, const char *templateHDP, const char *complementHDP, int64_t nbSamples, int64_t burnIn, int64_t thinning, bool verbose, double baseGamma, double middleGamma, double leafGamma, double baseGammaAlpha, double baseGammaBeta, double middleGammaAlpha, double middleGammaBeta, double leafGammaAlpha, double leafGammaBeta, double samplingGridStart, double samplingGridEnd, int64_t samplingGridLength) { fprintf(stderr, "Building Nanopore HDP\n"); #pragma omp parallel sections { { fprintf(stderr, "Updating Template HDP from alignments...\n"); NanoporeHDP *nHdpT = loadNanoporeHdpFromScratch(type, templateModelFile, kmerLength, baseGamma, middleGamma, leafGamma, baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength); update_nhdp_from_alignment_with_filter(nHdpT, alignments, FALSE, "t"); fprintf(stderr, "Running Gibbs for template doing %"PRId64"samples, %"PRId64"burn in, %"PRId64"thinning.\n", nbSamples, burnIn, thinning); execute_nhdp_gibbs_sampling(nHdpT, nbSamples, burnIn, thinning, verbose); finalize_nhdp_distributions(nHdpT); fprintf(stderr, "Serializing template to %s...\n", templateHDP); serialize_nhdp(nHdpT, templateHDP); destroy_nanopore_hdp(nHdpT); } #pragma omp section { fprintf(stderr, "Updating Complement HDP from alignments...\n"); NanoporeHDP *nHdpC = loadNanoporeHdpFromScratch(type, complementModelFile, 
kmerLength, baseGamma, middleGamma, leafGamma, baseGammaAlpha, baseGammaBeta, middleGammaAlpha, middleGammaBeta, leafGammaAlpha, leafGammaBeta, samplingGridStart, samplingGridEnd, samplingGridLength); update_nhdp_from_alignment_with_filter(nHdpC, alignments, FALSE, "c"); fprintf(stderr, "Running Gibbs for complement doing %"PRId64"samples, %"PRId64"burn in, %"PRId64"thinning.\n", nbSamples, burnIn, thinning); execute_nhdp_gibbs_sampling(nHdpC, nbSamples, burnIn, thinning, verbose); finalize_nhdp_distributions(nHdpC); fprintf(stderr, "Serializing complement to %s...\n", complementHDP); serialize_nhdp(nHdpC, complementHDP); destroy_nanopore_hdp(nHdpC); } } }
GB_binop__land_int32.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__land_int32)
// A.*B function (eWiseMult):       GB (_AemultB_08__land_int32)
// A.*B function (eWiseMult):       GB (_AemultB_02__land_int32)
// A.*B function (eWiseMult):       GB (_AemultB_04__land_int32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__land_int32)
// A*D function (colscale):         GB (_AxD__land_int32)
// D*A function (rowscale):         GB (_DxB__land_int32)
// C+=B function (dense accum):     GB (_Cdense_accumB__land_int32)
// C+=b function (dense accum):     GB (_Cdense_accumb__land_int32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__land_int32)
// C=scalar+B                       GB (_bind1st__land_int32)
// C=scalar+B'                     GB (_bind1st_tran__land_int32)
// C=A+scalar                       GB (_bind2nd__land_int32)
// C=A'+scalar                     GB (_bind2nd_tran__land_int32)

// C type:   int32_t
// A type:   int32_t
// A pattern? 0
// B type:   int32_t
// B pattern? 0

// BinaryOp: cij = ((aij != 0) && (bij != 0))

// These macros are consumed by the GB_*_template.c files included by the
// kernels below; their exact names form the template's contract, so none of
// them may be renamed.

#define GB_ATYPE \
    int32_t

#define GB_BTYPE \
    int32_t

#define GB_CTYPE \
    int32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int32_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int32_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) && (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LAND || GxB_NO_INT32 || GxB_NO_LAND_INT32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Compiled out (#if 0): the LAND operator is not one of the accumulate ops
// this kernel supports, hence the "(none)" placeholder name below.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__land_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// Returns GrB_NO_VALUE when GB_DISABLE is set, telling the caller to fall
// back to the generic (non-hard-coded) kernel.  Same for all kernels below.
GrB_Info GB (_Cdense_accumB__land_int32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__land_int32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int32_t
        int32_t bwork = (*((int32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__land_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__land_int32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *restrict Cx = (int32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__land_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read by the template when is_eWiseUnion is true
    int32_t alpha_scalar ;
    int32_t beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((int32_t *) alpha_scalar_in)) ;
        beta_scalar = (*((int32_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__land_int32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__land_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // GB_BINOP_FLIP is 0 for LAND (it is commutative), so only the
    // non-flipped branch below is compiled in.
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__land_int32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__land_int32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__land_int32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t x = (*((int32_t *) x_input)) ;
    int32_t *Bx = (int32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        int32_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) && (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__land_int32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int32_t *Cx = (int32_t *) Cx_output ;
    int32_t *Ax = (int32_t *) Ax_input ;
    int32_t y = (*((int32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        int32_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ((x != 0) && (aij != 0)) ;        \
}

GrB_Info GB (_bind1st_tran__land_int32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t x = (*((const int32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE after the transpose template
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int32_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ((aij != 0) && (y != 0)) ;        \
}

GrB_Info GB (_bind2nd_tran__land_int32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int32_t y = (*((const int32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
OMPIRBuilder.h
//===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the OpenMPIRBuilder class and helpers used as a convenient // way to create LLVM instructions for OpenMP directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H #define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/IR/DebugLoc.h" #include "llvm/IR/IRBuilder.h" #include "llvm/Support/Allocator.h" #include <forward_list> namespace llvm { class CanonicalLoopInfo; /// An interface to create LLVM-IR for OpenMP directives. /// /// Each OpenMP directive has a corresponding public generator method. class OpenMPIRBuilder { public: /// Create a new OpenMPIRBuilder operating on the given module \p M. This will /// not have an effect on \p M (see initialize). OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {} ~OpenMPIRBuilder(); /// Initialize the internal state, this will put structures types and /// potentially other helpers into the underlying module. Must be called /// before any other method and only once! void initialize(); /// Finalize the underlying module, e.g., by outlining regions. /// \param Fn The function to be finalized. If not used, /// all functions are finalized. void finalize(Function *Fn = nullptr); /// Add attributes known for \p FnID to \p Fn. void addAttributes(omp::RuntimeFunction FnID, Function &Fn); /// Type used throughout for insertion points. using InsertPointTy = IRBuilder<>::InsertPoint; /// Callback type for variable finalization (think destructors). 
/// /// \param CodeGenIP is the insertion point at which the finalization code /// should be placed. /// /// A finalize callback knows about all objects that need finalization, e.g. /// destruction, when the scope of the currently generated construct is left /// at the time, and location, the callback is invoked. using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>; struct FinalizationInfo { /// The finalization callback provided by the last in-flight invocation of /// createXXXX for the directive of kind DK. FinalizeCallbackTy FiniCB; /// The directive kind of the innermost directive that has an associated /// region which might require finalization when it is left. omp::Directive DK; /// Flag to indicate if the directive is cancellable. bool IsCancellable; }; /// Push a finalization callback on the finalization stack. /// /// NOTE: Temporary solution until Clang CG is gone. void pushFinalizationCB(const FinalizationInfo &FI) { FinalizationStack.push_back(FI); } /// Pop the last finalization callback from the finalization stack. /// /// NOTE: Temporary solution until Clang CG is gone. void popFinalizationCB() { FinalizationStack.pop_back(); } /// Callback type for body (=inner region) code generation /// /// The callback takes code locations as arguments, each describing a /// location at which code might need to be generated or a location that is /// the target of control transfer. /// /// \param AllocaIP is the insertion point at which new alloca instructions /// should be placed. /// \param CodeGenIP is the insertion point at which the body code should be /// placed. /// \param ContinuationBB is the basic block target to leave the body. /// /// Note that all blocks pointed to by the arguments have terminators. 
using BodyGenCallbackTy = function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, BasicBlock &ContinuationBB)>; // This is created primarily for sections construct as llvm::function_ref // (BodyGenCallbackTy) is not storable (as described in the comments of // function_ref class - function_ref contains non-ownable reference // to the callable. using StorableBodyGenCallbackTy = std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP, BasicBlock &ContinuationBB)>; /// Callback type for loop body code generation. /// /// \param CodeGenIP is the insertion point where the loop's body code must be /// placed. This will be a dedicated BasicBlock with a /// conditional branch from the loop condition check and /// terminated with an unconditional branch to the loop /// latch. /// \param IndVar is the induction variable usable at the insertion point. using LoopBodyGenCallbackTy = function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>; /// Callback type for variable privatization (think copy & default /// constructor). /// /// \param AllocaIP is the insertion point at which new alloca instructions /// should be placed. /// \param CodeGenIP is the insertion point at which the privatization code /// should be placed. /// \param Original The value being copied/created, should not be used in the /// generated IR. /// \param Inner The equivalent of \p Original that should be used in the /// generated IR; this is equal to \p Original if the value is /// a pointer and can thus be passed directly, otherwise it is /// an equivalent but different value. /// \param ReplVal The replacement value, thus a copy or new created version /// of \p Inner. /// /// \returns The new insertion point where code generation continues and /// \p ReplVal the replacement value. 
using PrivatizeCallbackTy = function_ref<InsertPointTy( InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original, Value &Inner, Value *&ReplVal)>; /// Description of a LLVM-IR insertion point (IP) and a debug/source location /// (filename, line, column, ...). struct LocationDescription { LocationDescription(const IRBuilderBase &IRB) : IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {} LocationDescription(const InsertPointTy &IP) : IP(IP) {} LocationDescription(const InsertPointTy &IP, const DebugLoc &DL) : IP(IP), DL(DL) {} InsertPointTy IP; DebugLoc DL; }; /// Emitter methods for OpenMP directives. /// ///{ /// Generator for '#omp barrier' /// /// \param Loc The location where the barrier directive was encountered. /// \param DK The kind of directive that caused the barrier. /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier. /// \param CheckCancelFlag Flag to indicate a cancel barrier return value /// should be checked and acted upon. /// /// \returns The insertion point after the barrier. InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK, bool ForceSimpleCall = false, bool CheckCancelFlag = true); /// Generator for '#omp cancel' /// /// \param Loc The location where the directive was encountered. /// \param IfCondition The evaluated 'if' clause expression, if any. /// \param CanceledDirective The kind of directive that is cancled. /// /// \returns The insertion point after the barrier. InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition, omp::Directive CanceledDirective); /// Generator for '#omp parallel' /// /// \param Loc The insert and source location description. /// \param AllocaIP The insertion points to be used for alloca instructions. /// \param BodyGenCB Callback that will generate the region code. /// \param PrivCB Callback to copy a given variable (think copy constructor). /// \param FiniCB Callback to finalize variable copies. 
/// \param IfCondition The evaluated 'if' clause expression, if any. /// \param NumThreads The evaluated 'num_threads' clause expression, if any. /// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind). /// \param IsCancellable Flag to indicate a cancellable parallel region. /// /// \returns The insertion position *after* the parallel. IRBuilder<>::InsertPoint createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP, BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, Value *IfCondition, Value *NumThreads, omp::ProcBindKind ProcBind, bool IsCancellable); /// Generator for the control flow structure of an OpenMP canonical loop. /// /// This generator operates on the logical iteration space of the loop, i.e. /// the caller only has to provide a loop trip count of the loop as defined by /// base language semantics. The trip count is interpreted as an unsigned /// integer. The induction variable passed to \p BodyGenCB will be of the same /// type and run from 0 to \p TripCount - 1. It is up to the callback to /// convert the logical iteration variable to the loop counter variable in the /// loop body. /// /// \param Loc The insert and source location description. The insert /// location can be between two instructions or the end of a /// degenerate block (e.g. a BB under construction). /// \param BodyGenCB Callback that will generate the loop body code. /// \param TripCount Number of iterations the loop body is executed. /// \param Name Base name used to derive BB and instruction names. /// /// \returns An object representing the created control flow structure which /// can be used for loop-associated directives. CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *TripCount, const Twine &Name = "loop"); /// Generator for the control flow structure of an OpenMP canonical loop. 
/// /// Instead of a logical iteration space, this allows specifying user-defined /// loop counter values using increment, upper- and lower bounds. To /// disambiguate the terminology when counting downwards, instead of lower /// bounds we use \p Start for the loop counter value in the first body /// iteration. /// /// Consider the following limitations: /// /// * A loop counter space over all integer values of its bit-width cannot be /// represented. E.g using uint8_t, its loop trip count of 256 cannot be /// stored into an 8 bit integer): /// /// DO I = 0, 255, 1 /// /// * Unsigned wrapping is only supported when wrapping only "once"; E.g. /// effectively counting downwards: /// /// for (uint8_t i = 100u; i > 0; i += 127u) /// /// /// TODO: May need to add additional parameters to represent: /// /// * Allow representing downcounting with unsigned integers. /// /// * Sign of the step and the comparison operator might disagree: /// /// for (int i = 0; i < 42; i -= 1u) /// // /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the loop body code. /// \param Start Value of the loop counter for the first iterations. /// \param Stop Loop counter values past this will stop the loop. /// \param Step Loop counter increment after each iteration; negative /// means counting down. /// \param IsSigned Whether Start, Stop and Step are signed integers. /// \param InclusiveStop Whether \p Stop itself is a valid value for the loop /// counter. /// \param ComputeIP Insertion point for instructions computing the trip /// count. Can be used to ensure the trip count is available /// at the outermost loop of a loop nest. If not set, /// defaults to the preheader of the generated loop. /// \param Name Base name used to derive BB and instruction names. /// /// \returns An object representing the created control flow structure which /// can be used for loop-associated directives. 
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc, LoopBodyGenCallbackTy BodyGenCB, Value *Start, Value *Stop, Value *Step, bool IsSigned, bool InclusiveStop, InsertPointTy ComputeIP = {}, const Twine &Name = "loop"); /// Collapse a loop nest into a single loop. /// /// Merges loops of a loop nest into a single CanonicalLoopNest representation /// that has the same number of innermost loop iterations as the origin loop /// nest. The induction variables of the input loops are derived from the /// collapsed loop's induction variable. This is intended to be used to /// implement OpenMP's collapse clause. Before applying a directive, /// collapseLoops normalizes a loop nest to contain only a single loop and the /// directive's implementation does not need to handle multiple loops itself. /// This does not remove the need to handle all loop nest handling by /// directives, such as the ordered(<n>) clause or the simd schedule-clause /// modifier of the worksharing-loop directive. /// /// Example: /// \code /// for (int i = 0; i < 7; ++i) // Canonical loop "i" /// for (int j = 0; j < 9; ++j) // Canonical loop "j" /// body(i, j); /// \endcode /// /// After collapsing with Loops={i,j}, the loop is changed to /// \code /// for (int ij = 0; ij < 63; ++ij) { /// int i = ij / 9; /// int j = ij % 9; /// body(i, j); /// } /// \endcode /// /// In the current implementation, the following limitations apply: /// /// * All input loops have an induction variable of the same type. /// /// * The collapsed loop will have the same trip count integer type as the /// input loops. Therefore it is possible that the collapsed loop cannot /// represent all iterations of the input loops. For instance, assuming a /// 32 bit integer type, and two input loops both iterating 2^16 times, the /// theoretical trip count of the collapsed loop would be 2^32 iteration, /// which cannot be represented in an 32-bit integer. Behavior is undefined /// in this case. 
/// /// * The trip counts of every input loop must be available at \p ComputeIP. /// Non-rectangular loops are not yet supported. /// /// * At each nest level, code between a surrounding loop and its nested loop /// is hoisted into the loop body, and such code will be executed more /// often than before collapsing (or not at all if any inner loop iteration /// has a trip count of 0). This is permitted by the OpenMP specification. /// /// \param DL Debug location for instructions added for collapsing, /// such as instructions to compute/derive the input loop's /// induction variables. /// \param Loops Loops in the loop nest to collapse. Loops are specified /// from outermost-to-innermost and every control flow of a /// loop's body must pass through its directly nested loop. /// \param ComputeIP Where additional instruction that compute the collapsed /// trip count. If not set, defaults to before the generated /// loop. /// /// \returns The CanonicalLoopInfo object representing the collapsed loop. CanonicalLoopInfo *collapseLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, InsertPointTy ComputeIP); /// Modifies the canonical loop to be a statically-scheduled workshare loop. /// /// This takes a \p LoopInfo representing a canonical loop, such as the one /// created by \p createCanonicalLoop and emits additional instructions to /// turn it into a workshare loop. In particular, it calls to an OpenMP /// runtime function in the preheader to obtain the loop bounds to be used in /// the current thread, updates the relevant instructions in the canonical /// loop and calls to an OpenMP runtime finalization function after the loop. /// /// TODO: Workshare loops with static scheduling may contain up to two loops /// that fulfill the requirements of an OpenMP canonical loop. One for /// iterating over all iterations of a chunk and another one for iterating /// over all chunks that are executed on the same thread. 
Returning /// CanonicalLoopInfo objects representing them may eventually be useful for /// the apply clause planned in OpenMP 6.0, but currently whether these are /// canonical loops is irrelevant. /// /// \param DL Debug location for instructions added for the /// workshare-loop construct itself. /// \param CLI A descriptor of the canonical loop to workshare. /// \param AllocaIP An insertion point for Alloca instructions usable in the /// preheader of the loop. /// \param NeedsBarrier Indicates whether a barrier must be inserted after /// the loop. /// \param Chunk The size of loop chunk considered as a unit when /// scheduling. If \p nullptr, defaults to 1. /// /// \returns Point where to insert code after the workshare construct. InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, bool NeedsBarrier, Value *Chunk = nullptr); /// Modifies the canonical loop to be a dynamically-scheduled workshare loop. /// /// This takes a \p LoopInfo representing a canonical loop, such as the one /// created by \p createCanonicalLoop and emits additional instructions to /// turn it into a workshare loop. In particular, it calls to an OpenMP /// runtime function in the preheader to obtain, and then in each iteration /// to update the loop counter. /// /// \param DL Debug location for instructions added for the /// workshare-loop construct itself. /// \param CLI A descriptor of the canonical loop to workshare. /// \param AllocaIP An insertion point for Alloca instructions usable in the /// preheader of the loop. /// \param SchedType Type of scheduling to be passed to the init function. /// \param NeedsBarrier Indicates whether a barrier must be insterted after /// the loop. /// \param Chunk The size of loop chunk considered as a unit when /// scheduling. If \p nullptr, defaults to 1. /// /// \returns Point where to insert code after the workshare construct. 
InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, omp::OMPScheduleType SchedType, bool NeedsBarrier, Value *Chunk = nullptr); /// Modifies the canonical loop to be a workshare loop. /// /// This takes a \p LoopInfo representing a canonical loop, such as the one /// created by \p createCanonicalLoop and emits additional instructions to /// turn it into a workshare loop. In particular, it calls to an OpenMP /// runtime function in the preheader to obtain the loop bounds to be used in /// the current thread, updates the relevant instructions in the canonical /// loop and calls to an OpenMP runtime finalization function after the loop. /// /// \param DL Debug location for instructions added for the /// workshare-loop construct itself. /// \param CLI A descriptor of the canonical loop to workshare. /// \param AllocaIP An insertion point for Alloca instructions usable in the /// preheader of the loop. /// \param NeedsBarrier Indicates whether a barrier must be insterted after /// the loop. /// /// \returns Point where to insert code after the workshare construct. InsertPointTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI, InsertPointTy AllocaIP, bool NeedsBarrier); /// Tile a loop nest. /// /// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in /// \p/ Loops must be perfectly nested, from outermost to innermost loop /// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value /// of every loop and every tile sizes must be usable in the outermost /// loop's preheader. This implies that the loop nest is rectangular. 
/// /// Example: /// \code /// for (int i = 0; i < 15; ++i) // Canonical loop "i" /// for (int j = 0; j < 14; ++j) // Canonical loop "j" /// body(i, j); /// \endcode /// /// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to /// \code /// for (int i1 = 0; i1 < 3; ++i1) /// for (int j1 = 0; j1 < 2; ++j1) /// for (int i2 = 0; i2 < 5; ++i2) /// for (int j2 = 0; j2 < 7; ++j2) /// body(i1*3+i2, j1*3+j2); /// \endcode /// /// The returned vector are the loops {i1,j1,i2,j2}. The loops i1 and j1 are /// referred to the floor, and the loops i2 and j2 are the tiles. Tiling also /// handles non-constant trip counts, non-constant tile sizes and trip counts /// that are not multiples of the tile size. In the latter case the tile loop /// of the last floor-loop iteration will have fewer iterations than specified /// as its tile size. /// /// /// @param DL Debug location for instructions added by tiling, for /// instance the floor- and tile trip count computation. /// @param Loops Loops to tile. The CanonicalLoopInfo objects are /// invalidated by this method, i.e. should not used after /// tiling. /// @param TileSizes For each loop in \p Loops, the tile size for that /// dimensions. /// /// \returns A list of generated loops. Contains twice as many loops as the /// input loop nest; the first half are the floor loops and the /// second half are the tile loops. std::vector<CanonicalLoopInfo *> tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops, ArrayRef<Value *> TileSizes); /// Fully unroll a loop. /// /// Instead of unrolling the loop immediately (and duplicating its body /// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop /// metadata. /// /// \param DL Debug location for instructions added by unrolling. /// \param Loop The loop to unroll. The loop will be invalidated. void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop); /// Fully or partially unroll a loop. 
How the loop is unrolled is determined /// using LLVM's LoopUnrollPass. /// /// \param DL Debug location for instructions added by unrolling. /// \param Loop The loop to unroll. The loop will be invalidated. void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop); /// Partially unroll a loop. /// /// The CanonicalLoopInfo of the unrolled loop for use with chained /// loop-associated directive can be requested using \p UnrolledCLI. Not /// needing the CanonicalLoopInfo allows more efficient code generation by /// deferring the actual unrolling to the LoopUnrollPass using loop metadata. /// A loop-associated directive applied to the unrolled loop needs to know the /// new trip count which means that if using a heuristically determined unroll /// factor (\p Factor == 0), that factor must be computed immediately. We are /// using the same logic as the LoopUnrollPass to derived the unroll factor, /// but which assumes that some canonicalization has taken place (e.g. /// Mem2Reg, LICM, GVN, Inlining, etc.). That is, the heuristic will perform /// better when the unrolled loop's CanonicalLoopInfo is not needed. /// /// \param DL Debug location for instructions added by unrolling. /// \param Loop The loop to unroll. The loop will be invalidated. /// \param Factor The factor to unroll the loop by. A factor of 0 /// indicates that a heuristic should be used to determine /// the unroll-factor. /// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the /// partially unrolled loop. Otherwise, uses loop metadata /// to defer unrolling to the LoopUnrollPass. void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor, CanonicalLoopInfo **UnrolledCLI); /// Add metadata to simd-ize a loop. /// /// \param DL Debug location for instructions added by unrolling. /// \param Loop The loop to simd-ize. 
void applySimd(DebugLoc DL, CanonicalLoopInfo *Loop); /// Generator for '#omp flush' /// /// \param Loc The location where the flush directive was encountered void createFlush(const LocationDescription &Loc); /// Generator for '#omp taskwait' /// /// \param Loc The location where the taskwait directive was encountered. void createTaskwait(const LocationDescription &Loc); /// Generator for '#omp taskyield' /// /// \param Loc The location where the taskyield directive was encountered. void createTaskyield(const LocationDescription &Loc); /// Functions used to generate reductions. Such functions take two Values /// representing LHS and RHS of the reduction, respectively, and a reference /// to the value that is updated to refer to the reduction result. using ReductionGenTy = function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>; /// Functions used to generate atomic reductions. Such functions take two /// Values representing pointers to LHS and RHS of the reduction, as well as /// the element type of these pointers. They are expected to atomically /// update the LHS to the reduced value. using AtomicReductionGenTy = function_ref<InsertPointTy(InsertPointTy, Type *, Value *, Value *)>; /// Information about an OpenMP reduction. struct ReductionInfo { ReductionInfo(Type *ElementType, Value *Variable, Value *PrivateVariable, ReductionGenTy ReductionGen, AtomicReductionGenTy AtomicReductionGen) : ElementType(ElementType), Variable(Variable), PrivateVariable(PrivateVariable), ReductionGen(ReductionGen), AtomicReductionGen(AtomicReductionGen) { assert(cast<PointerType>(Variable->getType()) ->isOpaqueOrPointeeTypeMatches(ElementType) && "Invalid elem type"); } /// Reduction element type, must match pointee type of variable. Type *ElementType; /// Reduction variable of pointer type. Value *Variable; /// Thread-private partial reduction variable. Value *PrivateVariable; /// Callback for generating the reduction body. 
The IR produced by this will /// be used to combine two values in a thread-safe context, e.g., under /// lock or within the same thread, and therefore need not be atomic. ReductionGenTy ReductionGen; /// Callback for generating the atomic reduction body, may be null. The IR /// produced by this will be used to atomically combine two values during /// reduction. If null, the implementation will use the non-atomic version /// along with the appropriate synchronization mechanisms. AtomicReductionGenTy AtomicReductionGen; }; // TODO: provide atomic and non-atomic reduction generators for reduction // operators defined by the OpenMP specification. /// Generator for '#omp reduction'. /// /// Emits the IR instructing the runtime to perform the specific kind of /// reductions. Expects reduction variables to have been privatized and /// initialized to reduction-neutral values separately. Emits the calls to /// runtime functions as well as the reduction function and the basic blocks /// performing the reduction atomically and non-atomically. /// /// The code emitted for the following: /// /// \code /// type var_1; /// type var_2; /// #pragma omp <directive> reduction(reduction-op:var_1,var_2) /// /* body */; /// \endcode /// /// corresponds to the following sketch. /// /// \code /// void _outlined_par() { /// // N is the number of different reductions. /// void *red_array[] = {privatized_var_1, privatized_var_2, ...}; /// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array, /// _omp_reduction_func, /// _gomp_critical_user.reduction.var)) { /// case 1: { /// var_1 = var_1 <reduction-op> privatized_var_1; /// var_2 = var_2 <reduction-op> privatized_var_2; /// // ... /// __kmpc_end_reduce(...); /// break; /// } /// case 2: { /// _Atomic<ReductionOp>(var_1, privatized_var_1); /// _Atomic<ReductionOp>(var_2, privatized_var_2); /// // ... 
/// break; /// } /// default: break; /// } /// } /// /// void _omp_reduction_func(void **lhs, void **rhs) { /// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0]; /// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1]; /// // ... /// } /// \endcode /// /// \param Loc The location where the reduction was /// encountered. Must be within the associate /// directive and after the last local access to the /// reduction variables. /// \param AllocaIP An insertion point suitable for allocas usable /// in reductions. /// \param ReductionInfos A list of info on each reduction variable. /// \param IsNoWait A flag set if the reduction is marked as nowait. InsertPointTy createReductions(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef<ReductionInfo> ReductionInfos, bool IsNoWait = false); ///} /// Return the insertion point used by the underlying IRBuilder. InsertPointTy getInsertionPoint() { return Builder.saveIP(); } /// Update the internal location to \p Loc. bool updateToLocation(const LocationDescription &Loc) { Builder.restoreIP(Loc.IP); Builder.SetCurrentDebugLocation(Loc.DL); return Loc.IP.getBlock() != nullptr; } /// Return the function declaration for the runtime function with \p FnID. FunctionCallee getOrCreateRuntimeFunction(Module &M, omp::RuntimeFunction FnID); Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID); /// Return the (LLVM-IR) string describing the source location \p LocStr. Constant *getOrCreateSrcLocStr(StringRef LocStr, uint32_t &SrcLocStrSize); /// Return the (LLVM-IR) string describing the default source location. Constant *getOrCreateDefaultSrcLocStr(uint32_t &SrcLocStrSize); /// Return the (LLVM-IR) string describing the source location identified by /// the arguments. Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName, unsigned Line, unsigned Column, uint32_t &SrcLocStrSize); /// Return the (LLVM-IR) string describing the DebugLoc \p DL. 
Use \p F as /// fallback if \p DL does not specify the function name. Constant *getOrCreateSrcLocStr(DebugLoc DL, uint32_t &SrcLocStrSize, Function *F = nullptr); /// Return the (LLVM-IR) string describing the source location \p Loc. Constant *getOrCreateSrcLocStr(const LocationDescription &Loc, uint32_t &SrcLocStrSize); /// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags. /// TODO: Create a enum class for the Reserve2Flags Constant *getOrCreateIdent(Constant *SrcLocStr, uint32_t SrcLocStrSize, omp::IdentFlag Flags = omp::IdentFlag(0), unsigned Reserve2Flags = 0); /// Create a hidden global flag \p Name in the module with initial value \p /// Value. GlobalValue *createGlobalFlag(unsigned Value, StringRef Name); /// Generate control flow and cleanup for cancellation. /// /// \param CancelFlag Flag indicating if the cancellation is performed. /// \param CanceledDirective The kind of directive that is cancled. /// \param ExitCB Extra code to be generated in the exit block. void emitCancelationCheckImpl(Value *CancelFlag, omp::Directive CanceledDirective, FinalizeCallbackTy ExitCB = {}); /// Generate a barrier runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. /// \param DK The directive which caused the barrier /// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier. /// \param CheckCancelFlag Flag to indicate a cancel barrier return value /// should be checked and acted upon. /// /// \returns The insertion point after the barrier. InsertPointTy emitBarrierImpl(const LocationDescription &Loc, omp::Directive DK, bool ForceSimpleCall, bool CheckCancelFlag); /// Generate a flush runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. 
void emitFlush(const LocationDescription &Loc); /// The finalization stack made up of finalize callbacks currently in-flight, /// wrapped into FinalizationInfo objects that reference also the finalization /// target block and the kind of cancellable directive. SmallVector<FinalizationInfo, 8> FinalizationStack; /// Return true if the last entry in the finalization stack is of kind \p DK /// and cancellable. bool isLastFinalizationInfoCancellable(omp::Directive DK) { return !FinalizationStack.empty() && FinalizationStack.back().IsCancellable && FinalizationStack.back().DK == DK; } /// Generate a taskwait runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. void emitTaskwaitImpl(const LocationDescription &Loc); /// Generate a taskyield runtime call. /// /// \param Loc The location at which the request originated and is fulfilled. void emitTaskyieldImpl(const LocationDescription &Loc); /// Return the current thread ID. /// /// \param Ident The ident (ident_t*) describing the query origin. Value *getOrCreateThreadID(Value *Ident); /// The underlying LLVM-IR module Module &M; /// The LLVM-IR Builder used to create IR. IRBuilder<> Builder; /// Map to remember source location strings StringMap<Constant *> SrcLocStrMap; /// Map to remember existing ident_t*. DenseMap<std::pair<Constant *, uint64_t>, Constant *> IdentMap; /// Helper that contains information about regions we need to outline /// during finalization. struct OutlineInfo { using PostOutlineCBTy = std::function<void(Function &)>; PostOutlineCBTy PostOutlineCB; BasicBlock *EntryBB, *ExitBB; SmallVector<Value *, 2> ExcludeArgsFromAggregate; /// Collect all blocks in between EntryBB and ExitBB in both the given /// vector and set. void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet, SmallVectorImpl<BasicBlock *> &BlockVector); /// Return the function that contains the region to be outlined. 
Function *getFunction() const { return EntryBB->getParent(); } }; /// Collection of regions that need to be outlined during finalization. SmallVector<OutlineInfo, 16> OutlineInfos; /// Collection of owned canonical loop objects that eventually need to be /// free'd. std::forward_list<CanonicalLoopInfo> LoopInfos; /// Add a new region that will be outlined later. void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); } /// An ordered map of auto-generated variables to their unique names. /// It stores variables with the following names: 1) ".gomp_critical_user_" + /// <critical_section_name> + ".var" for "omp critical" directives; 2) /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate /// variables. StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars; /// Create the global variable holding the offload mappings information. GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings, std::string VarName); /// Create the global variable holding the offload names information. GlobalVariable * createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names, std::string VarName); struct MapperAllocas { AllocaInst *ArgsBase = nullptr; AllocaInst *Args = nullptr; AllocaInst *ArgSizes = nullptr; }; /// Create the allocas instruction used in call to mapper functions. void createMapperAllocas(const LocationDescription &Loc, InsertPointTy AllocaIP, unsigned NumOperands, struct MapperAllocas &MapperAllocas); /// Create the call for the target mapper function. /// \param Loc The source location description. /// \param MapperFunc Function to be called. /// \param SrcLocInfo Source location information global. /// \param MaptypesArg The argument types. /// \param MapnamesArg The argument names. /// \param MapperAllocas The AllocaInst used for the call. /// \param DeviceID Device ID for the call. /// \param NumOperands Number of operands in the call. 
void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc, Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg, struct MapperAllocas &MapperAllocas, int64_t DeviceID, unsigned NumOperands); public: /// Generator for __kmpc_copyprivate /// /// \param Loc The source location description. /// \param BufSize Number of elements in the buffer. /// \param CpyBuf List of pointers to data to be copied. /// \param CpyFn function to call for copying data. /// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise. /// /// \return The insertion position *after* the CopyPrivate call. InsertPointTy createCopyPrivate(const LocationDescription &Loc, llvm::Value *BufSize, llvm::Value *CpyBuf, llvm::Value *CpyFn, llvm::Value *DidIt); /// Generator for '#omp single' /// /// \param Loc The source location description. /// \param BodyGenCB Callback that will generate the region code. /// \param FiniCB Callback to finalize variable copies. /// \param DidIt Local variable used as a flag to indicate 'single' thread /// /// \returns The insertion position *after* the single call. InsertPointTy createSingle(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, llvm::Value *DidIt); /// Generator for '#omp master' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region code. /// \param FiniCB Callback to finalize variable copies. /// /// \returns The insertion position *after* the master. InsertPointTy createMaster(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB); /// Generator for '#omp masked' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region code. /// \param FiniCB Callback to finialize variable copies. /// /// \returns The insertion position *after* the masked. 
InsertPointTy createMasked(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, Value *Filter); /// Generator for '#omp critical' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region body code. /// \param FiniCB Callback to finalize variable copies. /// \param CriticalName name of the lock used by the critical directive /// \param HintInst Hint Instruction for hint clause associated with critical /// /// \returns The insertion position *after* the critical. InsertPointTy createCritical(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, StringRef CriticalName, Value *HintInst); /// Generator for '#omp ordered depend (source | sink)' /// /// \param Loc The insert and source location description. /// \param AllocaIP The insertion point to be used for alloca instructions. /// \param NumLoops The number of loops in depend clause. /// \param StoreValues The value will be stored in vector address. /// \param Name The name of alloca instruction. /// \param IsDependSource If true, depend source; otherwise, depend sink. /// /// \return The insertion position *after* the ordered. InsertPointTy createOrderedDepend(const LocationDescription &Loc, InsertPointTy AllocaIP, unsigned NumLoops, ArrayRef<llvm::Value *> StoreValues, const Twine &Name, bool IsDependSource); /// Generator for '#omp ordered [threads | simd]' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region code. /// \param FiniCB Callback to finalize variable copies. /// \param IsThreads If true, with threads clause or without clause; /// otherwise, with simd clause; /// /// \returns The insertion position *after* the ordered. 
InsertPointTy createOrderedThreadsSimd(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool IsThreads); /// Generator for '#omp sections' /// /// \param Loc The insert and source location description. /// \param AllocaIP The insertion points to be used for alloca instructions. /// \param SectionCBs Callbacks that will generate body of each section. /// \param PrivCB Callback to copy a given variable (think copy constructor). /// \param FiniCB Callback to finalize variable copies. /// \param IsCancellable Flag to indicate a cancellable parallel region. /// \param IsNowait If true, barrier - to ensure all sections are executed /// before moving forward will not be generated. /// \returns The insertion position *after* the sections. InsertPointTy createSections(const LocationDescription &Loc, InsertPointTy AllocaIP, ArrayRef<StorableBodyGenCallbackTy> SectionCBs, PrivatizeCallbackTy PrivCB, FinalizeCallbackTy FiniCB, bool IsCancellable, bool IsNowait); /// Generator for '#omp section' /// /// \param Loc The insert and source location description. /// \param BodyGenCB Callback that will generate the region body code. /// \param FiniCB Callback to finalize variable copies. /// \returns The insertion position *after* the section. InsertPointTy createSection(const LocationDescription &Loc, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB); /// Generate conditional branch and relevant BasicBlocks through which private /// threads copy the 'copyin' variables from Master copy to threadprivate /// copies. /// /// \param IP insertion block for copyin conditional /// \param MasterVarPtr a pointer to the master variable /// \param PrivateVarPtr a pointer to the threadprivate variable /// \param IntPtrTy Pointer size type /// \param BranchtoEnd Create a branch between the copyin.not.master blocks // and copy.in.end block /// /// \returns The insertion point where copying operation to be emitted. 
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr, Value *PrivateAddr, llvm::IntegerType *IntPtrTy, bool BranchtoEnd = true); /// Create a runtime call for kmpc_Alloc /// /// \param Loc The insert and source location description. /// \param Size Size of allocated memory space /// \param Allocator Allocator information instruction /// \param Name Name of call Instruction for OMP_alloc /// /// \returns CallInst to the OMP_Alloc call CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size, Value *Allocator, std::string Name = ""); /// Create a runtime call for kmpc_free /// /// \param Loc The insert and source location description. /// \param Addr Address of memory space to be freed /// \param Allocator Allocator information instruction /// \param Name Name of call Instruction for OMP_Free /// /// \returns CallInst to the OMP_Free call CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr, Value *Allocator, std::string Name = ""); /// Create a runtime call for kmpc_threadprivate_cached /// /// \param Loc The insert and source location description. /// \param Pointer pointer to data to be cached /// \param Size size of data to be cached /// \param Name Name of call Instruction for callinst /// /// \returns CallInst to the thread private cache call. CallInst *createCachedThreadPrivate(const LocationDescription &Loc, llvm::Value *Pointer, llvm::ConstantInt *Size, const llvm::Twine &Name = Twine("")); /// Create a runtime call for __tgt_interop_init /// /// \param Loc The insert and source location description. 
/// \param InteropVar variable to be allocated /// \param InteropType type of interop operation /// \param Device devide to which offloading will occur /// \param NumDependences number of dependence variables /// \param DependenceAddress pointer to dependence variables /// \param HaveNowaitClause does nowait clause exist /// /// \returns CallInst to the __tgt_interop_init call CallInst *createOMPInteropInit(const LocationDescription &Loc, Value *InteropVar, omp::OMPInteropType InteropType, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause); /// Create a runtime call for __tgt_interop_destroy /// /// \param Loc The insert and source location description. /// \param InteropVar variable to be allocated /// \param Device devide to which offloading will occur /// \param NumDependences number of dependence variables /// \param DependenceAddress pointer to dependence variables /// \param HaveNowaitClause does nowait clause exist /// /// \returns CallInst to the __tgt_interop_destroy call CallInst *createOMPInteropDestroy(const LocationDescription &Loc, Value *InteropVar, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause); /// Create a runtime call for __tgt_interop_use /// /// \param Loc The insert and source location description. 
/// \param InteropVar variable to be allocated /// \param Device devide to which offloading will occur /// \param NumDependences number of dependence variables /// \param DependenceAddress pointer to dependence variables /// \param HaveNowaitClause does nowait clause exist /// /// \returns CallInst to the __tgt_interop_use call CallInst *createOMPInteropUse(const LocationDescription &Loc, Value *InteropVar, Value *Device, Value *NumDependences, Value *DependenceAddress, bool HaveNowaitClause); /// The `omp target` interface /// /// For more information about the usage of this interface, /// \see openmp/libomptarget/deviceRTLs/common/include/target.h /// ///{ /// Create a runtime call for kmpc_target_init /// /// \param Loc The insert and source location description. /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not. /// \param RequiresFullRuntime Indicate if a full device runtime is necessary. InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD, bool RequiresFullRuntime); /// Create a runtime call for kmpc_target_deinit /// /// \param Loc The insert and source location description. /// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not. /// \param RequiresFullRuntime Indicate if a full device runtime is necessary. void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD, bool RequiresFullRuntime); ///} /// Declarations for LLVM-IR types (simple, array, function and structure) are /// generated below. Their names are defined and used in OpenMPKinds.def. Here /// we provide the declarations, the initializeTypes function will provide the /// values. /// ///{ #define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr; #define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \ ArrayType *VarName##Ty = nullptr; \ PointerType *VarName##PtrTy = nullptr; #define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) 
\ FunctionType *VarName = nullptr; \ PointerType *VarName##Ptr = nullptr; #define OMP_STRUCT_TYPE(VarName, StrName, ...) \ StructType *VarName = nullptr; \ PointerType *VarName##Ptr = nullptr; #include "llvm/Frontend/OpenMP/OMPKinds.def" ///} private: /// Create all simple and struct types exposed by the runtime and remember /// the llvm::PointerTypes of them for easy access later. void initializeTypes(Module &M); /// Common interface for generating entry calls for OMP Directives. /// if the directive has a region/body, It will set the insertion /// point to the body /// /// \param OMPD Directive to generate entry blocks for /// \param EntryCall Call to the entry OMP Runtime Function /// \param ExitBB block where the region ends. /// \param Conditional indicate if the entry call result will be used /// to evaluate a conditional of whether a thread will execute /// body code or not. /// /// \return The insertion position in exit block InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall, BasicBlock *ExitBB, bool Conditional = false); /// Common interface to finalize the region /// /// \param OMPD Directive to generate exiting code for /// \param FinIP Insertion point for emitting Finalization code and exit call /// \param ExitCall Call to the ending OMP Runtime Function /// \param HasFinalize indicate if the directive will require finalization /// and has a finalization callback in the stack that /// should be called. /// /// \return The insertion position in exit block InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD, InsertPointTy FinIP, Instruction *ExitCall, bool HasFinalize = true); /// Common Interface to generate OMP inlined regions /// /// \param OMPD Directive to generate inlined region for /// \param EntryCall Call to the entry OMP Runtime Function /// \param ExitCall Call to the ending OMP Runtime Function /// \param BodyGenCB Body code generation callback. /// \param FiniCB Finalization Callback. 
Will be called when finalizing region /// \param Conditional indicate if the entry call result will be used /// to evaluate a conditional of whether a thread will execute /// body code or not. /// \param HasFinalize indicate if the directive will require finalization /// and has a finalization callback in the stack that /// should be called. /// \param IsCancellable if HasFinalize is set to true, indicate if the /// the directive should be cancellable. /// \return The insertion point after the region InsertPointTy EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall, Instruction *ExitCall, BodyGenCallbackTy BodyGenCB, FinalizeCallbackTy FiniCB, bool Conditional = false, bool HasFinalize = true, bool IsCancellable = false); /// Get the platform-specific name separator. /// \param Parts different parts of the final name that needs separation /// \param FirstSeparator First separator used between the initial two /// parts of the name. /// \param Separator separator used between all of the rest consecutive /// parts of the name static std::string getNameWithSeparators(ArrayRef<StringRef> Parts, StringRef FirstSeparator, StringRef Separator); /// Gets (if variable with the given name already exist) or creates /// internal global variable with the specified Name. The created variable has /// linkage CommonLinkage by default and is initialized by null value. /// \param Ty Type of the global variable. If it is exist already the type /// must be the same. /// \param Name Name of the variable. Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name, unsigned AddressSpace = 0); /// Returns corresponding lock object for the specified critical region /// name. If the lock object does not exist it is created, otherwise the /// reference to the existing copy is returned. /// \param CriticalName Name of the critical region. 
/// Value *getOMPCriticalRegionLock(StringRef CriticalName); /// Callback type for Atomic Expression update /// ex: /// \code{.cpp} /// unsigned x = 0; /// #pragma omp atomic update /// x = Expr(x_old); //Expr() is any legal operation /// \endcode /// /// \param XOld the value of the atomic memory address to use for update /// \param IRB reference to the IRBuilder to use /// /// \returns Value to update X to. using AtomicUpdateCallbackTy = const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>; private: enum AtomicKind { Read, Write, Update, Capture, Compare }; /// Determine whether to emit flush or not /// /// \param Loc The insert and source location description. /// \param AO The required atomic ordering /// \param AK The OpenMP atomic operation kind used. /// /// \returns wether a flush was emitted or not bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc, AtomicOrdering AO, AtomicKind AK); /// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X) /// Only Scalar data types. /// /// \param AllocaIP The insertion point to be used for alloca /// instructions. /// \param X The target atomic pointer to be updated /// \param XElemTy The element type of the atomic pointer. /// \param Expr The value to update X with. /// \param AO Atomic ordering of the generated atomic /// instructions. /// \param RMWOp The binary operation used for update. If /// operation is not supported by atomicRMW, /// or belong to {FADD, FSUB, BAD_BINOP}. /// Then a `cmpExch` based atomic will be generated. /// \param UpdateOp Code generator for complex expressions that cannot be /// expressed through atomicrmw instruction. /// \param VolatileX true if \a X volatile? /// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the /// update expression, false otherwise. /// (e.g. 
true for X = X BinOp Expr) /// /// \returns A pair of the old value of X before the update, and the value /// used for the update. std::pair<Value *, Value *> emitAtomicUpdate(InsertPointTy AllocaIP, Value *X, Type *XElemTy, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool VolatileX, bool IsXBinopExpr); /// Emit the binary op. described by \p RMWOp, using \p Src1 and \p Src2 . /// /// \Return The instruction Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2, AtomicRMWInst::BinOp RMWOp); public: /// a struct to pack relevant information while generating atomic Ops struct AtomicOpValue { Value *Var = nullptr; Type *ElemTy = nullptr; bool IsSigned = false; bool IsVolatile = false; }; /// Emit atomic Read for : V = X --- Only Scalar data types. /// /// \param Loc The insert and source location description. /// \param X The target pointer to be atomically read /// \param V Memory address where to store atomically read /// value /// \param AO Atomic ordering of the generated atomic /// instructions. /// /// \return Insertion point after generated atomic read IR. InsertPointTy createAtomicRead(const LocationDescription &Loc, AtomicOpValue &X, AtomicOpValue &V, AtomicOrdering AO); /// Emit atomic write for : X = Expr --- Only Scalar data types. /// /// \param Loc The insert and source location description. /// \param X The target pointer to be atomically written to /// \param Expr The value to store. /// \param AO Atomic ordering of the generated atomic /// instructions. /// /// \return Insertion point after generated atomic Write IR. InsertPointTy createAtomicWrite(const LocationDescription &Loc, AtomicOpValue &X, Value *Expr, AtomicOrdering AO); /// Emit atomic update for constructs: X = X BinOp Expr ,or X = Expr BinOp X /// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X) /// Only Scalar data types. /// /// \param Loc The insert and source location description. 
/// \param AllocaIP The insertion point to be used for alloca instructions. /// \param X The target atomic pointer to be updated /// \param Expr The value to update X with. /// \param AO Atomic ordering of the generated atomic instructions. /// \param RMWOp The binary operation used for update. If operation /// is not supported by atomicRMW, or belong to /// {FADD, FSUB, BAD_BINOP}. Then a `cmpExch` based /// atomic will be generated. /// \param UpdateOp Code generator for complex expressions that cannot be /// expressed through atomicrmw instruction. /// \param IsXBinopExpr true if \a X is Left H.S. in Right H.S. part of the /// update expression, false otherwise. /// (e.g. true for X = X BinOp Expr) /// /// \return Insertion point after generated atomic update IR. InsertPointTy createAtomicUpdate(const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool IsXBinopExpr); /// Emit atomic update for constructs: --- Only Scalar data types /// V = X; X = X BinOp Expr , /// X = X BinOp Expr; V = X, /// V = X; X = Expr BinOp X, /// X = Expr BinOp X; V = X, /// V = X; X = UpdateOp(X), /// X = UpdateOp(X); V = X, /// /// \param Loc The insert and source location description. /// \param AllocaIP The insertion point to be used for alloca instructions. /// \param X The target atomic pointer to be updated /// \param V Memory address where to store captured value /// \param Expr The value to update X with. /// \param AO Atomic ordering of the generated atomic instructions /// \param RMWOp The binary operation used for update. If /// operation is not supported by atomicRMW, or belong to /// {FADD, FSUB, BAD_BINOP}. Then a cmpExch based /// atomic will be generated. /// \param UpdateOp Code generator for complex expressions that cannot be /// expressed through atomicrmw instruction. 
/// \param UpdateExpr true if X is an in place update of the form /// X = X BinOp Expr or X = Expr BinOp X /// \param IsXBinopExpr true if X is Left H.S. in Right H.S. part of the /// update expression, false otherwise. /// (e.g. true for X = X BinOp Expr) /// \param IsPostfixUpdate true if original value of 'x' must be stored in /// 'v', not an updated one. /// /// \return Insertion point after generated atomic capture IR. InsertPointTy createAtomicCapture(const LocationDescription &Loc, InsertPointTy AllocaIP, AtomicOpValue &X, AtomicOpValue &V, Value *Expr, AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp, AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr, bool IsPostfixUpdate, bool IsXBinopExpr); /// Emit atomic compare for constructs: --- Only scalar data types /// cond-update-atomic: /// x = x ordop expr ? expr : x; /// x = expr ordop x ? expr : x; /// x = x == e ? d : x; /// x = e == x ? d : x; (this one is not in the spec) /// cond-update-stmt: /// if (x ordop expr) { x = expr; } /// if (expr ordop x) { x = expr; } /// if (x == e) { x = d; } /// if (e == x) { x = d; } (this one is not in the spec) /// /// \param Loc The insert and source location description. /// \param X The target atomic pointer to be updated. /// \param E The expected value ('e') for forms that use an /// equality comparison or an expression ('expr') for /// forms that use 'ordop' (logically an atomic maximum or /// minimum). /// \param D The desired value for forms that use an equality /// comparison. If forms that use 'ordop', it should be /// \p nullptr. /// \param AO Atomic ordering of the generated atomic instructions. /// \param OP Atomic compare operation. It can only be ==, <, or >. /// \param IsXBinopExpr True if the conditional statement is in the form where /// x is on LHS. It only matters for < or >. /// /// \return Insertion point after generated atomic capture IR. 
InsertPointTy createAtomicCompare(const LocationDescription &Loc, AtomicOpValue &X, Value *E, Value *D, AtomicOrdering AO, omp::OMPAtomicCompareOp Op, bool IsXBinopExpr); /// Create the control flow structure of a canonical OpenMP loop. /// /// The emitted loop will be disconnected, i.e. no edge to the loop's /// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's /// IRBuilder location is not preserved. /// /// \param DL DebugLoc used for the instructions in the skeleton. /// \param TripCount Value to be used for the trip count. /// \param F Function in which to insert the BasicBlocks. /// \param PreInsertBefore Where to insert BBs that execute before the body, /// typically the body itself. /// \param PostInsertBefore Where to insert BBs that execute after the body. /// \param Name Base name used to derive BB /// and instruction names. /// /// \returns The CanonicalLoopInfo that represents the emitted loop. CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount, Function *F, BasicBlock *PreInsertBefore, BasicBlock *PostInsertBefore, const Twine &Name = {}); }; /// Class to represented the control flow structure of an OpenMP canonical loop. /// /// The control-flow structure is standardized for easy consumption by /// directives associated with loops. For instance, the worksharing-loop /// construct may change this control flow such that each loop iteration is /// executed on only one thread. The constraints of a canonical loop in brief /// are: /// /// * The number of loop iterations must have been computed before entering the /// loop. /// /// * Has an (unsigned) logical induction variable that starts at zero and /// increments by one. /// /// * The loop's CFG itself has no side-effects. The OpenMP specification /// itself allows side-effects, but the order in which they happen, including /// how often or whether at all, is unspecified. We expect that the frontend /// will emit those side-effect instructions somewhere (e.g. 
/// before the loop) such that the CanonicalLoopInfo itself can be
/// side-effect free.
///
/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
/// execution of a loop body that satisfies these constraints. It does NOT
/// represent arbitrary SESE regions that happen to contain a loop. Do not use
/// CanonicalLoopInfo for such purposes.
///
/// The control flow can be described as follows:
///
///     Preheader
///          |
///  /-> Header
///  |      |
///  |    Cond---\
///  |      |    |
///  |     Body  |
///  |    | | |  |
///  |   <...>   |
///  |    | | |  |
///  \--Latch    |
///              |
///             Exit
///              |
///            After
///
/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
/// including) and end at AfterIP (at the After's first instruction, excluding).
/// That is, instructions in the Preheader and After blocks (except the
/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
/// side-effects. Typically, the Preheader is used to compute the loop's trip
/// count. The instructions from BodyIP (at the Body block's first instruction,
/// excluding) until the Latch are also considered outside CanonicalLoopInfo's
/// control and thus can have side-effects. The body block is the single entry
/// point into the loop body, which may contain arbitrary control flow as long
/// as all control paths eventually branch to the Latch block.
///
/// TODO: Consider adding another standardized BasicBlock between Body CFG and
/// Latch to guarantee that there is only a single edge to the latch. It would
/// make loop transformations easier by not needing to consider multiple
/// predecessors of the latch (See redirectAllPredecessorsTo) and would give us
/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
/// executes after each body iteration.
///
/// There must be no loop-carried dependencies through llvm::Values. This is
/// equivalent to the Latch having no PHINode and the Header's only PHINode
/// being for the induction variable.
///
/// All code in Header, Cond, Latch and Exit (plus the terminator of the
/// Preheader) is CanonicalLoopInfo's responsibility and its build-up checked
/// by assertOK(). It is expected not to be modified unless explicitly
/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
/// anymore as its underlying control flow may not exist anymore.
/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
/// may also return a new CanonicalLoopInfo that can be passed to other
/// loop-associated construct implementing methods. These loop-transforming
/// methods may either create a new CanonicalLoopInfo, usually using
/// createLoopSkeleton, and invalidate the input CanonicalLoopInfo, or reuse and
/// modify one of the input CanonicalLoopInfos and return it as representing the
/// modified loop. What is done is an implementation detail of the
/// transformation-implementing method and callers should always assume that the
/// CanonicalLoopInfo passed to it is invalidated and a new object is returned.
/// Returned CanonicalLoopInfos have the same structure and guarantees as the
/// one created by createCanonicalLoop, such that transforming methods do not
/// have to special-case where the CanonicalLoopInfo originated from.
///
/// Generally, methods consuming CanonicalLoopInfo do not need an
/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
/// any InsertPoint that is outside CanonicalLoopInfo's control.
Specifically, /// any InsertPoint in the Preheader, After or Block can still be used after /// calling such a method. /// /// TODO: Provide mechanisms for exception handling and cancellation points. /// /// Defined outside OpenMPIRBuilder because nested classes cannot be /// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h. class CanonicalLoopInfo { friend class OpenMPIRBuilder; private: BasicBlock *Header = nullptr; BasicBlock *Cond = nullptr; BasicBlock *Latch = nullptr; BasicBlock *Exit = nullptr; /// Add the control blocks of this loop to \p BBs. /// /// This does not include any block from the body, including the one returned /// by getBody(). /// /// FIXME: This currently includes the Preheader and After blocks even though /// their content is (mostly) not under CanonicalLoopInfo's control. /// Re-evaluated whether this makes sense. void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs); public: /// Returns whether this object currently represents the IR of a loop. If /// returning false, it may have been consumed by a loop transformation or not /// been intialized. Do not use in this case; bool isValid() const { return Header; } /// The preheader ensures that there is only a single edge entering the loop. /// Code that must be execute before any loop iteration can be emitted here, /// such as computing the loop trip count and begin lifetime markers. Code in /// the preheader is not considered part of the canonical loop. BasicBlock *getPreheader() const; /// The header is the entry for each iteration. In the canonical control flow, /// it only contains the PHINode for the induction variable. BasicBlock *getHeader() const { assert(isValid() && "Requires a valid canonical loop"); return Header; } /// The condition block computes whether there is another loop iteration. If /// yes, branches to the body; otherwise to the exit block. 
BasicBlock *getCond() const { assert(isValid() && "Requires a valid canonical loop"); return Cond; } /// The body block is the single entry for a loop iteration and not controlled /// by CanonicalLoopInfo. It can contain arbitrary control flow but must /// eventually branch to the \p Latch block. BasicBlock *getBody() const { assert(isValid() && "Requires a valid canonical loop"); return cast<BranchInst>(Cond->getTerminator())->getSuccessor(0); } /// Reaching the latch indicates the end of the loop body code. In the /// canonical control flow, it only contains the increment of the induction /// variable. BasicBlock *getLatch() const { assert(isValid() && "Requires a valid canonical loop"); return Latch; } /// Reaching the exit indicates no more iterations are being executed. BasicBlock *getExit() const { assert(isValid() && "Requires a valid canonical loop"); return Exit; } /// The after block is intended for clean-up code such as lifetime end /// markers. It is separate from the exit block to ensure, analogous to the /// preheader, it having just a single entry edge and being free from PHI /// nodes should there be multiple loop exits (such as from break /// statements/cancellations). BasicBlock *getAfter() const { assert(isValid() && "Requires a valid canonical loop"); return Exit->getSingleSuccessor(); } /// Returns the llvm::Value containing the number of loop iterations. It must /// be valid in the preheader and always interpreted as an unsigned integer of /// any bit-width. Value *getTripCount() const { assert(isValid() && "Requires a valid canonical loop"); Instruction *CmpI = &Cond->front(); assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount"); return CmpI->getOperand(1); } /// Returns the instruction representing the current logical induction /// variable. Always unsigned, always starting at 0 with an increment of one. 
Instruction *getIndVar() const { assert(isValid() && "Requires a valid canonical loop"); Instruction *IndVarPHI = &Header->front(); assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI"); return IndVarPHI; } /// Return the type of the induction variable (and the trip count). Type *getIndVarType() const { assert(isValid() && "Requires a valid canonical loop"); return getIndVar()->getType(); } /// Return the insertion point for user code before the loop. OpenMPIRBuilder::InsertPointTy getPreheaderIP() const { assert(isValid() && "Requires a valid canonical loop"); BasicBlock *Preheader = getPreheader(); return {Preheader, std::prev(Preheader->end())}; }; /// Return the insertion point for user code in the body. OpenMPIRBuilder::InsertPointTy getBodyIP() const { assert(isValid() && "Requires a valid canonical loop"); BasicBlock *Body = getBody(); return {Body, Body->begin()}; }; /// Return the insertion point for user code after the loop. OpenMPIRBuilder::InsertPointTy getAfterIP() const { assert(isValid() && "Requires a valid canonical loop"); BasicBlock *After = getAfter(); return {After, After->begin()}; }; Function *getFunction() const { assert(isValid() && "Requires a valid canonical loop"); return Header->getParent(); } /// Consistency self-check. void assertOK() const; /// Invalidate this loop. That is, the underlying IR does not fulfill the /// requirements of an OpenMP canonical loop anymore. void invalidate(); }; } // end namespace llvm #endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
main.c
// Copyright (c) 2017 ORPECOMP Project // Fabian Schuiki <fschuiki@iis.ee.ethz.ch> #include "oprecomp.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> int main(int argc, char **argv) { // Parse the parameters that determine the benchmark's execution. if (argc != 3) { fprintf(stderr, "usage: %s PROBLEM_SIZE NUM_THREADS\n", argv[0]); return 1; } int problem_size = 1 << atoi(argv[1]); int num_threads = atoi(argv[2]); printf("# problem_size: %d\n", problem_size); printf("# num_threads: %d\n", num_threads); // Initialize the problem. Don't measure this. double *data = calloc(problem_size, sizeof(double)); int i; for (i = 0; i < problem_size; ++i) { data[i] = (double)(rand() % (1 << 16)) / (1 << 16); } // Set the number of threads we want for solving the problem. omp_set_dynamic(0); // enforce number of threads omp_set_num_threads(num_threads); // Start measuring performance. fprintf(stderr, "oprecomp_start\n"); oprecomp_start(); do { #pragma omp parallel for for (i = 0; i < problem_size; ++i) { double f = data[i]; f = sqrt(f) * 2; f = f*f / 2 + 1.0; data[i] = f; } } while (oprecomp_iterate()); // Stop measuring performance. oprecomp_stop(); fprintf(stderr, "oprecomp_stop\n"); // Pretend that we have some other data to be logged. All lines starting // with a "# <header>: <value>" will be concatenated into a result by the // benchmark wrapper script. printf("# a: %d\n", rand()); printf("# b: %d\n", rand()); printf("# c: %f\n", data[0]); // Clean up. free(data); return 0; }
GB_binop__max_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__max_int64) // A.*B function (eWiseMult): GB (_AemultB_08__max_int64) // A.*B function (eWiseMult): GB (_AemultB_02__max_int64) // A.*B function (eWiseMult): GB (_AemultB_04__max_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__max_int64) // A*D function (colscale): GB (_AxD__max_int64) // D*A function (rowscale): GB (_DxB__max_int64) // C+=B function (dense accum): GB (_Cdense_accumB__max_int64) // C+=b function (dense accum): GB (_Cdense_accumb__max_int64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__max_int64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__max_int64) // C=scalar+B GB (_bind1st__max_int64) // C=scalar+B' GB (_bind1st_tran__max_int64) // C=A+scalar GB (_bind2nd__max_int64) // C=A'+scalar GB (_bind2nd_tran__max_int64) // C type: int64_t // A type: int64_t // A pattern? 0 // B type: int64_t // B pattern? 
0 // BinaryOp: cij = GB_IMAX (aij, bij) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IMAX (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MAX || GxB_NO_INT64 || GxB_NO_MAX_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__max_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__max_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__max_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__max_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__max_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else 
int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__max_int64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__max_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int64_t alpha_scalar ; int64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int64_t *) alpha_scalar_in)) ; beta_scalar = (*((int64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__max_int64) ( GrB_Matrix C, const int C_sparsity, 
const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__max_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__max_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__max_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__max_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) 
for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IMAX (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__max_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IMAX (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMAX (x, aij) ; \ } GrB_Info GB (_bind1st_tran__max_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMAX (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__max_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
AutoRelease.c
/**
 * C Object System
 * COS Autorelease pool
 *
 * Copyright 2006+ Laurent Deniau <laurent.deniau@gmail.com>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cos/Object.h>
#include <cos/gen/object.h>
#include <cos/gen/value.h>

#include <stdlib.h>
#include <string.h>

/* NOTE-CONF: AutoRelease storage size
 * Init specifies the number of initial slots allocated for
 * autoreleased objects. Rate specifies the rate at which this storage
 * must grow (e.g. 2.0 means double the size each time it is
 * full). Warn specifies the thresthold for warning about the number of
 * objects autoreleased in a single pool (during expansion only).
 */

#define COS_AUTORELEASE_INIT 500
#define COS_AUTORELEASE_RATE 1618     // golden ratio * 1000
#define COS_AUTORELEASE_WARN 10000000 // 0 = *never*

// private class
//
// A pool is a growable stack of OBJ slots.  It starts life using the small
// embedded buffer _stk[16] and migrates to a malloc'd buffer on first
// overflow (see enlarge()).  Pools are chained through 'prv' into a
// per-thread stack whose bottom sentinel is _pool0.  'tmp' temporarily
// protects the object being pushed while enlarge() may THROW (see push()).
defclass(AutoRelease)
  struct AutoRelease *prv;   // next-older pool on this thread's chain
  OBJ *stk;                  // base of the slot array (may be _stk)
  OBJ *top;                  // one past the last autoreleased object
  OBJ *end;                  // one past the last available slot
  OBJ tmp;                   // in-flight object, released by clear() on failure
  OBJ _stk[16];              // small inline buffer: avoids malloc for tiny pools
endclass

makclass(AutoRelease);

// -----

useclass(ExBadValue, ExBadAlloc, ExBadMessage);

// -----

STATIC_ASSERT(COS_AUTORELEASE_RATE_must_be_greater_than_3_div_2,
              COS_AUTORELEASE_RATE >= 1500);
STATIC_ASSERT(COS_AUTORELEASE_INIT_must_be_greater_than_100,
              COS_AUTORELEASE_INIT >= 100);
STATIC_ASSERT(COS_AUTORELEASE_WARN_is_too_small,
              COS_AUTORELEASE_WARN >= 10000);

/* NOTE-INFO: AutoRelease and threads
 * This code assumes the creation of a new pool for each new thread
 */

static struct AutoRelease _pool0; // sentinel

#if defined(_OPENMP) || COS_HAS_TLS || !COS_HAS_POSIX // --------------------

// Fast path: compiler-supported thread-local storage holds the current pool.
static __thread struct AutoRelease *_pool = &_pool0;
#ifdef _OPENMP
#pragma omp threadprivate(_pool)
#endif

static inline struct AutoRelease*
pool_get(void)
{
  return _pool;
}

static inline void
pool_set(struct AutoRelease *pool)
{
  _pool = pool;
}

static void
_pool_init(void)
{
}

#else // !defined(_OPENMP) && !COS_HAS_TLS && COS_HAS_POSIX -----------------

// Fallback: pthread TSD keeps the per-thread current pool; the key's
// destructor tears down any pools the thread left behind.
static pthread_key_t _pool_key;

static void
pool_set(struct AutoRelease *pool)
{
  ensure( pthread_setspecific(_pool_key, pool) == 0 );
}

static cos_inline struct AutoRelease*
pool_get(void)
{
  struct AutoRelease *pool = pthread_getspecific(_pool_key);

  if (pool) return pool;

  // first access on this thread: fall back to the shared sentinel
  pool_set(&_pool0);
  return &_pool0;
}

static void
_pool_deinit(void *pool_)
{
  // thread exit: unwind to the oldest pool above the sentinel and release it
  // (grelease on a pool releases everything chained above it, see gdeinit)
  struct AutoRelease *pool = pool_;
  while (pool->prv != &_pool0) pool = pool->prv;
  grelease((OBJ)pool);
}

static void
_pool_init(void)
{
  ensure( pthread_key_create(&_pool_key, _pool_deinit) == 0 );
}

#endif // ------------------------------------------------

// Grow p's slot array: inline buffer -> malloc(INIT), thereafter
// realloc by the golden-ratio growth factor.  THROWs ExBadAlloc on failure.
static void
enlarge(struct AutoRelease* p)
{
  U32 size = p->top - p->stk;
  U32 new_size;
  OBJ *stk;

  if (p->stk == p->_stk) {
    // first overflow of the embedded buffer: move to the heap
    new_size = COS_AUTORELEASE_INIT;
    stk = malloc(sizeof *stk * new_size);
    if (stk) memcpy(stk, p->stk, sizeof *stk * size);
  } else {
    // subsequent growth: RATE/1000 ~ golden ratio
    new_size = size * (COS_AUTORELEASE_RATE/1000.0);
    stk = realloc(p->stk, sizeof *stk * new_size);
    if (size >= COS_AUTORELEASE_WARN)
      cos_debug("pool at %p hold %u autoreleased objects", (void*)p, size);
  }

  if (!stk) THROW(ExBadAlloc);

  p->stk = stk;
  p->top = stk + size;
  p->end = stk + new_size;
}

// Release everything the pool holds, including the in-flight 'tmp' object.
static cos_inline void
clear(struct AutoRelease *p)
{
  if (p->tmp)
    grelease(p->tmp), p->tmp = 0;

  while (p->top-- > p->stk)
    grelease(*p->top);
}

// Record obj in the current thread's pool.  'tmp' pins obj across
// enlarge() so that a THROW from enlarge() still releases obj via clear().
static cos_inline OBJ
push(OBJ obj)
{
  struct AutoRelease *pool = pool_get();

  if (pool->top == pool->end)
    pool->tmp = obj, enlarge(pool), pool->tmp = 0;

  return *pool->top++ = obj;
}

// ----- Any ownership

defmethod(OBJ, gretain, Any)
  // counted object: bump the reference count
  if (cos_object_rc(_1) >= COS_RC_UNIT)
    retmethod( cos_object_incRc(_1) );

  // stack/auto object: retaining means taking a heap copy
  if (cos_object_rc(_1) == COS_RC_AUTO)
    retmethod( gcopy(_1) );

  // static object: ownership is a no-op
  if (cos_object_rc(_1) == COS_RC_STATIC)
    retmethod(_1);

  // cos_object_rc(_1) < COS_RC_STATIC
  THROW( gnewWithStr(ExBadValue, "invalid reference counting") );
endmethod

defmethod(OBJ, gautoRelease, Any)
  if (cos_object_rc(_1) >= COS_RC_UNIT)
    retmethod( push(_1) );

  // auto objects are copied before being handed to the pool
  if (cos_object_rc(_1) == COS_RC_AUTO)
    retmethod( push(gcopy(_1)) );

  if (cos_object_rc(_1) == COS_RC_STATIC)
    retmethod(_1);

  // cos_object_rc(_1) < COS_RC_STATIC
  THROW( gnewWithStr(ExBadValue, "invalid reference counting") );
endmethod

defmethod(void, grelease, Any)
  if (cos_object_rc(_1) > COS_RC_UNIT)
    cos_object_decRc(_1);
  else
  if (cos_object_rc(_1) == COS_RC_UNIT)
    // take care of cyclic dependencies
    // (setRc STATIC first so re-entrant grelease during deinit is a no-op)
    gdealloc(gdeinit(cos_object_setRc(_1, COS_RC_STATIC)));
  else
  if (cos_object_rc(_1) < COS_RC_STATIC) // insensitive to STATIC and AUTO
    THROW( gnewWithStr(ExBadValue, "invalid reference counting") );
endmethod

// ----- Class ownership (always static)

defmethod(OBJ, gretain, Class)
  retmethod(_1);
endmethod

defmethod(OBJ, gautoRelease, Class)
  retmethod(_1);
endmethod

defmethod(void, grelease, Class)
endmethod

// ----- AutoRelease ownership

defmethod(OBJ, gretain, AutoRelease)
  // pools are strictly stack-ordered; retaining one would break the chain
  THROW( gnewWithStr(ExBadMessage, "AutoRelease pool cannot be retained") );
  COS_UNUSED(_ret);
endmethod

defmethod(OBJ, gautoRelease, AutoRelease)
  COS_UNUSED(_ret);
  // insensitive, already chained
endmethod

defmethod(void, grelease, AutoRelease)
  cos_trace("destroying pool at %p [%u objects]", (void*)_1, gsize(_1));
  gdealloc(gdeinit(_1)); // cannot be auto, static or retained
endmethod

// -----

defmethod(U32, gsize, AutoRelease)
  // number of objects currently held by this pool
  retmethod(self->top - self->stk);
endmethod

// -----

defmethod(OBJ, ginit, AutoRelease)
  cos_object_setRc(_1, COS_RC_AUTO); // AutoRelease pools are "linked" to the stack

  // start with the embedded buffer; chain onto this thread's current pool
  self->stk = self->_stk;
  self->top = self->_stk;
  self->end = self->_stk + COS_ARRLEN(self->_stk);
  self->prv = pool_get();
  self->tmp = 0;
  pool_set(self);

  retmethod(_1);
endmethod

defmethod(OBJ, gdeinit, AutoRelease)
  struct AutoRelease *pool;

  // safer to release pool(s) above self
  while ((pool = pool_get()) != self)
    grelease((OBJ)pool);

  // release autoReleased objects
  clear(self);

  // free stack
  if (self->stk != self->_stk)
    free(self->stk), self->stk = 0;

  // remove from top
  pool_set(self->prv);

  retmethod(_1);
endmethod

// -----

defmethod(void, ginitialize, pmAutoRelease)
  // idempotent: _pool0.prv doubles as the "initialized" flag
  if (!_pool0.prv) {
    OBJ pool = (OBJ)(void*)&_pool0;
    // cos_trace("ginitialize(pmAutoRelease)");
    cos_object_setId(pool, cos_class_id(classref(AutoRelease)));
    cos_object_setRc(pool, COS_RC_STATIC);
    _pool0.prv = &_pool0; // sentinel points to itself, terminating the chain
    _pool_init();
    ginit((OBJ)(void*)&_pool0);
  }
endmethod

defmethod(void, gdeinitialize, pmAutoRelease)
  if (_pool0.prv) {
    OBJ pool = (OBJ)(void*)&_pool0;
    // cos_trace("gdeinitialize(pmAutoRelease)");
    gdeinit(pool);
    _pool0.prv = 0;
  }
endmethod

/*
 * ----------------------------------------------------------------------------
 *  Debug Functions
 * ----------------------------------------------------------------------------
 */

#include <cos/debug.h>

// Dump the current thread's pool contents (newest first) to fp (default stderr).
void
cos_autorelease_showStack(FILE *fp)
{
  struct AutoRelease *pool = pool_get();
  OBJ *top = pool->top;
  U32 i;

  if (!fp) fp = stderr;

  for (i=0; top-- > pool->stk; i++)
    fprintf(fp, "AutoRelease[%4u] = %-25s (%4u refs)\n",
            i, gclassName(*top), gretainCount(*top));
}
GB_unop__identity_int32_int16.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int32_int16) // op(A') function: GB (_unop_tran__identity_int32_int16) // C type: int32_t // A type: int16_t // cast: int32_t cij = (int32_t) aij // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int32_t z = (int32_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int16_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int32_t z = (int32_t) aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int32_int16) ( int32_t *Cx, // Cx and Ax may be aliased const int16_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE 
return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int16_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int16_t aij = Ax [p] ; int32_t z = (int32_t) aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int16_t aij = Ax [p] ; int32_t z = (int32_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int32_int16) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
nbody.c
#include <math.h> #include <stdio.h> #include <stdlib.h> #include "timer.h" #define SOFTENING 1e-9f typedef struct { float x, y, z, vx, vy, vz; } Body; void randomizeBodies(float *data, int n) { for (int i = 0; i < n; i++) { data[i] = 2.0f * (rand() / (float)RAND_MAX) - 1.0f; } } void bodyForce(Body *p, float dt, int n) { #pragma omp parallel for schedule(dynamic) for (int i = 0; i < n; i++) { float Fx = 0.0f; float Fy = 0.0f; float Fz = 0.0f; for (int j = 0; j < n; j++) { float dx = p[j].x - p[i].x; float dy = p[j].y - p[i].y; float dz = p[j].z - p[i].z; float distSqr = dx*dx + dy*dy + dz*dz + SOFTENING; float invDist = 1.0f / sqrtf(distSqr); float invDist3 = invDist * invDist * invDist; Fx += dx * invDist3; Fy += dy * invDist3; Fz += dz * invDist3; } p[i].vx += dt*Fx; p[i].vy += dt*Fy; p[i].vz += dt*Fz; } } int main(const int argc, const char** argv) { int nBodies = 32768; if (argc > 1) nBodies = atoi(argv[1]); const float dt = 0.01f; // time step const int nIters = 10; // simulation iterations int bytes = nBodies*sizeof(Body); float *buf = (float*)malloc(bytes); Body *p = (Body*)buf; randomizeBodies(buf, 6*nBodies); // Init pos / vel data double totalTime = 0.0; for (int iter = 1; iter <= nIters; iter++) { StartTimer(); bodyForce(p, dt, nBodies); // compute interbody forces for (int i = 0 ; i < nBodies; i++) { // integrate position p[i].x += p[i].vx*dt; p[i].y += p[i].vy*dt; p[i].z += p[i].vz*dt; } const double tElapsed = GetTimer() / 1000.0; if (iter > 1) { // First iter is warm up totalTime += tElapsed; } #ifndef SHMOO printf("Iteration %d: %.3f seconds\n", iter, tElapsed); #endif } double avgTime = totalTime / (double)(nIters-1); #ifdef SHMOO printf("%d, %0.3f\n", nBodies, 1e-9 * nBodies * nBodies / avgTime); #else printf("Average rate for iterations 2 through %d: %.3f steps per second.\n", nIters); printf("%d Bodies: average %0.3f Billion Interactions / second\n", nBodies, 1e-9 * nBodies * nBodies / avgTime); #endif free(buf); }
target_update.c
// -------------------------------------------------- // Check 'to' // -------------------------------------------------- // RUN: %libomptarget-compile-generic \ // RUN: -fopenmp-version=51 -DCLAUSE=to // RUN: %libomptarget-run-fail-generic 2>&1 \ // RUN: | %fcheck-generic // -------------------------------------------------- // Check 'from' // -------------------------------------------------- // RUN: %libomptarget-compile-generic \ // RUN: -fopenmp-version=51 -DCLAUSE=from // RUN: %libomptarget-run-fail-generic 2>&1 \ // RUN: | %fcheck-generic #include <stdio.h> int main() { int i; // CHECK: addr=0x[[#%x,HOST_ADDR:]], size=[[#%u,SIZE:]] fprintf(stderr, "addr=%p, size=%ld\n", &i, sizeof i); // CHECK-NOT: Libomptarget #pragma omp target enter data map(alloc: i) #pragma omp target update CLAUSE(present: i) #pragma omp target exit data map(delete: i) // CHECK: i is present fprintf(stderr, "i is present\n"); // CHECK: Libomptarget message: device mapping required by 'present' motion modifier does not exist for host address 0x{{0*}}[[#HOST_ADDR]] ([[#SIZE]] bytes) // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory #pragma omp target update CLAUSE(present: i) // CHECK-NOT: i is present fprintf(stderr, "i is present\n"); return 0; }
gsrb.omptask.c
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
// Gauss-Seidel Red-Black (GSRB) smoother, OpenMP-task variant.  Three
// interchangeable inner-loop implementations are selected at compile time:
//   GSRB_FP      - multiply by a precomputed 1.0/0.0 red/black mask array
//   GSRB_STRIDE2 - iterate only the cells of the current color (default)
//   GSRB_BRANCH  - branch on (i^j^k^color) inside the innermost loop
// GSRB_OOP additionally selects an out-of-place sweep that ping-pongs
// between x and VECTOR_TEMP instead of updating x in place.
#if defined(GSRB_FP)
#warning Overriding default GSRB implementation and using pre-computed 1.0/0.0 FP array for Red-Black to facilitate vectorization...
#elif defined(GSRB_STRIDE2)
#if defined(GSRB_OOP)
#warning Overriding default GSRB implementation and using out-of-place and stride-2 accesses to minimize the number of flops
#else
#warning Overriding default GSRB implementation and using stride-2 accesses to minimize the number of flops
#endif
#elif defined(GSRB_BRANCH)
#if defined(GSRB_OOP)
#warning Overriding default GSRB implementation and using out-of-place implementation with an if-then-else on loop indices...
#else
#warning Overriding default GSRB implementation and using if-then-else on loop indices...
#endif
#else
#define GSRB_STRIDE2 // default implementation
#endif
//------------------------------------------------------------------------------------------------------------------------------
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
//------------------------------------------------------------------------------------------------------------------------------
// Apply NUM_SMOOTHS GSRB smooths (= 2*NUM_SMOOTHS red/black sweeps) to
// vector x_id on this level, using rhs_id as the right-hand side.  a and b
// are the Helmholtz coefficients consumed by the apply_op_ijk macro.
// Each sweep: exchange ghost zones, apply BCs, then update one color.
void smooth(level_type * level, int x_id, int rhs_id, double a, double b){
  int s;
  for(s=0;s<2*NUM_SMOOTHS;s++){ // there are two sweeps per GSRB smooth

    // exchange the ghost zone...
    #ifdef GSRB_OOP // out-of-place GSRB ping pongs between x and VECTOR_TEMP
    // even sweeps read x, odd sweeps read the temp written by the prior sweep
    if((s&1)==0){exchange_boundary(level,       x_id,stencil_get_shape());apply_BCs(level,       x_id,stencil_get_shape());}
            else{exchange_boundary(level,VECTOR_TEMP,stencil_get_shape());apply_BCs(level,VECTOR_TEMP,stencil_get_shape());}
    #else // in-place GSRB only operates on x
                 exchange_boundary(level,       x_id,stencil_get_shape());apply_BCs(level,       x_id,stencil_get_shape());
    #endif

    // apply the smoother...
    double _timeStart = getTime();

    // parallelism is only worthwhile with multiple boxes or multiple tiles per box
    #pragma omp parallel if( (level->num_my_boxes>1) || (level->box_dim > BLOCKCOPY_TILE_J) || (level->box_dim > BLOCKCOPY_TILE_K) )
    {
      int box;

      #pragma omp for private(box) nowait schedule(static,1) // omp for to maximize task injection
      for(box=0;box<level->num_my_boxes;box++){ // loop over all boxes this process owns...
        int kk,jj;
        // tile each box in j and k; each (jj,kk) tile becomes one task
        for(kk=0;kk<level->box_dim;kk+=BLOCKCOPY_TILE_K){
        for(jj=0;jj<level->box_dim;jj+=BLOCKCOPY_TILE_J){
          #pragma omp task firstprivate(kk,jj,box) if( (level->box_dim > BLOCKCOPY_TILE_J) || (level->box_dim > BLOCKCOPY_TILE_K) )
          {
            // either define these variables here, or make them all firstprivate
            const double h2inv = 1.0/(level->h*level->h);
            const int ghosts  = level->box_ghosts;
            const int jStride = level->box_jStride;
            const int kStride = level->box_kStride;
            const int color000 = (level->my_boxes[box].low.i^level->my_boxes[box].low.j^level->my_boxes[box].low.k^s)&1; // is element 000 red or black on *THIS* sweep
            // all vector pointers are offset so that [0] is the first non-ghost point
            const double * __restrict__ rhs    = level->my_boxes[box].vectors[       rhs_id] + ghosts*(1+jStride+kStride);
            const double * __restrict__ alpha  = level->my_boxes[box].vectors[VECTOR_ALPHA ] + ghosts*(1+jStride+kStride);
            const double * __restrict__ beta_i = level->my_boxes[box].vectors[VECTOR_BETA_I] + ghosts*(1+jStride+kStride);
            const double * __restrict__ beta_j = level->my_boxes[box].vectors[VECTOR_BETA_J] + ghosts*(1+jStride+kStride);
            const double * __restrict__ beta_k = level->my_boxes[box].vectors[VECTOR_BETA_K] + ghosts*(1+jStride+kStride);
            const double * __restrict__ Dinv   = level->my_boxes[box].vectors[VECTOR_DINV  ] + ghosts*(1+jStride+kStride);
            #ifdef GSRB_OOP
            // ping-pong: read x_n, write x_np1, swapping roles each sweep
            const double * __restrict__ x_n;
                  double * __restrict__ x_np1;
                  if((s&1)==0){x_n   = level->my_boxes[box].vectors[       x_id] + ghosts*(1+jStride+kStride);
                               x_np1 = level->my_boxes[box].vectors[VECTOR_TEMP] + ghosts*(1+jStride+kStride);}
                          else{x_n   = level->my_boxes[box].vectors[VECTOR_TEMP] + ghosts*(1+jStride+kStride);
                               x_np1 = level->my_boxes[box].vectors[       x_id] + ghosts*(1+jStride+kStride);}
            #else
            const double * __restrict__ x_n    = level->my_boxes[box].vectors[       x_id] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point
                  double * __restrict__ x_np1  = level->my_boxes[box].vectors[       x_id] + ghosts*(1+jStride+kStride); // i.e. [0] = first non ghost zone point
            #endif

            // - - - -
            const int khi = MIN(kk+BLOCKCOPY_TILE_K,level->box_dim);
            const int jhi = MIN(jj+BLOCKCOPY_TILE_J,level->box_dim);
            int i,j,k;

            #if defined(GSRB_FP)
            for(k=kk;k<khi;k++){
            for(j=jj;j<jhi;j++){
              // RedBlack points at the 1.0/0.0 mask row for this k's color
              const double * __restrict__ RedBlack = level->RedBlack_FP + ghosts*(1+jStride) + kStride*((k^color000)&0x1);
              for(i=0;i<level->box_dim;i++){
                int ij  = i + j*jStride;
                int ijk = i + j*jStride + k*kStride;
                double Ax     = apply_op_ijk(x_n);
                double lambda =     Dinv_ijk();
                x_np1[ijk] = x_n[ijk] + RedBlack[ij]*lambda*(rhs[ijk]-Ax);
                //x_np1[ijk] = ((i^j^k^color000)&1) ? x_n[ijk] : x_n[ijk] + lambda*(rhs[ijk]-Ax);
              }}} // i,j,k

            #elif defined(GSRB_STRIDE2)
            for(k=kk;k<khi;k++){
            for(j=jj;j<jhi;j++){
              #ifdef GSRB_OOP
              // out-of-place must copy old value...
              for(i=0;i<level->box_dim;i++){
                int ijk = i + j*jStride + k*kStride;
                x_np1[ijk] = x_n[ijk];
              } // i copy
              #endif
              for(i=((j^k^color000)&1);i<level->box_dim;i+=2){ // stride-2 GSRB
                int ijk = i + j*jStride + k*kStride;
                double Ax     = apply_op_ijk(x_n);
                double lambda =     Dinv_ijk();
                x_np1[ijk] = x_n[ijk] + lambda*(rhs[ijk]-Ax);
              } // i stencil
            }} // j,k

            #elif defined(GSRB_BRANCH)
            for(k=kk;k<khi;k++){
            for(j=jj;j<jhi;j++){
            for(i=0;i<level->box_dim;i++){
              int ijk = i + j*jStride + k*kStride;
              if((i^j^k^color000^1)&1){ // looks very clean when [0] is i,j,k=0,0,0
                double Ax     = apply_op_ijk(x_n);
                double lambda =     Dinv_ijk();
                x_np1[ijk] = x_n[ijk] + lambda*(rhs[ijk]-Ax);
              #ifdef GSRB_OOP
              }else{
                x_np1[ijk] = x_n[ijk]; // copy old value when sweep color != cell color
              #endif
              }
            }}} // i,j,k

            #else
            #error no GSRB implementation was specified
            #endif

          }}} // JJ,KK,task
      } // boxes
    } // parallel
    level->timers.smooth += (double)(getTime()-_timeStart);
  } // s-loop
}
//------------------------------------------------------------------------------------------------------------------------------
starrynight-montecarlo-core.c
/* Starry Night - a Monte Carlo code to simulate ferroelectric domain formation * and behaviour in hybrid perovskite solar cells. * * By Jarvist Moore Frost * University of Bath * * File begun 16th January 2014 */ // Prototypes... static int rand_int(int SPAN); static void gen_neighbour(); static double site_energy(int x, int y, int z, struct dipole *newdipole, struct dipole *olddipole); static void MC_moves(int moves); static void MC_move(); static void MC_move_openmp(); static int rand_int(int SPAN) // TODO: profile this to make sure it runs at an OK speed. { return((int)( (unsigned long) genrand_int32() % (unsigned long)SPAN)); } // The following code builds a neighbour list (of the delta dx,dy,dzs) for // speedy evaluation of energy; results in a speedup as it avoids the for loops // + 'ifs' during Monte Carlo; instead you just pull the deltas from the lookup // table enum {MAXNEIGHBOURS=10000}; struct { int dx; int dy; int dz; float d; } neighbours[MAXNEIGHBOURS]; int neighbour=0; //count of neighbours static void gen_neighbour() { int dx,dy,dz=0; float d; int ZCutOff=DipoleCutOff; if (Z==1) ZCutOff=0; for (dx=-DipoleCutOff;dx<=DipoleCutOff;dx++) for (dy=-DipoleCutOff;dy<=DipoleCutOff;dy++) for (dz=-ZCutOff;dz<=ZCutOff;dz++) //NB: conditional zDipoleCutOff to allow for 2D version { if (dx==0 && dy==0 && dz==0) continue; //no infinities / self interactions please! d=sqrt((float) dx*dx + dy*dy + dz*dz); //that old chestnut; distance in Euler space if (d>(float)DipoleCutOff) continue; // Cutoff in d // store precomputed list of neighbours neighbours[neighbour].dx=dx; neighbours[neighbour].dy=dy; neighbours[neighbour].dz=dz; neighbours[neighbour].d=d; neighbour++; fprintf(stderr,"Neighbour: %d %d %d\n",dx,dy,dz); if (neighbour>MAXNEIGHBOURS) // bounds check { fprintf(stderr,"Run out of space for the neighbour list with %d neighbours. 
FAILING TO EXIT!\n\n", neighbour); exit(-1); } } fprintf(stderr,"\nNeighbour list generated: %d neighbours found with DipoleCutOff=%d.\n",neighbour,DipoleCutOff); } // Calculate change in site energy of changing from olddipole -> newdipole static double site_energy(int x, int y, int z, struct dipole *newdipole, struct dipole *olddipole) { int dx,dy,dz=0; float d; double dE=0.0; struct dipole *testdipole, n; // This now iterates over the neighbour list of neighbours[0..neighbour] // Which contains all the precomputed dx,dy,dz for a spherical cutoff, and // the sqrt distances etc. // Sum over near neighbours for dipole-dipole interaction int i; //#pragma omp parallel for private(dx,dy,dz,d,n) reduction(+:dE) schedule(static,1) // NB: Works, but only modest speed gains! for (i=0;i<neighbour;i++) { // read in dirn to neighbours + precomputed values dx=neighbours[i].dx; dy=neighbours[i].dy; dz=neighbours[i].dz; d=neighbours[i].d; testdipole=& lattice[(X+x+dx)%X][(Y+y+dy)%Y][(Z+z+dz)%Z]; n.x=(float)dx/d; n.y=(float)dy/d; n.z=(float)dz/d; //normalised diff. vector //True dipole like dE+= (olddipole->length * testdipole->length) * ( ( dot(newdipole,testdipole) - 3*dot(&n,newdipole)*dot(&n,testdipole) ) - ( dot(olddipole,testdipole) - 3*dot(&n,olddipole)*dot(&n,testdipole) ) ) / (d*d*d); // Ferroelectric / Potts model - vector form // dE+= - Dipole * dot(newdipole,testdipole) / (d*d*d) // + Dipole * dot(olddipole,testdipole) / (d*d*d); // Now reborn as our cage-strain term! 
if ((dx*dx+dy*dy+dz*dz)==1) //only nearest neighbour dE+= - CageStrain* dot(newdipole,testdipole) + CageStrain * dot(olddipole,testdipole); // signs to energetic drive alignment of vectors (dot product = more +ve, dE = -ve) } // Interaction of dipole with (unshielded) E-field dE+= + dot(newdipole, & Efield) - dot(olddipole, & Efield); //fprintf(stderr,"%f\n",dot(newdipole, & Efield)); if (K>0.0) // rarely used anymore; 2D lattice epitaxial strain term { // along .x projection, squared n.x=1.0; n.y=0.0; n.z=0.0; dE += - K*fabs(dot(newdipole,&n)) + K*fabs(dot(olddipole,&n)); // along .y projection, squared n.x=0.0; n.y=1.0; n.z=0.0; dE += - K*fabs(dot(newdipole,&n)) + K*fabs(dot(olddipole,&n)); } // point charge at centre of space // n.x=x-(X/2); n.y=y-(Y/2); n.z=z-(Z/2); // dE += 1.0 * (dot(newdipole,&n) - dot(olddipole,&n) ) / ((x-X/2)^2 - (y-Y/2)^2 - (z-Z/2)^2); return(dE); } static void MC_moves(int moves) { int i; //moves/=8; //hard coded domain decomp. for (i=0;i<moves;i++) MC_move(); } static void MC_move() { int x, y, z; float dE=0.0; struct dipole newdipole, *olddipole; // Choose random dipole / lattice location x=rand_int(X); y=rand_int(Y); z=rand_int(Z); if (lattice[x][y][z].length==0.0) return; //dipole zero length .'. not present // random new orientation. // Nb: this is the definition of a MC move - might want to consider // alternative / global / less disruptive moves as well if (ConstrainToX) random_X_point(& newdipole); //consider any <100> vector else random_sphere_point(& newdipole); newdipole.length = lattice[x][y][z].length; // preserve length / i.d. of dipole olddipole=& lattice[x][y][z]; //calc site energy dE=site_energy(x,y,z, & newdipole,olddipole); if (dE < 0.0 || exp(-dE * beta) > genrand_real2() ) { lattice[x][y][z].x=newdipole.x; lattice[x][y][z].y=newdipole.y; lattice[x][y][z].z=newdipole.z; // lattice[x][y][z].length=newdipole.length; // never changes with current // algorithms. ACCEPT++; } else REJECT++; }
valid.mob7.src.h
#pragma once
#include "ukr.h"
#include "omp.h"
#include "transpose.h"
#include "gen_ukr_A6B2gemm_1_512_14_14_512_3_3.h"
#include "gen_ukr_A4B2gemm_1_512_14_14_512_3_3.h"

// Auto-generated ("push button") micro-kernel driver for a 1x512x14x14x512x3x3
// convolution expressed as a tiled GEMM.  A: input, oriB: original weights,
// B: weights repacked into 16-wide interleaved panels, C: output.
// Presumably called from inside an OpenMP parallel region (it uses
// omp_get_thread_num and a bare barrier) - TODO confirm against the caller.
void testrun(float* A ,float*B, float*C, float*oriB ){
    int tid = omp_get_thread_num();
    int Nx = 14; int Ny = 14; int Nh = 3;
    long long Astrides[6] = {0,1,2,3,4,5};
    int b1 = 0;
    // Phase 1: repack weights.  Each thread transposes disjoint 8x8 tiles of
    // oriB into the 16-interleaved layout the micro-kernels expect.
    for (int fpck = (tid%1)*16; fpck < uNf; fpck+=1*16){
        for(int cwh = (tid/1)*8; cwh < uNc*uNw*uNh/8*8; cwh+=8*1){
            transpose8x8_avx(oriB+ (fpck+0)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 0, uNc*uNw*uNh, 16);
            transpose8x8_avx(oriB+ (fpck+8)*uNc*uNw*uNh + cwh, B + fpck*uNc*uNw*uNh + cwh* 16 + 8, uNc*uNw*uNh, 16);
        }
    }
    // Phase 2: tiled GEMM loop nest (generator-emitted; do not hand-tune).
#pragma omp barrier// begin push button generated block
    for(int c5=0;c5<512+0;c5+=512) {
     for(int xy5=0;xy5<196+0;xy5+=196) {
      for(int f5=0;f5<512+0;f5+=512) {
       for(int c4=c5;c4<min(512, 512+c5);c4+=512) {
        for(int f4=f5;f4<min(512, 512+f5);f4+=512) {
         for(int xy4=xy5;xy4<min(196, 196+xy5);xy4+=196) {
          for(int c3=c4;c3<min(512, 512+c4);c3+=Tc1) {
           for(int f3=f4;f3<min(512, 512+f4);f3+=Tf2) {
            for(int xy3=xy4;xy3<min(196, 196+xy4);xy3+=Txy3) {
             for(int xy2=xy3;xy2<min(196, Txy3+xy3);xy2+=6) {
              for(int f2=f3;f2<min(512, Tf2+f3);f2+=16) {
               for(int c2=c3;c2<min(512, Tc1+c3);c2+=Tc1) {
                for(int c1=c2;c1<min(512, Tc1+c2);c1+=Tc1) {
                 for(int xy1=xy2;xy1<min(196, 6+xy2);xy1+=6) {
                  for(int f1=f2;f1<min(512, 16+f2);f1+=16) {
                   int ctile=min(Tc1, 512-c1);
                   int x1=xy1/14; int y1=xy1%14/1;
                   int c1_1=c1/1; int c1_2=c1%1/1;
                   int kf1_1=f1/16; int kf1_2=f1%16/1;
                   int of1_1=f1/1; int of1_2=f1%1/1;
                   int offsetA=0+b1*131072+c1_1*256+1*x1*16+1*y1*1+c1_2*1;
                   int offsetB=0+kf1_1*73728+c1*144+0*48+0*16+kf1_2*1;
                   int offsetC=0+b1*100352+of1_1*196+x1*14+y1*1+of1_2*1;
                   if(14-y1>=6){
                    // Whole 6-row tile fits inside the current row: 6x2v kernel.
                    cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                   }
                   else if(14*14-xy1>=6){
                    // Tile straddles a row boundary: temporarily bump the A
                    // strides of the wrapped rows, run the kernel, restore.
                    for(int sti=14-y1;sti<6;sti+=1) { Astrides[sti]+=2; }
                    cnn_ukr_float_scatter_6x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                    for(int sti=14-y1;sti<6;sti+=1) { Astrides[sti]-=2; }
                   }
                   else{
                    // Fewer than 6 rows remain at the very end: 4x2v kernel.
                    cnn_ukr_float_scatter_4x2v_cxycgemm(A+offsetA, B+offsetB, C+offsetC, ctile, Astrides);
                   }
    } } } } } } } } } } } } } } }
    // end push button generated block
}
kCDensestMem.c
/* Info: This program corresponds to the exact algorithm in the PVLDB 2020 paper. Feel free to use these lines as you wish. This program enumerates all k-cliques, store them in main memory, and apply the "++" operator repeatedly to find the k-clique densest subgraph, until the suspected k-clique densest subgraph passes the optimality test based on either the improved Goldberg's condition or a max-flow. This program can handle both the case k = 2 (where all edges are treated as the k-cliques) and the case k >= 3 (where the subroutine to list all k-cliques, kClist, is executed once). Note again that all k-cliques are stored in main memory, consuming super-linear space. One advantage, however, is that we can shuffle all the cliques to prevent the cliques containing the same node from coming in batch. To compile: "g++ kCDensestMem.c BinaryHeap.c Graph.c MaxFlow.cpp -O3 -o kCDensestMem -lm -fopenmp" To execute: "./kCDensestMem p k edgeListFileName tag" p is the number of threads. k is the size of a clique considered as in "k-clique". edgeListFileName is the name of the file that contains the graph. Each line of the file contains one edge represented by two integers separated by a space. tag is a string specifying the dataset (e.g., "dblp"), which is used to generate the output file name. Output: A series of suspected k-clique densest subgraphs. One record per line, containing - number of iterations of sequential updates run so far (always a power of 2); - the number of nodes in the suspected k-clique densest subset; - the k-clique density of the suspected k-clique densest subset; - the time elapsed since the beginning of the execution. When the exact solution is eventually found, the program additionally prints - the number of edges in the k-clique densest subgraph; - the optimality test that is passed ("Goldberg" or "Max Flow"); - the number of max-flow calls. 
*/ #include <stdlib.h> #include <stdio.h> #include <stdbool.h> #include <string.h> #include <time.h> #include <math.h> #include <omp.h> #include <limits.h> #include "Graph.h" #include "MaxFlow.hpp" unsigned MAX_CLIQUES = 100000000; // Maximum number of cliques for memory allocation; will increase if needed static int UnsignedCmp(const void *a, const void *b) { return (long long)*(unsigned *)a - (long long)*(unsigned *)b; } Subgraph *AllocSubgraph(Graph *g, unsigned char k) { Subgraph *sg = (Subgraph *)malloc(sizeof(Subgraph)); sg->n = (unsigned *)calloc(k, sizeof(unsigned)); sg->d = (unsigned **)malloc(k * sizeof(unsigned *)); sg->adj = (unsigned *)malloc(g->core * g->core * sizeof(unsigned)); sg->label = (unsigned char *)calloc(g->core, sizeof(unsigned char)); sg->nodes = (unsigned **)malloc(k * sizeof(unsigned *)); sg->core = g->core; for (unsigned i = 1; i < k; ++i){ sg->d[i] = (unsigned *)malloc(g->core * sizeof(unsigned)); sg->nodes[i] = (unsigned *)malloc(g->core * sizeof(unsigned)); } return sg; } static unsigned *id_sg2g = NULL, *id_g2sg = NULL; // to improve (???) 
#pragma omp threadprivate(id_g2sg, id_sg2g) void MakeSubgraph(Graph *g, unsigned u, unsigned v, Subgraph *sg, unsigned char k) { if (id_sg2g == NULL){ id_g2sg = (unsigned *)malloc(g->n * sizeof(unsigned)); id_sg2g = (unsigned *)malloc(g->core * sizeof(unsigned)); for (unsigned i = 0; i < g->n; ++i) { id_g2sg[i] = UINT_MAX; } } for (unsigned i = 0; i < sg->n[k - 1]; ++i) { sg->label[i] = 0; } for (unsigned i = g->cd[v]; i < g->cd[v + 1]; ++i) { // For each out-neighbor of v id_g2sg[g->adj[i]] = UINT_MAX - 1; } unsigned j = 0; for (unsigned i = g->cd[u]; i < g->cd[u + 1]; ++i) { // For each out-neighbor of u unsigned x = g->adj[i]; if (id_g2sg[x] == UINT_MAX - 1) { id_g2sg[x] = j; id_sg2g[j] = x; sg->label[j] = k - 2; sg->nodes[k - 2][j] = j; sg->d[k - 2][j] = 0; // New degrees ++j; } } sg->n[k - 2] = j; for (unsigned i = 0; i < sg->n[k - 2]; ++i) { // Reorder adjacency list and compute new degrees unsigned x = id_sg2g[i]; for (unsigned l = g->cd[x]; l < g->cd[x + 1]; ++l) { unsigned y = g->adj[l]; j = id_g2sg[y]; if (j < UINT_MAX - 1) { sg->adj[sg->core * i + sg->d[k - 2][i]++] = j; } } } for (unsigned i = g->cd[v]; i < g->cd[v + 1]; ++i) { id_g2sg[g->adj[i]] = -1; } } // Clique-density-friendly decomposition unsigned *cknodes; // Nodes of a clique #pragma omp threadprivate(cknodes) double *rho; double *alpha; double *rho_tentative; unsigned *level; unsigned *reordered; unsigned *ck; // List of all cliques unsigned *p_ckend; // Pointer to the end of ck[] unsigned long long cnt_clique; typedef enum {FRANK_WOLFE = 2, PAVA_PREPROCESS = 3} task_t; void AllocCdf(Graph *g, unsigned k) { rho = (double *)malloc(g->n * sizeof(double)); rho_tentative = (double *)malloc(g->n * sizeof(double)); level = (unsigned *)malloc(g->n * sizeof(unsigned)); reordered = (unsigned *)malloc(g->n * sizeof(unsigned)); } inline int CDF_RerunFrankWolfeCmp(const unsigned u, const unsigned v) { if (level[u] < level[v]) return -1; if (level[u] > level[v]) return 1; if (rho[u] > rho[v]) return -1; 
// A node with larger rho value is "smaller"! if (rho[u] < rho[v]) return 1; return 0; } void CDF_FrankWolfeUpdateRates(int clique_size, unsigned *p_cknodes, double *p_alpha) { // Water-filling /*for (unsigned i = clique_size; i > 0; --i) for (unsigned j = 0; j + 1 < i; ++j) if (rho[cknodes[j]] > rho[cknodes[j + 1]]) { unsigned tmp = cknodes[j]; cknodes[j] = cknodes[j + 1]; cknodes[j + 1] = tmp; } double budget = 1.0; for (unsigned i = 0; i < clique_size; ++i) { double val = budget / (i + 1); if (i + 1 < clique_size && (rho[cknodes[i + 1]] - rho[cknodes[i]]) * (i + 1) < budget) val = rho[cknodes[i + 1]] - rho[cknodes[i]]; for (unsigned j = 0; j <= i; ++j) { #pragma omp atomic rho[cknodes[j]] += val; } budget -= val * (i + 1); }*/ unsigned node_index = 0; for (unsigned i = 1; i < clique_size; ++i) { if (rho[p_cknodes[node_index]] > rho[p_cknodes[i]]) node_index = i; } #pragma omp atomic rho[p_cknodes[node_index]] += 1.0; #pragma omp atomic p_alpha[node_index] += 1.0; } void CDF_PavaPreprocessUpdateRates(int clique_size, unsigned *cknodes) { unsigned node_getting_weight = cknodes[0]; for (unsigned l = 1; l < clique_size; ++l) if (level[cknodes[l]] > level[node_getting_weight]) node_getting_weight = cknodes[l]; #pragma omp atomic rho_tentative[level[node_getting_weight]] += 1.0; } void CDF_CliqueScan(Graph *g, unsigned char k, task_t task) { #pragma omp parallel for for (unsigned long long i = 0; i < cnt_clique; ++i) { // for (unsigned j = 0; j < k; ++j) // cknodes[j] = ck[i * k + j]; switch (task) { case FRANK_WOLFE: { CDF_FrankWolfeUpdateRates(k, ck + i * k, alpha + i * k); break; } case PAVA_PREPROCESS: { CDF_PavaPreprocessUpdateRates(k, ck + i * k); break; } } } } void CDF_CliqueEnumThread(Subgraph *sg, unsigned char clique_size, unsigned char l) { if (clique_size == 3) { for (unsigned i = 0; i < sg->n[1]; ++i) { unsigned u = sg->nodes[1][i]; cknodes[0] = id_sg2g[u]; #pragma omp critical { if (cnt_clique >= MAX_CLIQUES) { MAX_CLIQUES *= 2; ck = (unsigned 
                        *)realloc(ck, MAX_CLIQUES * clique_size * sizeof(unsigned));
                    p_ckend = ck + cnt_clique * clique_size;
                }
                for (unsigned j = 0; j < clique_size; ++j)
                    *(p_ckend++) = cknodes[j];
                ++cnt_clique;
            }
        }
        return;
    }
    if (l == 2) {
        // Bottom of the recursion for k >= 4: every remaining edge (u,v)
        // inside the candidate set completes one clique.
        for (unsigned i = 0; i < sg->n[2]; ++i) {
            unsigned u = sg->nodes[2][i];
            cknodes[1] = id_sg2g[u];
            for (unsigned j = u * sg->core, end = u * sg->core + sg->d[2][u]; j < end; ++j) {
                unsigned v = sg->adj[j];
                cknodes[0] = id_sg2g[v];
#pragma omp critical
                {
                    // NOTE(review): same thread-private cnt_clique concern as
                    // in the clique_size == 3 branch - verify.
                    if (cnt_clique >= MAX_CLIQUES) {
                        MAX_CLIQUES *= 2;
                        ck = (unsigned *)realloc(ck, MAX_CLIQUES * clique_size * sizeof(unsigned));
                        p_ckend = ck + cnt_clique * clique_size;
                    }
                    for (unsigned k = 0; k < clique_size; ++k)
                        *(p_ckend++) = cknodes[k];
                    ++cnt_clique;
                }
            }
        }
        return;
    }
    for (unsigned i = 0; i < sg->n[l]; ++i) { // Enumerate in reverse order. Very confusing! "++i" is actually the reverse order.
        unsigned u = sg->nodes[l][i];
        cknodes[l - 1] = id_sg2g[u];
        sg->n[l - 1] = 0;
        unsigned end = u * sg->core + sg->d[l][u];
        for (unsigned j = u * sg->core; j < end; ++j) { // Relabel nodes and forming U'.
            unsigned v = sg->adj[j];
            if (sg->label[v] == l) {
                sg->label[v] = l - 1;
                sg->nodes[l - 1][sg->n[l - 1]++] = v;
                sg->d[l - 1][v] = 0; // New degrees
            }
        }
        for (unsigned j = 0; j < sg->n[l - 1]; ++j) { // Reorder adjacency list and compute new degrees
            unsigned v = sg->nodes[l - 1][j];
            for (unsigned k = sg->core * v, end = sg->core * v + sg->d[l][v]; k < end; ++k) {
                unsigned w = sg->adj[k];
                if (sg->label[w] == l - 1) {
                    ++sg->d[l - 1][v];
                }
                else{
                    // Swap non-members to the (shrinking) tail of the list.
                    sg->adj[k--] = sg->adj[--end];
                    sg->adj[end] = w;
                }
            }
            qsort(sg->adj + sg->core * v, sg->d[l - 1][v], sizeof(unsigned), UnsignedCmp); // Sort the nodes in reverse order
        }
        CDF_CliqueEnumThread(sg, clique_size, l - 1);
        for (unsigned j = 0; j < sg->n[l - 1]; ++j) { // Restore labels
            unsigned v = sg->nodes[l - 1][j];
            sg->label[v] = l;
        }
    }
}

// Enumerate all k-cliques (k >= 3) in parallel over the edges, storing them
// flat in ck[]; afterwards ck[] is shrunk to fit and alpha[] is allocated.
void CDF_CliqueEnum(Graph *g, unsigned char k)
{
    Subgraph *sg;
    cnt_clique = 0;
    p_ckend = ck = (unsigned *)malloc(MAX_CLIQUES * k * sizeof(unsigned));
#pragma omp parallel private(sg) reduction(+: cnt_clique)
    {
        cknodes = (unsigned *)malloc(k * sizeof(unsigned));
        sg = AllocSubgraph(g, k);
#pragma omp for schedule(dynamic, 1) nowait
        for(unsigned i = 0; i < g->e; ++i) {
            cknodes[k - 1] = g->edges[i].s;
            cknodes[k - 2] = g->edges[i].t;
            MakeSubgraph(g, g->edges[i].s, g->edges[i].t, sg, k);
            CDF_CliqueEnumThread(sg, k, k - 2);
        }
        FreeSubgraph(sg, k);
    }
    ck = (unsigned *)realloc(ck, cnt_clique * k * sizeof(unsigned));
    alpha = (double *)malloc(cnt_clique * k * sizeof(double));
}

// qsort comparator: decreasing rho value.
static int CDF_NodeCmp(const void *a, const void *b)
{
    double d = rho[*(const unsigned *)a] - rho[*(const unsigned *)b];
    if (d > 0) return -1;
    if (d < 0) return 1;
    return 0;
}

typedef struct {
    unsigned n;    // Total number of aggregated points
    unsigned *nag; // nag[i]: number of points aggregated in i
    double *val;   // val[i]: value of the aggregated points
    // double *ub;
} IsotonicRegression;

// Pool Adjacent Violators Algorithm. Values to fit are stored in vect and n is the size of vect.
IsotonicRegression *CDF_Pava(double *vect, unsigned n)
{
    IsotonicRegression *fit = (IsotonicRegression *)malloc(sizeof(IsotonicRegression));
    unsigned *nag = (unsigned *)malloc(n * sizeof(unsigned));
    double *val = (double *)malloc(n * sizeof(double));
    nag[0] = 1;
    val[0] = vect[0];
    unsigned j = 0;
    for (unsigned i = 1; i < n; ++i) {
        j += 1;
        val[j] = vect[i];
        nag[j] = 1;
        // Merge adjacent blocks (averaging, weighted by block size) while the
        // decreasing-order constraint is violated, with a 1e-6 relative slack.
        while (j > 0 && val[j] >= val[j - 1] * 0.999999) {
            val[j - 1] = (nag[j] * val[j] + nag[j - 1] * val[j - 1]) / (nag[j] + nag[j - 1]);
            nag[j - 1] += nag[j];
            --j;
        }
    }
    fit->n = j + 1;
    fit->nag = nag;
    fit->val = val;
    return fit;
}

// Reorder nodes by decreasing rho, credit each clique to its deepest level,
// then fit an isotonic regression to obtain the tentative decomposition.
IsotonicRegression *CDF_PavaPreprocess(Graph *g, unsigned char k)
{
    for (unsigned i = 0; i < g->n; ++i)
        reordered[i] = i;
    qsort(reordered, g->n, sizeof(unsigned), CDF_NodeCmp); // Reorder the nodes by decreasing rho values
    for (unsigned i = 0; i < g->n; ++i)
        level[reordered[i]] = i;
    CDF_CliqueScan(g, k, PAVA_PREPROCESS);
    IsotonicRegression *partition = CDF_Pava(rho_tentative, g->n);
    for (unsigned j = 0, i = 0; j < partition->n; ++j)
        for (unsigned l = 0; l < partition->nag[j]; ++l, ++i)
            level[reordered[i]] = j;
    return partition;
}

// Redistribute each clique's alpha weight onto its max-level members, then
// test whether the first subset_size nodes are rho-separated from the rest.
bool CDF_CheckStability(Graph *g, unsigned subset_size, const unsigned char k)
{
    // NOTE(review): 32-bit loop index against 64-bit cnt_clique - this never
    // terminates if cnt_clique exceeds UINT_MAX; verify for huge inputs.
    for (unsigned i = 0; i < cnt_clique; ++i) {
        unsigned max_level = 0, max_level_cnt = 0;
        for (unsigned j = 0; j < k; ++j) {
            if (level[ck[i * k + j]] > max_level) {
                max_level = level[ck[i * k + j]];
                max_level_cnt = 1;
            }
            else if (level[ck[i * k + j]] == max_level) {
                ++max_level_cnt;
            }
        }
        double sum = 0;
        for (unsigned j = 0; j < k; ++j) {
            if (level[ck[i * k + j]] < max_level) {
                sum += alpha[i * k + j];
#pragma omp atomic
                rho[ck[i * k + j]] -= alpha[i * k + j];
                alpha[i * k + j] = 0;
            }
        }
        for (unsigned j = 0; j < k; ++j) {
            if (level[ck[i * k + j]] == max_level) {
#pragma omp atomic
                rho[ck[i * k + j]] += sum / max_level_cnt;
                alpha[i * k + j] += sum / max_level_cnt;
            }
        }
    }
    double prefix_min_rho = rho[reordered[0]];
    double suffix_max_rho = -1;
    for (unsigned i = 1; i < subset_size; ++i)
        if
           (prefix_min_rho > rho[reordered[i]])
            prefix_min_rho = rho[reordered[i]];
    for (unsigned i = g->n - 1; i >= subset_size; --i)
        if (suffix_max_rho < rho[reordered[i]])
            suffix_max_rho = rho[reordered[i]];
    // Stable when every rho inside the subset exceeds every rho outside,
    // with a 1e-6 relative tolerance.
    return prefix_min_rho * 0.999999 > suffix_max_rho;
}

// Goldberg-style certificate: the first n nodes pass if no prefix j of the
// rho-ordering can beat density m/n by more than the discreteness slack.
bool CDF_CheckDensestGoldberg(Graph *g, const unsigned n, const unsigned char clique_size)
{
    qsort(reordered, n, sizeof(unsigned), CDF_NodeCmp); // Reorder the nodes by decreasing rho values
    unsigned long long m = 0;
    for (unsigned j = 0; j < n; ++j)
        m += (unsigned long long)(rho_tentative[j] + 0.5); // rounds the accumulated (integral) weights
    double sum_rho = 0;
    double jck = 0; // j choose k
    bool skip = true;
    for (unsigned j = 1; j < n; ++j) {
        sum_rho += rho[reordered[j - 1]];
        if (skip) {
            // Skip prefixes that are too small to possibly exceed density m/n
            // (fewer than C(j, k) cliques available).
            if (j == clique_size) jck = 1;
            else if (j > clique_size) jck = (jck * j) / (j - clique_size);
            if (jck / j > (double)m / (double)n)
                skip = false, fprintf(stderr, "Jump to j = %u\n", j);
            else
                continue;
        }
        double ub = sum_rho / j;
        if (ub - (double)m / (double)n >= 1.0 / n / j && ub - (double)m / (double)n >= (ceil((double)m * j / n) - (double)m * j / n) / j) {
            return false;
        }
    }
    return true;
}

// Exact certificate via max flow: source->clique edges of capacity n,
// clique->member edges of capacity n, member->sink edges of capacity m;
// the subset passes iff the max flow reaches m * n.
bool CDF_CheckDensestMaxFlow(Graph *g, const unsigned n, const unsigned char clique_size, unsigned *p_cnt_max_flow)
{
    ++(*p_cnt_max_flow);
    Network network;
    unsigned *id_in_network = (unsigned *)malloc(g->n * sizeof(unsigned));
    unsigned long long m = 0;
    vector<Network::Vertex> R;
    Network::Vertex s = network.AddVertex(), t = network.AddVertex();
    for (unsigned i = 0; i < g->n; ++i)
        id_in_network[i] = n;
    for (unsigned i = 0; i < n; ++i) {
        id_in_network[reordered[i]] = i;
        R.push_back(network.AddVertex());
    }
    // NOTE(review): 32-bit clique index against 64-bit cnt_clique - verify
    // for inputs with more than UINT_MAX cliques.
    for (unsigned i = 0; i < cnt_clique; ++i) {
        bool flag = true; // true iff all members lie inside the candidate subset
        for (unsigned j = 0; j < clique_size; ++j) {
            if (id_in_network[ck[i * clique_size + j]] >= n) {
                flag = false;
                break;
            }
        }
        if (flag) {
            ++m;
            Network::Vertex v = network.AddVertex();
            for (unsigned j = 0; j < clique_size; ++j)
                network.AddEdge(v, R[id_in_network[ck[i * clique_size + j]]], n);
            network.AddEdge(s, v, n);
        }
    }
    for (unsigned i
= 0; i < n; ++i) network.AddEdge(R[i], t, m); free(id_in_network); return network.MaxFlow(s, t) >= m * n; } void ShuffleCliques(const unsigned k) { for (unsigned i = 1; i < cnt_clique; ++i) { unsigned rand_index = rand() % (i + 1); for (unsigned j = 0; j < k; ++j) { unsigned temp = ck[i * k + j]; ck[i * k + j] = ck[rand_index * k + j]; ck[rand_index * k + j] = temp; } } } void CDF_Main(const unsigned char k, Graph *g, FILE *ofp, time_t t0) { unsigned cnt_max_flow = 0; fprintf(ofp, "[Number of Iterations]\t[Number of Nodes]\t[k-Clique Density]\t[Time (seconds)]\t[Info]\n"); AllocCdf(g, k); if (k >= 3) { CDF_CliqueEnum(g, k); // Collect all k-cliques } else { ck = (unsigned *)malloc((unsigned long long)(g->e) * k * sizeof(unsigned)); alpha = (double *)malloc(g->e * k * sizeof(double)); cnt_clique = g->e; for (unsigned long long i = 0; i < (unsigned long long)(g->e); ++i) { ck[i << 1] = g->edges[i].s; ck[(i << 1) + 1] = g->edges[i].t; } } ShuffleCliques(k); for (unsigned i = 0; i < g->n; ++i) rho[i] = 0; for (unsigned long long i = 0; i < cnt_clique * k; ++i) alpha[i] = 0; for (unsigned num_iter = 1; ; num_iter <<= 1) { fprintf(stderr, "Start: number of iterations = %u.\n", num_iter); // Step 1: run the Frank-Wolfe based algorithm for num_iter rounds for (unsigned t = num_iter / 2 + 1; t <= num_iter; ++t) { if (t % 10 == 0) fprintf(stderr, "Run round %u...\n", t); CDF_CliqueScan(g, k, FRANK_WOLFE); } // Step 2: give a tentative decomposition for (unsigned i = 0; i < g->n; ++i) rho_tentative[i] = 0; IsotonicRegression *partition = CDF_PavaPreprocess(g, k); fprintf(stderr, "Approximate densest subgraph: %u nodes, density = %f.\n", partition->nag[0], partition->val[0]); /* FILE *ofp = fopen("rates.txt", "w"); for (unsigned i = 0; i < g->n; ++i) fprintf(ofp, "r[%u] = %.12f\n", reordered[i], rho[reordered[i]]); fclose(ofp);*/ // Step 3: Check stability and optimality if (CDF_CheckStability(g, partition->nag[0], k)) { fprintf(stderr, "The potential densest set is 
stable!\n");
            // Try the cheap Goldberg certificate first; fall back to the
            // exact (but expensive) max-flow certificate.
            if (CDF_CheckDensestGoldberg(g, partition->nag[0], k)) {
                fprintf(stderr, "The first %u nodes forms a densest subgraph by criteria A!\n", partition->nag[0]);
                fprintf(ofp, "[Number of Iterations]\t[Stopping Condition]\t[Number of Nodes]\t[Number of Edges]\t[k-Clique Density]\t[Number of Max-Flow Calls]\t[Time (seconds)]\n");
                fprintf(ofp, "%u\tGoldberg\t%u\t%u\t%.12f\t%u\t", num_iter, partition->nag[0], CountEdges(g, partition->nag[0], reordered), partition->val[0], cnt_max_flow);
                break;
            }
            else if (CDF_CheckDensestMaxFlow(g, partition->nag[0], k, &cnt_max_flow)) {
                fprintf(stderr, "The first %u nodes forms a densest subgraph by criteria B!\n", partition->nag[0]);
                fprintf(ofp, "[Number of Iterations]\t[Stopping Condition]\t[Number of Nodes]\t[Number of Edges]\t[k-Clique Density]\t[Number of Max-Flow Calls]\t[Time (seconds)]\n");
                fprintf(ofp, "%u\tMax Flow\t%u\t%u\t%.12f\t%u\t", num_iter, partition->nag[0], CountEdges(g, partition->nag[0], reordered), partition->val[0], cnt_max_flow);
                break;
            }
            else {
                fprintf(stderr, "Cannot guarantee it is densest by either criteria A or criteria B.\n");
                fprintf(ofp, "%u\t%u\t%.12f\t%ld\tSTABLE BUT NOT DENSEST\n", num_iter, partition->nag[0], partition->val[0], time(NULL) - t0);
            }
        }
        else {
            fprintf(stderr, "The potential densest subset is not stable!\n");
            fprintf(ofp, "%u\t%u\t%.12f\t%ld\tNOT STABLE\n", num_iter, partition->nag[0], partition->val[0], time(NULL) - t0);
        }
        /* ofp = fopen("rates_rerun.txt", "w");
        for (int i = 0; i < partition->nag[0]; ++i)
            fprintf(ofp, "r[%u] = %.12f\n", reordered[i], rho[reordered[i]]);
        fclose(ofp);*/
    }
}

// Usage: ./kCDensestMem p k edgeListFileName tag
//   argv[1] = number of threads, argv[2] = k, argv[3] = edge list file,
//   argv[4] = dataset tag used in the output file name.
// NOTE(review): argc is never validated; missing arguments dereference
// argv out of bounds - consider a usage check.
int main(int argc, char **argv)
{
    EdgeList *el;
    Graph *g;
    unsigned char k = atoi(argv[2]);
    char *file_name = argv[3];
    unsigned num_threads = atoi(argv[1]);
    omp_set_num_threads(num_threads);
    time_t t0, t1, t2;
    t0 = t1 = time(NULL);
    printf("Reading edgelist from file %s\n", file_name);
    el = ReadEdgeList(file_name);
    printf("Number of nodes = %u\n", el->n);
    printf("Number of edges = %u\n", el->e);
    t2 = time(NULL);
    printf("- Time = %ldh%ldm%lds\n",(t2 - t1) / 3600, ((t2 - t1) % 3600) / 60, ((t2 - t1) % 60));
    t1 = t2;
    printf("Building the graph structure\n");
    SortByCore(el); // Do core decomposition and render degeneracy ordering to the nodes
    Relabel(el);
    g = MakeGraph(el);
    printf("Number of nodes (degree > 0) = %u\n", g->n);
    t2 = time(NULL);
    printf("- Time = %ldh%ldm%lds\n", (t2 - t1) / 3600, ((t2 - t1) % 3600) / 60, ((t2 - t1) % 60));
    t1 = t2;
    printf("Iterate over all cliques\n");
    // Output file: stat_exact_<tag>_<threads>_<k>.txt
    char output_file_name[100] = "stat_exact_";
    strcat(output_file_name, argv[4]);
    strcat(output_file_name, "_");
    strcat(output_file_name, argv[1]);
    strcat(output_file_name, "_");
    strcat(output_file_name, argv[2]);
    strcat(output_file_name, ".txt");
    FILE *ofp = fopen(output_file_name, "w");
    // Record any exception (e.g. from the max-flow code) in the output file.
    try {
        CDF_Main(k, g, ofp, t0);
    }
    catch(std::exception &e) {
        fprintf(ofp, "%s\n", e.what());
    }
    printf("Number of %u-cliques: %llu\n", k, cnt_clique);
    t2 = time(NULL);
    printf("- Time = %ldh%ldm%lds\n", (t2 - t1) / 3600, ((t2 - t1) % 3600) / 60, ((t2 - t1) % 60));
    t1 = t2;
    FreeGraph(g);
    printf("- Overall time = %ldh%ldm%lds\n", (t2 - t0) / 3600, ((t2 - t0) % 3600) / 60, ((t2 - t0) % 60));
    fprintf(ofp, "%ld\n", t2 - t0);
    fclose(ofp);
    return 0;
}
test_omp.c
#include <stdio.h>
#include <assert.h>

#define N 100

int A[N];
int B[N];

// Minimal OpenMP offload smoke test: copy B into A inside a `target` region
// and verify the result on the host.
int main()
{
    // Host-side initialisation.
    for(int i=0; i<N; i++){
        A[i] =0;
        B[i] =i;
    }

    // Executes on the device if one is available (host fallback otherwise);
    // map(A,B) without a map-type defaults to tofrom, so A is copied back.
    #pragma omp target map(A,B)
    for(int i=0; i<N; i++){
        A[i] = B[i];
    }

    // Verify the mapped-back data.
    for(int i=0; i<N; i++){
        assert(A[i] == B[i]);
    }
    printf("PASSED\n");
    return 0;
}
remarks_parallel_in_multiple_target_state_machines.c
// RUN: %clang_cc1 -verify=host -Rpass=openmp -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc // RUN: %clang_cc1 -verify=all,safe -Rpass=openmp -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out // RUN: %clang_cc1 -fexperimental-new-pass-manager -verify=all,safe -Rpass=openmp -fopenmp -O2 -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t.out // host-no-diagnostics void bar1(void) { #pragma omp parallel // #0 // all-remark@#0 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // safe-remark@#0 {{Parallel region is not known to be called from a unique single target region, maybe the surrounding function has external linkage?; will not attempt to rewrite the state machine use.}} // force-remark@#0 {{[UNSAFE] Parallel region is not known to be called from a unique single target region, maybe the surrounding function has external linkage?; will rewrite the state machine use due to command line flag, this can lead to undefined behavior if the parallel region is called from a target region outside this translation unit.}} // force-remark@#0 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. 
(parallel region ID: __omp_outlined__2_wrapper, kernel ID: <NONE>}} { } } void bar2(void) { #pragma omp parallel // #1 // all-remark@#1 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // safe-remark@#1 {{Parallel region is not known to be called from a unique single target region, maybe the surrounding function has external linkage?; will not attempt to rewrite the state machine use.}} // force-remark@#1 {{[UNSAFE] Parallel region is not known to be called from a unique single target region, maybe the surrounding function has external linkage?; will rewrite the state machine use due to command line flag, this can lead to undefined behavior if the parallel region is called from a target region outside this translation unit.}} // force-remark@#1 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__6_wrapper, kernel ID: <NONE>}} { } } void foo1(void) { #pragma omp target teams // #2 // all-remark@#2 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__1_wrapper, kernel ID: __omp_offloading}} // all-remark@#2 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__3_wrapper, kernel ID: __omp_offloading}} { #pragma omp parallel // #3 // all-remark@#3 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. 
This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // all-remark@#3 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__1_wrapper, kernel ID: __omp_offloading}} { } bar1(); #pragma omp parallel // #4 // all-remark@#4 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // all-remark@#4 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__3_wrapper, kernel ID: __omp_offloading}} { } } } void foo2(void) { #pragma omp target teams // #5 // all-remark@#5 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__5_wrapper, kernel ID: __omp_offloading}} // all-remark@#5 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__7_wrapper, kernel ID: __omp_offloading}} { #pragma omp parallel // #6 // all-remark@#6 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // all-remark@#6 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. 
(parallel region ID: __omp_outlined__5_wrapper, kernel ID: __omp_offloading}} { } bar1(); bar2(); #pragma omp parallel // #7 // all-remark@#7 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // all-remark@#7 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__7_wrapper, kernel ID: __omp_offloading}} { } bar1(); bar2(); } } void foo3(void) { #pragma omp target teams // #8 // all-remark@#8 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__9_wrapper, kernel ID: __omp_offloading}} // all-remark@#8 {{Target region containing the parallel region that is specialized. (parallel region ID: __omp_outlined__10_wrapper, kernel ID: __omp_offloading}} { #pragma omp parallel // #9 // all-remark@#9 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // all-remark@#9 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__9_wrapper, kernel ID: __omp_offloading}} { } bar1(); bar2(); #pragma omp parallel // #10 // all-remark@#10 {{Found a parallel region that is called in a target region but not part of a combined target construct nor nesed inside a target construct without intermediate code. 
This can lead to excessive register usage for unrelated target regions in the same translation unit due to spurious call edges assumed by ptxas.}} // all-remark@#10 {{Specialize parallel region that is only reached from a single target region to avoid spurious call edges and excessive register usage in other target regions. (parallel region ID: __omp_outlined__10_wrapper, kernel ID: __omp_offloading}} { } bar1(); bar2(); } } void spmd(void) { // Verify we do not emit the remarks above for "SPMD" regions. #pragma omp target teams #pragma omp parallel { } #pragma omp target teams distribute parallel for for (int i = 0; i < 100; ++i) { } } // all-remark@* 3 {{OpenMP runtime call __kmpc_global_thread_num moved to}} // all-remark@* 3 {{OpenMP runtime call __kmpc_global_thread_num deduplicated}}
GB_unaryop__abs_uint8_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__abs_uint8_int32
// op(A') function: GB_tran__abs_uint8_int32

// C type: uint8_t
// A type: int32_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: for this instantiation the op itself is the identity;
// the per-entry transformation is performed entirely by GB_CASTING below.
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT8 || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise Cx [p] = (uint8_t) Ax [p] for p in [0, anz); the loop is
// embarrassingly parallel, hence the static OpenMP schedule over nthreads.
GrB_Info GB_unop__abs_uint8_int32
(
    uint8_t *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unaryop_transpose.c, which expands
// using the GB_* macros defined above (phase 2 of 2 of the transpose).
GrB_Info GB_tran__abs_uint8_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
9847.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose void kernel_heat_3d(int tsteps, int n, double A[200 + 0][200 + 0][200 + 0], double B[200 + 0][200 + 0][200 + 0]) { int t12; int t10; int t8; int t6; int t4; int t2; for (t2 = 1; t2 <= 1000; t2 += 1) { #pragma omp parallel for private(t4,t6,t8,t10,t12,t14) for (t4 = 1; t4 <= n - 2; t4 += 32) for (t6 = t4; t6 <= (t4 + 31 < n - 2 ? t4 + 31 : n - 2); t6 += 1) for (t8 = 1; t8 <= n - 2; t8 += 16) for (t10 = t8; t10 <= (t8 + 15 < n - 2 ? t8 + 15 : n - 2); t10 += 1) for (t12 = 1; t12 <= n - 2; t12 += 1) B[t6][t10][t12] = 0.125 * (A[t6 + 1][t10][t12] - 2 * A[t6][t10][t12] + A[t6 - 1][t10][t12]) + 0.125 * (A[t6][t10 + 1][t12] - 2 * A[t6][t10][t12] + A[t6][t10 - 1][t12]) + 0.125 * (A[t6][t10][t12 + 1] - 2 * A[t6][t10][t12] + A[t6][t10][t12 - 1]) + A[t6][t10][t12]; #pragma omp parallel for private(t4,t6,t8,t10,t12,t14) for (t4 = 1; t4 <= n - 2; t4 += 32) for (t6 = t4; t6 <= (t4 + 31 < n - 2 ? t4 + 31 : n - 2); t6 += 1) for (t8 = 1; t8 <= n - 2; t8 += 16) for (t10 = t8; t10 <= (t8 + 15 < n - 2 ? t8 + 15 : n - 2); t10 += 1) for (t12 = 1; t12 <= n - 2; t12 += 1) A[t6][t10][t12] = 0.125 * (B[t6 + 1][t10][t12] - 2 * B[t6][t10][t12] + B[t6 - 1][t10][t12]) + 0.125 * (B[t6][t10 + 1][t12] - 2 * B[t6][t10][t12] + B[t6][t10 - 1][t12]) + 0.125 * (B[t6][t10][t12 + 1] - 2 * B[t6][t10][t12] + B[t6][t10][t12 - 1]) + B[t6][t10][t12]; } }
common.h
#ifndef LIGHTGBM_UTILS_COMMON_FUN_H_
#define LIGHTGBM_UTILS_COMMON_FUN_H_

#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>

#include <cstdio>
#include <string>
#include <vector>
#include <sstream>
#include <cstdint>
#include <algorithm>
#include <cmath>
#include <functional>
#include <memory>
#include <iterator>
#include <type_traits>
#include <iomanip>

#ifdef _MSC_VER
#include "intrin.h"
#endif

namespace LightGBM {

namespace Common {

// ASCII-only lower-casing (locale-independent, unlike std::tolower).
inline static char tolower(char in) {
  if (in <= 'Z' && in >= 'A')
    return in - ('Z' - 'z');
  return in;
}

// Strips leading/trailing ASCII whitespace and returns the trimmed copy.
inline static std::string Trim(std::string str) {
  if (str.empty()) {
    return str;
  }
  str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1);
  str.erase(0, str.find_first_not_of(" \f\n\r\t\v"));
  return str;
}

// Strips leading/trailing single and double quote characters.
inline static std::string RemoveQuotationSymbol(std::string str) {
  if (str.empty()) {
    return str;
  }
  str.erase(str.find_last_not_of("'\"") + 1);
  str.erase(0, str.find_first_not_of("'\""));
  return str;
}

inline static bool StartsWith(const std::string& str, const std::string prefix) {
  if (str.substr(0, prefix.size()) == prefix) {
    return true;
  } else {
    return false;
  }
}

// Splits on a single delimiter character; empty fields are dropped.
inline static std::vector<std::string> Split(const char* c_str, char delimiter) {
  std::vector<std::string> ret;
  std::string str(c_str);
  size_t i = 0;
  size_t pos = 0;
  while (pos < str.length()) {
    if (str[pos] == delimiter) {
      if (i < pos) {
        ret.push_back(str.substr(i, pos - i));
      }
      ++pos;
      i = pos;
    } else {
      ++pos;
    }
  }
  if (i < pos) {
    ret.push_back(str.substr(i));
  }
  return ret;
}

// Splits on '\n' / '\r' line endings; empty lines are dropped.
inline static std::vector<std::string> SplitLines(const char* c_str) {
  std::vector<std::string> ret;
  std::string str(c_str);
  size_t i = 0;
  size_t pos = 0;
  while (pos < str.length()) {
    if (str[pos] == '\n' || str[pos] == '\r') {
      if (i < pos) {
        ret.push_back(str.substr(i, pos - i));
      }
      // skip the line endings
      while (str[pos] == '\n' || str[pos] == '\r') ++pos;
      // new begin
      i = pos;
    } else {
      ++pos;
    }
  }
  if (i < pos) {
    ret.push_back(str.substr(i));
  }
  return ret;
}

// Splits on any character of the NUL-terminated delimiter set; empty fields dropped.
inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) {
  std::vector<std::string> ret;
  std::string str(c_str);
  size_t i = 0;
  size_t pos = 0;
  while (pos < str.length()) {
    bool met_delimiters = false;
    for (int j = 0; delimiters[j] != '\0'; ++j) {
      if (str[pos] == delimiters[j]) {
        met_delimiters = true;
        break;
      }
    }
    if (met_delimiters) {
      if (i < pos) {
        ret.push_back(str.substr(i, pos - i));
      }
      ++pos;
      i = pos;
    } else {
      ++pos;
    }
  }
  if (i < pos) {
    ret.push_back(str.substr(i));
  }
  return ret;
}

// Parses an optionally sign-prefixed decimal integer into *out and returns a
// pointer just past the parsed token (surrounding spaces skipped).
// No overflow detection.
template<typename T>
inline static const char* Atoi(const char* p, T* out) {
  int sign;
  T value;
  while (*p == ' ') {
    ++p;
  }
  sign = 1;
  if (*p == '-') {
    sign = -1;
    ++p;
  } else if (*p == '+') {
    ++p;
  }
  for (value = 0; *p >= '0' && *p <= '9'; ++p) {
    value = value * 10 + (*p - '0');
  }
  *out = static_cast<T>(sign * value);
  while (*p == ' ') {
    ++p;
  }
  return p;
}

// Integer power by recursive squaring/cubing; negative powers via reciprocal.
template<typename T>
inline static double Pow(T base, int power) {
  if (power < 0) {
    return 1.0 / Pow(base, -power);
  } else if (power == 0) {
    return 1;
  } else if (power % 2 == 0) {
    return Pow(base*base, power / 2);
  } else if (power % 3 == 0) {
    return Pow(base*base*base, power / 3);
  } else {
    return base * Pow(base, power - 1);
  }
}

// Hand-rolled, locale-free double parser.  Understands sign, fraction and
// exponent, plus the tokens na/nan/null (-> NAN) and inf/infinity
// (-> +/-1e308); any other token aborts via Log::Fatal.  Returns a pointer
// just past the parsed token, with surrounding spaces skipped.
inline static const char* Atof(const char* p, double* out) {
  int frac;
  double sign, value, scale;
  *out = NAN;
  // Skip leading white space, if any.
  while (*p == ' ') {
    ++p;
  }
  // Get sign, if any.
  sign = 1.0;
  if (*p == '-') {
    sign = -1.0;
    ++p;
  } else if (*p == '+') {
    ++p;
  }
  // is a number
  if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') {
    // Get digits before decimal point or exponent, if any.
    for (value = 0.0; *p >= '0' && *p <= '9'; ++p) {
      value = value * 10.0 + (*p - '0');
    }
    // Get digits after decimal point, if any.
    if (*p == '.') {
      double right = 0.0;
      int nn = 0;
      ++p;
      while (*p >= '0' && *p <= '9') {
        right = (*p - '0') + right * 10.0;
        ++nn;
        ++p;
      }
      value += right / Pow(10.0, nn);
    }
    // Handle exponent, if any.
    frac = 0;
    scale = 1.0;
    if ((*p == 'e') || (*p == 'E')) {
      uint32_t expon;
      // Get sign of exponent, if any.
      ++p;
      if (*p == '-') {
        frac = 1;
        ++p;
      } else if (*p == '+') {
        ++p;
      }
      // Get digits of exponent, if any.
      for (expon = 0; *p >= '0' && *p <= '9'; ++p) {
        expon = expon * 10 + (*p - '0');
      }
      if (expon > 308) expon = 308;
      // Calculate scaling factor.
      while (expon >= 50) { scale *= 1E50; expon -= 50; }
      while (expon >= 8) { scale *= 1E8; expon -= 8; }
      while (expon > 0) { scale *= 10.0; expon -= 1; }
    }
    // Return signed and scaled floating point result.
    *out = sign * (frac ? (value / scale) : (value * scale));
  } else {
    // Not a number: collect the token up to the next separator and try to
    // interpret it as one of the recognized textual values.
    size_t cnt = 0;
    while (*(p + cnt) != '\0' && *(p + cnt) != ' '
           && *(p + cnt) != '\t' && *(p + cnt) != ','
           && *(p + cnt) != '\n' && *(p + cnt) != '\r'
           && *(p + cnt) != ':') {
      ++cnt;
    }
    if (cnt > 0) {
      std::string tmp_str(p, cnt);
      std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower);
      if (tmp_str == std::string("na") || tmp_str == std::string("nan")
          || tmp_str == std::string("null")) {
        *out = NAN;
      } else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) {
        *out = sign * 1e308;
      } else {
        Log::Fatal("Unknown token %s in data file", tmp_str.c_str());
      }
      p += cnt;
    }
  }
  while (*p == ' ') {
    ++p;
  }
  return p;
}

// Atoi that additionally requires the whole string to have been consumed.
inline static bool AtoiAndCheck(const char* p, int* out) {
  const char* after = Atoi(p, out);
  if (*after != '\0') {
    return false;
  }
  return true;
}

// Atof that additionally requires the whole string to have been consumed.
inline static bool AtofAndCheck(const char* p, double* out) {
  const char* after = Atof(p, out);
  if (*after != '\0') {
    return false;
  }
  return true;
}

// Number of decimal digits of n; uses bit-scan (log2 * 1233 / 4096 ~ log10)
// on MSVC/GCC, falls back to a comparison ladder elsewhere.
inline static unsigned CountDecimalDigit32(uint32_t n) {
#if defined(_MSC_VER) || defined(__GNUC__)
  static const uint32_t powers_of_10[] = {
    0,
    10,
    100,
    1000,
    10000,
    100000,
    1000000,
    10000000,
    100000000,
    1000000000
  };
#ifdef _MSC_VER
  unsigned long i = 0;
  _BitScanReverse(&i, n | 1);
  uint32_t t = (i + 1) * 1233 >> 12;
#elif __GNUC__
  uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12;
#endif
  return t - (n < powers_of_10[t]) + 1;
#else
  if (n < 10) return 1;
  if (n < 100) return 2;
  if (n < 1000) return 3;
  if (n < 10000) return 4;
  if (n < 100000) return 5;
  if (n < 1000000) return 6;
  if (n < 10000000) return 7;
  if (n < 100000000) return 8;
  if (n < 1000000000) return 9;
  return 10;
#endif
}

// Fast uint32 -> decimal string using a two-digit lookup table, writing the
// digits back-to-front.  buffer must hold at least 11 bytes.
inline static void Uint32ToStr(uint32_t value, char* buffer) {
  const char kDigitsLut[200] = {
    '0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6', '0', '7', '0', '8', '0', '9',
    '1', '0', '1', '1', '1', '2', '1', '3', '1', '4', '1', '5', '1', '6', '1', '7', '1', '8', '1', '9',
    '2', '0', '2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9',
    '3', '0', '3', '1', '3', '2', '3', '3', '3', '4', '3', '5', '3', '6', '3', '7', '3', '8', '3', '9',
    '4', '0', '4', '1', '4', '2', '4', '3', '4', '4', '4', '5', '4', '6', '4', '7', '4', '8', '4', '9',
    '5', '0', '5', '1', '5', '2', '5', '3', '5', '4', '5', '5', '5', '6', '5', '7', '5', '8', '5', '9',
    '6', '0', '6', '1', '6', '2', '6', '3', '6', '4', '6', '5', '6', '6', '6', '7', '6', '8', '6', '9',
    '7', '0', '7', '1', '7', '2', '7', '3', '7', '4', '7', '5', '7', '6', '7', '7', '7', '8', '7', '9',
    '8', '0', '8', '1', '8', '2', '8', '3', '8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9',
    '9', '0', '9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', '7', '9', '8', '9', '9'
  };
  unsigned digit = CountDecimalDigit32(value);
  buffer += digit;
  *buffer = '\0';
  while (value >= 100) {
    const unsigned i = (value % 100) << 1;
    value /= 100;
    *--buffer = kDigitsLut[i + 1];
    *--buffer = kDigitsLut[i];
  }
  if (value < 10) {
    *--buffer = char(value) + '0';
  } else {
    const unsigned i = value << 1;
    *--buffer = kDigitsLut[i + 1];
    *--buffer = kDigitsLut[i];
  }
}

// Signed variant: emits '-' then the two's-complement magnitude (~u + 1
// handles INT32_MIN without overflow).
inline static void Int32ToStr(int32_t value, char* buffer) {
  uint32_t u = static_cast<uint32_t>(value);
  if (value < 0) {
    *buffer++ = '-';
    u = ~u + 1;
  }
  Uint32ToStr(u, buffer);
}

// Round-trippable double formatting ("%.17g"); the length parameter is only
// named (and used) on MSVC where sprintf_s requires it.
inline static void DoubleToStr(double value, char* buffer, size_t
#ifdef _MSC_VER
  buffer_len
#endif
  ) {
#ifdef _MSC_VER
  sprintf_s(buffer, buffer_len, "%.17g", value);
#else
  sprintf(buffer, "%.17g", value);
#endif
}

inline static const char* SkipSpaceAndTab(const char* p) {
  while (*p == ' ' || *p == '\t') {
    ++p;
  }
  return p;
}

inline static const char* SkipReturn(const char* p) {
  while (*p == '\n' || *p == '\r' || *p == ' ') {
    ++p;
  }
  return p;
}

// Element-wise static_cast of a whole vector.
template<typename T, typename T2>
inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) {
  std::vector<T2> ret(arr.size());
  for (size_t i = 0; i < arr.size(); ++i) {
    ret[i] = static_cast<T2>(arr[i]);
  }
  return ret;
}

// Dispatch helper for ArrayToStringFast: picks the formatting routine from
// the (is_float, is_unsign) type traits.  Primary template: signed integers.
template<typename T, bool is_float, bool is_unsign>
struct __TToStringHelperFast {
  void operator()(T value, char* buffer, size_t) const {
    Int32ToStr(value, buffer);
  }
};

// Floating-point specialization ("%g" formatting).
template<typename T>
struct __TToStringHelperFast<T, true, false> {
  void operator()(T value, char* buffer, size_t
#ifdef _MSC_VER
    buf_len
#endif
    ) const {
#ifdef _MSC_VER
    sprintf_s(buffer, buf_len, "%g", value);
#else
    sprintf(buffer, "%g", value);
#endif
  }
};

// Unsigned-integer specialization.
template<typename T>
struct __TToStringHelperFast<T, false, true> {
  void operator()(T value, char* buffer, size_t) const {
    Uint32ToStr(value, buffer);
  }
};

// Space-joins at most n leading elements of arr using the fast formatters.
template<typename T>
inline static std::string ArrayToStringFast(const std::vector<T>& arr, size_t n) {
  if (arr.empty() || n == 0) {
    return std::string("");
  }
  __TToStringHelperFast<T, std::is_floating_point<T>::value, std::is_unsigned<T>::value> helper;
  const size_t buf_len = 16;
  std::vector<char> buffer(buf_len);
  std::stringstream str_buf;
  helper(arr[0], buffer.data(), buf_len);
  str_buf << buffer.data();
  for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
    helper(arr[i], buffer.data(), buf_len);
    str_buf << ' ' << buffer.data();
  }
  return str_buf.str();
}

// Space-joins at most n leading doubles using full-precision formatting.
inline static std::string ArrayToString(const std::vector<double>& arr, size_t n) {
  if (arr.empty() || n == 0) {
    return std::string("");
  }
  const size_t buf_len = 32;
  std::vector<char> buffer(buf_len);
  std::stringstream str_buf;
  DoubleToStr(arr[0], buffer.data(), buf_len);
  str_buf << buffer.data();
  for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
    DoubleToStr(arr[i], buffer.data(), buf_len);
    str_buf << ' ' << buffer.data();
  }
  return str_buf.str();
}

// string -> T conversion helper; primary template handles integers.
template<typename T, bool is_float>
struct __StringToTHelper {
  T operator()(const std::string& str) const {
    T ret = 0;
    Atoi(str.c_str(), &ret);
    return ret;
  }
};

// Floating-point specialization: defers to std::stod.
template<typename T>
struct __StringToTHelper<T, true> {
  T operator()(const std::string& str) const {
    return static_cast<T>(std::stod(str));
  }
};

// Parses a delimiter-separated list into a vector of T.
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter) {
  std::vector<std::string> strs = Split(str.c_str(), delimiter);
  std::vector<T> ret;
  ret.reserve(strs.size());
  __StringToTHelper<T, std::is_floating_point<T>::value> helper;
  for (const auto& s : strs) {
    ret.push_back(helper(s));
  }
  return ret;
}

// Parses a space-separated list of exactly n values (CHECKed).
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, int n) {
  if (n == 0) {
    return std::vector<T>();
  }
  std::vector<std::string> strs = Split(str.c_str(), ' ');
  CHECK(strs.size() == static_cast<size_t>(n));
  std::vector<T> ret;
  ret.reserve(strs.size());
  __StringToTHelper<T, std::is_floating_point<T>::value> helper;
  for (const auto& s : strs) {
    ret.push_back(helper(s));
  }
  return ret;
}

// In-place-pointer conversion helper for StringToArrayFast (integers).
template<typename T, bool is_float>
struct __StringToTHelperFast {
  const char* operator()(const char*p, T* out) const {
    return Atoi(p, out);
  }
};

// Floating-point specialization: uses the locale-free Atof above.
template<typename T>
struct __StringToTHelperFast<T, true> {
  const char* operator()(const char*p, T* out) const {
    double tmp = 0.0f;
    auto ret = Atof(p, &tmp);
    *out = static_cast<T>(tmp);
    return ret;
  }
};

// Parses exactly n values without allocating intermediate strings.
template<typename T>
inline static std::vector<T> StringToArrayFast(const std::string& str, int n) {
  if (n == 0) {
    return std::vector<T>();
  }
  auto p_str = str.c_str();
  __StringToTHelperFast<T, std::is_floating_point<T>::value> helper;
  std::vector<T> ret(n);
  for (int i = 0; i < n; ++i) {
    p_str = helper(p_str, &ret[i]);
  }
  return ret;
}

// Joins all elements with the given delimiter (stream-formatted at
// double round-trip precision).
template<typename T>
inline static std::string Join(const std::vector<T>& strs, const char* delimiter) {
  if (strs.empty()) {
    return std::string("");
  }
  std::stringstream str_buf;
  str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  str_buf << strs[0];
  for (size_t i = 1; i < strs.size(); ++i) {
    str_buf << delimiter;
    str_buf << strs[i];
  }
  return str_buf.str();
}

// Joins the half-open index range [start, end), clamped to the vector size.
// NOTE(review): start/end are unsigned, so `end - start <= 0` only catches
// end == start; end < start wraps around -- confirm callers never pass that.
template<typename T>
inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) {
  if (end - start <= 0) {
    return std::string("");
  }
  start = std::min(start, static_cast<size_t>(strs.size()) - 1);
  end = std::min(end, static_cast<size_t>(strs.size()));
  std::stringstream str_buf;
  str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  str_buf << strs[start];
  for (size_t i = start + 1; i < end; ++i) {
    str_buf << delimiter;
    str_buf << strs[i];
  }
  return str_buf.str();
}

// Smallest power of two >= x (returns 0 if no such int64 power exists).
inline static int64_t Pow2RoundUp(int64_t x) {
  int64_t t = 1;
  for (int i = 0; i < 64; ++i) {
    if (t >= x) {
      return t;
    }
    t <<= 1;
  }
  return 0;
}

/*!
* \brief Do inplace softmax transformation on p_rec
* \param p_rec The input/output vector of the values.
*/
inline static void Softmax(std::vector<double>* p_rec) {
  std::vector<double> &rec = *p_rec;
  // Subtract the max before exponentiating for numerical stability.
  double wmax = rec[0];
  for (size_t i = 1; i < rec.size(); ++i) {
    wmax = std::max(rec[i], wmax);
  }
  double wsum = 0.0f;
  for (size_t i = 0; i < rec.size(); ++i) {
    rec[i] = std::exp(rec[i] - wmax);
    wsum += rec[i];
  }
  for (size_t i = 0; i < rec.size(); ++i) {
    rec[i] /= static_cast<double>(wsum);
  }
}

// Out-of-place softmax over a raw buffer; same max-shift stabilization.
inline static void Softmax(const double* input, double* output, int len) {
  double wmax = input[0];
  for (int i = 1; i < len; ++i) {
    wmax = std::max(input[i], wmax);
  }
  double wsum = 0.0f;
  for (int i = 0; i < len; ++i) {
    output[i] = std::exp(input[i] - wmax);
    wsum += output[i];
  }
  for (int i = 0; i < len; ++i) {
    output[i] /= static_cast<double>(wsum);
  }
}

// Non-owning const views of a vector of unique_ptr.
template<typename T>
std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) {
  std::vector<const T*> ret;
  for (size_t i = 0; i < input.size(); ++i) {
    ret.push_back(input.at(i).get());
  }
  return ret;
}

// Stable-sorts keys[start..] and reorders values[start..] to match.
template<typename T1, typename T2>
inline static void SortForPair(std::vector<T1>& keys, std::vector<T2>& values, size_t start, bool is_reverse = false) {
  std::vector<std::pair<T1, T2>> arr;
  for (size_t i = start; i < keys.size(); ++i) {
    arr.emplace_back(keys[i], values[i]);
  }
  if (!is_reverse) {
    std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
      return a.first < b.first;
    });
  } else {
    std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
      return a.first > b.first;
    });
  }
  for (size_t i = start; i < arr.size(); ++i) {
    keys[i] = arr[i].first;
    values[i] = arr[i].second;
  }
}

// Raw data() pointers of the inner vectors (non-owning).
template <typename T>
inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>& data) {
  std::vector<T*> ptr(data.size());
  for (size_t i = 0; i < data.size(); ++i) {
    ptr[i] = data[i].data();
  }
  return ptr;
}

// Sizes of the inner vectors, narrowed to int.
template <typename T>
inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) {
  std::vector<int> ret(data.size());
  for (size_t i = 0; i < data.size(); ++i) {
    ret[i] = static_cast<int>(data[i].size());
  }
  return ret;
}

// Clamps to +/-1e300 to keep downstream arithmetic finite.
inline static double AvoidInf(double x) {
  if (x >= 1e300) {
    return 1e300;
  } else if (x <= -1e300) {
    return -1e300;
  } else {
    return x;
  }
}

// Float variant: clamps to +/-1e38.
inline static float AvoidInf(float x) {
  if (x >= 1e38) {
    return 1e38f;
  } else if (x <= -1e38) {
    return -1e38f;
  } else {
    return x;
  }
}

// Tag-dispatch helper: yields the iterator's value type as a null pointer,
// used to pick the merge-buffer element type in ParallelSort below.
template<typename _Iter>
inline static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) {
  return (0);
}

// OpenMP parallel merge sort: sorts per-thread chunks with std::sort, then
// merges pairs of adjacent runs in parallel, doubling the run length each
// round.  Falls back to plain std::sort for small inputs or one thread.
template<typename _RanIt, typename _Pr, typename _VTRanIt>
inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) {
  size_t len = _Last - _First;
  const size_t kMinInnerLen = 1024;
  int num_threads = 1;
  #pragma omp parallel
  #pragma omp master
  {
    num_threads = omp_get_num_threads();
  }
  if (len <= kMinInnerLen || num_threads <= 1) {
    std::sort(_First, _Last, _Pred);
    return;
  }
  size_t inner_size = (len + num_threads - 1) / num_threads;
  inner_size = std::max(inner_size, kMinInnerLen);
  num_threads = static_cast<int>((len + inner_size - 1) / inner_size);
  #pragma omp parallel for schedule(static, 1)
  for (int i = 0; i < num_threads; ++i) {
    size_t left = inner_size*i;
    size_t right = left + inner_size;
    right = std::min(right, len);
    if (right > left) {
      std::sort(_First + left, _First + right, _Pred);
    }
  }
  // Buffer for merge.
  std::vector<_VTRanIt> temp_buf(len);
  _RanIt buf = temp_buf.begin();
  size_t s = inner_size;
  // Recursive merge
  while (s < len) {
    int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2));
    #pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < loop_size; ++i) {
      size_t left = i * 2 * s;
      size_t mid = left + s;
      size_t right = mid + s;
      right = std::min(len, right);
      if (mid >= right) { continue; }
      // Copy the left run aside, then merge it with the right run in place.
      std::copy(_First + left, _First + mid, buf + left);
      std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred);
    }
    s *= 2;
  }
}

// Convenience overload that deduces the value type automatically.
template<typename _RanIt, typename _Pr>
inline static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) {
  return ParallelSort(_First, _Last, _Pred, IteratorValType(_First));
}

// Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not
// (elements are visited in pairs so each pair needs only one ordering test).
template <typename T>
inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) {
  auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) {
    std::ostringstream os;
    os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]";
    Log::Fatal(os.str().c_str(), callername, i);
  };
  for (int i = 1; i < ny; i += 2) {
    if (y[i - 1] < y[i]) {
      if (y[i - 1] < ymin) {
        fatal_msg(i - 1);
      } else if (y[i] > ymax) {
        fatal_msg(i);
      }
    } else {
      if (y[i - 1] > ymax) {
        fatal_msg(i - 1);
      } else if (y[i] < ymin) {
        fatal_msg(i);
      }
    }
  }
  if (ny & 1) {  // odd
    if (y[ny - 1] < ymin || y[ny - 1] > ymax) {
      fatal_msg(ny - 1);
    }
  }
}

// One-pass scan over array w with nw elements: find min, max and sum of elements;
// this is useful for checking weight requirements.
template <typename T1, typename T2> inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) { T1 minw; T1 maxw; T1 sumw; int i; if (nw & 1) { // odd minw = w[0]; maxw = w[0]; sumw = w[0]; i = 2; } else { // even if (w[0] < w[1]) { minw = w[0]; maxw = w[1]; } else { minw = w[1]; maxw = w[0]; } sumw = w[0] + w[1]; i = 3; } for (; i < nw; i += 2) { if (w[i - 1] < w[i]) { minw = std::min(minw, w[i - 1]); maxw = std::max(maxw, w[i]); } else { minw = std::min(minw, w[i]); maxw = std::max(maxw, w[i - 1]); } sumw += w[i - 1] + w[i]; } if (mi != nullptr) { *mi = minw; } if (ma != nullptr) { *ma = maxw; } if (su != nullptr) { *su = static_cast<T2>(sumw); } } inline static std::vector<uint32_t> EmptyBitset(int n){ int size = n / 32; if(n % 32 != 0) size++; return std::vector<uint32_t>(size); } template<typename T> inline static void InsertBitset(std::vector<uint32_t>& vec, const T val){ int i1 = val / 32; int i2 = val % 32; if (static_cast<int>(vec.size()) < i1 + 1) { vec.resize(i1 + 1, 0); } vec[i1] |= (1 << i2); } template<typename T> inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) { std::vector<uint32_t> ret; for (int i = 0; i < n; ++i) { int i1 = vals[i] / 32; int i2 = vals[i] % 32; if (static_cast<int>(ret.size()) < i1 + 1) { ret.resize(i1 + 1, 0); } ret[i1] |= (1 << i2); } return ret; } template<typename T> inline static bool FindInBitset(const uint32_t* bits, int n, T pos) { int i1 = pos / 32; if (i1 >= n) { return false; } int i2 = pos % 32; return (bits[i1] >> i2) & 1; } inline static bool CheckDoubleEqualOrdered(double a, double b) { double upper = std::nextafter(a, INFINITY); return b <= upper; } inline static double GetDoubleUpperBound(double a) { return std::nextafter(a, INFINITY);; } inline static size_t GetLine(const char* str) { auto start = str; while (*str != '\0' && *str != '\n' && *str != '\r') { ++str; } return str - start; } inline static const char* SkipNewLine(const char* str) { if (*str == '\r') { 
++str; } if (*str == '\n') { ++str; } return str; } template <typename T> static int Sign(T x) { return (x > T(0)) - (x < T(0)); } template <typename T> static T SafeLog(T x) { if (x > 0) { return std::log(x); } else { return -INFINITY; } } } // namespace Common } // namespace LightGBM #endif // LightGBM_UTILS_COMMON_FUN_H_
Grid.h
#pragma once
#include "GridTypes.h"
#include "ScalarField.h"
#include "Vectors.h"

namespace pfc {

    // Supported field interpolation (form-factor) schemes.
    enum InterpolationType {
        Interpolation_CIC,
        Interpolation_TSC,
        Interpolation_SecondOrder,
        Interpolation_FourthOrder,
        Interpolation_PCS
    };

    // Field storage on a (possibly staggered) 3D grid: E, B and current
    // density J components, each in its own ScalarField, plus the selected
    // interpolation scheme dispatched through member-function pointers.
    // NOTE(review): the class declares virtual getJ/getE/getB but no virtual
    // destructor is visible here -- confirm Grid is never deleted through a
    // base-class pointer.
    template<typename Data, GridTypes gridType>
    class Grid {

    public:

        Grid(const Int3 & _numInternalCells, FP _dt, const FP3 & minCoords, const FP3 & _steps, const Int3 & globalGridDims);
        Grid(const Int3 & _numAllCells, FP _dt, const Int3 & globalGridDims); // for complex grid

        // Physical position of each field component in cell (x, y, z):
        // base cell coords plus the per-component stagger shift.
        const FP3 BxPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftBx; }
        const FP3 ByPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftBy; }
        const FP3 BzPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftBz; }
        const FP3 ExPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftEJx; }
        const FP3 EyPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftEJy; }
        const FP3 EzPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftEJz; }
        const FP3 JxPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftEJx; }
        const FP3 JyPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftEJy; }
        const FP3 JzPosition(int x, int y, int z) const { return baseCoords(x, y, z) + shiftEJz; }

        // Interpolated E and B at a physical point (scalar-coordinate overload).
        void getFieldsXYZ(FP x, FP y, FP z, FP3 & e, FP3 & b) const {
            FP3 coords(x, y, z);
            getFields(coords, e, b);
        }

        // Dispatches to the interpolation routine selected by setInterpolationType.
        void getFields(const FP3& coords, FP3 & e, FP3 & b) const {
            (this->*interpolationFields)(coords, e, b);
        }

        virtual FP3 getJ(const FP3& coords) const;
        virtual FP3 getE(const FP3& coords) const;
        virtual FP3 getB(const FP3& coords) const;

        // Scheme-specific combined E+B interpolators (defined elsewhere).
        void getFieldsCIC(const FP3& coords, FP3 & e, FP3 & b) const;
        void getFieldsTSC(const FP3& coords, FP3 & e, FP3 & b) const;
        void getFieldsSecondOrder(const FP3& coords, FP3 & e, FP3 & b) const;
        void getFieldsFourthOrder(const FP3& coords, FP3 & e, FP3 & b) const;
        void getFieldsPCS(const FP3& coords, FP3 & e, FP3 & b) const;

        // Per-component accessors dispatching through the selected scheme.
        FP getEx(const FP3& coords) const { return (this->*interpolationEx)(coords); }
        FP getEy(const FP3& coords) const { return (this->*interpolationEy)(coords); }
        FP getEz(const FP3& coords) const { return (this->*interpolationEz)(coords); }
        FP getBx(const FP3& coords) const { return (this->*interpolationBx)(coords); }
        FP getBy(const FP3& coords) const { return (this->*interpolationBy)(coords); }
        FP getBz(const FP3& coords) const { return (this->*interpolationBz)(coords); }
        FP getJx(const FP3& coords) const { return (this->*interpolationJx)(coords); }
        FP getJy(const FP3& coords) const { return (this->*interpolationJy)(coords); }
        FP getJz(const FP3& coords) const { return (this->*interpolationJz)(coords); }

        // Cloud-in-cell (CIC) per-component accessors.
        FP getExCIC(const FP3& coords) const { return getFieldCIC(coords, Ex, shiftEJx); }
        FP getEyCIC(const FP3& coords) const { return getFieldCIC(coords, Ey, shiftEJy); }
        FP getEzCIC(const FP3& coords) const { return getFieldCIC(coords, Ez, shiftEJz); }
        FP getBxCIC(const FP3& coords) const { return getFieldCIC(coords, Bx, shiftBx); }
        FP getByCIC(const FP3& coords) const { return getFieldCIC(coords, By, shiftBy); }
        FP getBzCIC(const FP3& coords) const { return getFieldCIC(coords, Bz, shiftBz); }
        FP getJxCIC(const FP3& coords) const { return getFieldCIC(coords, Jx, shiftEJx); }
        FP getJyCIC(const FP3& coords) const { return getFieldCIC(coords, Jy, shiftEJy); }
        FP getJzCIC(const FP3& coords) const { return getFieldCIC(coords, Jz, shiftEJz); }

        // Triangular-shaped-cloud (TSC) per-component accessors.
        FP getExTSC(const FP3& coords) const { return getFieldTSC(coords, Ex, shiftEJx); }
        FP getEyTSC(const FP3& coords) const { return getFieldTSC(coords, Ey, shiftEJy); }
        FP getEzTSC(const FP3& coords) const { return getFieldTSC(coords, Ez, shiftEJz); }
        FP getBxTSC(const FP3& coords) const { return getFieldTSC(coords, Bx, shiftBx); }
        FP getByTSC(const FP3& coords) const { return getFieldTSC(coords, By, shiftBy); }
        FP getBzTSC(const FP3& coords) const { return getFieldTSC(coords, Bz, shiftBz); }
        FP getJxTSC(const FP3& coords) const { return getFieldTSC(coords, Jx, shiftEJx); }
        FP getJyTSC(const FP3& coords) const { return getFieldTSC(coords, Jy, shiftEJy); }
        FP getJzTSC(const FP3& coords) const { return getFieldTSC(coords, Jz, shiftEJz); }

        // Second-order per-component accessors.
        FP getExSecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Ex, shiftEJx); }
        FP getEySecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Ey, shiftEJy); }
        FP getEzSecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Ez, shiftEJz); }
        FP getBxSecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Bx, shiftBx); }
        FP getBySecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, By, shiftBy); }
        FP getBzSecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Bz, shiftBz); }
        FP getJxSecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Jx, shiftEJx); }
        FP getJySecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Jy, shiftEJy); }
        FP getJzSecondOrder(const FP3& coords) const { return getFieldSecondOrder(coords, Jz, shiftEJz); }

        // Fourth-order per-component accessors.
        FP getExFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Ex, shiftEJx); }
        FP getEyFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Ey, shiftEJy); }
        FP getEzFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Ez, shiftEJz); }
        FP getBxFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Bx, shiftBx); }
        FP getByFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, By, shiftBy); }
        FP getBzFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Bz, shiftBz); }
        FP getJxFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Jx, shiftEJx); }
        FP getJyFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Jy, shiftEJy); }
        FP getJzFourthOrder(const FP3& coords) const { return getFieldFourthOrder(coords, Jz, shiftEJz); }

        // Piecewise-cubic-spline (PCS) per-component accessors.
        FP getExPCS(const FP3& coords) const { return getFieldPCS(coords, Ex, shiftEJx); }
        FP getEyPCS(const FP3& coords) const { return getFieldPCS(coords, Ey, shiftEJy); }
        FP getEzPCS(const FP3& coords) const { return getFieldPCS(coords, Ez, shiftEJz); }
        FP getBxPCS(const FP3& coords) const { return getFieldPCS(coords, Bx, shiftBx); }
        FP getByPCS(const FP3& coords) const { return getFieldPCS(coords, By, shiftBy); }
        FP getBzPCS(const FP3& coords) const { return getFieldPCS(coords, Bz, shiftBz); }
        FP getJxPCS(const FP3& coords) const { return getFieldPCS(coords, Jx, shiftEJx); }
        FP getJyPCS(const FP3& coords) const { return getFieldPCS(coords, Jy, shiftEJy); }
        FP getJzPCS(const FP3& coords) const { return getFieldPCS(coords, Jz, shiftEJz); }

        /*void dumpE(FP3 * e, const Int3 * minCellIdx, const Int3 * maxCellIdx);
        void dumpB(FP3 * b, const Int3 * minCellIdx, const Int3 * maxCellIdx);
        void dumpCurrents(FP3 * currents, const Int3 * minCellIdx, const Int3 * maxCellIdx);
        void loadE(const FP3 * e, const Int3 * minCellIdx, const Int3 * maxCellIdx);
        void loadB(const FP3 * b, const Int3 * minCellIdx, const Int3 * maxCellIdx);
        void loadCurrents(const FP3 * currents, const Int3 * minCellIdx, const Int3 * maxCellIdx);*/

        /* Make all current density values zero. */
        void zeroizeJ();

        // Two ghost cells per side in each dimension that is actually resolved
        // (dimensions of global extent 1 get no ghost cells).
        const Int3 getNumExternalLeftCells() const {
            Int3 result(2, 2, 2);
            for (int d = 0; d < 3; d++)
                if (globalGridDims[d] == 1)
                    result[d] = 0;
            return result;
        }

        const Int3 getNumExternalRightCells() const {
            return getNumExternalLeftCells();
        }

        void setInterpolationType(InterpolationType type);
        InterpolationType getInterpolationType() const;

        const Int3 globalGridDims; // important to initialize it first
        const FP3 steps;
        const FP dt;
        const Int3 numInternalCells;
        const Int3 numCells;
        const FP3 origin;
        const int dimensionality;

        // Time difference of B and J relative to E.
        const FP timeShiftE, timeShiftB, timeShiftJ;

        ScalarField<Data> Ex, Ey, Ez, Bx, By, Bz, Jx, Jy, Jz;

    private:

        // 3d shifts of the field in the cell
        const FP3 shiftEJx, shiftEJy, shiftEJz, shiftBx, shiftBy, shiftBz;

        /* Get grid index and normalized internal coords in [0, 0, 0]..(1, 1, 1) for
        given physical coords and shift. */
        void getGridCoords(const FP3 & coords, const FP3 & shift, Int3 & idx, FP3 & internalCoords) const {
            idx.x = (int)((coords.x - origin.x - shift.x) / steps.x);
            idx.y = (int)((coords.y - origin.y - shift.y) / steps.y);
            idx.z = (int)((coords.z - origin.z - shift.z) / steps.z);
            internalCoords = (coords - baseCoords(idx.x, idx.y, idx.z) - shift) / steps;
        }

        // Same, but rounding to the nearest node rather than truncating.
        void getClosestGridCoords(const FP3 & coords, const FP3 & shift, Int3 & idx, FP3 & internalCoords) const {
            idx.x = (int)((coords.x - origin.x - shift.x) / steps.x + 0.5);
            idx.y = (int)((coords.y - origin.y - shift.y) / steps.y + 0.5);
            idx.z = (int)((coords.z - origin.z - shift.z) / steps.z + 0.5);
            internalCoords = (coords - baseCoords(idx.x, idx.y, idx.z) - shift) / steps;
        }

        /* Get base coords of element (i, j, k) so that its real coords are
        base coords + corresponding shift. */
        const FP3 baseCoords(int i, int j, int k) const {
            return origin + FP3(i, j, k) * steps;
        }

        // Scheme-specific scalar interpolation kernels (defined elsewhere).
        FP getFieldCIC(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const;
        FP getFieldTSC(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const;
        FP getFieldSecondOrder(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const;
        FP getFieldFourthOrder(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const;
        FP getFieldPCS(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const;

        // Currently selected scheme and the member-function pointers that
        // setInterpolationType routes the get* accessors through.
        InterpolationType interpolationType;
        void (Grid::*interpolationFields)(const FP3&, FP3&, FP3&) const;
        FP (Grid::*interpolationEx)(const FP3&) const;
        FP (Grid::*interpolationEy)(const FP3&) const;
        FP (Grid::*interpolationEz)(const FP3&) const;
        FP (Grid::*interpolationBx)(const FP3&) const;
        FP (Grid::*interpolationBy)(const FP3&) const;
        FP (Grid::*interpolationBz)(const FP3&) const;
        FP (Grid::*interpolationJx)(const FP3&) const;
        FP (Grid::*interpolationJy)(const FP3&) const;
        FP (Grid::*interpolationJz)(const FP3&) const;
    };

    typedef Grid<FP, GridTypes::YeeGridType> YeeGrid;
    typedef Grid<FP, GridTypes::StraightGridType> SimpleGrid;
    typedef Grid<FP, GridTypes::PSTDGridType> PSTDGrid;
    typedef Grid<FP, GridTypes::PSATDGridType> PSATDGrid;

    // Yee (staggered) grid: E/J on face centers, B on edge centers; B is
    // advanced half a time step after E.
    template <>
    inline Grid<FP, GridTypes::YeeGridType>::Grid(const Int3 & _numCells, FP _dt,
        const FP3 & minCoords, const FP3 & _steps, const Int3 & _globalGridDims) :
        globalGridDims(_globalGridDims),
        steps(_steps),
        dt(_dt),
        numInternalCells(_numCells),
        numCells(numInternalCells + getNumExternalLeftCells() + getNumExternalRightCells()),
        Ex(numCells), Ey(numCells), Ez(numCells),
        Bx(numCells), By(numCells), Bz(numCells),
        Jx(numCells), Jy(numCells), Jz(numCells),
        shiftEJx(FP3(0, 0.5, 0.5) * steps),
        shiftEJy(FP3(0.5, 0, 0.5) * steps),
        shiftEJz(FP3(0.5, 0.5, 0) * steps),
        shiftBx(FP3(0.5, 0, 0) * steps),
        shiftBy(FP3(0, 0.5, 0) * steps),
        shiftBz(FP3(0, 0, 0.5) * steps),
        timeShiftE(0.0),
        timeShiftB(dt/2),
        timeShiftJ(0.0),
        origin(minCoords.x - steps.x * getNumExternalLeftCells().x,
            minCoords.y - steps.y * getNumExternalLeftCells().y,
            minCoords.z - steps.z * getNumExternalLeftCells().z),
        dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1))
    {
        setInterpolationType(Interpolation_CIC);
    }

    // Collocated ("straight") grid: all components at cell corners, no
    // temporal staggering.
    template<>
    inline Grid<FP, GridTypes::StraightGridType>::Grid(const Int3 & _numInternalCells, FP _dt,
        const FP3 & minCoords, const FP3 & _steps, const Int3 & _globalGridDims) :
        globalGridDims(_globalGridDims),
        steps(_steps),
        dt(_dt),
        numInternalCells(_numInternalCells),
        numCells(numInternalCells + getNumExternalLeftCells() + getNumExternalRightCells()),
        Ex(numCells), Ey(numCells), Ez(numCells),
        Bx(numCells), By(numCells), Bz(numCells),
        Jx(numCells), Jy(numCells), Jz(numCells),
        shiftEJx(FP3(0, 0, 0) * steps),
        shiftEJy(FP3(0, 0, 0) * steps),
        shiftEJz(FP3(0, 0, 0) * steps),
        shiftBx(FP3(0, 0, 0) * steps),
        shiftBy(FP3(0, 0, 0) * steps),
        shiftBz(FP3(0, 0, 0) * steps),
        timeShiftE(0.0),
        timeShiftB(0.0),
        timeShiftJ(0.0),
        origin(minCoords.x - steps.x * getNumExternalLeftCells().x,
            minCoords.y - steps.y * getNumExternalLeftCells().y,
            minCoords.z - steps.z * getNumExternalLeftCells().z),
        dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1))
    {
        setInterpolationType(Interpolation_CIC);
    }

    // SPECTRAL GRIDS
    // PSTD grid over complex (spectral) data; no ghost cells, no spatial shifts.
    template<>
    inline Grid<complexFP, GridTypes::PSTDGridType>::Grid(const Int3 & _numAllCells, FP _dt,
        const Int3 & _globalGridDims) :
        globalGridDims(_globalGridDims),
        dt(_dt),
        numInternalCells(_numAllCells),
        numCells(numInternalCells),
        Ex(numCells), Ey(numCells), Ez(numCells),
        Bx(numCells), By(numCells), Bz(numCells),
        Jx(numCells), Jy(numCells), Jz(numCells),
        shiftEJx(FP3(0, 0, 0) * steps),
        shiftEJy(FP3(0, 0, 0) * steps),
        shiftEJz(FP3(0, 0, 0) * steps),
        shiftBx(FP3(0, 0, 0) * steps),
        shiftBy(FP3(0, 0, 0) * steps),
        shiftBz(FP3(0, 0, 0) * steps),
        timeShiftE(0.0),
        timeShiftB(dt / 2),
        timeShiftJ(dt / 2),
dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { } template<> inline Grid<FP, GridTypes::PSTDGridType>::Grid(const Int3 & _numInternalCells, FP _dt, const FP3 & minCoords, const FP3 & _steps, const Int3 & _globalGridDims) : globalGridDims(_globalGridDims), steps(_steps), dt(_dt), numInternalCells(_numInternalCells), numCells(numInternalCells + getNumExternalLeftCells() + getNumExternalRightCells()), Ex(numCells), Ey(numCells), Ez(numCells), Bx(numCells), By(numCells), Bz(numCells), Jx(numCells), Jy(numCells), Jz(numCells), shiftEJx(FP3(0, 0, 0) * steps), shiftEJy(FP3(0, 0, 0) * steps), shiftEJz(FP3(0, 0, 0) * steps), shiftBx(FP3(0, 0, 0) * steps), shiftBy(FP3(0, 0, 0) * steps), shiftBz(FP3(0, 0, 0) * steps), timeShiftE(0.0), timeShiftB(dt / 2), timeShiftJ(dt / 2), origin(minCoords.x - steps.x * getNumExternalLeftCells().x, minCoords.y - steps.y * getNumExternalLeftCells().y, minCoords.z - steps.z * getNumExternalLeftCells().z), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { setInterpolationType(Interpolation_CIC); } template<> inline Grid<complexFP, GridTypes::PSATDGridType>::Grid(const Int3 & _numInternalCells, FP _dt, const Int3 & _globalGridDims) : globalGridDims(_globalGridDims), dt(_dt), numInternalCells(_numInternalCells), numCells(numInternalCells), Ex(numCells), Ey(numCells), Ez(numCells), Bx(numCells), By(numCells), Bz(numCells), Jx(numCells), Jy(numCells), Jz(numCells), shiftEJx(FP3(0, 0, 0) * steps), shiftEJy(FP3(0, 0, 0) * steps), shiftEJz(FP3(0, 0, 0) * steps), shiftBx(FP3(0, 0, 0) * steps), shiftBy(FP3(0, 0, 0) * steps), shiftBz(FP3(0, 0, 0) * steps), timeShiftE(0.0), timeShiftB(0.0), timeShiftJ(dt / 2), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { } template<> inline Grid<FP, GridTypes::PSATDGridType>::Grid(const Int3 & _numInternalCells, FP _dt, const FP3 & minCoords, const FP3 & _steps, const Int3 & 
_globalGridDims) : globalGridDims(_globalGridDims), steps(_steps), dt(_dt), numInternalCells(_numInternalCells), numCells(numInternalCells + getNumExternalLeftCells() + getNumExternalRightCells()), Ex(numCells), Ey(numCells), Ez(numCells), Bx(numCells), By(numCells), Bz(numCells), Jx(numCells), Jy(numCells), Jz(numCells), shiftEJx(FP3(0, 0, 0) * steps), shiftEJy(FP3(0, 0, 0) * steps), shiftEJz(FP3(0, 0, 0) * steps), shiftBx(FP3(0, 0, 0) * steps), shiftBy(FP3(0, 0, 0) * steps), shiftBz(FP3(0, 0, 0) * steps), timeShiftE(0.0), timeShiftB(0.0), timeShiftJ(dt / 2), origin(minCoords.x - steps.x * getNumExternalLeftCells().x, minCoords.y - steps.y * getNumExternalLeftCells().y, minCoords.z - steps.z * getNumExternalLeftCells().z), dimensionality((_globalGridDims.x != 1) + (_globalGridDims.y != 1) + (_globalGridDims.z != 1)) { setInterpolationType(Interpolation_CIC); } // end SPECTRAL GRIDS template< typename Data, GridTypes gT> inline FP Grid<Data, gT>::getFieldCIC(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const { Int3 idx; FP3 internalCoords; getGridCoords(coords, shift, idx, internalCoords); return field.interpolateCIC(idx, internalCoords); } template< typename Data, GridTypes gT> inline FP Grid<Data, gT>::getFieldTSC(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const { Int3 idx; FP3 internalCoords; getClosestGridCoords(coords, shift, idx, internalCoords); return field.interpolateTSC(idx, internalCoords); } template< typename Data, GridTypes gT> inline FP Grid<Data, gT>::getFieldSecondOrder(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const { Int3 idx; FP3 internalCoords; getClosestGridCoords(coords, shift, idx, internalCoords); return field.interpolateSecondOrder(idx, internalCoords); } template< typename Data, GridTypes gT> inline FP Grid<Data, gT>::getFieldFourthOrder(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const { Int3 idx; FP3 internalCoords; 
getClosestGridCoords(coords, shift, idx, internalCoords); return field.interpolateFourthOrder(idx, internalCoords); } template< typename Data, GridTypes gT> inline FP Grid<Data, gT>::getFieldPCS(const FP3& coords, const ScalarField<Data>& field, const FP3 & shift) const { Int3 idx; FP3 internalCoords; getGridCoords(coords, shift, idx, internalCoords); return field.interpolatePCS(idx, internalCoords); } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::getFieldsCIC(const FP3& coords, FP3 & e, FP3 & b) const { /* For each component of E and B get grid index and internal coords, use it as base index and coefficients of interpolation. */ Int3 idx; FP3 internalCoords; getGridCoords(coords, shiftEJx, idx, internalCoords); e.x = Ex.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftEJy, idx, internalCoords); e.y = Ey.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftEJz, idx, internalCoords); e.z = Ez.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftBx, idx, internalCoords); b.x = Bx.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftBy, idx, internalCoords); b.y = By.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftBz, idx, internalCoords); b.z = Bz.interpolateCIC(idx, internalCoords); } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::getFieldsTSC(const FP3& coords, FP3 & e, FP3 & b) const { Int3 idx; FP3 internalCoords; getClosestGridCoords(coords, shiftEJx, idx, internalCoords); e.x = Ex.interpolateTSC(idx, internalCoords); getClosestGridCoords(coords, shiftEJy, idx, internalCoords); e.y = Ey.interpolateTSC(idx, internalCoords); getClosestGridCoords(coords, shiftEJz, idx, internalCoords); e.z = Ez.interpolateTSC(idx, internalCoords); getClosestGridCoords(coords, shiftBx, idx, internalCoords); b.x = Bx.interpolateTSC(idx, internalCoords); getClosestGridCoords(coords, shiftBy, idx, internalCoords); b.y = By.interpolateTSC(idx, internalCoords); 
getClosestGridCoords(coords, shiftBz, idx, internalCoords); b.z = Bz.interpolateTSC(idx, internalCoords); } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::getFieldsSecondOrder(const FP3& coords, FP3 & e, FP3 & b) const { Int3 idx; FP3 internalCoords; getClosestGridCoords(coords, shiftEJx, idx, internalCoords); e.x = Ex.interpolateSecondOrder(idx, internalCoords); getClosestGridCoords(coords, shiftEJy, idx, internalCoords); e.y = Ey.interpolateSecondOrder(idx, internalCoords); getClosestGridCoords(coords, shiftEJz, idx, internalCoords); e.z = Ez.interpolateSecondOrder(idx, internalCoords); getClosestGridCoords(coords, shiftBx, idx, internalCoords); b.x = Bx.interpolateSecondOrder(idx, internalCoords); getClosestGridCoords(coords, shiftBy, idx, internalCoords); b.y = By.interpolateSecondOrder(idx, internalCoords); getClosestGridCoords(coords, shiftBz, idx, internalCoords); b.z = Bz.interpolateSecondOrder(idx, internalCoords); } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::getFieldsFourthOrder(const FP3& coords, FP3 & e, FP3 & b) const { Int3 idx; FP3 internalCoords; getClosestGridCoords(coords, shiftEJx, idx, internalCoords); e.x = Ex.interpolateFourthOrder(idx, internalCoords); getClosestGridCoords(coords, shiftEJy, idx, internalCoords); e.y = Ey.interpolateFourthOrder(idx, internalCoords); getClosestGridCoords(coords, shiftEJz, idx, internalCoords); e.z = Ez.interpolateFourthOrder(idx, internalCoords); getClosestGridCoords(coords, shiftBx, idx, internalCoords); b.x = Bx.interpolateFourthOrder(idx, internalCoords); getClosestGridCoords(coords, shiftBy, idx, internalCoords); b.y = By.interpolateFourthOrder(idx, internalCoords); getClosestGridCoords(coords, shiftBz, idx, internalCoords); b.z = Bz.interpolateFourthOrder(idx, internalCoords); } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::getFieldsPCS(const FP3& coords, FP3 & e, FP3 & b) const { Int3 idx; FP3 internalCoords; getGridCoords(coords, shiftEJx, 
idx, internalCoords); e.x = Ex.interpolatePCS(idx, internalCoords); getGridCoords(coords, shiftEJy, idx, internalCoords); e.y = Ey.interpolatePCS(idx, internalCoords); getGridCoords(coords, shiftEJz, idx, internalCoords); e.z = Ez.interpolatePCS(idx, internalCoords); getGridCoords(coords, shiftBx, idx, internalCoords); b.x = Bx.interpolatePCS(idx, internalCoords); getGridCoords(coords, shiftBy, idx, internalCoords); b.y = By.interpolatePCS(idx, internalCoords); getGridCoords(coords, shiftBz, idx, internalCoords); b.z = Bz.interpolatePCS(idx, internalCoords); } template< typename Data, GridTypes gT> inline FP3 Grid<Data, gT>::getJ(const FP3& coords) const { /* For each component of J get grid index and internal coords, use it as base index and coefficients of interpolation. */ Int3 idx; FP3 internalCoords; FP3 j; getGridCoords(coords, shiftEJx, idx, internalCoords); j.x = Jx.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftEJy, idx, internalCoords); j.y = Jy.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftEJz, idx, internalCoords); j.z = Jz.interpolateCIC(idx, internalCoords); return j; } template< typename Data, GridTypes gT> inline FP3 Grid<Data, gT>::getE(const FP3& coords) const { /* For each component of J get grid index and internal coords, use it as base index and coefficients of interpolation. */ Int3 idx; FP3 internalCoords; FP3 e; getGridCoords(coords, shiftEJx, idx, internalCoords); e.x = Ex.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftEJy, idx, internalCoords); e.y = Ey.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftEJz, idx, internalCoords); e.z = Ez.interpolateCIC(idx, internalCoords); return e; } template< typename Data, GridTypes gT> inline FP3 Grid<Data, gT>::getB(const FP3& coords) const { /* For each component of J get grid index and internal coords, use it as base index and coefficients of interpolation. 
*/ Int3 idx; FP3 internalCoords; FP3 b; getGridCoords(coords, shiftBx, idx, internalCoords); b.x = Bx.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftBy, idx, internalCoords); b.y = By.interpolateCIC(idx, internalCoords); getGridCoords(coords, shiftBz, idx, internalCoords); b.z = Bz.interpolateCIC(idx, internalCoords); return b; } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::zeroizeJ() { Jx.zeroize(); Jy.zeroize(); Jz.zeroize(); } template< typename Data, GridTypes gT> inline void Grid<Data, gT>::setInterpolationType(InterpolationType type) { interpolationType = type; switch (interpolationType) { case Interpolation_CIC: interpolationFields = &Grid<Data, gT>::getFieldsCIC; interpolationEx = &Grid<Data, gT>::getExCIC; interpolationEy = &Grid<Data, gT>::getEyCIC; interpolationEz = &Grid<Data, gT>::getEzCIC; interpolationBx = &Grid<Data, gT>::getBxCIC; interpolationBy = &Grid<Data, gT>::getByCIC; interpolationBz = &Grid<Data, gT>::getBzCIC; interpolationJx = &Grid<Data, gT>::getJxCIC; interpolationJy = &Grid<Data, gT>::getJyCIC; interpolationJz = &Grid<Data, gT>::getJzCIC; break; case Interpolation_TSC: interpolationFields = &Grid<Data, gT>::getFieldsTSC; interpolationEx = &Grid<Data, gT>::getExTSC; interpolationEy = &Grid<Data, gT>::getEyTSC; interpolationEz = &Grid<Data, gT>::getEzTSC; interpolationBx = &Grid<Data, gT>::getBxTSC; interpolationBy = &Grid<Data, gT>::getByTSC; interpolationBz = &Grid<Data, gT>::getBzTSC; interpolationJx = &Grid<Data, gT>::getJxTSC; interpolationJy = &Grid<Data, gT>::getJyTSC; interpolationJz = &Grid<Data, gT>::getJzTSC; break; case Interpolation_PCS: interpolationFields = &Grid<Data, gT>::getFieldsPCS; interpolationEx = &Grid<Data, gT>::getExPCS; interpolationEy = &Grid<Data, gT>::getEyPCS; interpolationEz = &Grid<Data, gT>::getEzPCS; interpolationBx = &Grid<Data, gT>::getBxPCS; interpolationBy = &Grid<Data, gT>::getByPCS; interpolationBz = &Grid<Data, gT>::getBzPCS; interpolationJx = &Grid<Data, 
gT>::getJxPCS; interpolationJy = &Grid<Data, gT>::getJyPCS; interpolationJz = &Grid<Data, gT>::getJzPCS; break; case Interpolation_SecondOrder: interpolationFields = &Grid<Data, gT>::getFieldsSecondOrder; interpolationEx = &Grid<Data, gT>::getExSecondOrder; interpolationEy = &Grid<Data, gT>::getEySecondOrder; interpolationEz = &Grid<Data, gT>::getEzSecondOrder; interpolationBx = &Grid<Data, gT>::getBxSecondOrder; interpolationBy = &Grid<Data, gT>::getBySecondOrder; interpolationBz = &Grid<Data, gT>::getBzSecondOrder; interpolationJx = &Grid<Data, gT>::getJxSecondOrder; interpolationJy = &Grid<Data, gT>::getJySecondOrder; interpolationJz = &Grid<Data, gT>::getJzSecondOrder; break; case Interpolation_FourthOrder: interpolationFields = &Grid<Data, gT>::getFieldsFourthOrder; interpolationEx = &Grid<Data, gT>::getExFourthOrder; interpolationEy = &Grid<Data, gT>::getEyFourthOrder; interpolationEz = &Grid<Data, gT>::getEzFourthOrder; interpolationBx = &Grid<Data, gT>::getBxFourthOrder; interpolationBy = &Grid<Data, gT>::getByFourthOrder; interpolationBz = &Grid<Data, gT>::getBzFourthOrder; interpolationJx = &Grid<Data, gT>::getJxFourthOrder; interpolationJy = &Grid<Data, gT>::getJyFourthOrder; interpolationJz = &Grid<Data, gT>::getJzFourthOrder; break; } } template<typename Data, GridTypes gT> inline InterpolationType Grid<Data, gT>::getInterpolationType() const { return interpolationType; } /*template<> inline void Grid<FP, YeeGridType>::dumpB(FP3 * b, const Int3 * minCellIdx, const Int3 * maxCellIdx) { Int3 numCells = *maxCellIdx - *minCellIdx; #pragma omp parallel for collapse(3) for (int i = 0; i < numCells.x; ++i) for (int j = 0; j < numCells.y; ++j) for (int k = 0; k < numCells.z; ++k) { int idx = numCells.y * numCells.z * i + numCells.z * j + k; Int3 nodeIdx = *minCellIdx + Int3(i, j, k); b[idx].x = Bx(nodeIdx); b[idx].y = By(nodeIdx); b[idx].z = Bz(nodeIdx); } } template<> inline void Grid<FP, YeeGridType>::dumpE(FP3 * e, const Int3 * minCellIdx, const Int3 * 
maxCellIdx) { Int3 numCells = *maxCellIdx - *minCellIdx; #pragma omp parallel for collapse(3) for (int i = 0; i < numCells.x; ++i) for (int j = 0; j < numCells.y; ++j) for (int k = 0; k < numCells.z; ++k) { int idx = numCells.y * numCells.z * i + numCells.z * j + k; Int3 nodeIdx = *minCellIdx + Int3(i, j, k); e[idx].x = Ex(nodeIdx); e[idx].y = Ey(nodeIdx); e[idx].z = Ez(nodeIdx); } } template<> inline void Grid<FP, YeeGridType>::dumpCurrents(FP3 * currents, const Int3 * minCellIdx, const Int3 * maxCellIdx) { Int3 numCells = *maxCellIdx - *minCellIdx; #pragma omp parallel for collapse(3) for (int i = 0; i < numCells.x; ++i) for (int j = 0; j < numCells.y; ++j) for (int k = 0; k < numCells.z; ++k) { int idx = numCells.y * numCells.z * i + numCells.z * j + k; Int3 nodeIdx = *minCellIdx + Int3(i, j, k); currents[idx].x = Jx(nodeIdx); currents[idx].y = Jy(nodeIdx); currents[idx].z = Jz(nodeIdx); idx++; } } template<> inline void Grid<FP, YeeGridType>::loadE(const FP3 * e, const Int3 * minCellIdx, const Int3 * maxCellIdx) { Int3 numCells = *maxCellIdx - *minCellIdx; #pragma omp parallel for collapse(3) for (int i = 0; i < numCells.x; i++) for (int j = 0; j < numCells.y; j++) for (int k = 0; k < numCells.z; k++) { int idx = numCells.y * numCells.z * i + numCells.z * j + k; Int3 nodeIdx = *minCellIdx + Int3(i, j, k); Ex(nodeIdx) = e[idx].x; Ey(nodeIdx) = e[idx].y; Ez(nodeIdx) = e[idx].z; } } template<> inline void Grid<FP, YeeGridType>::loadB(const FP3 * b, const Int3 * minCellIdx, const Int3 * maxCellIdx) { Int3 numCells = *maxCellIdx - *minCellIdx; #pragma omp parallel for collapse(3) for (int i = 0; i < numCells.x; ++i) for (int j = 0; j < numCells.y; ++j) for (int k = 0; k < numCells.z; ++k) { int idx = numCells.y * numCells.z * i + numCells.z * j + k; Int3 nodeIdx = *minCellIdx + Int3(i, j, k); Bx(nodeIdx) = b[idx].x; By(nodeIdx) = b[idx].y; Bz(nodeIdx) = b[idx].z; } } template<> inline void Grid<FP, YeeGridType>::loadCurrents(const FP3 * currents, const Int3 * 
minCellIdx, const Int3 * maxCellIdx) { Int3 numCells = *maxCellIdx - *minCellIdx; #pragma omp parallel for collapse(3) for (int i = 0; i < numCells.x; i++) for (int j = 0; j < numCells.y; j++) for (int k = 0; k < numCells.z; k++) { int idx = numCells.y * numCells.z * i + numCells.z * j + k; Int3 nodeIdx = *minCellIdx + Int3(i, j, k); Jx(nodeIdx) = currents[idx].x; Jy(nodeIdx) = currents[idx].y; Jz(nodeIdx) = currents[idx].z; } }*/ }
GB_unop__cos_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__cos_fc64_fc64)
// op(A') function: GB (_unop_tran__cos_fc64_fc64)

// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = ccos (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

// accessor for the p-th entry of the output array C
#define GB_CX(p) Cx [p]

// unary operator
// NOTE(review): `ccos` is presumably the C99 complex cosine, brought in via
// GB.h / <complex.h> — confirm against the GraphBLAS type definitions.
#define GB_OP(z, x) \
    z = ccos (x) ;

// casting (identity cast: A and C have the same type GxB_FC64_t)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = ccos (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_COS || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies z = ccos(x) entry-by-entry over anz entries.  Aliasing Cx == Ax is
// safe because each iteration reads Ax [p] before writing Cx [p] and touches
// no other index.
GrB_Info GB (_unop_apply__cos_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = ccos (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        // (entries with Ab [p] == 0 are absent and are skipped)
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = ccos (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unop_transpose.c, which expands in
// terms of the GB_* macros defined above.
GrB_Info GB (_unop_tran__cos_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__one_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__one_fp32_fp32 // op(A') function: GB_tran__one_fp32_fp32 // C type: float // A type: float // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CASTING(z, aij) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__one_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased float *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__one_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
10 - MP+MPI.c
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <sys/types.h> #include <omp.h> #include </usr/include/openmpi/mpi.h> #define SIZE_GAU 8 #define SIZE_JVL 15 int JAVA_COUNT = SIZE_JVL; int tag = 0; /* void gaules(void* g) { while(1) { //takeJavali(g); sleep(1); } } */ void main(int argc, char *argv[]) { int i; int size; int my_rank; MPI_Status status; char Name[8]; strcpy(Name,"Wel1iton"); MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); MPI_Comm_size(MPI_COMM_WORLD, &size); if(my_rank == 0) //cooker { int Javalis[1] = {14}; int Received[1]; while(1) { MPI_Recv(Received, 1, MPI_INT, 99, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE); MPI_Send(Javalis, 1, MPI_INT, 99, tag, MPI_COMM_WORLD); } } else //table { printf("-----------------[Dinner is Served]--------------\n"); #pragma omp parallel num_threads(SIZE_JVL) //gauleses { int Gaules[1]; Gaules[0] = omp_get_thread_num(); //printf(" + Gaules G[%c] is on the table.\n", gaules[0]); int Rank[1] = {my_rank}; int LuckyJava[1]; #pragma omp critical(section1) { if(JAVA_COUNT == 0) { printf("\n - Gaules [%d] get hungry first n wake up the cooker\n", gaules[0]); MPI_Send(Javalis,1,MPI_INT,0,0,MPI_COMM_WORLD); MPI_Recv(Javalis,1,MPI_INT,0,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE); } else { MPI_Recv(Recebidos,1,MPI_INT,2,0,MPI_COMM_WORLD,MPI_STATUS_IGNORE); printf("Mesa recebeu: %c e esta enviando %d Javalis\n",Nome[Recebidos[0]],Javalis[0]); //printf(" - G[%d] is eating the lucky javali %d n very happy.\n", gaules[0], JAVA_COUNT); MPI_Send(Javalis,1,MPI_INT,2,0,MPI_COMM_WORLD); Javalis[0]--; //JAVA_COUNT--; } } } } printf("-----------------[Dinner is Over]----------------\n"); MPI_Finalize(); }
GB_binop__rdiv_fc64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB (_AaddB__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__rdiv_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_fc64)
// A*D function (colscale): GB (_AxD__rdiv_fc64)
// D*A function (rowscale): GB (_DxB__rdiv_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_fc64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_fc64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_fc64)
// C=scalar+B GB (_bind1st__rdiv_fc64)
// C=scalar+B' GB (_bind1st_tran__rdiv_fc64)
// C=A+scalar GB (_bind2nd__rdiv_fc64)
// C=A'+scalar GB (_bind2nd_tran__rdiv_fc64)

// C type: GxB_FC64_t
// A type: GxB_FC64_t
// A pattern? 0
// B type: GxB_FC64_t
// B pattern? 0

// BinaryOp: cij = GB_FC64_div (bij, aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_BTYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    GxB_FC64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    GxB_FC64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    GxB_FC64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

// accessor for the p-th entry of the output array C
#define GB_CX(p) Cx [p]

// binary operator
// RDIV is "reverse divide": the operands are swapped, so z = y / x,
// matching the header comment "cij = GB_FC64_div (bij, aij)" above.
#define GB_BINOP(z,x,y,i,j) \
    z = GB_FC64_div (y, x) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_RDIV || GxB_NO_FC64 || GxB_NO_RDIV_FC64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rdiv_fc64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) 
; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rdiv_fc64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; GxB_FC64_t alpha_scalar ; GxB_FC64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ; beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rdiv_fc64) ( 
GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rdiv_fc64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rdiv_fc64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_FC64_div (bij, x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rdiv_fc64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_FC64_div (y, aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_div (aij, x) ; \ } GrB_Info GB (_bind1st_tran__rdiv_fc64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_div (y, aij) ; \ } GrB_Info GB (_bind2nd_tran__rdiv_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
rawMD5_fmt_plug.c
/* * Raw-MD5 (thick) based on Raw-MD4 w/ mmx/sse/intrinsics * This software is Copyright (c) 2011 magnum, and it is hereby released to the * general public under the following terms: Redistribution and use in source * and binary forms, with or without modification, are permitted. * * OMP added May 2013, JimF */ #if FMT_EXTERNS_H extern struct fmt_main fmt_rawMD5; #elif FMT_REGISTERS_H john_register_one(&fmt_rawMD5); #else #include <string.h> #include "arch.h" #include "md5.h" #include "common.h" #include "formats.h" #if !FAST_FORMATS_OMP #undef _OPENMP #endif #ifdef _OPENMP #ifdef MMX_COEF #define OMP_SCALE 1024 #else #define OMP_SCALE 2048 #endif #include <omp.h> #endif #include "sse-intrinsics.h" #include "memdbg.h" #define FORMAT_LABEL "Raw-MD5" #define FORMAT_NAME "" #define ALGORITHM_NAME "MD5 " MD5_ALGORITHM_NAME #ifdef MMX_COEF # define NBKEYS (MMX_COEF * MD5_SSE_PARA) # define DO_MMX_MD5(in, out) SSEmd5body(in, (unsigned int*)out, NULL, SSEi_MIXED_IN) #endif #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1 #ifndef MD5_BUF_SIZ #define MD5_BUF_SIZ 16 #endif #define CIPHERTEXT_LENGTH 32 #define DIGEST_SIZE 16 #define BINARY_SIZE 16 // source() #define BINARY_ALIGN 4 #define SALT_SIZE 0 #define SALT_ALIGN 1 #define FORMAT_TAG "$dynamic_0$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) static struct fmt_tests tests[] = { {"5a105e8b9d40e1329780d62ea2265d8a", "test1"}, {FORMAT_TAG "5a105e8b9d40e1329780d62ea2265d8a", "test1"}, {"098f6bcd4621d373cade4e832627b4f6", "test"}, {FORMAT_TAG "378e2c4a07968da2eca692320136433d", "thatsworking"}, {FORMAT_TAG "8ad8757baa8564dc136c1e07507f4a98", "test3"}, {"d41d8cd98f00b204e9800998ecf8427e", ""}, #ifdef DEBUG {FORMAT_TAG "c9ccf168914a1bcfc3229f1948e67da0","1234567890123456789012345678901234567890123456789012345"}, #if PLAINTEXT_LENGTH >= 80 {FORMAT_TAG "57edf4a22be3c955ac49da2e2107b67a","12345678901234567890123456789012345678901234567890123456789012345678901234567890"}, #endif #endif {NULL} }; #ifdef MMX_COEF #define 
PLAINTEXT_LENGTH 55 #define MIN_KEYS_PER_CRYPT NBKEYS #define MAX_KEYS_PER_CRYPT NBKEYS #define GETPOS(i, index) ( (index&(MMX_COEF-1))*4 + ((i)&(0xffffffff-3))*MMX_COEF + ((i)&3) + (index>>(MMX_COEF>>1))*MD5_BUF_SIZ*4*MMX_COEF ) #else #define PLAINTEXT_LENGTH 125 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #endif #ifdef MMX_COEF static ARCH_WORD_32 (*saved_key)[MD5_BUF_SIZ*NBKEYS]; static ARCH_WORD_32 (*crypt_key)[DIGEST_SIZE/4*NBKEYS]; #else static int (*saved_key_length); static char (*saved_key)[PLAINTEXT_LENGTH + 1]; static ARCH_WORD_32 (*crypt_key)[4]; #endif static void init(struct fmt_main *self) { #ifdef _OPENMP int omp_t = omp_get_max_threads(); self->params.min_keys_per_crypt = omp_t * MIN_KEYS_PER_CRYPT; omp_t *= OMP_SCALE; self->params.max_keys_per_crypt = omp_t * MAX_KEYS_PER_CRYPT; #endif #ifndef MMX_COEF saved_key_length = mem_calloc_tiny(sizeof(*saved_key_length) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); crypt_key = mem_calloc_tiny(sizeof(*crypt_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD); #else saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt/NBKEYS, MEM_ALIGN_SIMD); crypt_key = mem_calloc_tiny(sizeof(*crypt_key) * self->params.max_keys_per_crypt/NBKEYS, MEM_ALIGN_SIMD); #endif } static int valid(char *ciphertext, struct fmt_main *self) { char *p, *q; p = ciphertext; if (!strncmp(p, FORMAT_TAG, TAG_LENGTH)) p += TAG_LENGTH; q = p; while (atoi16[ARCH_INDEX(*q)] != 0x7F) { if (*q >= 'A' && *q <= 'F') /* support lowercase only */ return 0; q++; } return !*q && q - p == CIPHERTEXT_LENGTH; } static char *split(char *ciphertext, int index, struct fmt_main *self) { static char out[TAG_LENGTH + CIPHERTEXT_LENGTH + 1]; if (!strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) return ciphertext; memcpy(out, FORMAT_TAG, TAG_LENGTH); memcpy(out + TAG_LENGTH, ciphertext, CIPHERTEXT_LENGTH + 1); return 
out; } static void *binary(char *ciphertext) { static unsigned char *out; char *p; int i; if (!out) out = mem_alloc_tiny(DIGEST_SIZE, MEM_ALIGN_WORD); p = ciphertext + TAG_LENGTH; for (i = 0; i < DIGEST_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } #ifdef MMX_COEF #define HASH_OFFSET (index&(MMX_COEF-1))+((index%NBKEYS)/MMX_COEF)*MMX_COEF*4 static int get_hash_0(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xf; } static int get_hash_1(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xff; } static int get_hash_2(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xfff; } static int get_hash_3(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xffff; } static int get_hash_4(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xfffff; } static int get_hash_5(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0xffffff; } static int get_hash_6(int index) { return crypt_key[index/NBKEYS][HASH_OFFSET] & 0x7ffffff; } #else static int get_hash_0(int index) { return crypt_key[index][0] & 0xf; } static int get_hash_1(int index) { return crypt_key[index][0] & 0xff; } static int get_hash_2(int index) { return crypt_key[index][0] & 0xfff; } static int get_hash_3(int index) { return crypt_key[index][0] & 0xffff; } static int get_hash_4(int index) { return crypt_key[index][0] & 0xfffff; } static int get_hash_5(int index) { return crypt_key[index][0] & 0xffffff; } static int get_hash_6(int index) { return crypt_key[index][0] & 0x7ffffff; } #endif #ifdef MMX_COEF static void set_key(char *_key, int index) { const ARCH_WORD_32 *key = (ARCH_WORD_32*)_key; ARCH_WORD_32 *keybuffer = &((ARCH_WORD_32*)saved_key)[(index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*MD5_BUF_SIZ*MMX_COEF]; ARCH_WORD_32 *keybuf_word = keybuffer; unsigned int len; ARCH_WORD_32 temp; len = 0; while((temp = *key++) & 0xff) { if (!(temp & 0xff00)) { *keybuf_word = (temp & 0xff) | (0x80 << 8); len++; goto 
key_cleaning; } if (!(temp & 0xff0000)) { *keybuf_word = (temp & 0xffff) | (0x80 << 16); len+=2; goto key_cleaning; } if (!(temp & 0xff000000)) { *keybuf_word = temp | (0x80 << 24); len+=3; goto key_cleaning; } *keybuf_word = temp; len += 4; keybuf_word += MMX_COEF; } *keybuf_word = 0x80; #ifdef DEBUG /* This function is higly optimized and assumes that we are never ever given a key longer than fmt_params.plaintext_length. If we are, buffer overflows WILL happen */ if (len > PLAINTEXT_LENGTH) { fprintf(stderr, "\n** Core bug: got len %u\n'%s'\n", len, _key); error(); } #endif key_cleaning: keybuf_word += MMX_COEF; while(*keybuf_word) { *keybuf_word = 0; keybuf_word += MMX_COEF; } keybuffer[14*MMX_COEF] = len << 3; } #else static void set_key(char *key, int index) { int len = strlen(key); saved_key_length[index] = len; memcpy(saved_key[index], key, len); } #endif #ifdef MMX_COEF static char *get_key(int index) { static char out[PLAINTEXT_LENGTH + 1]; unsigned int i; ARCH_WORD_32 len = ((ARCH_WORD_32*)saved_key)[14*MMX_COEF + (index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*MD5_BUF_SIZ*MMX_COEF] >> 3; for(i=0;i<len;i++) out[i] = ((char*)saved_key)[GETPOS(i, index)]; out[i] = 0; return (char*)out; } #else static char *get_key(int index) { saved_key[index][saved_key_length[index]] = 0; return saved_key[index]; } #endif static int crypt_all(int *pcount, struct db_salt *salt) { int count = *pcount; int index = 0; #ifdef _OPENMP int loops = (count + MAX_KEYS_PER_CRYPT - 1) / MAX_KEYS_PER_CRYPT; #pragma omp parallel for for (index = 0; index < loops; index++) #endif { #if MMX_COEF DO_MMX_MD5(saved_key[index], crypt_key[index]); #else MD5_CTX ctx; MD5_Init(&ctx); MD5_Update(&ctx, saved_key[index], saved_key_length[index]); MD5_Final((unsigned char *)crypt_key[index], &ctx); #endif } return count; } static int cmp_all(void *binary, int count) { int index; for (index = 0; index < count; index++) #ifdef MMX_COEF if (((ARCH_WORD_32 *) binary)[0] == 
((ARCH_WORD_32*)crypt_key)[(index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*4*MMX_COEF]) #else if ( ((ARCH_WORD_32*)binary)[0] == crypt_key[index][0] ) #endif return 1; return 0; } static int cmp_one(void *binary, int index) { #ifdef MMX_COEF int i; for (i = 0; i < BINARY_SIZE/sizeof(ARCH_WORD_32); i++) if (((ARCH_WORD_32 *) binary)[i] != ((ARCH_WORD_32*)crypt_key)[(index&(MMX_COEF-1)) + (index>>(MMX_COEF>>1))*4*MMX_COEF+i*MMX_COEF]) return 0; return 1; #else return !memcmp(binary, crypt_key[index], BINARY_SIZE); #endif } static int cmp_exact(char *source, int index) { return 1; } static char *source(char *source, void *binary) { static char Buf[CIPHERTEXT_LENGTH + TAG_LENGTH + 1]; unsigned char *cpi; char *cpo; int i; strcpy(Buf, FORMAT_TAG); cpo = &Buf[TAG_LENGTH]; cpi = (unsigned char*)(binary); for (i = 0; i < BINARY_SIZE; ++i) { *cpo++ = itoa16[(*cpi)>>4]; *cpo++ = itoa16[*cpi&0xF]; ++cpi; } *cpo = 0; return Buf; } struct fmt_main fmt_rawMD5 = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, #ifdef _OPENMP FMT_OMP | FMT_OMP_BAD | #endif FMT_CASE | FMT_8_BIT, #if FMT_MAIN_VERSION > 11 { NULL }, #endif tests }, { init, fmt_default_done, fmt_default_reset, fmt_default_prepare, valid, split, binary, fmt_default_salt, #if FMT_MAIN_VERSION > 11 { NULL }, #endif source, { fmt_default_binary_hash_0, fmt_default_binary_hash_1, fmt_default_binary_hash_2, fmt_default_binary_hash_3, fmt_default_binary_hash_4, fmt_default_binary_hash_5, fmt_default_binary_hash_6 }, fmt_default_salt_hash, fmt_default_set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { get_hash_0, get_hash_1, get_hash_2, get_hash_3, get_hash_4, get_hash_5, get_hash_6 }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */
blas_server_omp.c
/*********************************************************************/ /* Copyright 2009, 2010 The University of Texas at Austin. */ /* All rights reserved. */ /* */ /* Redistribution and use in source and binary forms, with or */ /* without modification, are permitted provided that the following */ /* conditions are met: */ /* */ /* 1. Redistributions of source code must retain the above */ /* copyright notice, this list of conditions and the following */ /* disclaimer. */ /* */ /* 2. Redistributions in binary form must reproduce the above */ /* copyright notice, this list of conditions and the following */ /* disclaimer in the documentation and/or other materials */ /* provided with the distribution. */ /* */ /* THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY OF TEXAS AT */ /* AUSTIN ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, */ /* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ /* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ /* DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF TEXAS AT */ /* AUSTIN OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */ /* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES */ /* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE */ /* GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR */ /* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF */ /* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT */ /* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT */ /* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ /* POSSIBILITY OF SUCH DAMAGE. */ /* */ /* The views and conclusions contained in the software and */ /* documentation are those of the authors and should not be */ /* interpreted as representing official policies, either expressed */ /* or implied, of The University of Texas at Austin. 
*/ /*********************************************************************/ #include <stdbool.h> #include <stdio.h> #include <stdlib.h> //#include <sys/mman.h> #include "common.h" #ifndef USE_OPENMP #include "blas_server.c" #else #ifndef OMP_SCHED #define OMP_SCHED static #endif int blas_server_avail = 0; static void * blas_thread_buffer[MAX_PARALLEL_NUMBER][MAX_CPU_NUMBER]; #if __STDC_VERSION__ >= 201112L static atomic_bool blas_buffer_inuse[MAX_PARALLEL_NUMBER]; #else static _Bool blas_buffer_inuse[MAX_PARALLEL_NUMBER]; #endif void goto_set_num_threads(int num_threads) { int i=0, j=0; if (num_threads < 1) num_threads = blas_num_threads; if (num_threads > MAX_CPU_NUMBER) num_threads = MAX_CPU_NUMBER; if (num_threads > blas_num_threads) { blas_num_threads = num_threads; } blas_cpu_number = num_threads; omp_set_num_threads(blas_cpu_number); //adjust buffer for each thread for(i=0; i<MAX_PARALLEL_NUMBER; i++) { for(j=0; j<blas_cpu_number; j++){ if(blas_thread_buffer[i][j]==NULL){ blas_thread_buffer[i][j]=blas_memory_alloc(2); } } for(; j<MAX_CPU_NUMBER; j++){ if(blas_thread_buffer[i][j]!=NULL){ blas_memory_free(blas_thread_buffer[i][j]); blas_thread_buffer[i][j]=NULL; } } } #if defined(ARCH_MIPS64) //set parameters for different number of threads. 
blas_set_parameter(); #endif } void openblas_set_num_threads(int num_threads) { goto_set_num_threads(num_threads); } int blas_thread_init(void){ int i=0, j=0; blas_get_cpu_number(); blas_server_avail = 1; for(i=0; i<MAX_PARALLEL_NUMBER; i++) { for(j=0; j<blas_num_threads; j++){ blas_thread_buffer[i][j]=blas_memory_alloc(2); } for(; j<MAX_CPU_NUMBER; j++){ blas_thread_buffer[i][j]=NULL; } } return 0; } int BLASFUNC(blas_thread_shutdown)(void){ int i=0, j=0; blas_server_avail = 0; for(i=0; i<MAX_PARALLEL_NUMBER; i++) { for(j=0; j<MAX_CPU_NUMBER; j++){ if(blas_thread_buffer[i][j]!=NULL){ blas_memory_free(blas_thread_buffer[i][j]); blas_thread_buffer[i][j]=NULL; } } } return 0; } static void legacy_exec(void *func, int mode, blas_arg_t *args, void *sb){ if (!(mode & BLAS_COMPLEX)){ #ifdef EXPRECISION if (mode & BLAS_XDOUBLE){ /* REAL / Extended Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble *, BLASLONG, xdouble *, BLASLONG, xdouble *, BLASLONG, void *) = func; afunc(args -> m, args -> n, args -> k, ((xdouble *)args -> alpha)[0], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); } else #endif if (mode & BLAS_DOUBLE){ /* REAL / Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double *, BLASLONG, double *, BLASLONG, double *, BLASLONG, void *) = func; afunc(args -> m, args -> n, args -> k, ((double *)args -> alpha)[0], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); } else { /* REAL / Single */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG, void *) = func; afunc(args -> m, args -> n, args -> k, ((float *)args -> alpha)[0], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); } } else { #ifdef EXPRECISION if (mode & BLAS_XDOUBLE){ /* COMPLEX / Extended Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, xdouble, xdouble, xdouble *, BLASLONG, xdouble *, BLASLONG, xdouble *, BLASLONG, void *) = 
func; afunc(args -> m, args -> n, args -> k, ((xdouble *)args -> alpha)[0], ((xdouble *)args -> alpha)[1], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); } else #endif if (mode & BLAS_DOUBLE){ /* COMPLEX / Double */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, double, double, double *, BLASLONG, double *, BLASLONG, double *, BLASLONG, void *) = func; afunc(args -> m, args -> n, args -> k, ((double *)args -> alpha)[0], ((double *)args -> alpha)[1], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); } else { /* COMPLEX / Single */ void (*afunc)(BLASLONG, BLASLONG, BLASLONG, float, float, float *, BLASLONG, float *, BLASLONG, float *, BLASLONG, void *) = func; afunc(args -> m, args -> n, args -> k, ((float *)args -> alpha)[0], ((float *)args -> alpha)[1], args -> a, args -> lda, args -> b, args -> ldb, args -> c, args -> ldc, sb); } } } static void exec_threads(blas_queue_t *queue, int buf_index){ void *buffer, *sa, *sb; int pos=0, release_flag=0; buffer = NULL; sa = queue -> sa; sb = queue -> sb; #ifdef CONSISTENT_FPCSR __asm__ __volatile__ ("ldmxcsr %0" : : "m" (queue -> sse_mode)); __asm__ __volatile__ ("fldcw %0" : : "m" (queue -> x87_mode)); #endif if ((sa == NULL) && (sb == NULL) && ((queue -> mode & BLAS_PTHREAD) == 0)) { pos = omp_get_thread_num(); buffer = blas_thread_buffer[buf_index][pos]; //fallback if(buffer==NULL) { buffer = blas_memory_alloc(2); release_flag=1; } if (sa == NULL) { sa = (void *)((BLASLONG)buffer + GEMM_OFFSET_A); queue->sa=sa; } if (sb == NULL) { if (!(queue -> mode & BLAS_COMPLEX)){ #ifdef EXPRECISION if (queue -> mode & BLAS_XDOUBLE){ sb = (void *)(((BLASLONG)sa + ((QGEMM_P * QGEMM_Q * sizeof(xdouble) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); } else #endif if (queue -> mode & BLAS_DOUBLE){ sb = (void *)(((BLASLONG)sa + ((DGEMM_P * DGEMM_Q * sizeof(double) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); } else { sb = (void *)(((BLASLONG)sa + ((SGEMM_P * SGEMM_Q * sizeof(float) 
+ GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); } } else { #ifdef EXPRECISION if (queue -> mode & BLAS_XDOUBLE){ sb = (void *)(((BLASLONG)sa + ((XGEMM_P * XGEMM_Q * 2 * sizeof(xdouble) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); } else #endif if (queue -> mode & BLAS_DOUBLE){ sb = (void *)(((BLASLONG)sa + ((ZGEMM_P * ZGEMM_Q * 2 * sizeof(double) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); } else { sb = (void *)(((BLASLONG)sa + ((CGEMM_P * CGEMM_Q * 2 * sizeof(float) + GEMM_ALIGN) & ~GEMM_ALIGN)) + GEMM_OFFSET_B); } } queue->sb=sb; } } if (queue -> mode & BLAS_LEGACY) { legacy_exec(queue -> routine, queue -> mode, queue -> args, sb); } else if (queue -> mode & BLAS_PTHREAD) { void (*pthreadcompat)(void *) = queue -> routine; (pthreadcompat)(queue -> args); } else { int (*routine)(blas_arg_t *, void *, void *, void *, void *, BLASLONG) = queue -> routine; (routine)(queue -> args, queue -> range_m, queue -> range_n, sa, sb, queue -> position); } if (release_flag) blas_memory_free(buffer); } int exec_blas(BLASLONG num, blas_queue_t *queue){ BLASLONG i, buf_index; if ((num <= 0) || (queue == NULL)) return 0; #ifdef CONSISTENT_FPCSR for (i = 0; i < num; i ++) { __asm__ __volatile__ ("fnstcw %0" : "=m" (queue[i].x87_mode)); __asm__ __volatile__ ("stmxcsr %0" : "=m" (queue[i].sse_mode)); } #endif while(true) { for(i=0; i < MAX_PARALLEL_NUMBER; i++) { #if __STDC_VERSION__ >= 201112L _Bool inuse = false; if(atomic_compare_exchange_weak(&blas_buffer_inuse[i], &inuse, true)) { #else if(blas_buffer_inuse[i] == false) { blas_buffer_inuse[i] = true; #endif buf_index = i; break; } } if(i != MAX_PARALLEL_NUMBER) break; } #pragma omp parallel for schedule(OMP_SCHED) for (i = 0; i < num; i ++) { #ifndef USE_SIMPLE_THREADED_LEVEL3 queue[i].position = i; #endif exec_threads(&queue[i], buf_index); } #if __STDC_VERSION__ >= 201112L atomic_store(&blas_buffer_inuse[buf_index], false); #else blas_buffer_inuse[buf_index] = false; #endif return 0; } #endif
ast-dump-openmp-begin-declare-variant_decl_1.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s | FileCheck %s
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify -ast-dump %s -x c++| FileCheck %s
// expected-no-diagnostics
// FIXME: We have to improve the warnings here as nothing is impacted by the declare variant.
int also_before(void) {
  return 0;
}

#pragma omp begin declare variant match(device={kind(cpu)})
int also_before(void);
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={vendor(score(100):llvm)})
int also_after(void);
#pragma omp end declare variant
#pragma omp begin declare variant match(implementation={vendor(score(0):llvm)})
int also_before(void);
#pragma omp end declare variant

int also_after(void) {
  return 0;
}

int test(void) {
  // Should return 0.
  return also_after() + also_before();
}

// Make sure:
//  - we do see the ast nodes for the cpu kind
//  - we do see the ast nodes for the llvm vendor
//  - we pick the right callees

// CHECK: |-FunctionDecl [[ADDR_0:0x[a-z0-9]*]] <{{.*}}, line:7:1> line:5:5 used also_before 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_1:0x[a-z0-9]*]] <col:23, line:7:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_2:0x[a-z0-9]*]] <line:6:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_3:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: |-FunctionDecl [[ADDR_4:0x[a-z0-9]*]] prev [[ADDR_0]] <line:10:1, col:21> col:5 used also_before 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_5:0x[a-z0-9]*]] <line:13:1, col:20> col:5 used also_after 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_6:0x[a-z0-9]*]] prev [[ADDR_4]] <line:16:1, col:21> col:5 used also_before 'int ({{.*}})'
// CHECK-NEXT: |-FunctionDecl [[ADDR_7:0x[a-z0-9]*]] prev [[ADDR_5]] <line:19:1, line:21:1> line:19:5 used also_after 'int ({{.*}})'
// CHECK-NEXT: | `-CompoundStmt [[ADDR_8:0x[a-z0-9]*]] <col:22, line:21:1>
// CHECK-NEXT: | `-ReturnStmt [[ADDR_9:0x[a-z0-9]*]] <line:20:3, col:10>
// CHECK-NEXT: | `-IntegerLiteral [[ADDR_10:0x[a-z0-9]*]] <col:10> 'int' 0
// CHECK-NEXT: `-FunctionDecl [[ADDR_11:0x[a-z0-9]*]] <line:23:1, line:26:1> line:23:5 test 'int ({{.*}})'
// CHECK-NEXT: `-CompoundStmt [[ADDR_12:0x[a-z0-9]*]] <col:16, line:26:1>
// CHECK-NEXT: `-ReturnStmt [[ADDR_13:0x[a-z0-9]*]] <line:25:3, col:37>
// CHECK-NEXT: `-BinaryOperator [[ADDR_14:0x[a-z0-9]*]] <col:10, col:37> 'int' '+'
// CHECK-NEXT: |-CallExpr [[ADDR_15:0x[a-z0-9]*]] <col:10, col:21> 'int'
// CHECK-NEXT: | `-ImplicitCastExpr [[ADDR_16:0x[a-z0-9]*]] <col:10> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: | `-DeclRefExpr [[ADDR_17:0x[a-z0-9]*]] <col:10> 'int ({{.*}})' {{.*}}Function [[ADDR_7]] 'also_after' 'int ({{.*}})'
// CHECK-NEXT: `-CallExpr [[ADDR_18:0x[a-z0-9]*]] <col:25, col:37> 'int'
// CHECK-NEXT: `-ImplicitCastExpr [[ADDR_19:0x[a-z0-9]*]] <col:25> 'int (*)({{.*}})' <FunctionToPointerDecay>
// CHECK-NEXT: `-DeclRefExpr [[ADDR_20:0x[a-z0-9]*]] <col:25> 'int ({{.*}})' {{.*}}Function [[ADDR_6]] 'also_before' 'int ({{.*}})'

// NOTE: the declare-variant declarations carry no bodies, so the calls in
// test() must resolve to the base definitions ([[ADDR_6]]/[[ADDR_7]]); do not
// insert lines above this point — the CHECK patterns encode source line/column
// numbers.
pr96867.c
/* PR c++/96867 */
/* An OpenMP array section on a pointer (here `v[:]`) has no discoverable
   extent, so the length expression is mandatory; this test checks that the
   compiler emits the expected diagnostic instead of accepting it.  */

int *v;

void
foo (int x)
{
  #pragma omp target update to (x, v[:])	/* { dg-error "for pointer type length expression must be specified" } */
}
LAGraph_Sort3.c
//------------------------------------------------------------------------------ // LAGraph_Sort3: sort a 3-by-n list of integers, using A[0:2][ ] as the key //------------------------------------------------------------------------------ // LAGraph, (c) 2021 by The LAGraph Contributors, All Rights Reserved. // SPDX-License-Identifier: BSD-2-Clause // Contributed by Tim Davis, Texas A&M University. //------------------------------------------------------------------------------ // A parallel mergesort of an array of 3-by-n integers. Each key // consists of three integers. #define LAGraph_FREE_ALL LAGraph_Free ((void **) &W) ; #include "LG_internal.h" //------------------------------------------------------------------------------ // prototype only needed for LAGraph_Sort3 //------------------------------------------------------------------------------ void LG_msort_3b_create_merge_tasks ( // output: int64_t *LG_RESTRICT L_task, // L_task [t0...t0+ntasks-1] computed int64_t *LG_RESTRICT L_len, // L_len [t0...t0+ntasks-1] computed int64_t *LG_RESTRICT R_task, // R_task [t0...t0+ntasks-1] computed int64_t *LG_RESTRICT R_len, // R_len [t0...t0+ntasks-1] computed int64_t *LG_RESTRICT S_task, // S_task [t0...t0+ntasks-1] computed // input: const int t0, // first task tid to create const int ntasks, // # of tasks to create const int64_t pS_start, // merge into S [pS_start...] 
const int64_t *LG_RESTRICT L_0, // Left = L [pL_start...pL_end-1] const int64_t *LG_RESTRICT L_1, const int64_t *LG_RESTRICT L_2, const int64_t pL_start, const int64_t pL_end, const int64_t *LG_RESTRICT R_0, // Right = R [pR_start...pR_end-1] const int64_t *LG_RESTRICT R_1, const int64_t *LG_RESTRICT R_2, const int64_t pR_start, const int64_t pR_end ) ; //------------------------------------------------------------------------------ // LG_msort_3b_binary_search: binary search for the pivot //------------------------------------------------------------------------------ // The Pivot value is Y [pivot], and a binary search for the Pivot is made in // the array X [p_pstart...p_end-1], which is sorted in non-decreasing order on // input. The return value is pleft, where // // X [p_start ... pleft-1] <= Pivot and // X [pleft ... p_end-1] >= Pivot holds. // // pleft is returned in the range p_start to p_end. If pleft is p_start, then // the Pivot is smaller than all entries in X [p_start...p_end-1], and the left // list X [p_start...pleft-1] is empty. If pleft is p_end, then the Pivot is // larger than all entries in X [p_start...p_end-1], and the right list X // [pleft...p_end-1] is empty. 
static int64_t LG_msort_3b_binary_search    // return pleft
(
    const int64_t *LG_RESTRICT Y_0,         // Pivot is Y [pivot]
    const int64_t *LG_RESTRICT Y_1,
    const int64_t *LG_RESTRICT Y_2,
    const int64_t pivot,
    const int64_t *LG_RESTRICT X_0,         // search in X [p_start..p_end_-1]
    const int64_t *LG_RESTRICT X_1,
    const int64_t *LG_RESTRICT X_2,
    const int64_t p_start,
    const int64_t p_end
)
{

    //--------------------------------------------------------------------------
    // find where the Pivot appears in X
    //--------------------------------------------------------------------------

    // binary search of X [p_start...p_end-1] for the Pivot
    int64_t pleft = p_start ;
    int64_t pright = p_end - 1 ;
    while (pleft < pright)
    {
        int64_t pmiddle = (pleft + pright) >> 1 ;
        // less = (X [pmiddle] < Pivot); branch-free narrowing of the interval
        bool less = LG_lt_3 (X_0, X_1, X_2, pmiddle,
                             Y_0, Y_1, Y_2, pivot) ;
        pleft  = less ? (pmiddle+1) : pleft ;
        pright = less ? pright : pmiddle ;
    }

    // binary search is narrowed down to a single item
    // or it has found the list is empty:
    ASSERT (pleft == pright || pleft == pright + 1) ;

    // If found is true then X [pleft == pright] == Pivot.  If duplicates
    // appear then X [pleft] is any one of the entries equal to the Pivot
    // in the list.  If found is false then
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft+1 ... p_end-1] > Pivot holds.
    //    The value X [pleft] may be either < or > Pivot.
    bool found = (pleft == pright) && LG_eq_3 (X_0, X_1, X_2, pleft,
                                               Y_0, Y_1, Y_2, pivot) ;

    // Modify pleft and pright:
    if (!found && (pleft == pright))
    {
        if (LG_lt_3 (X_0, X_1, X_2, pleft, Y_0, Y_1, Y_2, pivot))
        {
            pleft++ ;
        }
        else
        {
            // pright++ ;  // (not needed)
        }
    }

    //--------------------------------------------------------------------------
    // return result
    //--------------------------------------------------------------------------

    // If found is false then
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft ... p_end-1] > Pivot holds,
    //    and pleft-1 == pright

    // If X has no duplicates, then whether or not Pivot is found,
    //    X [p_start ... pleft-1] < Pivot and
    //    X [pleft ... p_end-1] >= Pivot holds.

    // If X has duplicates, then whether or not Pivot is found,
    //    X [p_start ... pleft-1] <= Pivot and
    //    X [pleft ... p_end-1] >= Pivot holds.

    return (pleft) ;
}

//------------------------------------------------------------------------------
// LG_msort_3b_create_merge_tasks
//------------------------------------------------------------------------------

// Recursively constructs ntasks tasks to merge two arrays, Left and Right,
// into Sresult, where Left is L [pL_start...pL_end-1], Right is R
// [pR_start...pR_end-1], and Sresult is S [pS_start...pS_start+total_work-1],
// and where total_work is the total size of Left and Right.
//
// Task tid will merge L [L_task [tid] ... L_task [tid] + L_len [tid] - 1] and
// R [R_task [tid] ... R_task [tid] + R_len [tid] -1] into the merged output
// array S [S_task [tid] ... ].  The task tids created are t0 to t0+ntasks-1.

void LG_msort_3b_create_merge_tasks
(
    // output:
    int64_t *LG_RESTRICT L_task,        // L_task [t0...t0+ntasks-1] computed
    int64_t *LG_RESTRICT L_len,         // L_len [t0...t0+ntasks-1] computed
    int64_t *LG_RESTRICT R_task,        // R_task [t0...t0+ntasks-1] computed
    int64_t *LG_RESTRICT R_len,         // R_len [t0...t0+ntasks-1] computed
    int64_t *LG_RESTRICT S_task,        // S_task [t0...t0+ntasks-1] computed
    // input:
    const int t0,                       // first task tid to create
    const int ntasks,                   // # of tasks to create
    const int64_t pS_start,             // merge into S [pS_start...]
    const int64_t *LG_RESTRICT L_0,     // Left = L [pL_start...pL_end-1]
    const int64_t *LG_RESTRICT L_1,
    const int64_t *LG_RESTRICT L_2,
    const int64_t pL_start,
    const int64_t pL_end,
    const int64_t *LG_RESTRICT R_0,     // Right = R [pR_start...pR_end-1]
    const int64_t *LG_RESTRICT R_1,
    const int64_t *LG_RESTRICT R_2,
    const int64_t pR_start,
    const int64_t pR_end
)
{

    //--------------------------------------------------------------------------
    // get problem size
    //--------------------------------------------------------------------------

    int64_t nleft  = pL_end - pL_start ;    // size of Left array
    int64_t nright = pR_end - pR_start ;    // size of Right array
    int64_t total_work = nleft + nright ;   // total work to do
    ASSERT (ntasks >= 1) ;
    ASSERT (total_work > 0) ;

    //--------------------------------------------------------------------------
    // create the tasks
    //--------------------------------------------------------------------------

    if (ntasks == 1)
    {

        //----------------------------------------------------------------------
        // a single task will merge all of Left and Right into Sresult
        //----------------------------------------------------------------------

        L_task [t0] = pL_start ; L_len [t0] = nleft ;
        R_task [t0] = pR_start ; R_len [t0] = nright ;
        S_task [t0] = pS_start ;

    }
    else
    {

        //----------------------------------------------------------------------
        // partition the Left and Right arrays for multiple merge tasks
        //----------------------------------------------------------------------

        // pick the pivot from the larger list so both halves stay balanced
        int64_t pleft, pright ;
        if (nleft >= nright)
        {
            // split Left in half, and search for its pivot in Right
            pleft = (pL_end + pL_start) >> 1 ;
            pright = LG_msort_3b_binary_search (
                        L_0, L_1, L_2, pleft,
                        R_0, R_1, R_2, pR_start, pR_end) ;
        }
        else
        {
            // split Right in half, and search for its pivot in Left
            pright = (pR_end + pR_start) >> 1 ;
            pleft = LG_msort_3b_binary_search (
                        R_0, R_1, R_2, pright,
                        L_0, L_1, L_2, pL_start, pL_end) ;
        }

        //----------------------------------------------------------------------
        // partition the tasks according to the work of each partition
        //----------------------------------------------------------------------

        // work0 is the total work in the first partition
        int64_t work0 = (pleft - pL_start) + (pright - pR_start) ;
        int ntasks0 = (int) round ((double) ntasks *
            (((double) work0) / ((double) total_work))) ;

        // ensure at least one task is assigned to each partition
        ntasks0 = LAGraph_MAX (ntasks0, 1) ;
        ntasks0 = LAGraph_MIN (ntasks0, ntasks-1) ;
        int ntasks1 = ntasks - ntasks0 ;

        //----------------------------------------------------------------------
        // assign ntasks0 to the first half
        //----------------------------------------------------------------------

        // ntasks0 tasks merge L [pL_start...pleft-1] and R [pR_start..pright-1]
        // into the result S [pS_start...work0-1].

        LG_msort_3b_create_merge_tasks (
            L_task, L_len, R_task, R_len, S_task, t0, ntasks0, pS_start,
            L_0, L_1, L_2, pL_start, pleft,
            R_0, R_1, R_2, pR_start, pright) ;

        //----------------------------------------------------------------------
        // assign ntasks1 to the second half
        //----------------------------------------------------------------------

        // ntasks1 tasks merge L [pleft...pL_end-1] and R [pright...pR_end-1]
        // into the result S [pS_start+work0...pS_start+total_work].

        int t1 = t0 + ntasks0 ;     // first task id of the second set of tasks
        int64_t pS_start1 = pS_start + work0 ;  // 2nd set starts here in S
        LG_msort_3b_create_merge_tasks (
            L_task, L_len, R_task, R_len, S_task, t1, ntasks1, pS_start1,
            L_0, L_1, L_2, pleft, pL_end,
            R_0, R_1, R_2, pright, pR_end) ;
    }
}

//------------------------------------------------------------------------------
// LG_msort_3b_merge: merge two sorted lists via a single thread
//------------------------------------------------------------------------------

// merge Left [0..nleft-1] and Right [0..nright-1] into S [0..nleft+nright-1]

static void LG_msort_3b_merge
(
    int64_t *LG_RESTRICT S_0,               // output of length nleft + nright
    int64_t *LG_RESTRICT S_1,
    int64_t *LG_RESTRICT S_2,
    const int64_t *LG_RESTRICT Left_0,      // left input of length nleft
    const int64_t *LG_RESTRICT Left_1,
    const int64_t *LG_RESTRICT Left_2,
    const int64_t nleft,
    const int64_t *LG_RESTRICT Right_0,     // right input of length nright
    const int64_t *LG_RESTRICT Right_1,
    const int64_t *LG_RESTRICT Right_2,
    const int64_t nright
)
{
    int64_t p, pleft, pright ;

    // merge the two inputs, Left and Right, while both inputs exist
    for (p = 0, pleft = 0, pright = 0 ; pleft < nleft && pright < nright ; p++)
    {
        if (LG_lt_3 (Left_0, Left_1, Left_2, pleft,
                     Right_0, Right_1, Right_2, pright))
        {
            // S [p] = Left [pleft++]
            S_0 [p] = Left_0 [pleft] ;
            S_1 [p] = Left_1 [pleft] ;
            S_2 [p] = Left_2 [pleft] ;
            pleft++ ;
        }
        else
        {
            // S [p] = Right [pright++]
            S_0 [p] = Right_0 [pright] ;
            S_1 [p] = Right_1 [pright] ;
            S_2 [p] = Right_2 [pright] ;
            pright++ ;
        }
    }

    // either input is exhausted; copy the remaining list into S
    if (pleft < nleft)
    {
        int64_t nremaining = (nleft - pleft) ;
        memcpy (S_0 + p, Left_0 + pleft, nremaining * sizeof (int64_t)) ;
        memcpy (S_1 + p, Left_1 + pleft, nremaining * sizeof (int64_t)) ;
        memcpy (S_2 + p, Left_2 + pleft, nremaining * sizeof (int64_t)) ;
    }
    else if (pright < nright)
    {
        int64_t nremaining = (nright - pright) ;
        memcpy (S_0 + p, Right_0 + pright, nremaining * sizeof (int64_t)) ;
        memcpy (S_1 + p, Right_1 + pright, nremaining * sizeof (int64_t)) ;
        memcpy (S_2 + p, Right_2 + pright, nremaining * sizeof (int64_t)) ;
    }
}

//------------------------------------------------------------------------------
// LAGraph_Sort3: parallel mergesort
//------------------------------------------------------------------------------

int LAGraph_Sort3   // sort array A of size 3-by-n, using 3 keys (A [0:2][])
(
    int64_t *LG_RESTRICT A_0,   // size n array
    int64_t *LG_RESTRICT A_1,   // size n array
    int64_t *LG_RESTRICT A_2,   // size n array
    const int64_t n,
    int nthreads,               // # of threads to use
    char *msg
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    LG_CLEAR_MSG ;
    int64_t *LG_RESTRICT W = NULL ;
    LG_CHECK (A_0 == NULL, -1, "A_0 is NULL") ;
    LG_CHECK (A_1 == NULL, -1, "A_1 is NULL") ;
    LG_CHECK (A_2 == NULL, -1, "A_2 is NULL") ;

    //--------------------------------------------------------------------------
    // handle small problems with a single thread
    //--------------------------------------------------------------------------

    if (nthreads <= 1 || n <= LG_BASECASE)
    {
        // sequential quicksort
        LG_qsort_3 (A_0, A_1, A_2, n) ;
        return (0) ;
    }

    //--------------------------------------------------------------------------
    // determine # of tasks
    //--------------------------------------------------------------------------

    // determine the number of levels to create, which must always be an
    // even number.  The # of levels is chosen to ensure that the # of leaves
    // of the task tree is between 4*nthreads and 16*nthreads.

    //  2 to   4 threads:     4 levels, 16 qsort leaves
    //  5 to  16 threads:     6 levels, 64 qsort leaves
    // 17 to  64 threads:     8 levels, 256 qsort leaves
    // 65 to 256 threads:    10 levels, 1024 qsort leaves
    // 256 to 1024 threads:  12 levels, 4096 qsort leaves
    // ...
    int k = (int) (2 + 2 * ceil (log2 ((double) nthreads) / 2)) ;
    int ntasks = 1 << k ;

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    // W holds 3 shadow arrays of length n plus 6 task tables (see carving
    // below); freed by LAGraph_FREE_ALL on exit
    W = LAGraph_Malloc (3*n + 6*ntasks + 1, sizeof (int64_t)) ;
    LG_CHECK (W == NULL, -1, "out of memory") ;

    int64_t *T = W ;
    int64_t *LG_RESTRICT W_0    = T ; T += n ;
    int64_t *LG_RESTRICT W_1    = T ; T += n ;
    int64_t *LG_RESTRICT W_2    = T ; T += n ;
    int64_t *LG_RESTRICT L_task = T ; T += ntasks ;
    int64_t *LG_RESTRICT L_len  = T ; T += ntasks ;
    int64_t *LG_RESTRICT R_task = T ; T += ntasks ;
    int64_t *LG_RESTRICT R_len  = T ; T += ntasks ;
    int64_t *LG_RESTRICT S_task = T ; T += ntasks ;
    int64_t *LG_RESTRICT Slice  = T ; T += (ntasks+1) ;

    //--------------------------------------------------------------------------
    // partition and sort the leaves
    //--------------------------------------------------------------------------

    LG_eslice (Slice, n, ntasks) ;
    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (tid = 0 ; tid < ntasks ; tid++)
    {
        int64_t leaf = Slice [tid] ;
        int64_t leafsize = Slice [tid+1] - leaf ;
        LG_qsort_3 (A_0 + leaf, A_1 + leaf, A_2 + leaf, leafsize) ;
    }

    //--------------------------------------------------------------------------
    // merge each level
    //--------------------------------------------------------------------------

    // each iteration does two ping-pong passes (A->W then W->A), so the data
    // always ends a full iteration back in A
    int nt = 1 ;
    for ( ; k >= 2 ; k -= 2)
    {

        //----------------------------------------------------------------------
        // merge level k into level k-1, from A into W
        //----------------------------------------------------------------------

        // this could be done in parallel if ntasks was large
        for (int tid = 0 ; tid < ntasks ; tid += 2*nt)
        {
            // create 2*nt tasks to merge two A sublists into one W sublist
            LG_msort_3b_create_merge_tasks (
                L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
                A_0, A_1, A_2, Slice [tid],    Slice [tid+nt],
                A_0, A_1, A_2, Slice [tid+nt], Slice [tid+2*nt]) ;
        }

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // merge A [pL...pL+nL-1] and A [pR...pR+nR-1] into W [pS..]
            int64_t pL = L_task [tid], nL = L_len [tid] ;
            int64_t pR = R_task [tid], nR = R_len [tid] ;
            int64_t pS = S_task [tid] ;
            LG_msort_3b_merge (
                W_0 + pS, W_1 + pS, W_2 + pS,
                A_0 + pL, A_1 + pL, A_2 + pL, nL,
                A_0 + pR, A_1 + pR, A_2 + pR, nR) ;
        }
        nt = 2*nt ;

        //----------------------------------------------------------------------
        // merge level k-1 into level k-2, from W into A
        //----------------------------------------------------------------------

        // this could be done in parallel if ntasks was large
        for (int tid = 0 ; tid < ntasks ; tid += 2*nt)
        {
            // create 2*nt tasks to merge two W sublists into one A sublist
            LG_msort_3b_create_merge_tasks (
                L_task, L_len, R_task, R_len, S_task, tid, 2*nt, Slice [tid],
                W_0, W_1, W_2, Slice [tid],    Slice [tid+nt],
                W_0, W_1, W_2, Slice [tid+nt], Slice [tid+2*nt]) ;
        }

        #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
        for (tid = 0 ; tid < ntasks ; tid++)
        {
            // merge W [pL...pL+nL-1] and W [pR...pR+nR-1] into A [pS..]
            int64_t pL = L_task [tid], nL = L_len [tid] ;
            int64_t pR = R_task [tid], nR = R_len [tid] ;
            int64_t pS = S_task [tid] ;
            LG_msort_3b_merge (
                A_0 + pS, A_1 + pS, A_2 + pS,
                W_0 + pL, W_1 + pL, W_2 + pL, nL,
                W_0 + pR, W_1 + pR, W_2 + pR, nR) ;
        }
        nt = 2*nt ;
    }

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    LAGraph_FREE_ALL ;
    return (0) ;
}
splibmp.c
/*
 * Copyright (c) 2017 ltlollo
 * Licensed under the MIT license <LICENSE-MIT or
 * http://opensource.org/licenses/MIT>. This file may not be copied,
 * modified, or distributed except according to those terms.
 */

/* Secret-sharing file splitter/joiner: splitvp() writes a file as m shares
 * of which any n reconstruct it (via XOR combinations driven by n-of-m
 * combination enumeration); join() reverses the process.  raw_writefile()
 * uses OpenMP to parallelize the XOR reduction. */

#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <err.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <pthread.h>
#include <string.h>

#include "ebutil.h"
#include "splib.h"

#define UNLIKELY(x) __builtin_expect(!!(x), 0)
#define LIKELY(x) __builtin_expect(!!(x), 1)
/* length of a string-literal array minus its trailing '\0' */
#define CXLEN(sarr) (sizeof((sarr)) - sizeof((sarr[0])))

/* Shared working state for one split or join operation. */
typedef struct {
    Info comm;              /* file prelude: signature, n, m, element, size */
    FILE *rnd;              /* randomness source (split only) */
    FILE *input;            /* file being split, or join output */
    u8 obuf[BUFSIZE];       /* data / XOR accumulation buffer */
    u8 rbuf[BUFSIZE];       /* random / share read buffer */
    u8 combbuf[MAXNUM];     /* current n-of-m combination */
    u8 filledtab[MAXNUM];   /* per-share fill counters */
    FILE *files[MAXNUM];    /* the m share files */
    u8 filenum[MAXNUM];     /* element number recorded in each share */
} Data;

/* Small-string optimization for the generated share filenames. */
typedef struct {
    char *str;
    char buf[FLEN];
} StrBuf;

static u8 stable_min_pos(u8 *restrict, u8 *restrict, u8);
static u8 ndigits(u8 of);
static void xorv(u8 *restrict, u8 *restrict);
static void bsort(FILE *restrict[], u8 *restrict, u8);
static void gather_combfiles_front(FILE *restrict[], u8 *restrict, u8 *restrict,
    u8, u8);
static int raw_split(Data *);
static int raw_mm_split(Data *);
static int raw_nm_split(Data *);
static int raw_writedep(Data *, u8);
static int raw_join(Data *);
static int raw_mm_join(Data *);
static int raw_nm_join(Data *);
static int raw_writefile(Data *, u8);
static void populate_nschema(char *, size_t, u8, u8);
static void combbuf_init(u8 *, u8);
static u8 combbuf_next(u8 *, u8, u8);
static u8 combbuf_match(u8 *, u8 *, u8, u8);

/* Split `file` into m shares (n required to rebuild).  If `paths` is NULL the
 * share names are derived from `file` ("name.<i>.spl"); otherwise `paths`
 * supplies the m output names.  Returns 0 on success, -1 on error (partially
 * written shares are unlinked). */
int splitvp(u8 n, char const* paths[], u8 m, const char *file,
    const char *rndsrc) {
    if (m < n || n < 2 || m < 2) {
        warnx("M must be greater than N, and they must be greater than 1");
        return -1;
    }
    size_t len;
    size_t nslen;
    if ((len = strlen(file)) == 0) {
        warnx("filename cannot be empty");
        return -1;
    }
    Data d = {
        .rnd = NULL,
        .input = NULL,
        .comm = { .fsig = FSIG, .n = n, .m = m },
        .combbuf = {0},
        .filledtab = {0},
        .files = {NULL},
    };
    StrBuf nschema = {
        .str = nschema.buf,
    };
    u8 nd = ndigits(m);
    nslen = len +1 +nd +CXLEN(EXT)+ 1; //format:name+'.'+[0-255]+'.spl' +'\0'
    if (paths == NULL) {
        /* heap-allocate the name buffer only if it outgrows the inline one */
        if (nslen > FLEN) {
            if ((nschema.str = (char *)malloc(nslen*sizeof(char))) == NULL) {
                warn(NULL);
                return -1;
            }
        }
        strncpy(nschema.str, file, len);
        nschema.str[len] = '.';
        strcpy(nschema.str + len + 1 + nd, EXT);
    }
    int efatal = 0;
    u8 ifile = 0;
    if ((d.input = fopen(file, "r")) == NULL) {
        warn("%s", file);
        efatal = -1;
        goto CLEANUP;
    }
    if ((d.rnd = fopen(rndsrc, "r")) == NULL) {
        /* NOTE(review): message names `file`, but `rndsrc` is what failed to
         * open — looks like a copy/paste slip; confirm and fix upstream */
        warn("%s", file);
        efatal = -1;
        goto CLEANUP;
    }
    if (fseek(d.input, 0, SEEK_END) == -1) {
        warn("%s", file);
        efatal = -1;
        goto CLEANUP;
    }
    long fsize;
    if ((fsize = ftell(d.input)) == -1) {
        warn("%s", file);
        efatal = -1;
        goto CLEANUP;
    }
    d.comm.size = fsize;
    if (paths == NULL) {
        for (; ifile < m; ++ifile) {
            populate_nschema(nschema.str, len, nd, ifile);
            /* "w+x": fail rather than clobber an existing share */
            if ((d.files[ifile] = fopen(nschema.str, "w+x")) == NULL) {
                warn("%s", nschema.str);
                efatal = -1;
                goto CLEANUP;
            }
        }
    } else {
        for (; ifile < m; ++ifile) {
            if ((d.files[ifile] = fopen(paths[ifile], "w+x")) == NULL) {
                warn("%s", paths[ifile]);
                efatal = -1;
                goto CLEANUP;
            }
        }
    }
    if ((efatal = raw_split(&d)) != 0) {
        warn("raw_split");
    }
CLEANUP:
    /* close every opened share; on failure also delete the partial outputs */
    if (paths == NULL) {
        for (u8 i = 0; i < ifile; ++i) {
            (void)fclose(d.files[i]);
            if (efatal) {
                populate_nschema(nschema.str, len, nd, i);
                warnx("deleting %s", nschema.str);
                (void)unlink(nschema.str);
            }
        }
    } else {
        for (u8 i = 0; i < ifile; ++i) {
            (void)fclose(d.files[i]);
            if (efatal) {
                warnx("deleting %s", paths[i]);
                (void)unlink(paths[i]);
            }
        }
    }
    if (nschema.str != nschema.buf) {
        free(nschema.str);
    }
    if (d.input) {
        (void)fclose(d.input);
    }
    if (d.rnd) {
        (void)fclose(d.rnd);
    }
    return efatal;
}

/* Convenience wrapper: split with auto-generated share names. */
int split(u8 n, u8 m, const char *file, const char *rndsrc) {
    return splitvp(n, NULL, m, file, rndsrc);
}

/* Write the per-share preludes, then dispatch to the n==m or n<m scheme. */
static int raw_split(Data *d) {
    Info comm = {
        .fsig = d->comm.fsig,
        .ele = d->comm.ele,
        .n = d->comm.n,
        .m = d->comm.m,
        .size = htole64(d->comm.size)   /* size stored little-endian on disk */
    };
    for (u8 i = 0; i < comm.m; ++i) {
        comm.ele = i;
        if (fwrite(&comm, 1, sizeof(Info), d->files[i]) != sizeof(Info)) {
            return -1;
        }
    }
    return (d->comm.n == d->comm.m) ? raw_mm_split(d) : raw_nm_split(d);
}

/* n == m scheme: shares 0..n-2 are pure randomness; the last share is the
 * plaintext XORed with all of them. */
static int raw_mm_split(Data *d) {
    if (fseek(d->input, 0, SEEK_SET) == -1) {
        return -1;
    }
    size_t nc = 0;
    while ((nc = fread(d->obuf, 1, BUFSIZE, d->input))) {
        for (u8 i = 0; i < d->comm.n-1; ++i) {
            if (fread(d->rbuf, 1, nc, d->rnd) != nc) {
                return -1;
            }
            if (fwrite(d->rbuf, 1, nc, d->files[i]) != nc) {
                return -1;
            }
            xorv(d->obuf, d->rbuf);
        }
        if (fwrite(d->obuf, 1, nc, d->files[d->comm.n-1]) != nc) {
            return -1;
        }
    }
    /* success only if the read loop stopped at EOF, not on error */
    return (feof(d->input) == 1) ? 0 : -1;
}

/* n < m scheme: first fill every share with one file-sized random block,
 * then for each n-combination append one dependent (XOR) block to the
 * least-filled member of that combination. */
static int raw_nm_split(Data *d) {
    combbuf_init(d->combbuf, d->comm.n);
    size_t chunksize;
    do {
        chunksize = (d->comm.size < BUFSIZE) ? d->comm.size : BUFSIZE;
        for (u8 i = 0; i < d->comm.m; ++i) {
            if (fread(d->rbuf, 1, chunksize, d->rnd) != chunksize) {
                return -1;
            }
            if (fwrite(d->rbuf, 1, chunksize, d->files[i]) != chunksize) {
                return -1;
            }
        }
    } while ((d->comm.size -= chunksize));
    if (raw_writedep(d, 0) == -1) {
        return -1;
    }
    ++d->filledtab[0];
    while (combbuf_next(d->combbuf, d->comm.n, d->comm.m)) {
        u8 min_pos = stable_min_pos(d->filledtab, d->combbuf, d->comm.n);
        if (raw_writedep(d, min_pos) == -1) {
            return -1;
        }
        ++d->filledtab[d->combbuf[min_pos]];
    }
    return 0;
}

/* Index (within combbuf) of the combination member with the fewest
 * dependent blocks so far; ties keep the earliest (stable). */
static u8 stable_min_pos(u8 *restrict filledtab, u8 *restrict combbuf, u8 n) {
    u8 pos = 0;
    u8 min = filledtab[combbuf[0]];
    for (u8 i = 1; i < n; ++i) {
        if (UNLIKELY(filledtab[combbuf[i]] < min)) {
            pos = i;
            min = filledtab[combbuf[i]];
        }
    }
    return pos;
}

/* Append to share combbuf[min_pos] the plaintext XORed with the first block
 * of every other member of the current combination. */
static int raw_writedep(Data *d, u8 min_pos) {
    for (u8 i = 0; i < d->comm.n; ++i) {
        if (UNLIKELY(i == min_pos)) {
            continue;
        }
        /* rewind the contributing shares to their first data block */
        if (fseek(d->files[d->combbuf[i]], sizeof(Info), SEEK_SET) == -1) {
            return -1;
        }
    }
    if (fseek(d->input, 0, SEEK_SET) == -1) {
        return -1;
    }
    size_t nc;
    while ((nc = fread(d->obuf, 1, BUFSIZE, d->input))) {
        for (u8 i = 0; i < d->comm.n; ++i) {
            if (UNLIKELY(i == min_pos)) {
                continue;
            }
            if (fread(d->rbuf, 1, nc, d->files[d->combbuf[i]]) != nc) {
                warnx("WHAT: pos %u s", i);
                return -1;
            }
            xorv(d->obuf, d->rbuf);
        }
        if (fwrite(d->obuf, 1, nc, d->files[d->combbuf[min_pos]]) != nc) {
            return -1;
        }
    }
    /* restore the append position of the contributing shares */
    for (u8 i = 0; i < d->comm.n; ++i) {
        if (UNLIKELY(i == min_pos)) {
            continue;
        }
        if (fseek(d->files[d->combbuf[i]], 0, SEEK_END) == -1) {
            return -1;
        }
    }
    return (feof(d->input) == 1) ? 0 : -1;
}

/* Rebuild `out` from `size` share files.  Validates that all preludes agree,
 * sorts shares by element number, and rejects duplicates.  Returns 0 on
 * success, -1 on error (a partial output is unlinked). */
int join(const char *out, char *const fnames[], u8 size) {
    if (size < 2) {
        warnx("must pass n in [2, %u] fnames", MAXNUM);
        return -1;
    }
    Data d = {
        .rnd = NULL,
        .input = NULL,
        .combbuf = {0},
        .filledtab = {0},
        .files = {NULL}
    };
    if ((d.input = fopen(out, "w+x")) == NULL) {
        warn("%s", out);
        return -1;
    }
    int efatal = 0;
    u8 ifile = 0;
    for (; ifile < size; ++ifile) {
        if ((d.files[ifile] = fopen(fnames[ifile], "r")) == NULL) {
            efatal = -1;
            warn("%s", fnames[ifile]);
            goto CLEANUP;
        }
    }
    if (fread(&d.comm, 1, sizeof(Info), d.files[0]) != sizeof(Info)) {
        warn(NULL);
        efatal = -1;
        goto CLEANUP;
    }
    d.comm.size = le64toh(d.comm.size);
    if (d.comm.n > size) {
        warnx("not enough files provided");
        efatal = -1;
        goto CLEANUP;
    }
    if (d.comm.m < size) {
        warnx("too many files provided");
        efatal = -1;
        goto CLEANUP;
    }
    if ((d.filenum[0] = d.comm.ele) > d.comm.m) {
        warnx("element number out of range in %s", fnames[0]);
        efatal = -1;
        goto CLEANUP;
    }
    Info tmp;
    for (u8 i = 1; i < size; ++i) {
        if (fread(&tmp, 1, sizeof(Info), d.files[i]) != sizeof(Info)) {
            warn(NULL);
            efatal = -1;
            goto CLEANUP;
        }
        tmp.size = le64toh(tmp.size);
        /* every share must describe the same original file */
        if (tmp.fsig != d.comm.fsig || tmp.n != d.comm.n ||
            tmp.m != d.comm.m || tmp.size != d.comm.size) {
            warnx("incorrect file prelude, %s, %s preludes differ", fnames[0],
                fnames[i]);
            efatal = -1;
            goto CLEANUP;
        }
        if ((d.filenum[i] = tmp.ele) > d.comm.m) {
            warnx("element number out of range in %s", fnames[i]);
            efatal = -1;
            goto CLEANUP;
        }
    }
    d.comm.ele = size;
    bsort(d.files, d.filenum, d.comm.ele);
    for (u8 i = 0; i < d.comm.ele-1; ++i) {
        if (d.filenum[i] == d.filenum[i+1]) {
            warnx("found duplicate input file");
            efatal = -1;
            goto CLEANUP;
        }
    }
    if ((efatal = raw_join(&d)) != 0) {
        warn("raw_join");
    }
CLEANUP:
    (void)fclose(d.input);
    if (efatal) {
        (void)unlink(out);
    }
    for (u8 i = 0; i < ifile; ++i) {
        (void)fclose(d.files[i]);
    }
    return efatal;
}

/* Dispatch to the n==m or n<m reconstruction. */
static int raw_join(Data *d) {
    return (d->comm.n == d->comm.m) ? raw_mm_join(d) : raw_nm_join(d);
}

/* n == m reconstruction: XOR all shares together. */
static int raw_mm_join(Data *d) {
    size_t nc;
    while ((nc = fread(d->obuf, 1, BUFSIZE, d->files[0]))) {
        for (u8 i = 1; i < d->comm.n; ++i) {
            if (fread(d->rbuf, 1, nc, d->files[i]) != nc) {
                return -1;
            }
            xorv(d->obuf, d->rbuf);
        }
        if (fwrite(d->obuf, 1, nc, d->input) != nc) {
            return -1;
        }
    }
    return 0;
}

/* n < m reconstruction: replay the split's combination walk until the
 * combination matching the provided shares is found, then decode it. */
static int raw_nm_join(Data *d) {
    combbuf_init(d->combbuf, d->comm.n);
    if (combbuf_match(d->combbuf, d->filenum, d->comm.n, d->comm.m)) {
        return raw_writefile(d, 0);
    }
    ++d->filledtab[0];
    while (combbuf_next(d->combbuf, d->comm.n, d->comm.m)) {
        u8 min_pos = stable_min_pos(d->filledtab, d->combbuf, d->comm.n);
        if (UNLIKELY(combbuf_match(d->combbuf, d->filenum, d->comm.n,
                    d->comm.m))) {
            return raw_writefile(d, min_pos);
        }
        ++d->filledtab[d->combbuf[min_pos]];
    }
    /* no provided subset corresponds to a written combination */
    return -1;
}

/* Decode one matched combination: seek the dependent share to the correct
 * block and XOR it with the first block of the other members.  Pairs of
 * shares are XORed in an OpenMP worksharing loop; the master thread folds
 * the partial results and writes the output. */
static int raw_writefile(Data *d, u8 min_pos) {
    gather_combfiles_front(d->files, d->filenum, d->combbuf, d->comm.ele,
        min_pos);
    if (fseek(d->files[0], sizeof(Info) + d->comm.size *
            (1 + d->filledtab[d->combbuf[min_pos]]), SEEK_SET) == -1) {
        return -1;
    }
    u8 *buffers[MAXNUM/2+1] = {NULL};
    int err = 0;
    u8 f[BUFSIZE], s[BUFSIZE];
    while (d->comm.size) {
        size_t chunksize = d->comm.size < BUFSIZE ? d->comm.size : BUFSIZE;
#pragma omp parallel shared(err, buffers) private(f, s)
        {
#pragma omp for nowait schedule(static)
            for (u8 i = 0; i < d->comm.n/2 + 1; ++i) {
                if (fread(f, 1, chunksize, d->files[2*i]) != chunksize) {
                    err = -1;
                }
                if (2*i+1 < d->comm.n) {
                    if (fread(s, 1, chunksize, d->files[2*i+1]) != chunksize) {
                        err = -1;
                    }
                    xorv(f, s);
                }
                /* publish this pair's partial XOR to the master thread */
                buffers[i] = f;
#pragma omp flush(buffers)
            }
#pragma omp master
            {
                /* spin until each worker publishes, then fold into buffers[0] */
                for (u8 i = 1; i < d->comm.n/2 + d->comm.n%2; ++i) {
                    while (buffers[i] == NULL) {
#pragma omp flush(buffers)
                    }
                    xorv(buffers[0], buffers[i]);
                    buffers[i] = NULL;
                }
                if (fwrite(buffers[0], 1, chunksize, d->input) != chunksize) {
                    err = -1;
                }
#pragma omp flush(buffers)
            }
        }
        if (err) {
            return -1;
        }
        d->comm.size -= chunksize;
    }
    return 0;
}

/* Bubble sort of the share files keyed by element number (tiny n). */
static void bsort(FILE *restrict files[], u8 *restrict filenum, u8 ele) {
    u8 tmpf;
    FILE* tmps;
    for (u8 len = ele, n; len; len = n) {
        n = 0;
        for (u8 i = 1; i < len; ++i) {
            if (UNLIKELY(filenum[i-1] > filenum[i])) {
                tmpf = filenum[i];
                tmps = files[i];
                filenum[i] = filenum[i-1];
                filenum[i-1] = tmpf;
                files[i] = files[i-1];
                files[i-1] = tmps;
                n = i;
            }
        }
    }
}

/* First n-of-m combination: {0, 1, ..., n-1}. */
static inline void combbuf_init(u8 *combbuf, u8 n) {
    for (u8 i = 0; i < n; ++i) {
        combbuf[i] = i;
    }
}

/* Advance to the next n-of-m combination in lexicographic order.
 * Returns 0 once the last combination has been passed, else 1. */
static inline u8 combbuf_next(u8 *combbuf, u8 n, u8 m) {
    u8 i = n-1;
    ++combbuf[i];
    while (i && (combbuf[i] >= m - n+i+1)) {
        ++combbuf[--i];
    }
    if (UNLIKELY(combbuf[0] > m - n)) {
        return 0;
    }
    while (i++ < n) {
        combbuf[i] = combbuf[i-1]+1;
    }
    return 1;
}

/* 1 iff all n combination members occur in filenum[0..ele-1] (both sorted). */
static inline u8 combbuf_match(u8 *restrict combbuf, u8 *restrict filenum, u8 n,
    u8 ele) {
    u8 nmatch = 0;
    for (u8 i = 0; i < ele; ++i) {
        if (UNLIKELY(nmatch == n)) {
            break;
        }
        if (combbuf[nmatch] == filenum[i]) {
            ++nmatch;
        }
    }
    return (nmatch == n);
}

/* Write the zero-padded decimal of `of` into the nd digits after the dot. */
static inline void populate_nschema(char *nschema, size_t len, u8 nd, u8 of) {
    for (size_t in = len + nd; in != len; --in, of /= 10) {
        nschema[in] = '0' + of%10;
    }
}

/* Number of decimal digits of `of` (0 yields 0: no digits emitted). */
static inline u8 ndigits(u8 of) {
    if (of > 99) {
        return 3;
    } else if (of > 9) {
        return 2;
    } else if (LIKELY(of)) {
        return 1;
    } else {
        return 0;
    }
}

/* obuf ^= rbuf over the full BUFSIZE window. */
static inline void xorv(u8 *restrict obuf, u8 *restrict rbuf) {
    for (unsigned i = 0; i < BUFSIZE; ++i) {
        obuf[i] ^= rbuf[i];
    }
}

/* Move the files of the current combination to the front of `files`, then
 * swap the dependent (min_pos) member into slot 0. */
static void gather_combfiles_front(FILE *restrict files[], u8 *restrict filenum,
    u8 *restrict combbuf, u8 ele, u8 min_pos) {
    FILE* tmp;
    u8 xele = 0;
    u8 move = 0;
    for (u8 ifn = 0, icb = 0; ifn < ele; ++ifn) {
        if (filenum[ifn] == combbuf[icb]) {
            if (icb == min_pos) {
                xele = move;
            }
            tmp = files[move];
            files[move] = files[ifn];
            files[ifn] = tmp;
            ++icb;
            ++move;
        }
    }
    if (xele != 0) {
        tmp = files[0];
        files[0] = files[xele];
        files[xele] = tmp;
    }
}
parallel_macros.h
// ========================================================================== // SeqAn - The Library for Sequence Analysis // ========================================================================== // Copyright (c) 2006-2010, Knut Reinert, FU Berlin // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
// // ========================================================================== // Author: Manuel Holtgrewe <manuel.holtgrewe@fu-berlin.de> // ========================================================================== // Utility macros for parallelism. // ========================================================================== #ifndef SEQAN_PARALLEL_PARALLEL_MACROS_H_ #define SEQAN_PARALLEL_PARALLEL_MACROS_H_ /** .Macro.SEQAN_OMP_PRAGMA ..summary:Portable conditional $#pragma$ issuing if OpenMP is enabled. ..cat:Parallelism ..signature:SEQAN_OMP_PRAGMA(x) ..param.x:The string to issue behind $#pragma omp$. ..remarks:This macro uses portable pragma generation, dependent on the macro $_OPENMP$ being defined (as by the OpenMP standard). ..remarks:This is useful for disabling OpenMP pragmas on compilers that do not support OpenMP to suppress warnings. ..example.text:Parallelize loop with OpenMP if OpenMP is enabled: ..example.code: SEQAN_OMP_PRAGMA(parallel for) // becomes: #pragma omp parallel for for (int i = 0; i < x; ++i) { // Do work. } ..example.text:Make an addition atomic if OpenMP is enabled: ..example.code: SEQAN_OMP_PRAGMA(parallel atomic) // becomes: #pragma omp parallel atomic i += 1; */ #ifdef _OPENMP #include <omp.h> #if defined(PLATFORM_WINDOWS_MINGW) || defined(PLATFORM_GCC) // GCC _Pragma operator #define SEQAN_DO_PRAGMA(x) _Pragma(#x) #define SEQAN_OMP_PRAGMA(x) SEQAN_DO_PRAGMA(omp x) #else // #if defined(PLATFORM_WINDOWS_MINGW) || defined(PLATFORM_GCC) // MSVC __pragma-operator #define SEQAN_OMP_PRAGMA(x) __pragma (omp x) #endif // #if defined(PLATFORM_WINDOWS_MINGW) || defined(PLATFORM_GCC) #else // #ifdef _OPENMP #define SEQAN_OMP_PRAGMA(x) #endif // #ifdef _OPENMP #endif // SEQAN_PARALLEL_PARALLEL_MACROS_H_
GB_unaryop__abs_int64_uint16.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int64_uint16
// op(A') function:  GB_tran__abs_int64_uint16

// C type:   int64_t
// A type:   uint16_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = GB_IABS (aij)

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, aij) \
    int64_t z = (int64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_int64_uint16
(
    int64_t *Cx,        // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // each entry is independent, so the cast+abs is applied in parallel
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_int64_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared via this template include,
    // specialized by the GB_* macros defined above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
psicov21.c
/* PSICOV - Protein Sparse Inverse COVariance analysis program */

/* by David T. Jones August 2011 - Copyright (C) 2011 University College London */

/* This code is licensed under the terms of GNU General Public License v2 or later */

/* Version 2.1beta3 - Last Edit 27/4/14 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <ctype.h>
#include <math.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif

#define FALSE 0
#define TRUE 1

#define SQR(x) ((x)*(x))
#define MAX(x,y) ((x)>(y)?(x):(y))
#define MIN(x,y) ((x)<(y)?(x):(y))

#define MAXSEQLEN 5000

/* Minimum required effective number of sequences (tied to current seqlen) */
#define MINEFSEQS (seqlen)

/* Dump a rude message to standard error and exit */
void fail(char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt) ;
    fprintf(stderr, "*** ");
    vfprintf(stderr, fmt, ap);
    fputc('\n', stderr);

    exit(-1);
}

/* Convert AA letter to numeric code (0-21); non-letters map to 20 */
int aanum(int ch)
{
    /* Lookup keyed on the low 5 bits of the character ('A'..'Z' -> 1..26) */
    const static int aacvs[] =
    {
        999, 0, 3, 4, 3, 6, 13, 7, 8, 9, 21, 11, 10, 12, 2,
        21, 14, 5, 1, 15, 16, 21, 19, 17, 21, 18, 6
    };

    return (isalpha(ch) ? aacvs[ch & 31] : 20);
}

/* Allocate a rows x columns matrix as an array of row pointers; each row is
 * zero-initialised.  The row count is stashed in an int immediately before
 * the returned pointer array.
 * NOTE(review): `rp + sizeof(int)` is arithmetic on a void* (GCC extension),
 * and offsetting by sizeof(int) can leave the pointer array misaligned where
 * alignof(void *) > sizeof(int).  The stored count is presumably consumed by
 * a matching free routine elsewhere — confirm before changing this layout. */
void *allocmat(int rows, int columns, int size)
{
    int i;
    void **p, *rp;

    rp = malloc(rows * sizeof(void *) + sizeof(int));

    if (rp == NULL)
        fail("allocmat: malloc [] failed!");

    *((int *)rp) = rows;

    p = rp + sizeof(int);

    for (i = 0; i < rows; i++)
        if ((p[i] = calloc(columns, size)) == NULL)
            fail("allocmat: malloc [][] failed!");

    return p;
}

/* Allocate a zero-initialised vector of `columns` elements of `size` bytes */
void *allocvec(int columns, int size)
{
    void *p;

    p = calloc(columns, size);

    if (p == NULL)
        fail("allocvec: calloc failed!");

    return p;
}

/* This subroutine computes the L1 regularized covariance matrix estimate
   using the algorithm described in the paper:
   J. Friedman, T. Hastie, R. Tibshirani:
   Sparse inverse covariance estimation with the graphical lasso
   Biostatistics, 9(3):432-441, July 2008.
   This code is adapted from the Fortran code described in the following report:
   M. A. Sustik & B.
   Calderhead: GLASSOFAST: An efficient GLASSO implementation
   Technical Report TR-12-29, University of Texas at Austin

   NOTE: that when multiple threads are used, we gain a huge time saving by
   avoiding full thread synchronisation when updating elements of the W
   (covariance) matrix. In multithreaded mode, the order of updates to the W
   matrix at each iteration will depend on the order in which threads
   complete. In practice, this hardly matters, because the algorithm is
   iterative, and in testing still converges to within 6 d.p. of the
   non-threaded code. If a very small degree of non-deterministic behaviour
   really worries you, then set the maximum number of threads to 1 (or
   compile without OpenMP).
*/

#define EPS (1.1e-15)
#define BIG (1e9)

/* Graphical lasso solver (GLASSOFAST variant).
 * n        : problem dimension
 * S        : n x n sample covariance matrix (input)
 * L        : n x n per-element L1 penalties (rho); entries >= BIG are masked
 * thr      : relative convergence threshold
 * maxit    : maximum number of outer iterations
 * approxflg: if nonzero, skip coordinate updates with |delta| <= 1e-6
 * warm     : if nonzero, warm-start from the X/W passed in
 * X        : out — estimated sparse inverse covariance (precision) matrix
 * W        : out — estimated covariance matrix
 * Returns 0 when S is diagonal, otherwise the number of iterations run. */
int glassofast(const int n, double **S, double **L, const double thr, const int maxit, int approxflg, int warm, double **X, double **W)
{
    int i, j, ii, iter, jj;
    double a, b, c, delta, dlx, dw, shr, sum, thrlasso, tmp, wd[MAXSEQLEN*21], wxj[MAXSEQLEN*21];

    /* Sum of absolute off-diagonal entries of S; zero means S is diagonal */
    for (shr=ii=0; ii<n; ii++)
        for (jj=0; jj<n; jj++)
            shr += fabs(S[ii][jj]);

    for (i=0; i<n; i++)
        shr -= fabs(S[i][i]);

    if (shr == 0.0)
    {
        /* S is diagonal. */
        for (ii=0; ii<n; ii++)
            for (jj=0; jj<n; jj++)
                W[ii][jj] = X[ii][jj] = 0.0;

        for (i=0; i<n; i++)
            W[i][i] = W[i][i] + L[i][i];

        for (ii=0; ii<n; ii++)
            for (jj=0; jj<n; jj++)
                X[ii][jj] = 0.0;

        for (i=0; i<n; i++)
            X[i][i] = 1.0 / MAX(W[i][i], EPS);

        return 0;
    }

    shr *= thr/(n-1);
    thrlasso = shr/n;
    if (thrlasso < 2*EPS)
        thrlasso = 2*EPS;

    if (!warm)
    {
        /* Cold start: W = S, X = 0 */
        for (ii=0; ii<n; ii++)
            for (jj=0; jj<n; jj++)
            {
                W[ii][jj] = S[ii][jj];
                X[ii][jj] = 0.0;
            }
    }
    else
    {
        /* Warm start: convert X rows back to lasso coefficient form */
        for (i=0; i<n; i++)
        {
            for (ii=0; ii<n; ii++)
                X[i][ii] = -X[i][ii]/X[i][i];
            X[i][i] = 0.0;
        }
    }

    for (i=0; i<n; i++)
    {
        wd[i] = S[i][i] + L[i][i];
        W[i][i] = wd[i];
    }

    for (iter = 1; iter<=maxit; iter++)
    {
        dw = 0.0;

        /* One lasso subproblem per column j; W row/column updates are
         * deliberately unsynchronised (see NOTE above) */
#pragma omp parallel for default(shared) private(i,j,ii,wxj,a,b,c,dlx,delta,sum)
        for (j=0; j<n; j++)
        {
            for (ii=0; ii<n; ii++)
                wxj[ii] = 0.0;

            for (i=0; i<n; i++)
                if (X[j][i] != 0.0)
                    for (ii=0; ii<n; ii++)
                        wxj[ii] += W[i][ii] * X[j][i];

            /* Coordinate descent until the largest update is below threshold */
            for (;;)
            {
                dlx = 0.0;

                for (i=0; i<n; i++)
                {
                    if (i != j && L[j][i] < BIG)
                    {
                        a = S[j][i] - wxj[i] + wd[i] * X[j][i];
                        b = fabs(a) - L[j][i];
                        if (b <= 0.0)
                            c = 0.0;    /* soft-thresholded to zero */
                        else if (a >= 0.0)
                            c = b / wd[i];
                        else
                            c = -b / wd[i];

                        delta = c - X[j][i];
                        if (delta != 0.0 && (!approxflg || fabs(delta) > 1e-6))
                        {
                            X[j][i] = c;

                            for (ii=0; ii<n; ii++)
                                wxj[ii] += W[i][ii] * delta;

                            if (fabs(delta) > dlx)
                                dlx = fabs(delta);
                        }
                    }
                }

                if (dlx < thrlasso)
                    break;
            }

            wxj[j] = wd[j];

            for (sum=ii=0; ii<n; ii++)
                sum += fabs(wxj[ii] - W[j][ii]);

#pragma omp critical
            if (sum > dw)
                dw = sum;

            for (ii=0; ii<n; ii++)
                W[j][ii] = wxj[ii];
            for (ii=0; ii<n; ii++)
                W[ii][j] = wxj[ii];
        }

        if (dw <= shr)
            break;
    }

    /* Recover the precision matrix X from the lasso coefficients */
    for (i=0; i<n; i++)
    {
        for (sum=ii=0; ii<n; ii++)
            sum += X[i][ii] * W[i][ii];

        tmp = 1.0 / (wd[i] - sum);

        for (ii=0; ii<n; ii++)
            X[i][ii] = -tmp * X[i][ii];
        X[i][i] = tmp;
    }

    /* Symmetrise X (threaded updates may leave small asymmetries) */
    for (i=0; i<n-1; i++)
    {
        for (ii=i+1; ii<n; ii++)
        {
            X[i][ii] = 0.5 * (X[i][ii] + X[ii][i]);
            X[ii][i] = X[i][ii];
        }
    }

    return iter;
}

/* Perform Cholesky decomposition on matrix */
/* Returns 0 if `a` is positive definite, 1 otherwise; `a` is modified in
 * place.
 * NOTE(review): diag is a function-local static allocated on the first call
 * and sized for that first n; presumably every call uses the same n —
 * confirm, since a later larger n would overrun it. */
int test_cholesky(double **a, const int n)
{
    int i, j, k, status=0;
    double sum;
    static double *diag;

    if (diag == NULL)
        diag = allocvec(n, sizeof(double));

    for (i=0; i<n; i++)
    {
        if (!status)
            for (j=i; j<n; j++)
            {
                sum = a[i][j];

                for (k=i-1; k >= 0; k--)
                    sum -= a[i][k]*a[j][k];

                if (i == j)
                {
                    if (sum <= 0.0)
                        status = 1;    /* not positive definite */

                    diag[i] = sqrt(sum);
                }
                else
                    a[j][i] = sum / diag[i];
            }
    }

    return status;
}

/* Scored residue pair; sclist is the global list built by main() */
struct sc_entry
{
    double sc;
    int i, j;
} *sclist;

/* Sort descending */
int cmpfn(const void *a, const void *b)
{
    if (((struct sc_entry *)a)->sc == ((struct sc_entry *)b)->sc)
        return 0;

    if (((struct sc_entry *)a)->sc < ((struct sc_entry *)b)->sc)
        return 1;

    return -1;
}

/* Read an alignment, estimate a sparse inverse covariance matrix with the
 * graphical lasso, and print predicted contacts in CASP RR format. */
int main(int argc, char **argv)
{
    int a, b, i, j, k, seqlen, nids, s, nseqs, ncon, opt, ndim, filtflg=0, approxflg=0, initflg=0, apcflg=1, maxit=10000, npair, nnzero, niter, jerr, shrinkflg=1, rawscflg = 1, pseudoc = 1, minseqsep = 5, overrideflg=0;
    unsigned int *wtcount, ccount[MAXSEQLEN];
    double thresh=1e-4, del, **pcmat, *pcsum, pcmean, pc, trialrho, rhodefault = -1.0;
    double sum, score, **pa, wtsum, lambda, smean, fnzero, lastfnzero, rfact, r2, targfnzero = 0.0, scsum, scsumsq, mean, sd, zscore, ppv;
    double *weight, idthresh = -1.0, maxgapf = 0.9;
    char buf[4096], seq[MAXSEQLEN], *blockfn = NULL, **aln;
    FILE *ifp;

    /* Parse command-line options */
    while ((opt = getopt(argc, argv, "aflnopr:b:i:t:c:g:d:j:z:")) >= 0)
        switch (opt)
        {
        case 'a':
            approxflg = 1;
            break;
        case 'n':
            shrinkflg = 0;
            break;
        case 'o':
            overrideflg = 1;
            break;
        case 'p':
            rawscflg = 0;
            break;
        case 'f':
            filtflg = 1;
            break;
        case 'l':
            apcflg = 0;
            break;
        case 'r':
            rhodefault = atof(optarg);
            break;
        case 'd':
            targfnzero = atof(optarg);
            if (targfnzero < 5e-5 || targfnzero >= 1.0)
                fail("Target density value must be in range 5e-5 >= d < 1!");
            break;
        case 't':
            thresh = atof(optarg);
            break;
        case 'i':
            idthresh = 1.0 - atof(optarg)/100.0;
            break;
        case 'c':
            pseudoc = atoi(optarg);
            break;
        case 'j':
            minseqsep = atoi(optarg);
            break;
        case 'b':
            blockfn = strdup(optarg);
            break;
        case 'g':
            maxgapf = atof(optarg);
            break;
        case 'z':
#ifdef _OPENMP
            omp_set_num_threads(atoi(optarg));
#endif
            break;
        case '?':
            exit(-1);
        }

    if (optind >= argc)
        fail("Usage: psicov [options] alnfile\n\nOptions:\n-a\t: use approximate Lasso algorithm\n-n\t: don't pre-shrink the sample covariance matrix\n-f\t: filter low-scoring contacts\n-p\t: output PPV estimates rather than raw scores\n-l\t: don't apply APC to Lasso output\n-r nnn\t: set initial rho parameter\n-d nnn\t: set target precision matrix sparsity (default 0 = not specified)\n-t nnn\t: set Lasso convergence threshold (default 1e-4)\n-i nnn\t: select BLOSUM-like weighting with given identity threshold (default selects threshold automatically)\n-c nnn\t: set pseudocount value (default 1)\n-j nnn\t: set minimum sequence separation (default 5)\n-g nnn\t: maximum fraction of gaps (default 0.9)\n-z nnn\t: set maximum no. of threads\n-b file\t: read rho parameter file\n");

    ifp = fopen(argv[optind], "r");
    if (!ifp)
        fail("Unable to open alignment file!");

    /* First pass: count sequences */
    for (nseqs=0;; nseqs++)
        if (!fgets(seq, MAXSEQLEN, ifp))
            break;

    aln = allocvec(nseqs, sizeof(char *));
    weight = allocvec(nseqs, sizeof(double));
    wtcount = allocvec(nseqs, sizeof(unsigned int));

    rewind(ifp);

    if (!fgets(seq, MAXSEQLEN, ifp))
        fail("Bad alignment file!");

    seqlen = strlen(seq)-1;    /* drop the trailing newline */

    if (!(aln[0] = malloc(seqlen)))
        fail("Out of memory!");

    for (j=0; j<seqlen; j++)
        aln[0][j] = aanum(seq[j]);

    for (i=1; i<nseqs; i++)
    {
        if (!fgets(seq, MAXSEQLEN, ifp))
            break;

        if (seqlen != strlen(seq)-1)
            fail("Length mismatch in alignment file!");

        if (!(aln[i] = malloc(seqlen)))
            fail("Out of memory!");

        for (j=0; j<seqlen; j++)
            aln[i][j] = aanum(seq[j]);
    }

    /* Calculate sequence weights (use openMP/pthreads if available) */
    if (idthresh < 0.0)
    {
        /* Pick the identity threshold automatically from the mean pairwise
         * fractional identity */
        double meanfracid = 0.0;

#pragma omp parallel for default(shared) private(j,k) reduction(+:meanfracid)
        for (i=0; i<nseqs; i++)
            for (j=i+1; j<nseqs; j++)
            {
                int nids;
                double fracid;

                for (nids=k=0; k<seqlen; k++)
                    nids += (aln[i][k] == aln[j][k]);

                fracid = (double)nids / seqlen;

                meanfracid += fracid;
            }

        meanfracid /= 0.5 * nseqs * (nseqs - 1.0);

        idthresh = MIN(0.6, 0.38 * 0.32 / meanfracid);

//	printf("idthresh = %f  meanfracid = %f\n", idthresh, meanfracid);
    }

    /* Count, for every sequence, how many others are within the identity
     * threshold (early-exit mismatch counting). */
#pragma omp parallel for default(shared) private(j,k)
    for (i=0; i<nseqs; i++)
        for (j=i+1; j<nseqs; j++)
        {
            int nthresh = seqlen * idthresh;

            for (k=0; nthresh > 0 && k<seqlen; k++)
                nthresh -= (aln[i][k] != aln[j][k]);

            if (nthresh > 0)
            {
                /* NOTE(review): the critical construct binds only to the
                 * single statement below, so wtcount[j]++ is updated
                 * outside the critical section — a data race between
                 * threads. */
#pragma omp critical
                wtcount[i]++;
                wtcount[j]++;
            }
        }

    /* BLOSUM-style weights: 1 / (1 + number of similar sequences) */
    for (wtsum=i=0; i<nseqs; i++)
        wtsum += (weight[i] = 1.0 / (1 + wtcount[i]));

//    printf("wtsum = %f\n", wtsum);

    if (wtsum < MINEFSEQS && !overrideflg)
        fail("Sorry - not enough sequences or sequence diversity to proceed!\nNeff (%f) < MINEFSEQS (%d)\nIf you want to force a calculation at your own risk, adjust MINEFSEQS or use -o to override.\n", wtsum, MINEFSEQS);

    pa = allocmat(seqlen, 21, sizeof(double));

    /* Calculate singlet frequencies with pseudocount */
    for (i=0; i<seqlen; i++)
    {
        for (a=0; a<21; a++)
            pa[i][a] = pseudoc;

        for (k=0; k<nseqs; k++)
        {
            a = aln[k][i];
            if (a < 21)
                pa[i][a] += weight[k];
        }

        for (a=0; a<21; a++)
            pa[i][a] /= pseudoc * 21.0 + wtsum;
    }

    double **cmat, **rho, **ww, **wwi, **tempmat;

    ndim = seqlen * 21;

    cmat = allocmat(ndim, ndim, sizeof(double));
    tempmat = allocmat(ndim, ndim, sizeof(double));

    /* Form the covariance matrix */
#pragma omp parallel for default(shared) private(j,k,a,b)
    for (i=0; i<seqlen; i++)
        for (j=i; j<seqlen; j++)
        {
            double pab[21][21];

            /* Pairwise frequencies with pseudocount */
            for (a=0; a<21; a++)
                for (b=0; b<21; b++)
                    if (i == j)
                        pab[a][b] = (a == b) ? pa[i][a] : 0.0;
                    else
                        pab[a][b] = pseudoc / 21.0;

            if (i != j)
            {
                for (k=0; k<nseqs; k++)
                {
                    a = aln[k][i];
                    b = aln[k][j];
                    if (a < 21 && b < 21)
                        pab[a][b] += weight[k];
                }

                for (a=0; a<21; a++)
                    for (b=0; b<21; b++)
                        pab[a][b] /= pseudoc * 21.0 + wtsum;
            }

            for (a=0; a<21; a++)
                for (b=0; b<21; b++)
                    if (i != j || a == b)
                        cmat[i*21+a][j*21+b] = cmat[j*21+b][i*21+a] = pab[a][b] - pa[i][a] * pa[j][b];
        }

    /* Shrink sample covariance matrix towards shrinkage target F = Diag(1,1,1,...,1) * smean */
    if (shrinkflg)
    {
        for (smean=i=0; i<ndim; i++)
            smean += cmat[i][i];

        smean /= (double)ndim;

        lambda = 0.2;

        /* Repeat shrinkage until the matrix becomes positive definite */
        for (;;)
        {
            for (i=0; i<ndim; i++)
                memcpy(tempmat[i], cmat[i], ndim*sizeof(double));

            /* Test if positive definite using Cholesky decomposition */
            if (!test_cholesky(tempmat, ndim))
                break;

#pragma omp parallel for default(shared) private(j,a,b)
            for (i=0; i<seqlen; i++)
                for (j=0; j<seqlen; j++)
                    for (a=0; a<21; a++)
                        for (b=0; b<21; b++)
                            if (i != j)
                                cmat[i*21+a][j*21+b] *= 1.0 - lambda;
                            else if (a == b)
                                cmat[i*21+a][j*21+b] = smean * lambda + (1.0 - lambda) * cmat[i*21+a][j*21+b];
        }
    }

    rho = allocmat(ndim, ndim, sizeof(double));
    ww = allocmat(ndim, ndim, sizeof(double));
    wwi = allocmat(ndim, ndim, sizeof(double));

    lastfnzero=0.0;

    /* Guess at a reasonable starting rho value if undefined */
    if (rhodefault < 0.0)
        trialrho = MAX(0.001, 1.0 / wtsum);
    else
        trialrho = rhodefault;

    rfact = 0.0;

    /* Iterate the lasso, adjusting rho to hit the target sparsity */
    for (;;)
    {
        /* NOTE(review): besttd/bestrho are re-declared (and besttd reset to
         * BIG) on every pass of this loop, so the "best rho found so far"
         * never carries across iterations, and bestrho is read
         * uninitialised if trialrho leaves (0,1) before any assignment. */
        double targdiff, besttd = BIG, bestrho;

        if (trialrho <= 0.0 || trialrho >= 1.0)
        {
            /* Give up search - recalculate with best rho found so far and exit */
            trialrho = bestrho;
            targfnzero = 0.0;
        }

        for (i=0; i<ndim; i++)
            for (j=0; j<ndim; j++)
                rho[i][j] = trialrho;

        /* Mask same-site off-diagonal entries and high-gap columns */
        for (i=0; i<seqlen; i++)
            for (j=0; j<seqlen; j++)
                for (a=0; a<21; a++)
                    for (b=0; b<21; b++)
                        if ((a != b && i == j) || pa[i][20] > maxgapf || pa[j][20] > maxgapf)
                            rho[i*21+a][j*21+b] = BIG;

        /* Mask out regions if block-out list provided */
        if (blockfn != NULL)
        {
            ifp = fopen(blockfn, "r");

            for (;;)
            {
                if (fscanf(ifp, "%d %d %lf", &i, &j, &score) != 3)
                    break;

                for (a=0; a<21; a++)
                    for (b=0; b<21; b++)
                    {
                        rho[(i-1)*21+a][(j-1)*21+b] = score;
                        rho[(j-1)*21+b][(i-1)*21+a] = score;
                    }
            }

            fclose(ifp);
        }

        glassofast(ndim, cmat, rho, thresh, maxit, approxflg, initflg, wwi, ww);

        /* Don't attempt iteration if too few sequences */
        if (targfnzero <= 0.0 || wtsum < seqlen)
            break;

        /* Fraction of nonzero off-diagonal precision entries */
        for (npair=nnzero=i=0; i<ndim; i++)
            for (j=i+1; j<ndim; j++,npair++)
                if (wwi[i][j] != 0.0)
                    nnzero++;

        fnzero = (double) nnzero / npair;

//	printf("rho=%f fnzero = %f\n", trialrho, fnzero);

        /* Stop iterating if we have achieved the target sparsity level */
        targdiff = fabs(fnzero - targfnzero)/targfnzero;

        if (targdiff < 0.01)
            break;

        if (targdiff < besttd)
        {
            besttd = targdiff;
            bestrho = trialrho;
        }

        if (fnzero == 0.0)
        {
            /* As we have guessed far too high, halve rho and try again */
            trialrho *= 0.5;
            continue;
        }

        if (lastfnzero > 0.0 && fnzero != lastfnzero)
        {
//	    printf("fnzero=%f lastfnzero=%f trialrho=%f oldtrialrho=%f\n", fnzero, lastfnzero, trialrho, trialrho/rfact);

            rfact = pow(rfact, log(targfnzero / fnzero) / log(fnzero / lastfnzero));

//	    printf("New rfact = %f\n", rfact);
        }

        lastfnzero = fnzero;

        /* Make a small trial step in the appropriate direction */
        if (rfact == 0.0)
            rfact = (fnzero < targfnzero) ? 0.9 : 1.1;

        trialrho *= rfact;
    }

    /* Calculate background corrected scores using average product correction */
    pcmat = allocmat(seqlen, seqlen, sizeof(double));
    pcsum = allocvec(seqlen, sizeof(double));

    pcmean = 0.0;

    for (i=0; i<seqlen; i++)
        for (j=i+1; j<seqlen; j++)
        {
            /* L1 norm of the 20x20 (gap-excluded) precision sub-block */
            for (pc=a=0; a<20; a++)
                for (b=0; b<20; b++)
                    pc += fabs(wwi[i*21+a][j*21+b]);

            pcmat[i][j] = pcmat[j][i] = pc;
            pcsum[i] += pc;
            pcsum[j] += pc;

            pcmean += pc;
        }

    pcmean /= seqlen * (seqlen - 1) * 0.5;

    /* Build final list of predicted contacts */
    sclist = allocvec(seqlen * (seqlen - 1) / 2, sizeof(struct sc_entry));

    for (scsum=scsumsq=ncon=i=0; i<seqlen; i++)
        for (j=i+minseqsep; j<seqlen; j++)
            if (pcmat[i][j] > 0.0)
            {
                /* Calculate APC score */
                if (apcflg)
                    sclist[ncon].sc = pcmat[i][j] - pcsum[i] * pcsum[j] / SQR(seqlen - 1.0) / pcmean;
                else
                    sclist[ncon].sc = pcmat[i][j];
                scsum += sclist[ncon].sc;
                scsumsq += SQR(sclist[ncon].sc);
                sclist[ncon].i = i;
                sclist[ncon++].j = j;
            }

    qsort(sclist, ncon, sizeof(struct sc_entry), cmpfn);

    mean = scsum / ncon;
    sd = 1.25 * sqrt(scsumsq / ncon - SQR(mean)); /* Corrected for extreme-value bias */

    for (i=0; i<seqlen; i++)
        ccount[i] = 0;

    /* Print output in CASP RR format with optional PPV estimated from final Z-score */
    if (rawscflg)
        for (i=0; i<ncon; i++)
            printf("%d %d 0 8 %f\n", sclist[i].i+1, sclist[i].j+1, sclist[i].sc);
    else
        for (i=0; i<ncon; i++)
        {
            zscore = (sclist[i].sc - mean) / sd;

            ppv = 0.904 / (1.0 + 16.61 * exp(-0.8105 * zscore));

            if (ppv >= 0.5 || (!ccount[sclist[i].i] || !ccount[sclist[i].j]) || !filtflg)
            {
                printf("%d %d 0 8 %f\n", sclist[i].i+1, sclist[i].j+1, ppv);
                ccount[sclist[i].i]++;
                ccount[sclist[i].j]++;
            }
        }

    return 0;
}
diffusion3d.mic.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <omp.h>
#include <assert.h>

#define REAL float
#define NX (256)

#ifndef M_PI
#define M_PI (3.1415926535897932384626)
#endif

/* Fill buff (nx*ny*nz, x fastest) with the analytic solution of the 3D heat
 * equation at time `time` for a product-of-cosines initial condition. */
void init(REAL *buff, const int nx, const int ny, const int nz,
          const REAL kx, const REAL ky, const REAL kz,
          const REAL dx, const REAL dy, const REAL dz,
          const REAL kappa, const REAL time) {
  REAL ax, ay, az;
  ax = exp(-kappa*time*(kx*kx));
  ay = exp(-kappa*time*(ky*ky));
  az = exp(-kappa*time*(kz*kz));
  int jz;
#pragma omp parallel for
  for (jz = 0; jz < nz; jz++) {
    int jy;
    for (jy = 0; jy < ny; jy++) {
      int jx;
      for (jx = 0; jx < nx; jx++) {
        int j = jz*nx*ny + jy*nx + jx;
        /* cell-centred coordinates */
        REAL x = dx*((REAL)(jx + 0.5));
        REAL y = dy*((REAL)(jy + 0.5));
        REAL z = dz*((REAL)(jz + 0.5));
        REAL f0 = (REAL)0.125
          *(1.0 - ax*cos(kx*x))
          *(1.0 - ay*cos(ky*y))
          *(1.0 - az*cos(kz*z));
        buff[j] = f0;
      }
    }
  }
}

/* Root-mean-square difference between b1 and b2 over len elements. */
REAL accuracy(const REAL *b1, REAL *b2, const int len) {
  REAL err = 0.0;
  int i;
  for (i = 0; i < len; i++) {
    err += (b1[i] - b2[i]) * (b1[i] - b2[i]);
  }
  return (REAL)sqrt(err/len);
}

/* Signature shared by all diffusion kernel variants; the final double* is an
 * optional "inner" elapsed-time output (only the mic variant writes it). */
typedef void (*diffusion_loop_t)(REAL *f1, REAL *f2, int nx, int ny, int nz,
                                 REAL ce, REAL cw, REAL cn, REAL cs,
                                 REAL ct, REAL cb, REAL cc, REAL dt,
                                 int count, double *);

/* Reference serial 7-point diffusion kernel: `count` sweeps, ping-ponging
 * between f1 and f2 via a local pointer swap (caller's pointers unchanged).
 * Boundary cells reuse their own index (zero-flux boundaries).
 * Note: *etime is never written by this version. */
static void
diffusion_baseline(REAL *f1, REAL *f2, int nx, int ny, int nz,
                   REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb,
                   REAL cc, REAL dt, int count, double *etime) {
  int i;
  for (i = 0; i < count; ++i) {
    int z;
    for (z = 0; z < nz; z++) {
      int y;
      for (y = 0; y < ny; y++) {
        int x;
        for (x = 0; x < nx; x++) {
          int c, w, e, n, s, b, t;
          c =  x + y * nx + z * nx * ny;
          w = (x == 0)    ? c : c - 1;
          e = (x == nx-1) ? c : c + 1;
          n = (y == 0)    ? c : c - nx;
          s = (y == ny-1) ? c : c + nx;
          b = (z == 0)    ? c : c - nx * ny;
          t = (z == nz-1) ? c : c + nx * ny;
          f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e]
              + cs * f1[s] + cn * f1[n] + cb * f1[b] + ct * f1[t];
        }
      }
    }
    REAL *t = f1;
    f1 = f2;
    f2 = t;
  }
  return;
}

/* Same kernel with the outer z loop parallelised by OpenMP.
 * Note: *etime is never written by this version either. */
static void
diffusion_openmp(REAL *f1, REAL *f2, int nx, int ny, int nz,
                 REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb,
                 REAL cc, REAL dt, int count, double *etime) {
  {
    REAL *f1_t = f1;
    REAL *f2_t = f2;

    int i;
    for (i = 0; i < count; ++i) {
      int z;
#pragma omp parallel for
      for (z = 0; z < nz; z++) {
        int y;
        for (y = 0; y < ny; y++) {
          int x;
          for (x = 0; x < nx; x++) {
            int c, w, e, n, s, b, t;
            c =  x + y * nx + z * nx * ny;
            w = (x == 0)    ? c : c - 1;
            e = (x == nx-1) ? c : c + 1;
            n = (y == 0)    ? c : c - nx;
            s = (y == ny-1) ? c : c + nx;
            b = (z == 0)    ? c : c - nx * ny;
            t = (z == nz-1) ? c : c + nx * ny;
            f2_t[c] = cc * f1_t[c] + cw * f1_t[w] + ce * f1_t[e]
                + cs * f1_t[s] + cn * f1_t[n] + cb * f1_t[b] + ct * f1_t[t];
          }
        }
      }
      REAL *t = f1_t;
      f1_t = f2_t;
      f2_t = t;
    }
  }
  return;
}

#ifdef __INTEL_COMPILER
/* Wall-clock seconds; marked for offload so it also runs on the MIC card. */
__declspec(target(mic))
static double cur_second(void) {
  struct timeval tv;
  gettimeofday(&tv, NULL);
  return (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
}

/* Offloaded Xeon Phi (MIC) variant: transfers f1/f2 to the card, runs the
 * whole time loop there, and reports the on-card time via *etime (excluding
 * the PCI transfer). */
static void
diffusion_mic(REAL *restrict f1, REAL *restrict f2, int nx, int ny, int nz,
              REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb,
              REAL cc, REAL dt, int count, double *etime) {
  double runtime;
#pragma offload target(mic) \
  inout(f1:length(nx*ny*nz) align(2*1024*1024))   \
  inout(f2:length(nx*ny*nz) align(2*1024*1024)) out(runtime)
  {
    double start = cur_second();
    int i;
    for (i = 0; i < count; ++i) {
      int y, z;
#pragma omp parallel for collapse(2) private(y, z)
      for (z = 0; z < nz; z++) {
        for (y = 0; y < ny; y++) {
          int x;
#pragma ivdep
          for (x = 0; x < nx; x++) {
            int c, w, e, n, s, b, t;
            c =  x + y * nx + z * nx * ny;
            w = (x == 0)    ? c : c - 1;
            e = (x == nx-1) ? c : c + 1;
            n = (y == 0)    ? c : c - nx;
            s = (y == ny-1) ? c : c + nx;
            b = (z == 0)    ? c : c - nx * ny;
            t = (z == nz-1) ? c : c + nx * ny;
            f2[c] = cc * f1[c] + cw * f1[w] + ce * f1[e]
                + cs * f1[s] + cn * f1[n] + cb * f1[b] + ct * f1[t];
          }
        }
      }
      REAL *t = f1;
      f1 = f2;
      f2 = t;
    }
    double end = cur_second();
    runtime = end - start;
  }
  *etime = runtime;
  return;
}
#endif

/* Write the grid to out_path, one value per line. */
static void dump_result(REAL *f, int nx, int ny, int nz, char *out_path) {
  FILE *out = fopen(out_path, "w");
  assert(out);
  size_t nitems = nx * ny * nz;
  //fwrite(f, sizeof(REAL), nitems, out);
  int i;
  for (i = 0; i < nitems; ++i) {
    fprintf(out, "%f\n", f[i]);
  }
  fclose(out);
}

/* Run the selected kernel (argv[1]: "openmp" or, with icc, "mic"; default
 * baseline), dump the result, and report timing/accuracy statistics. */
int main(int argc, char *argv[]) {
  struct timeval time_begin, time_end;

  int    nx    = NX;
  int    ny    = NX;
  int    nz    = NX;

#if USE_MM_MALLOC
  /* NOTE(review): these allocate only NX elements, not NX*NX*NX as the
   * malloc branch below does — looks like a bug if USE_MM_MALLOC is ever
   * enabled; confirm before building with it. */
  REAL *f1 = (REAL *)_mm_malloc(sizeof(REAL)*NX,4096);
  REAL *f2 = (REAL *)_mm_malloc(sizeof(REAL)*NX,4096);
#else
  REAL *f1 = (REAL *)malloc(sizeof(REAL)*NX*NX*NX);
  REAL *f2 = (REAL *)malloc(sizeof(REAL)*NX*NX*NX);
#endif
  REAL *f_final = NULL;

  REAL   time  = 0.0;
  int    count = 0;

  REAL l, dx, dy, dz, kx, ky, kz, kappa, dt;
  REAL ce, cw, cn, cs, ct, cb, cc;
  char *version_str;

  l = 1.0;
  kappa = 0.1;
  dx = dy = dz = l / nx;
  kx = ky = kz = 2.0 * M_PI;
  dt = 0.1*dx*dx / kappa;
  //count = 0.1 / dt;
  count = 300;
  /* after an even number of swaps the result sits in f1, odd in f2 */
  f_final = (count % 2)? f2 : f1;

  init(f1, nx, ny, nz, kx, ky, kz, dx, dy, dz, kappa, time);

  ce = cw = kappa*dt/(dx*dx);
  cn = cs = kappa*dt/(dy*dy);
  ct = cb = kappa*dt/(dz*dz);
  cc = 1.0 - (ce + cw + cn + cs + ct + cb);

  // use baseline by default
  diffusion_loop_t diffusion_loop = diffusion_baseline;
  version_str = "baseline";
  if (argc == 2) {
    if (strcmp(argv[1], "openmp") == 0) {
      diffusion_loop = diffusion_openmp;
      version_str = "openmp";
    }
#ifdef __INTEL_COMPILER
    if (strcmp(argv[1], "mic") == 0) {
      printf("MIC\n");
      diffusion_loop = diffusion_mic;
      version_str = "mic";
    }
#endif
  }

  /* NOTE(review): only diffusion_mic writes this, so for baseline/openmp it
   * stays 0.0 and the "w/o PCI" figures below divide by zero. */
  double inner_elaplsed_time = 0.0;
  printf("Running %s diffusion kernel %d times with %dx%dx%d grid\n",
         version_str, count, nx, ny, nz);
  gettimeofday(&time_begin, NULL);
  diffusion_loop(f1, f2, nx, ny, nz, ce, cw, cn, cs, ct, cb, cc, dt, count,
                 &inner_elaplsed_time);
  gettimeofday(&time_end, NULL);
  time = count * dt;

  char dump_path[128];
  sprintf(dump_path, "%s.%s", "diffusion_result.dat", version_str);
  dump_result(f_final, nx, ny, nz, dump_path);

  /* Compare against the analytic solution at the final time */
  REAL *answer = (REAL *)malloc(sizeof(REAL) * nx*ny*nz);
  init(answer, nx, ny, nz, kx, ky, kz, dx, dy, dz, kappa, time);
  REAL err = accuracy(f_final, answer, nx*ny*nz);
  double elapsed_time = (time_end.tv_sec - time_begin.tv_sec)
      + (time_end.tv_usec - time_begin.tv_usec)*1.0e-6;
  REAL mflops = (nx*ny*nz)*13.0*count/elapsed_time * 1.0e-06;
  double thput = (nx * ny * nz) * sizeof(REAL) * 3.0 * count
      / elapsed_time * 1.0e-09;

  fprintf(stderr, "Elapsed time : %.3f (s)\n", elapsed_time);
  fprintf(stderr, "FLOPS : %.3f (MFlops)\n", mflops);
  fprintf(stderr, "Throughput : %.3f (GB/s)\n", thput);
  fprintf(stderr, "Accuracy : %e\n", err);
  free(answer);

  fprintf(stderr, "Time (w/o PCI): %.3f\n", inner_elaplsed_time);
  fprintf(stderr, "FLOPS (w/o PCI): %.3f (MFLOPS)\n", (nx*ny*nz)*13.0*count/inner_elaplsed_time * 1.0e-06);
  fprintf(stderr, "Throughput (w/o PCI): %.3f\n", nx *ny * nz * sizeof(REAL) * 3 * count / inner_elaplsed_time * 1.0e-09);

  free(f1);
  free(f2);
  return 0;
}
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 4; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
axpby.c
#include "axpby.h"

/*
 * saxpby: single-precision out[i] = a*x[i] + b*y[i] for i in [0, size).
 *
 * nThreads: requested OpenMP thread count; threads_setup() applies it and
 * records the previous setting in nThreads_initial, which is restored after
 * the parallel region so callers are unaffected.
 * Returns 0 always.
 */
DLL_EXPORT int saxpby(float * x, float * y, float * out, float a, float b, long size, int nThreads){
    long i = 0;
    int nThreads_initial;
    threads_setup(nThreads, &nThreads_initial);
    #pragma omp parallel
    {
        // the worksharing-for loop variable is implicitly private per OpenMP rules
        #pragma omp for
        for (i=0; i < size; i++) {
            *(out + i ) = a * ( *(x + i) ) + b * ( *(y + i) );
        }
    }
    omp_set_num_threads(nThreads_initial);
    return 0;
}

/*
 * daxpby: double-precision out[i] = a*x[i] + b*y[i] for i in [0, size).
 *
 * Fix: previously the nThreads argument was accepted but silently ignored,
 * unlike saxpby. It is now applied via threads_setup() and the original
 * thread count is restored afterwards, matching the single-precision path.
 * Returns 0 always.
 */
DLL_EXPORT int daxpby(double * x, double * y, double * out, double a, double b, long size, int nThreads)
{
    long i = 0;
    int nThreads_initial;
    threads_setup(nThreads, &nThreads_initial);
    #pragma omp parallel
    {
        #pragma omp for
        for (i = 0; i < size; i++) {
            *(out + i) = a * (*(x + i)) + b * (*(y + i));
        }
    }
    omp_set_num_threads(nThreads_initial);
    return 0;
}
convolution_1x1_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; const int size = w * h; Mat bottom_im2col = bottom_blob; bottom_im2col.w = size; bottom_im2col.h = 1; im2col_sgemm_int8_neon(bottom_im2col, top_blob, kernel, opt); } static void conv1x1s2_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = w - 2 * outw + w; Mat bottom_blob_shrinked; bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < channels; p++) { const signed char* r0 = bottom_blob.channel(p); signed char* outptr = bottom_blob_shrinked.channel(p); for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { outptr[0] = r0[0]; outptr[1] = r0[2]; outptr[2] = r0[4]; outptr[3] = r0[6]; r0 += 8; outptr += 4; } for (; j + 1 < outw; j += 2) { outptr[0] = r0[0]; outptr[1] = r0[2]; r0 += 4; outptr += 2; } for (; j < outw; j++) { outptr[0] = r0[0]; 
r0 += 2; outptr += 1; } r0 += tailstep; } } conv1x1s1_sgemm_int8_neon(bottom_blob_shrinked, top_blob, kernel, opt); }
convolution_sgemm_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Generic int8 convolution via im2col + GEMM (scalar x86 reference path).
// Pipeline: (1) im2row the input into a (outw*outh) x (inch*kh*kw) matrix,
// (2) repack input rows and kernel rows into 4x4-interleaved tiles so the inner
// GEMM streams contiguously, (3) 4x4-blocked GEMM with int32 accumulation,
// writing raw int32 sums into top_blob.
static void conv_im2col_sgemm_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel,
                                       const int kernel_w, const int kernel_h, const int stride_w, const int stride_h,
                                       const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char* kernel = _kernel;

    // im2row: one row per output pixel, laid out [inch][kernel_h][kernel_w]
    Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator);
    {
        signed char* ret = (signed char*)bottom_im2row;
        int retID = 0;

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                for (int p = 0; p < inch; p++)
                {
                    const signed char* input = bottom_blob.channel(p);
                    for (int u = 0; u < kernel_h; u++)
                    {
                        for (int v = 0; v < kernel_w; v++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;
                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;

    // GEMM dimensions:
    // int M = outch; // outch
    int N = outw * outh;                // outsize or out stride
    int K = kernel_w * kernel_h * inch; // ksize * inch

    // bottom_im2row memory packed 4 x 4: groups of 4 output pixels are interleaved
    // 2 K-steps at a time; leftover pixels (out_size % 4) get one channel each.
    Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * 4;

            const signed char* img0 = bottom_im2row.row<signed char>(i);
            const signed char* img1 = bottom_im2row.row<signed char>(i + 1);
            const signed char* img2 = bottom_im2row.row<signed char>(i + 2);
            const signed char* img3 = bottom_im2row.row<signed char>(i + 3);

            signed char* tmpptr = bottom_tm.channel(i / 4);

            int q = 0;
            // interleave 4 rows, 2 K-elements per row per iteration
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img1[0];
                tmpptr[3] = img1[1];
                tmpptr[4] = img2[0];
                tmpptr[5] = img2[1];
                tmpptr[6] = img3[0];
                tmpptr[7] = img3[1];

                tmpptr += 8;
                img0 += 2;
                img1 += 2;
                img2 += 2;
                img3 += 2;
            }
            // odd trailing K element
            for (; q < inch * kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img2[0];
                tmpptr[3] = img3[0];

                tmpptr += 4;
                img0 += 1;
                img1 += 1;
                img2 += 1;
                img3 += 1;
            }
        }

        // leftover output pixels: stored one per channel, pairwise-packed along K
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < out_size; i++)
        {
            const signed char* img0 = bottom_im2row.row<signed char>(i);

            signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];

                tmpptr += 2;
                img0 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += 1;
            }
        }
    }

    // kernel memory packed 4 x 4: same interleave scheme as bottom_tm, over output channels
    Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * 4;

            const signed char* k0 = kernel + (p + 0) * inch * kernel_size;
            const signed char* k1 = kernel + (p + 1) * inch * kernel_size;
            const signed char* k2 = kernel + (p + 2) * inch * kernel_size;
            const signed char* k3 = kernel + (p + 3) * inch * kernel_size;

            signed char* ktmp = kernel_tm.channel(p / 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q += 2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp[2] = k1[0];
                ktmp[3] = k1[1];
                ktmp[4] = k2[0];
                ktmp[5] = k2[1];
                ktmp[6] = k3[0];
                ktmp[7] = k3[1];

                ktmp += 8;
                k0 += 2;
                k1 += 2;
                k2 += 2;
                k3 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k1[0];
                ktmp[2] = k2[0];
                ktmp[3] = k3[0];

                ktmp += 4;
                k0 += 1;
                k1 += 1;
                k2 += 1;
                k3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_outch_start; p < outch; p++)
        {
            const signed char* k0 = kernel + (p + 0) * inch * kernel_size;

            signed char* ktmp = kernel_tm.channel(p / 4 + p % 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp += 2;
                k0 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp++;
                k0++;
            }
        }
    }

    // 4x4 blocked GEMM over the packed buffers
    // sgemm(int M, int N, int K, float* A, float* B, float* C)
    {
        // int M = outch;                      // outch
        // int N = outw * outh;                // outsize or out stride
        // int L = kernel_w * kernel_h * inch; // ksize * inch

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        // main tile: 4 output channels x 4 output pixels, K unrolled by 2
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int i = pp * 4;

            int* output0 = top_blob.channel(i);
            int* output1 = top_blob.channel(i + 1);
            int* output2 = top_blob.channel(i + 2);
            int* output3 = top_blob.channel(i + 3);

            int j = 0;
            for (; j + 3 < N; j = j + 4)
            {
                signed char* vb = bottom_tm.channel(j / 4);
                signed char* va = kernel_tm.channel(i / 4);

                int sum0[4] = {0};
                int sum1[4] = {0};
                int sum2[4] = {0};
                int sum3[4] = {0};

                int k = 0;
                for (; k + 1 < K; k = k + 2)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[2 * n]; // k0
                        sum0[n] += (int)va[1] * vb[2 * n + 1];

                        sum1[n] += (int)va[2] * vb[2 * n]; // k1
                        sum1[n] += (int)va[3] * vb[2 * n + 1];

                        sum2[n] += (int)va[4] * vb[2 * n]; // k2
                        sum2[n] += (int)va[5] * vb[2 * n + 1];

                        sum3[n] += (int)va[6] * vb[2 * n]; // k3
                        sum3[n] += (int)va[7] * vb[2 * n + 1];
                    }
                    va += 8;
                    vb += 8;
                }
                for (; k < K; k++)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[n];
                        sum1[n] += (int)va[1] * vb[n];
                        sum2[n] += (int)va[2] * vb[n];
                        sum3[n] += (int)va[3] * vb[n];
                    }
                    va += 4;
                    vb += 4;
                }

                for (int n = 0; n < 4; n++)
                {
                    output0[n] = sum0[n];
                    output1[n] = sum1[n];
                    output2[n] = sum2[n];
                    output3[n] = sum3[n];
                }
                output0 += 4;
                output1 += 4;
                output2 += 4;
                output3 += 4;
            }

            // leftover output pixels for this 4-channel tile
            for (; j < N; j++)
            {
                int sum0 = 0;
                int sum1 = 0;
                int sum2 = 0;
                int sum3 = 0;

                signed char* vb = bottom_tm.channel(j / 4 + j % 4);
                signed char* va = kernel_tm.channel(i / 4);

                int k = 0;
                for (; k + 1 < K; k = k + 2)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum0 += (int)va[1] * vb[1];

                    sum1 += (int)va[2] * vb[0];
                    sum1 += (int)va[3] * vb[1];

                    sum2 += (int)va[4] * vb[0];
                    sum2 += (int)va[5] * vb[1];

                    sum3 += (int)va[6] * vb[0];
                    sum3 += (int)va[7] * vb[1];

                    va += 8;
                    vb += 2;
                }

                for (; k < K; k++)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum1 += (int)va[1] * vb[0];
                    sum2 += (int)va[2] * vb[0];
                    sum3 += (int)va[3] * vb[0];

                    va += 4;
                    vb += 1;
                }

                output0[0] = sum0;
                output1[0] = sum1;
                output2[0] = sum2;
                output3[0] = sum3;

                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        // leftover output channels: 1 channel x 4 pixels, then 1 x 1
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_outch_start; i < outch; i++)
        {
            int* output = top_blob.channel(i);

            int j = 0;
            for (; j + 3 < N; j = j + 4)
            {
                signed char* vb = bottom_tm.channel(j / 4);
                signed char* va = kernel_tm.channel(i / 4 + i % 4);
                int sum[4] = {0};

                int k = 0;
                for (; k + 1 < K; k = k + 2)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum[n] += (int)va[0] * vb[2 * n];
                        sum[n] += (int)va[1] * vb[2 * n + 1];
                    }
                    va += 2;
                    vb += 8;
                }
                for (; k < K; k++)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum[n] += (int)va[0] * vb[n];
                    }
                    va += 1;
                    vb += 4;
                }

                for (int n = 0; n < 4; n++)
                {
                    output[n] = sum[n];
                }
                output += 4;
            }
            for (; j < N; j++)
            {
                int sum = 0;

                signed char* vb = bottom_tm.channel(j / 4 + j % 4);
                signed char* va = kernel_tm.channel(i / 4 + i % 4);

                for (int k = 0; k < K; k++)
                {
                    sum += (int)va[0] * vb[0];

                    va += 1;
                    vb += 1;
                }
                output[0] = sum;

                output++;
            }
        }
    }

    // Naive reference GEMM, kept for documentation:
    // // sgemm(int M, int N, int K, float* A, float* B, float* C)
    // {
    //     for (int i=0; i<M; i++)
    //     {
    //         int* output = top_blob.channel(i);
    //         for (int j=0; j<N; j++)
    //         {
    //             int sum = 0;
    //             signed char* vb = (signed char*)bottom_im2row + K * j;
    //             const signed char* va = kernel + K * i;
    //             for (int k=0; k<K; k++)
    //             {
    //                 sum += (int)va[0] * vb[0];
    //                 va += 1;
    //                 vb += 1;
    //             }
    //             output[0] = sum;
    //             output++;
    //         }
    //     }
    // }
}

// Same im2col + packed 4x4 GEMM as conv_im2col_sgemm_int8_sse, but dequantizes:
// the int32 accumulators are scaled per output channel by scale_dequant[c] and
// offset by the (optional) float bias, producing float output.
static void conv_im2col_sgemm_int8_dequant_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel,
                                               const int kernel_w, const int kernel_h, const int stride_w, const int stride_h,
                                               const Mat& _bias, std::vector<float> scale_dequant, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char* kernel = _kernel;
    const float* bias = _bias;

    // im2row: one row per output pixel, laid out [inch][kernel_h][kernel_w]
    Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator);
    {
        signed char* ret = (signed char*)bottom_im2row;
        int retID = 0;

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                for (int p = 0; p < inch; p++)
                {
                    const signed char* input = bottom_blob.channel(p);
                    for (int u = 0; u < kernel_h; u++)
                    {
                        for (int v = 0; v < kernel_w; v++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;
                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;

    // int M = outch; // outch
    int N = outw * outh;                // outsize or out stride
    int K = kernel_w * kernel_h * inch; // ksize * inch

    // bottom_im2row memory packed 4 x 4 (see non-dequant variant for layout)
    Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * 4;

            const signed char* img0 = bottom_im2row.row<signed char>(i);
            const signed char* img1 = bottom_im2row.row<signed char>(i + 1);
            const signed char* img2 = bottom_im2row.row<signed char>(i + 2);
            const signed char* img3 = bottom_im2row.row<signed char>(i + 3);

            signed char* tmpptr = bottom_tm.channel(i / 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img1[0];
                tmpptr[3] = img1[1];
                tmpptr[4] = img2[0];
                tmpptr[5] = img2[1];
                tmpptr[6] = img3[0];
                tmpptr[7] = img3[1];

                tmpptr += 8;
                img0 += 2;
                img1 += 2;
                img2 += 2;
                img3 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img2[0];
                tmpptr[3] = img3[0];

                tmpptr += 4;
                img0 += 1;
                img1 += 1;
                img2 += 1;
                img3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < out_size; i++)
        {
            const signed char* img0 = bottom_im2row.row<signed char>(i);

            signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];

                tmpptr += 2;
                img0 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += 1;
            }
        }
    }

    // kernel memory packed 4 x 4
    Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * 4;

            const signed char* k0 = kernel + (p + 0) * inch * kernel_size;
            const signed char* k1 = kernel + (p + 1) * inch * kernel_size;
            const signed char* k2 = kernel + (p + 2) * inch * kernel_size;
            const signed char* k3 = kernel + (p + 3) * inch * kernel_size;

            signed char* ktmp = kernel_tm.channel(p / 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q += 2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp[2] = k1[0];
                ktmp[3] = k1[1];
                ktmp[4] = k2[0];
                ktmp[5] = k2[1];
                ktmp[6] = k3[0];
                ktmp[7] = k3[1];

                ktmp += 8;
                k0 += 2;
                k1 += 2;
                k2 += 2;
                k3 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k1[0];
                ktmp[2] = k2[0];
                ktmp[3] = k3[0];

                ktmp += 4;
                k0 += 1;
                k1 += 1;
                k2 += 1;
                k3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_outch_start; p < outch; p++)
        {
            const signed char* k0 = kernel + (p + 0) * inch * kernel_size;

            signed char* ktmp = kernel_tm.channel(p / 4 + p % 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp += 2;
                k0 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp++;
                k0++;
            }
        }
    }

    // 4x4 blocked GEMM + per-channel dequantize (float out = sum * scale + bias)
    // sgemm(int M, int N, int K, float* A, float* B, float* C)
    {
        // int M = outch;                      // outch
        // int N = outw * outh;                // outsize or out stride
        // int L = kernel_w * kernel_h * inch; // ksize * inch

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int i = pp * 4;

            const float bias0 = bias ? bias[i] : 0.f;
            const float bias1 = bias ? bias[i + 1] : 0.f;
            const float bias2 = bias ? bias[i + 2] : 0.f;
            const float bias3 = bias ? bias[i + 3] : 0.f;

            const float scale_dequant0 = scale_dequant[i];
            const float scale_dequant1 = scale_dequant[i + 1];
            const float scale_dequant2 = scale_dequant[i + 2];
            const float scale_dequant3 = scale_dequant[i + 3];

            float* output0 = top_blob.channel(i);
            float* output1 = top_blob.channel(i + 1);
            float* output2 = top_blob.channel(i + 2);
            float* output3 = top_blob.channel(i + 3);

            int j = 0;
            for (; j + 3 < N; j = j + 4)
            {
                signed char* vb = bottom_tm.channel(j / 4);
                signed char* va = kernel_tm.channel(i / 4);

                int sum0[4] = {0};
                int sum1[4] = {0};
                int sum2[4] = {0};
                int sum3[4] = {0};

                int k = 0;
                for (; k + 1 < K; k = k + 2)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[2 * n]; // k0
                        sum0[n] += (int)va[1] * vb[2 * n + 1];

                        sum1[n] += (int)va[2] * vb[2 * n]; // k1
                        sum1[n] += (int)va[3] * vb[2 * n + 1];

                        sum2[n] += (int)va[4] * vb[2 * n]; // k2
                        sum2[n] += (int)va[5] * vb[2 * n + 1];

                        sum3[n] += (int)va[6] * vb[2 * n]; // k3
                        sum3[n] += (int)va[7] * vb[2 * n + 1];
                    }
                    va += 8;
                    vb += 8;
                }
                for (; k < K; k++)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[n];
                        sum1[n] += (int)va[1] * vb[n];
                        sum2[n] += (int)va[2] * vb[n];
                        sum3[n] += (int)va[3] * vb[n];
                    }
                    va += 4;
                    vb += 4;
                }

                for (int n = 0; n < 4; n++)
                {
                    output0[n] = (float)sum0[n] * scale_dequant0 + bias0;
                    output1[n] = (float)sum1[n] * scale_dequant1 + bias1;
                    output2[n] = (float)sum2[n] * scale_dequant2 + bias2;
                    output3[n] = (float)sum3[n] * scale_dequant3 + bias3;
                }
                output0 += 4;
                output1 += 4;
                output2 += 4;
                output3 += 4;
            }

            for (; j < N; j++)
            {
                int sum0 = 0;
                int sum1 = 0;
                int sum2 = 0;
                int sum3 = 0;

                signed char* vb = bottom_tm.channel(j / 4 + j % 4);
                signed char* va = kernel_tm.channel(i / 4);

                int k = 0;
                for (; k + 1 < K; k = k + 2)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum0 += (int)va[1] * vb[1];

                    sum1 += (int)va[2] * vb[0];
                    sum1 += (int)va[3] * vb[1];

                    sum2 += (int)va[4] * vb[0];
                    sum2 += (int)va[5] * vb[1];

                    sum3 += (int)va[6] * vb[0];
                    sum3 += (int)va[7] * vb[1];

                    va += 8;
                    vb += 2;
                }

                for (; k < K; k++)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum1 += (int)va[1] * vb[0];
                    sum2 += (int)va[2] * vb[0];
                    sum3 += (int)va[3] * vb[0];

                    va += 4;
                    vb += 1;
                }

                output0[0] = (float)sum0 * scale_dequant0 + bias0;
                output1[0] = (float)sum1 * scale_dequant1 + bias1;
                output2[0] = (float)sum2 * scale_dequant2 + bias2;
                output3[0] = (float)sum3 * scale_dequant3 + bias3;

                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_outch_start; i < outch; i++)
        {
            float* output = top_blob.channel(i);

            const float bias0 = bias ? bias[i] : 0.f;
            const float scale_dequant0 = scale_dequant[i];

            int j = 0;
            for (; j + 3 < N; j = j + 4)
            {
                signed char* vb = bottom_tm.channel(j / 4);
                signed char* va = kernel_tm.channel(i / 4 + i % 4);
                int sum[4] = {0};

                int k = 0;
                for (; k + 1 < K; k = k + 2)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum[n] += (int)va[0] * vb[2 * n];
                        sum[n] += (int)va[1] * vb[2 * n + 1];
                    }
                    va += 2;
                    vb += 8;
                }
                for (; k < K; k++)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum[n] += (int)va[0] * vb[n];
                    }
                    va += 1;
                    vb += 4;
                }

                for (int n = 0; n < 4; n++)
                {
                    output[n] = (float)sum[n] * scale_dequant0 + bias0;
                }
                output += 4;
            }
            for (; j < N; j++)
            {
                int sum = 0;

                signed char* vb = bottom_tm.channel(j / 4 + j % 4);
                signed char* va = kernel_tm.channel(i / 4 + i % 4);

                for (int k = 0; k < K; k++)
                {
                    sum += (int)va[0] * vb[0];

                    va += 1;
                    vb += 1;
                }
                output[0] = (float)sum * scale_dequant0 + bias0;

                output++;
            }
        }
    }

    // Naive reference GEMM, kept for documentation:
    // // sgemm(int M, int N, int K, float* A, float* B, float* C)
    // {
    //     for (int i=0; i<M; i++)
    //     {
    //         int* output = top_blob.channel(i);
    //         for (int j=0; j<N; j++)
    //         {
    //             int sum = 0;
    //             signed char* vb = (signed char*)bottom_im2row + K * j;
    //             const signed char* va = kernel + K * i;
    //             for (int k=0; k<K; k++)
    //             {
    //                 sum += (int)va[0] * vb[0];
    //                 va += 1;
    //                 vb += 1;
    //             }
    //             output[0] = sum;
    //             output++;
    //         }
    //     }
    // }
}

// Same im2col + packed 4x4 GEMM, but requantizes back to int8:
// out = float2int8((sum * scale_requant_in[c] + bias[c]) * scale_requant_out[c]),
// where scale_requant stores (in, out) scale pairs at indices [2*c] and [2*c+1].
static void conv_im2col_sgemm_int8_requant_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel,
                                               const int kernel_w, const int kernel_h, const int stride_w, const int stride_h,
                                               const Mat& _bias, std::vector<float> scale_requant, const Option& opt)
{
    int w = bottom_blob.w;
    int inch = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const signed char* kernel = _kernel;
    const float* bias = _bias;

    // im2row: one row per output pixel, laid out [inch][kernel_h][kernel_w]
    Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator);
    {
        signed char* ret = (signed char*)bottom_im2row;
        int retID = 0;

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                for (int p = 0; p < inch; p++)
                {
                    const signed char* input = bottom_blob.channel(p);
                    for (int u = 0; u < kernel_h; u++)
                    {
                        for (int v = 0; v < kernel_w; v++)
                        {
                            int row = u + i * stride_h;
                            int col = v + j * stride_w;
                            int index = row * w + col;
                            ret[retID] = input[index];
                            retID++;
                        }
                    }
                }
            }
        }
    }

    int kernel_size = kernel_w * kernel_h;
    int out_size = outw * outh;

    // int M = outch; // outch
    int N = outw * outh;                // outsize or out stride
    int K = kernel_w * kernel_h * inch; // ksize * inch

    // bottom_im2row memory packed 4 x 4 (see non-requant variant for layout)
    Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_size = out_size >> 2;
        int remain_size_start = nn_size << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int ii = 0; ii < nn_size; ii++)
        {
            int i = ii * 4;

            const signed char* img0 = bottom_im2row.row<signed char>(i);
            const signed char* img1 = bottom_im2row.row<signed char>(i + 1);
            const signed char* img2 = bottom_im2row.row<signed char>(i + 2);
            const signed char* img3 = bottom_im2row.row<signed char>(i + 3);

            signed char* tmpptr = bottom_tm.channel(i / 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];
                tmpptr[2] = img1[0];
                tmpptr[3] = img1[1];
                tmpptr[4] = img2[0];
                tmpptr[5] = img2[1];
                tmpptr[6] = img3[0];
                tmpptr[7] = img3[1];

                tmpptr += 8;
                img0 += 2;
                img1 += 2;
                img2 += 2;
                img3 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img1[0];
                tmpptr[2] = img2[0];
                tmpptr[3] = img3[0];

                tmpptr += 4;
                img0 += 1;
                img1 += 1;
                img2 += 1;
                img3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_size_start; i < out_size; i++)
        {
            const signed char* img0 = bottom_im2row.row<signed char>(i);

            signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                tmpptr[0] = img0[0];
                tmpptr[1] = img0[1];

                tmpptr += 2;
                img0 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                tmpptr[0] = img0[0];

                tmpptr += 1;
                img0 += 1;
            }
        }
    }

    // kernel memory packed 4 x 4
    Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t)1u, opt.workspace_allocator);
    {
        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int p = pp * 4;

            const signed char* k0 = kernel + (p + 0) * inch * kernel_size;
            const signed char* k1 = kernel + (p + 1) * inch * kernel_size;
            const signed char* k2 = kernel + (p + 2) * inch * kernel_size;
            const signed char* k3 = kernel + (p + 3) * inch * kernel_size;

            signed char* ktmp = kernel_tm.channel(p / 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q += 2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp[2] = k1[0];
                ktmp[3] = k1[1];
                ktmp[4] = k2[0];
                ktmp[5] = k2[1];
                ktmp[6] = k3[0];
                ktmp[7] = k3[1];

                ktmp += 8;
                k0 += 2;
                k1 += 2;
                k2 += 2;
                k3 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k1[0];
                ktmp[2] = k2[0];
                ktmp[3] = k3[0];

                ktmp += 4;
                k0 += 1;
                k1 += 1;
                k2 += 1;
                k3 += 1;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = remain_outch_start; p < outch; p++)
        {
            const signed char* k0 = kernel + (p + 0) * inch * kernel_size;

            signed char* ktmp = kernel_tm.channel(p / 4 + p % 4);

            int q = 0;
            for (; q + 1 < inch * kernel_size; q = q + 2)
            {
                ktmp[0] = k0[0];
                ktmp[1] = k0[1];
                ktmp += 2;
                k0 += 2;
            }
            for (; q < inch * kernel_size; q++)
            {
                ktmp[0] = k0[0];
                ktmp++;
                k0++;
            }
        }
    }

    // 4x4 blocked GEMM + per-channel requantize back to int8
    // sgemm(int M, int N, int K, float* A, float* B, float* C)
    {
        // int M = outch;                      // outch
        // int N = outw * outh;                // outsize or out stride
        // int L = kernel_w * kernel_h * inch; // ksize * inch

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;
        remain_outch_start = nn_outch << 2;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp = 0; pp < nn_outch; pp++)
        {
            int i = pp * 4;

            signed char* output0 = top_blob.channel(i);
            signed char* output1 = top_blob.channel(i + 1);
            signed char* output2 = top_blob.channel(i + 2);
            signed char* output3 = top_blob.channel(i + 3);

            const float bias0 = bias ? bias[i] : 0.f;
            const float bias1 = bias ? bias[i + 1] : 0.f;
            const float bias2 = bias ? bias[i + 2] : 0.f;
            const float bias3 = bias ? bias[i + 3] : 0.f;

            // (in, out) scale pair per output channel
            const float scale_requant_in0 = scale_requant[2 * i];
            const float scale_requant_out0 = scale_requant[2 * i + 1];
            const float scale_requant_in1 = scale_requant[2 * (i + 1)];
            const float scale_requant_out1 = scale_requant[2 * (i + 1) + 1];
            const float scale_requant_in2 = scale_requant[2 * (i + 2)];
            const float scale_requant_out2 = scale_requant[2 * (i + 2) + 1];
            const float scale_requant_in3 = scale_requant[2 * (i + 3)];
            const float scale_requant_out3 = scale_requant[2 * (i + 3) + 1];

            int j = 0;
            for (; j + 3 < N; j = j + 4)
            {
                signed char* vb = bottom_tm.channel(j / 4);
                signed char* va = kernel_tm.channel(i / 4);

                int sum0[4] = {0};
                int sum1[4] = {0};
                int sum2[4] = {0};
                int sum3[4] = {0};

                int k = 0;
                for (; k + 1 < K; k = k + 2)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[2 * n]; // k0
                        sum0[n] += (int)va[1] * vb[2 * n + 1];

                        sum1[n] += (int)va[2] * vb[2 * n]; // k1
                        sum1[n] += (int)va[3] * vb[2 * n + 1];

                        sum2[n] += (int)va[4] * vb[2 * n]; // k2
                        sum2[n] += (int)va[5] * vb[2 * n + 1];

                        sum3[n] += (int)va[6] * vb[2 * n]; // k3
                        sum3[n] += (int)va[7] * vb[2 * n + 1];
                    }
                    va += 8;
                    vb += 8;
                }
                for (; k < K; k++)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum0[n] += (int)va[0] * vb[n];
                        sum1[n] += (int)va[1] * vb[n];
                        sum2[n] += (int)va[2] * vb[n];
                        sum3[n] += (int)va[3] * vb[n];
                    }
                    va += 4;
                    vb += 4;
                }

                for (int n = 0; n < 4; n++)
                {
                    output0[n] = float2int8(((float)sum0[n] * scale_requant_in0 + bias0) * scale_requant_out0);
                    output1[n] = float2int8(((float)sum1[n] * scale_requant_in1 + bias1) * scale_requant_out1);
                    output2[n] = float2int8(((float)sum2[n] * scale_requant_in2 + bias2) * scale_requant_out2);
                    output3[n] = float2int8(((float)sum3[n] * scale_requant_in3 + bias3) * scale_requant_out3);
                }
                output0 += 4;
                output1 += 4;
                output2 += 4;
                output3 += 4;
            }

            for (; j < N; j++)
            {
                int sum0 = 0;
                int sum1 = 0;
                int sum2 = 0;
                int sum3 = 0;

                signed char* vb = bottom_tm.channel(j / 4 + j % 4);
                signed char* va = kernel_tm.channel(i / 4);

                int k = 0;
                for (; k + 1 < K; k = k + 2)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum0 += (int)va[1] * vb[1];

                    sum1 += (int)va[2] * vb[0];
                    sum1 += (int)va[3] * vb[1];

                    sum2 += (int)va[4] * vb[0];
                    sum2 += (int)va[5] * vb[1];

                    sum3 += (int)va[6] * vb[0];
                    sum3 += (int)va[7] * vb[1];

                    va += 8;
                    vb += 2;
                }

                for (; k < K; k++)
                {
                    sum0 += (int)va[0] * vb[0];
                    sum1 += (int)va[1] * vb[0];
                    sum2 += (int)va[2] * vb[0];
                    sum3 += (int)va[3] * vb[0];

                    va += 4;
                    vb += 1;
                }

                output0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0);
                output1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1);
                output2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2);
                output3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3);

                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i = remain_outch_start; i < outch; i++)
        {
            signed char* output = top_blob.channel(i);

            const float bias0 = bias ? bias[i] : 0.f;
            const float scale_requant_in0 = scale_requant[2 * i];
            const float scale_requant_out0 = scale_requant[2 * i + 1];

            int j = 0;
            for (; j + 3 < N; j = j + 4)
            {
                signed char* vb = bottom_tm.channel(j / 4);
                signed char* va = kernel_tm.channel(i / 4 + i % 4);
                int sum[4] = {0};

                int k = 0;
                for (; k + 1 < K; k = k + 2)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum[n] += (int)va[0] * vb[2 * n];
                        sum[n] += (int)va[1] * vb[2 * n + 1];
                    }
                    va += 2;
                    vb += 8;
                }
                for (; k < K; k++)
                {
                    for (int n = 0; n < 4; n++)
                    {
                        sum[n] += (int)va[0] * vb[n];
                    }
                    va += 1;
                    vb += 4;
                }

                for (int n = 0; n < 4; n++)
                {
                    output[n] = float2int8(((float)sum[n] * scale_requant_in0 + bias0) * scale_requant_out0);
                }
                output += 4;
            }
            for (; j < N; j++)
            {
                int sum = 0;

                signed char* vb = bottom_tm.channel(j / 4 + j % 4);
                signed char* va = kernel_tm.channel(i / 4 + i % 4);

                for (int k = 0; k < K; k++)
                {
                    sum += (int)va[0] * vb[0];

                    va += 1;
                    vb += 1;
                }
                output[0] = float2int8(((float)sum * scale_requant_in0 + bias0) * scale_requant_out0);

                output++;
            }
        }
    }

    // Naive reference GEMM, kept for documentation:
    // // sgemm(int M, int N, int K, float* A, float* B, float* C)
    // {
    //     for (int i=0; i<M; i++)
    //     {
    //         int* output = top_blob.channel(i);
    //         for (int j=0; j<N; j++)
    //         {
    //             int sum = 0;
    //             signed char* vb = (signed char*)bottom_im2row + K * j;
    //             const signed char* va = kernel + K * i;
    //             for (int k=0; k<K; k++)
    //             {
    //                 sum += (int)va[0] * vb[0];
    //                 va += 1;
    //                 vb += 1;
    //             }
    //             output[0] = sum;
    //             output++;
    //         }
    //     }
    // }
}
ccl_massfunc.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <gsl/gsl_integration.h>
#include <gsl/gsl_interp.h>
#include <gsl/gsl_spline.h>
#include <gsl/gsl_errno.h>
#include "ccl.h"

/*----- ROUTINE: dc_NakamuraSuto -----
INPUT: cosmology, scale factor
TASK: Computes the peak threshold: delta_c(z) assuming LCDM.
Cosmology dependence of the critical linear density according to the
spherical-collapse model.
Fitting function from Nakamura & Suto (1997; arXiv:astro-ph/9710107).
*/
double dc_NakamuraSuto(ccl_cosmology *cosmo, double a, int *status){

  // Matter fraction at this scale factor; errors are reported through *status.
  double Om_mz = ccl_omega_x(cosmo, a, ccl_species_m_label, status);
  // EdS value: (3/20) * (12*pi)^(2/3) ~= 1.686
  double dc0 = (3./20.)*pow(12.*M_PI,2./3.);
  // Small logarithmic correction for Omega_m(z) != 1 (NS97 fit).
  double dc = dc0*(1.+0.012299*log10(Om_mz));

  return dc;
}

/*----- ROUTINE: Dv_BryanNorman -----
INPUT: cosmology, scale factor
TASK: Computes the virial collapse density contrast with respect to the
matter density assuming LCDM.
Cosmology dependence of the virial collapse density according to the
spherical-collapse model.
Fitting function from Bryan & Norman (1998; arXiv:astro-ph/9710107).
*/
double Dv_BryanNorman(ccl_cosmology *cosmo, double a, int *status){

  double Om_mz = ccl_omega_x(cosmo, a, ccl_species_m_label, status);
  double x = Om_mz-1.;
  // EdS value: 18*pi^2
  double Dv0 = 18.*pow(M_PI,2);
  // BN98 fit, expressed relative to the matter density (hence /Om_mz).
  double Dv = (Dv0+82.*x-39.*pow(x,2))/Om_mz;

  return Dv;
}

/*----- ROUTINE: sigmaM_m2r -----
INPUT: cosmology, halo mass in Msun
TASK: Converts a halo mass into the comoving Lagrangian smoothing radius
R = (3 M / (4 pi rho_m))^(1/3) used for sigma(R).
*/
static double sigmaM_m2r(ccl_cosmology *cosmo, double halomass, int *status)
{
  double rho_m, smooth_radius;

  // Comoving matter density (evaluated at a=1).
  rho_m = ccl_rho_x(cosmo, 1., ccl_species_m_label, 1, status);

  smooth_radius = pow((3.0*halomass) / (4*M_PI*rho_m), (1.0/3.0));

  return smooth_radius;
}

/*----- ROUTINE: ccl_cosmology_compute_sigma -----
INPUT: cosmology, 2D power spectrum
TASK: Tabulates log(sigma(M,a)) on a (log10 M, a) grid and stores it as a
bicubic gsl_spline2d in cosmo->data.logsigma. No-op if already computed.
On any failure *status is set and the spline is not stored.
*/
void ccl_cosmology_compute_sigma(ccl_cosmology *cosmo, ccl_f2d_t *psp, int *status)
{
  if(cosmo->computed_sigma)
    return;

  // Grid sizes: the linear and log parts of the a-grid share one point.
  int na = cosmo->spline_params.A_SPLINE_NA_SM + cosmo->spline_params.A_SPLINE_NLOG_SM - 1;
  int nm = cosmo->spline_params.LOGM_SPLINE_NM;
  double *m = NULL;
  double *y = NULL;
  double *aa = NULL;

  // create linearly-spaced values of log-mass.
  m = ccl_linear_spacing(cosmo->spline_params.LOGM_SPLINE_MIN, cosmo->spline_params.LOGM_SPLINE_MAX, nm);
  // NOTE(review): m holds log10-masses, so the (m[nm-1]>10E17) guard compares a
  // log value against 1e18 and looks like it can never trigger — confirm intent.
  if (m == NULL ||
      (fabs(m[0]-cosmo->spline_params.LOGM_SPLINE_MIN)>1e-5) ||
      (fabs(m[nm-1]-cosmo->spline_params.LOGM_SPLINE_MAX)>1e-5) ||
      (m[nm-1]>10E17)) {
    *status = CCL_ERROR_MEMORY;
    ccl_cosmology_set_status_message(cosmo,
      "ccl_massfunc.c: ccl_cosmology_compute_sigma(): "
      "Error creating linear spacing in m\n");
  }

  // create scale factor array (log-spaced at low a, linear at high a)
  if (*status == 0) {
    aa = ccl_linlog_spacing(cosmo->spline_params.A_SPLINE_MINLOG_SM,
                            cosmo->spline_params.A_SPLINE_MIN_SM,
                            cosmo->spline_params.A_SPLINE_MAX,
                            cosmo->spline_params.A_SPLINE_NLOG_SM,
                            cosmo->spline_params.A_SPLINE_NA_SM);
    if (aa == NULL) {
      *status = CCL_ERROR_MEMORY;
      ccl_cosmology_set_status_message(cosmo,
        "ccl_massfunc.c: ccl_cosmology_compute_sigma(): "
        "Error creating scale factor array\n");
    }
  }

  // create space for y, to be filled with sigma
  if (*status == 0) {
    y = malloc(sizeof(double)*nm*na);
    if (y == NULL) {
      *status = CCL_ERROR_MEMORY;
      ccl_cosmology_set_status_message(cosmo,
        "ccl_massfunc.c: ccl_cosmology_compute_sigma(): "
        "memory allocation\n");
    }
  }

  // fill in sigma, if no errors have been triggered at this time.
  if (*status == 0) {
    // Rows (a values) are independent, so the outer loop is parallelized.
    // Each thread keeps a private status and publishes it atomically at the end.
    #pragma omp parallel shared(na, aa, nm, m, y, status, cosmo, psp) \
                         default(none)
    {
      int i, j;
      double a_sf, smooth_radius;
      int local_status = *status;

      #pragma omp for
      for (j=0; j<na; j++) {
        a_sf = aa[j];
        for (i=0; i<nm; i++) {
          // m[i] is log10(M); convert to M before computing the radius.
          smooth_radius = sigmaM_m2r(cosmo, pow(10,m[i]), &local_status);
          // Store log(sigma) on the row-major (a, log10 M) grid.
          y[j*nm + i] = log(ccl_sigmaR(cosmo, smooth_radius, a_sf, psp, &local_status));
        }
      } //end omp for

      if (local_status) {
        #pragma omp atomic write
        *status = local_status;
      }
    } //end omp parallel
  }

  gsl_spline2d *lsM = NULL;
  if (*status == 0) {
    lsM = gsl_spline2d_alloc(gsl_interp2d_bicubic, nm, na);
    if (lsM == NULL) {
      *status = CCL_ERROR_MEMORY;
      ccl_cosmology_set_status_message(cosmo,
        "ccl_massfunc.c: ccl_cosmology_compute_sigma(): "
        "error allocating 2D spline\n");
    }
  }

  if(*status == 0) {
    int s2dstatus=gsl_spline2d_init(lsM, m, aa, y, nm, na);
    if (s2dstatus) {
      *status = CCL_ERROR_SPLINE;
      ccl_cosmology_set_status_message(cosmo,
        "ccl_massfunc.c: ccl_cosmology_compute_sigma(): "
        "error initializing spline\n");
    }
  }

  // Only publish the spline on full success; otherwise release it.
  if (*status == 0) {
    cosmo->computed_sigma = true;
    cosmo->data.logsigma = lsM;
  }
  else
    gsl_spline2d_free(lsM);

  // free(NULL) is a no-op, so these are safe on every error path.
  free(aa);
  free(m);
  free(y);
}

/*----- ROUTINE: ccl_sigma_M -----
INPUT: ccl_cosmology * cosmo, double log10(halo mass) in units of Msun,
double scale factor
TASK: returns sigma from the sigmaM interpolation. The interpolation table
must already have been computed (see ccl_cosmology_compute_sigma).
*/ double ccl_sigmaM(ccl_cosmology *cosmo, double log_halomass, double a, int *status) { // Check if sigma has already been calculated if (!cosmo->computed_sigma) { *status = CCL_ERROR_SIGMA_INIT; ccl_cosmology_set_status_message(cosmo, "ccl_massfunc.c: ccl_sigmaM(): " "sigma(M) spline has not been computed!"); return NAN; } double lgsigmaM; int gslstatus = gsl_spline2d_eval_e(cosmo->data.logsigma, log_halomass, a, NULL, NULL, &lgsigmaM); if(gslstatus != GSL_SUCCESS) { ccl_raise_gsl_warning(gslstatus, "ccl_massfunc.c: ccl_sigmaM():"); *status |= gslstatus; } return exp(lgsigmaM); } /*----- ROUTINE: ccl_dlnsigM_dlogM ----- INPUT: ccl_cosmology *cosmo, double halo mass in units of Msun TASK: returns the value of the derivative of ln(sigma^-1) with respect to log10 in halo mass. */ double ccl_dlnsigM_dlogM(ccl_cosmology *cosmo, double log_halomass, double a, int *status) { // Check if sigma has already been calculated if (!cosmo->computed_sigma) { *status = CCL_ERROR_SIGMA_INIT; ccl_cosmology_set_status_message(cosmo, "ccl_massfunc.c: ccl_dlnsigM_dlogM(): " "sigma(M) spline has not been computed!"); return NAN; } double dlsdlgm; int gslstatus = gsl_spline2d_eval_deriv_x_e(cosmo->data.logsigma, log_halomass, a, NULL, NULL, &dlsdlgm); if(gslstatus) { ccl_raise_gsl_warning(gslstatus, "ccl_massfunc.c: ccl_dlnsigM_dlogM():"); *status |= gslstatus; } return -dlsdlgm; }
VerletClusterCells.h
/** * @file VerletClusterCells.h * @author jspahl * @date 25.3.19 */ #pragma once #include <algorithm> #include <cmath> #include <cstdlib> #include <vector> #include "autopas/cells/FullParticleCell.h" #include "autopas/containers/CellBasedParticleContainer.h" #include "autopas/containers/CellBorderAndFlagManager.h" #include "autopas/containers/ParticleDeletedObserver.h" #include "autopas/containers/UnknowingCellBorderAndFlagManager.h" #include "autopas/containers/cellPairTraversals/CellPairTraversal.h" #include "autopas/containers/verletClusterCells/VerletClusterCellsParticleIterator.h" #include "autopas/containers/verletClusterCells/traversals/VCCTraversalInterface.h" #include "autopas/iterators/ParticleIterator.h" #include "autopas/iterators/RegionParticleIterator.h" #include "autopas/utils/ArrayMath.h" #include "autopas/utils/CudaDeviceVector.h" namespace autopas { /** * Particles are divided into clusters. * The VerletClusterCells class uses neighborhood lists for each cluster pair * to calculate pairwise interactions. * It is optimized for a constant, i.e. particle independent, cutoff radius of * the interaction. * @tparam Particle */ template <class Particle> class VerletClusterCells : public CellBasedParticleContainer<FullParticleCell<Particle>>, public internal::ParticleDeletedObserver { public: /** * Constructor of the VerletClusterCells class. * The neighbor lists are build using an estimated density. * The box is divided into cuboids with roughly the * same side length. The rebuildFrequency should be chosen, s.t. the particles do * not move more than a distance of skin/2 between two rebuilds of the lists. 
* @param boxMin the lower corner of the domain * @param boxMax the upper corner of the domain * @param cutoff the cutoff radius of the interaction * @param skin the skin radius * @param clusterSize size of clusters */ VerletClusterCells(const std::array<double, 3> boxMin, const std::array<double, 3> boxMax, double cutoff, double skin = 0, int clusterSize = 32) : CellBasedParticleContainer<FullParticleCell<Particle>>(boxMin, boxMax, cutoff, skin), _boxMinWithHalo(utils::ArrayMath::subScalar(boxMin, cutoff + skin)), _boxMaxWithHalo(utils::ArrayMath::addScalar(boxMax, cutoff + skin)), _clusterSize(clusterSize) { this->_cells.resize(1); _dummyStarts = {0}; } CellType getParticleCellTypeEnum() override { return CellType::FullParticleCell; } /** * @copydoc ParticleContainerInterface::getContainerType() */ [[nodiscard]] ContainerOption getContainerType() const override { return ContainerOption::verletClusterCells; } /** * Function to iterate over all pairs of particles. * This function only handles short-range interactions. 
* @param traversal to be used used */ void iteratePairwise(TraversalInterface *traversal) override { auto *traversalInterface = dynamic_cast<VCCTraversalInterface<FullParticleCell<Particle>> *>(traversal); auto *cellPairTraversal = dynamic_cast<CellPairTraversal<FullParticleCell<Particle>> *>(traversal); if ((!traversalInterface) or (!cellPairTraversal)) { autopas::utils::ExceptionHandler::exception( "trying to use a traversal of wrong type in VerletClusterCells::iteratePairwise"); } traversalInterface->setVerletListPointer(&_neighborCellIds, &_neighborMatrixDim, &_neighborMatrix); if (traversalInterface->getSignature() != _lastTraversalSig or _isValid != ValidityState::cellsAndListsValid) { utils::ExceptionHandler::exception( "VerletClusterCells::iteratePairwise called even though the lists are not valid or the traversal has " "changed."); } cellPairTraversal->setCellsToTraverse(this->_cells); traversal->initTraversal(); traversal->traverseParticlePairs(); traversal->endTraversal(); } /** * @copydoc VerletLists::addParticleImpl() */ void addParticleImpl(const Particle &p) override { _isValid = ValidityState::invalid; removeDummiesFromFirstCell(); // add particle somewhere, because lists will be rebuild anyways this->_cells[0].addParticle(p); ++_dummyStarts[0]; } /** * @copydoc VerletLists::addHaloParticleImpl() */ void addHaloParticleImpl(const Particle &haloParticle) override { Particle p_copy = haloParticle; _isValid = ValidityState::invalid; removeDummiesFromFirstCell(); p_copy.setOwnershipState(OwnershipState::halo); // add particle somewhere, because lists will be rebuild anyways this->_cells[0].addParticle(p_copy); ++_dummyStarts[0]; } /** * Update a halo particle of the container with the given haloParticle. * @param haloParticle Particle to be updated. * @return Returns true if the particle was updated, false if no particle could be found. 
*/ bool updateHaloParticle(const Particle &haloParticle) override { Particle pCopy = haloParticle; pCopy.setOwnershipState(OwnershipState::halo); for (auto it = getRegionIterator(utils::ArrayMath::subScalar(pCopy.getR(), this->getSkin() / 2), utils::ArrayMath::addScalar(pCopy.getR(), this->getSkin() / 2), IteratorBehavior::haloOnly); it.isValid(); ++it) { if (pCopy.getID() == it->getID()) { *it = pCopy; return true; } } return false; } /** * Rebuilds the neighbor lists. * @param traversal The used traversal. */ void rebuildNeighborLists(TraversalInterface *traversal) override { auto *traversalInterface = dynamic_cast<VCCTraversalInterface<FullParticleCell<Particle>> *>(traversal); if (!traversalInterface) { autopas::utils::ExceptionHandler::exception( "trying to use a traversal of wrong type in VerletClusterCells::iteratePairwise"); } if (_isValid == ValidityState::invalid) { rebuildClusterStructure(); } traversalInterface->setVerletListPointer(&_neighborCellIds, &_neighborMatrixDim, &_neighborMatrix); traversalInterface->rebuildVerlet(_cellsPerDim, this->_cells, _boundingBoxes, std::ceil(this->getInteractionLength() * _gridSideLengthReciprocal), this->getInteractionLength()); _lastTraversalSig = traversalInterface->getSignature(); _isValid = ValidityState::cellsAndListsValid; } /** * @copydoc VerletLists::deleteHaloParticles */ void deleteHaloParticles() override { _isValid = ValidityState::invalid; for (size_t i = 0; i < this->_cells.size(); ++i) { for (size_t j = 0; j < _dummyStarts[i];) { if (not this->_cells[i][j].isOwned()) { // set position outside the domain with other dummy particles auto pos = this->_cells[i][j].getR(); pos[0] += _boxMaxWithHalo[2] + 8 * this->getInteractionLength(); this->_cells[i][j].setR(pos); // one more dummy particle --_dummyStarts[i]; // swap last non dummy particle with the halo particle to remove std::swap(this->_cells[i][j], this->_cells[i][_dummyStarts[i]]); } else { // move on if no halo particle was removed ++j; } } } } /** * 
@copydoc VerletLists::updateContainer()
   */
  std::vector<Particle> updateContainer() override {
    // first delete all halo particles.
    this->deleteHaloParticles();

    // Delete dummy particles.
#ifdef AUTOPAS_OPENMP
#pragma omp parallel for
#endif
    for (auto i = 0ul; i < this->_cells.size(); ++i) {
      this->_cells[i].deleteDummyParticles();
    }

    // next find invalid particles (owned particles that left the box)
    std::vector<Particle> invalidParticles;
#ifdef AUTOPAS_OPENMP
#pragma omp parallel
#endif
    {
      // per-thread collection, merged under a critical section below
      std::vector<Particle> myInvalidParticles;
      for (auto iter = this->begin(IteratorBehavior::ownedOnly); iter.isValid(); ++iter) {
        if (not utils::inBox(iter->getR(), this->getBoxMin(), this->getBoxMax())) {
          myInvalidParticles.push_back(*iter);
          internal::deleteParticle(iter);
        }
      }
#ifdef AUTOPAS_OPENMP
#pragma omp critical
#endif
      invalidParticles.insert(invalidParticles.end(), myInvalidParticles.begin(), myInvalidParticles.end());
    }
    _isValid = ValidityState::invalid;
    return invalidParticles;
  }

  /**
   * @copydoc ParticleContainerInterface::getTraversalSelectorInfo()
   */
  TraversalSelectorInfo getTraversalSelectorInfo() const override {
    // z is not gridded (cells span the full z-extent), hence the box height.
    return TraversalSelectorInfo(_cellsPerDim, this->getInteractionLength(),
                                 {_gridSideLength, _gridSideLength, this->getBoxMax()[2] - this->getBoxMin()[2]},
                                 _clusterSize);
  }

  ParticleIteratorWrapper<Particle, true> begin(IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override {
    // _boxMaxWithHalo[0] + 8 * interactionLength is the x-cutoff beyond which
    // particles are treated as dummies by the iterator.
    return ParticleIteratorWrapper<Particle, true>(
        new internal::VerletClusterCellsParticleIterator<Particle, FullParticleCell<Particle>, true>(
            &this->_cells, _dummyStarts, _boxMaxWithHalo[0] + 8 * this->getInteractionLength(), behavior,
            // if the container is valid, we will have to pass the this-ptr as ParticleDeletedObserver, to ensure that
            // the container is set to invalid if a particle is deleted.
            _isValid != ValidityState::invalid ? this : nullptr));
  }

  ParticleIteratorWrapper<Particle, false> begin(
      IteratorBehavior behavior = IteratorBehavior::haloAndOwned) const override {
    return ParticleIteratorWrapper<Particle, false>(
        new internal::VerletClusterCellsParticleIterator<Particle, FullParticleCell<Particle>, false>(
            &this->_cells, _dummyStarts, _boxMaxWithHalo[0] + 8 * this->getInteractionLength(), behavior));
  }

  ParticleIteratorWrapper<Particle, true> getRegionIterator(
      const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner,
      IteratorBehavior behavior = IteratorBehavior::haloAndOwned) override {
    // Special iterator requires sorted cells
#ifdef AUTOPAS_OPENMP
#pragma omp single
#endif
    if (_isValid == ValidityState::invalid) {
      rebuildClusterStructure();
    }
    // there is an implicit barrier at end of single!

    // restrict search area to the region where particles are
    const auto lowerCornerInBounds = utils::ArrayMath::max(lowerCorner, _boxMinWithHalo);
    const auto upperCornerInBounds = utils::ArrayMath::min(higherCorner, _boxMaxWithHalo);

    // Find cells intersecting the search region (widened by the skin).
    size_t xmin = (size_t)((lowerCornerInBounds[0] - _boxMinWithHalo[0] - this->getSkin()) * _gridSideLengthReciprocal);
    size_t ymin = (size_t)((lowerCornerInBounds[1] - _boxMinWithHalo[1] - this->getSkin()) * _gridSideLengthReciprocal);

    size_t xlength =
        ((size_t)((upperCornerInBounds[0] - _boxMinWithHalo[0] + this->getSkin()) * _gridSideLengthReciprocal) - xmin) +
        1;
    size_t ylength =
        ((size_t)((upperCornerInBounds[1] - _boxMinWithHalo[1] + this->getSkin()) * _gridSideLengthReciprocal) - ymin) +
        1;

    // Collect the indices of all intersected cells, row by row.
    std::vector<size_t> cellsOfInterest(xlength * ylength);
    auto cellsOfInterestIterator = cellsOfInterest.begin();
    int start = xmin + ymin * _cellsPerDim[0];

    for (size_t i = 0; i < ylength; ++i) {
      std::iota(cellsOfInterestIterator, cellsOfInterestIterator + xlength, start + i * _cellsPerDim[0]);
      cellsOfInterestIterator += xlength;
    }

    return ParticleIteratorWrapper<Particle, true>(
        new internal::VerletClusterCellsRegionParticleIterator<Particle, FullParticleCell<Particle>, true>(
            &this->_cells, _dummyStarts, lowerCornerInBounds, upperCornerInBounds, cellsOfInterest,
            _boxMaxWithHalo[0] + 8 * this->getInteractionLength(), behavior, this->getSkin(),
            // if the container is valid (in this case, it is, as we rebuilt it before), we will have to pass the
            // this-ptr as ParticleDeletedObserver, to ensure that the container is set to invalid if a particle is
            // deleted.
            this));
  }

  ParticleIteratorWrapper<Particle, false> getRegionIterator(
      const std::array<double, 3> &lowerCorner, const std::array<double, 3> &higherCorner,
      IteratorBehavior behavior = IteratorBehavior::haloAndOwned) const override {
    // restrict search area to the region where particles are
    const auto lowerCornerInBounds = utils::ArrayMath::max(lowerCorner, _boxMinWithHalo);
    const auto upperCornerInBounds = utils::ArrayMath::min(higherCorner, _boxMaxWithHalo);

    // Special iterator requires sorted cells.
    // Otherwise all cells are traversed with the general Iterator.
    if (_isValid != ValidityState::invalid) {
      // Find cells intersecting the search region
      size_t xmin =
          (size_t)((lowerCornerInBounds[0] - _boxMinWithHalo[0] - this->getSkin()) * _gridSideLengthReciprocal);
      size_t ymin =
          (size_t)((lowerCornerInBounds[1] - _boxMinWithHalo[1] - this->getSkin()) * _gridSideLengthReciprocal);

      size_t xlength =
          (((upperCornerInBounds[0] - _boxMinWithHalo[0] + this->getSkin()) * _gridSideLengthReciprocal) - xmin) + 1;
      size_t ylength =
          (((upperCornerInBounds[1] - _boxMinWithHalo[1] + this->getSkin()) * _gridSideLengthReciprocal) - ymin) + 1;

      std::vector<size_t> cellsOfInterest(xlength * ylength);
      auto cellsOfInterestIterator = cellsOfInterest.begin();
      int start = xmin + ymin * _cellsPerDim[0];

      for (size_t i = 0; i < ylength; ++i) {
        std::iota(cellsOfInterestIterator, cellsOfInterestIterator + xlength, start + i * _cellsPerDim[0]);
        cellsOfInterestIterator += xlength;
      }

      return ParticleIteratorWrapper<Particle, false>(
          new internal::VerletClusterCellsRegionParticleIterator<Particle, FullParticleCell<Particle>, false>(
              &this->_cells, _dummyStarts, lowerCornerInBounds, upperCornerInBounds, cellsOfInterest,
              _boxMaxWithHalo[0] + 8 * this->getInteractionLength(), behavior, this->getSkin()));
    } else {
      // check all cells
      // As dummy particles are outside the domain they are only found if the search region is outside the domain.
      std::vector<size_t> cellsOfInterest(this->_cells.size());
      std::iota(cellsOfInterest.begin(), cellsOfInterest.end(), 0);
      return ParticleIteratorWrapper<Particle, false>(
          new internal::RegionParticleIterator<Particle, FullParticleCell<Particle>, false>(
              &this->_cells, lowerCornerInBounds, upperCornerInBounds, cellsOfInterest,
              &internal::UnknowingCellBorderAndFlagManager::get(), behavior));
    }
  }

  /**
   * Get the number of particles excluding dummy Particles saved in the container.
   * @return Number of particles in the container.
   */
  unsigned long getNumParticles() const override {
    // The dummy start of each cell equals its count of real particles.
    size_t numParticles = 0ul;
#ifdef AUTOPAS_OPENMP
    /// @todo: find a sensible value for magic number
    // numThreads should be at least 1 and maximal max_threads
    int numThreads = std::max(1, std::min(omp_get_max_threads(), (int)(this->_cells.size() / 1000)));
    AutoPasLog(trace, "Using {} threads", numThreads);
#pragma omp parallel for num_threads(numThreads) reduction(+ : numParticles)
#endif
    for (size_t index = 0; index < _dummyStarts.size(); ++index) {
      numParticles += _dummyStarts[index];
    }
    return numParticles;
  }

  /**
   * Deletes all particles from the container.
   */
  void deleteAllParticles() override {
    _isValid = ValidityState::invalid;
    std::fill(_dummyStarts.begin(), _dummyStarts.end(), 0);
    CellBasedParticleContainer<FullParticleCell<Particle>>::deleteAllParticles();
  }

  /**
   * Deletes all Dummy Particles in the container
   */
  void deleteDummyParticles() {
    for (size_t i = 0; i < this->_cells.size(); ++i) {
      // Truncate each non-empty cell at its dummy start, dropping the
      // trailing dummy padding (resize needs a template value, hence begin()).
      if (this->_cells[i].begin().isValid()) {
        this->_cells[i].resize(_dummyStarts[i], *(this->_cells[i].begin()));
      }
    }
    _isValid = ValidityState::invalid;
  }

 protected:
  /**
   * Rebuild the container structure:
   * - Recalculate grids and clusters
   * - Adds padding to clusters.
   * Does NOT rebuild the neighbor lists.
   */
  void rebuildClusterStructure() {
    deleteDummyParticles();
    _boundingBoxes.clear();
    // get the dimensions and volumes of the box
    std::array<double, 3> boxSize{};
    double volume = 1.0;

    for (int d = 0; d < 3; ++d) {
      boxSize[d] = _boxMaxWithHalo[d] - _boxMinWithHalo[d];
      volume *= boxSize[d];
    }

    // get all particles and clear clusters
    std::vector<Particle> invalidParticles;

    for (size_t i = 0; i < this->_cells.size(); ++i) {
      for (auto it = this->_cells[i].begin(); it != this->_cells[i].end(); ++it) {
        Particle &p = *it;
        if (utils::inBox(p.getR(), this->getBoxMin(), this->getBoxMax())) {
          invalidParticles.push_back(p);
        } else {
          // An owned particle outside the box means particles were changed
          // without a container update -> report the leak loudly.
          if (p.isOwned()) {
            autopas::utils::ExceptionHandler::exception(
                "VerletClusterCells::rebuildClusterStructure(): Detected particle leak: Possible reason: \n"
                "Please ensure that you call an updateContainer(true) after manually adding or removing particles! "
                "\nThe particle that will be lost: {}",
                p.toString());
          } else {
            invalidParticles.push_back(p);
          }
        }
      }
      this->_cells[i].clear();
    }

    // estimate particle density
    double density = (std::max(1.0, (double)invalidParticles.size())) / volume;

    // guess optimal grid side length such that a cell holds ~ one cluster
    _gridSideLength = std::cbrt(((double)_clusterSize) / density);
    _gridSideLengthReciprocal = 1 / _gridSideLength;

    // get cells per dimension; only x and y are gridded, z spans the box
    size_t sizeGrid = 1;
    for (int d = 0; d < 2; d++) {
      _cellsPerDim[d] = static_cast<size_t>(std::ceil(boxSize[d] * _gridSideLengthReciprocal));
      sizeGrid *= _cellsPerDim[d];
    }
    _cellsPerDim[2] = static_cast<size_t>(1);

    // resize to number of grids
    this->_cells.resize(sizeGrid);
    _dummyStarts.clear();
    _dummyStarts.resize(sizeGrid);
    _boundingBoxes.resize(sizeGrid);

    // put particles into grid cells (index from the x/y position)
    for (size_t i = 0; i < invalidParticles.size(); ++i) {
      size_t index =
          (size_t)((invalidParticles[i].getR()[0] - _boxMinWithHalo[0]) * _gridSideLengthReciprocal) +
          (size_t)((invalidParticles[i].getR()[1] - _boxMinWithHalo[1]) * _gridSideLengthReciprocal) * _cellsPerDim[0];
      this->_cells[index].addParticle(invalidParticles[i]);
    }

    // sort by last dimension and add dummy particles to pad the last cluster
#if defined(AUTOPAS_OPENMP)
#pragma omp parallel for schedule(guided)
#endif
    for (size_t i = 0; i < sizeGrid; ++i) {
      this->_cells[i].sortByDim(2);
      const auto numParticles = this->_cells[i].numParticles();
      _dummyStarts[i] = numParticles;
      auto sizeLastCluster = (numParticles % _clusterSize);
      unsigned int numDummys = sizeLastCluster != 0 ? _clusterSize - sizeLastCluster : 0;
      if (numDummys != 0) {
        // Dummies are copies of a real particle, placed far outside the halo
        // box (unique per cell/dummy via the +i / +j offsets).
        Particle dummyParticle = *(this->_cells[i].begin());
        for (unsigned int j = 0; j < numDummys; ++j) {
          dummyParticle.setR({_boxMaxWithHalo[0] + 8 * this->getInteractionLength() + static_cast<double>(i),
                              _boxMaxWithHalo[1] + 8 * this->getInteractionLength() + static_cast<double>(j),
                              _boxMaxWithHalo[2] + 8 * this->getInteractionLength()});
          dummyParticle.setID(std::numeric_limits<size_t>::max());
          dummyParticle.setOwnershipState(OwnershipState::dummy);
          this->_cells[i].addParticle(dummyParticle);
        }
      }
    }

    // make bounding boxes
#if defined(AUTOPAS_OPENMP)
#pragma omp parallel for schedule(guided)
#endif
    for (size_t i = 0; i < sizeGrid; ++i) {
      const size_t nClusters = this->_cells[i].numParticles() / _clusterSize;
      // Initialize each box inverted (min=boxMax, max=boxMin) so the first
      // expandBoundingBox call snaps it to the first particle.
      _boundingBoxes[i].resize(nClusters, {_boxMaxWithHalo[0], _boxMaxWithHalo[1], _boxMaxWithHalo[2],
                                           _boxMinWithHalo[0], _boxMinWithHalo[1], _boxMinWithHalo[2]});
      for (size_t cid = 0; cid < nClusters; ++cid)
        // NOTE(review): the inner loop runs to _dummyStarts[i] (end of real
        // particles), so box cid also absorbs all particles of later clusters;
        // a per-cluster bound would stop at (cid + 1) * _clusterSize — confirm
        // whether the cumulative box is intentional.
        for (size_t pid = cid * _clusterSize; pid < _dummyStarts[i]; ++pid) {
          expandBoundingBox(_boundingBoxes[i][cid], this->_cells[i][pid]);
        }
    }
    _isValid = ValidityState::cellsValidListsInvalid;
  }

  /**
   * If a particle is deleted, we want _isValid to be set to invalid, as the tower structure is invalidated.
   *
   * This function is not called, if a particle from the _particlesToAdd vector is deleted!
   */
  void notifyParticleDeleted() override {
    // this is potentially called from a threaded environment, so we have to make this atomic here!
    _isValid.store(ValidityState::invalid, std::memory_order::memory_order_relaxed);
  }

 private:
  /**
   * Expands a bounding Box such the Particle is in it.
   * @param box
   * @param p
   */
  void expandBoundingBox(std::array<double, 6> &box, const Particle &p) {
    for (int i = 0; i < 3; ++i) {
      box[i] = std::min(box[i], p.getR()[i]);
      box[3 + i] = std::max(box[3 + i], p.getR()[i]);
    }
  }

  /**
   * Checks if particle is within skin of bounding box.
   * @param box
   * @param p
   */
  bool particleInSkinOfBox(const std::array<double, 6> &box, const Particle &p) const {
    for (int i = 0; i < 3; ++i) {
      if (box[0 + i] - this->getSkin() > p.getR()[i] or box[3 + i] + this->getSkin() < p.getR()[i]) return false;
    }
    return true;
  }

  /**
   * Removes dummy particles from the first cell.
   */
  void removeDummiesFromFirstCell() {
    // removes dummy particles in first cell, if the cell is not empty!
    if (this->_cells[0].begin().isValid()) {
      this->_cells[0].resize(_dummyStarts[0], *(this->_cells[0].begin()));
    }
  }

  std::array<double, 3> _boxMinWithHalo, _boxMaxWithHalo;

  /// indices where dummy particles in the cells start
  std::vector<size_t> _dummyStarts;

  // number of particles in a cluster
  unsigned int _clusterSize;

  // id of neighbor clusters of a clusters in the form [mycell][mycluster] pair(othercell, othercluster)
  std::vector<std::vector<std::vector<std::pair<size_t, size_t>>>> _neighborCellIds;

  size_t _neighborMatrixDim;
  utils::CudaDeviceVector<unsigned int> _neighborMatrix;

  // bounding boxes of all clusters (xmin,ymin,zmin,xmax,ymax,zmax)
  std::vector<std::vector<std::array<double, 6>>> _boundingBoxes;

  // side length of xy-grid and reciprocal
  double _gridSideLength;
  double _gridSideLengthReciprocal;

  // dimensions of grid
  std::array<size_t, 3> _cellsPerDim;

  /**
   * Enum to specify the validity of this container.
   */
  enum class ValidityState : unsigned char {
    invalid = 0,                 // nothing is valid.
    cellsValidListsInvalid = 1,  // only the cell structure is valid, but the lists are not.
    cellsAndListsValid = 2       // the cells and lists are valid
  };

  // specifies the validity of the cells and lists
  std::atomic<ValidityState> _isValid{ValidityState::invalid};

  /// Signature of the last Traversal to trigger rebuild when a new one is used
  std::tuple<TraversalOption, DataLayoutOption, bool> _lastTraversalSig;
};

}  // namespace autopas
omp_sections.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Each worker reports which thread of the parallel team executed it. */
static void funcA(void)
{
   printf("In funcA: this section is executed by thread %d\n",
          omp_get_thread_num());
}

static void funcB(void)
{
   printf("In funcB: this section is executed by thread %d\n",
          omp_get_thread_num());
}

/*
 * Demonstrates the OpenMP sections work-sharing construct: inside one
 * parallel region, funcA and funcB are handed to (potentially) different
 * threads, one section each.
 */
int main(void)
{
   #pragma omp parallel
   {
      #pragma omp sections
      {
         #pragma omp section
         funcA();

         #pragma omp section
         funcB();
      } /*-- End of sections block --*/
   }    /*-- End of parallel region --*/

   return 0;
}
queue.h
// -*- C++ -*- // Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/queue.h * @brief Lock-free double-ended queue. * This file is a GNU parallel extension to the Standard C++ Library. */ // Written by Johannes Singler. #ifndef _GLIBCXX_PARALLEL_QUEUE_H #define _GLIBCXX_PARALLEL_QUEUE_H 1 #include <parallel/types.h> #include <parallel/base.h> #include <parallel/compatibility.h> /** @brief Decide whether to declare certain variable volatile in this file. */ #define _GLIBCXX_VOLATILE volatile namespace __gnu_parallel { /**@brief Double-ended queue of bounded size, allowing lock-free * atomic access. push_front() and pop_front() must not be called * concurrently to each other, while pop_back() can be called * concurrently at all times. * @c empty(), @c size(), and @c top() are intentionally not provided. * Calling them would not make sense in a concurrent setting. * @param T Contained element type. 
 */
  template<typename T>
    class RestrictedBoundedConcurrentQueue
    {
    private:
      /** @brief Array of elements, seen as cyclic buffer. */
      T* base;

      /** @brief Maximal number of elements contained at the same time. */
      sequence_index_t max_size;

      /** @brief Cyclic begin and end pointers contained in one
          atomically changeable value. */
      _GLIBCXX_VOLATILE lcas_t borders;

    public:
      /** @brief Constructor. Not to be called concurrent, of course.
       *  @param max_size Maximal number of elements to be contained. */
      RestrictedBoundedConcurrentQueue(sequence_index_t max_size)
      {
        this->max_size = max_size;
        base = new T[max_size];
        borders = encode2(0, 0);
        // Make the initialized state visible to other threads.
#pragma omp flush
      }

      /** @brief Destructor. Not to be called concurrent, of course. */
      ~RestrictedBoundedConcurrentQueue()
      { delete[] base; }

      /** @brief Pushes one element into the queue at the front end.
       *  Must not be called concurrently with pop_front(). */
      void
      push_front(const T& t)
      {
        lcas_t former_borders = borders;
        int former_front, former_back;
        decode2(former_borders, former_front, former_back);
        // Write the element first, then publish it by bumping the front index.
        *(base + former_front % max_size) = t;
#if _GLIBCXX_ASSERTIONS
        // Otherwise: front - back > max_size eventually.
        _GLIBCXX_PARALLEL_ASSERT(((former_front + 1) - former_back)
                                 <= max_size);
#endif
        fetch_and_add(&borders, encode2(1, 0));
      }

      /** @brief Pops one element from the queue at the front end.
       *  Must not be called concurrently with push_front()
       *  (LIFO behavior; see the class description). */
      bool
      pop_front(T& t)
      {
        int former_front, former_back;
#pragma omp flush
        decode2(borders, former_front, former_back);
        while (former_front > former_back)
          {
            // Chance: try to claim the front element via CAS on the packed
            // (front, back) word; retry with fresh values on failure.
            lcas_t former_borders = encode2(former_front, former_back);
            lcas_t new_borders = encode2(former_front - 1, former_back);
            if (compare_and_swap(&borders, former_borders, new_borders))
              {
                t = *(base + (former_front - 1) % max_size);
                return true;
              }
#pragma omp flush
            decode2(borders, former_front, former_back);
          }
        return false;
      }

      /** @brief Pops one element from the queue at the back end
       *  (FIFO behavior). Safe to call concurrently at all times,
       *  per the class description. */
      bool
      pop_back(T& t)        //queue behavior
      {
        int former_front, former_back;
#pragma omp flush
        decode2(borders, former_front, former_back);
        while (former_front > former_back)
          {
            // Chance: claim the back element by advancing the back index.
            lcas_t former_borders = encode2(former_front, former_back);
            lcas_t new_borders = encode2(former_front, former_back + 1);
            if (compare_and_swap(&borders, former_borders, new_borders))
              {
                t = *(base + former_back % max_size);
                return true;
              }
#pragma omp flush
            decode2(borders, former_front, former_back);
          }
        return false;
      }
    };
}       //namespace __gnu_parallel

#undef _GLIBCXX_VOLATILE

#endif /* _GLIBCXX_PARALLEL_QUEUE_H */
IPCComm.h
/*! @brief Flag for checking if this header has already been included. */ #ifndef YGGIPCCOMM_H_ #define YGGIPCCOMM_H_ // OpenSimRoot has a conflicting definition for 'msg' #ifdef USE_OSR_YGG #undef IPCINSTALLED #endif #ifdef IPCINSTALLED #include <fcntl.h> /* For O_* constants */ #include <sys/stat.h> /* For mode constants */ #include <sys/msg.h> #include <sys/types.h> #include <sys/sem.h> #include <sys/shm.h> #endif /*IPCINSTALLED*/ #include <CommBase.h> #ifdef __cplusplus /* If this is a C++ compiler, use C linkage */ extern "C" { #endif #ifdef IPCINSTALLED /*! @brief Maximum number of channels. */ #define _yggTrackChannels 256 /*! @brief Names of channels in use. */ static int _yggChannelNames[_yggTrackChannels]; //static char * _yggChannelNames[_yggTrackChannels]; /*! @brief Number of channels in use. */ static unsigned _yggChannelsUsed = 0; static unsigned _ipc_rand_seeded = 0; /*! @brief Message buffer structure. */ typedef struct msgbuf_t { long mtype; //!< Message buffer type char data[YGG_MSG_MAX]; //!< Buffer for the message } msgbuf_t; /*! @brief Check if an IPC channel can be initialized. @param[in] comm comm_t* Comm structure with name that should be checked. @returns int -1 if the channel can't be initialized. 
*/
static inline int check_channels(comm_t* comm) {
  // Fail if name is empty
  if (strlen(comm->name) == 0) {
    ygglog_error("Cannot create channel with empty name.");
    return -1;
  }
  // Fail if trying to re-use the same channel twice
  unsigned i;
  char *key = comm->address;
  // error_code is accumulated instead of returning directly so that the
  // function never returns from inside the OpenMP critical section.
  int error_code = 0;
#ifdef _OPENMP
#pragma omp critical (ipc)
  {
#endif
  for (i = 0; i < _yggChannelsUsed; i++ ) {
    // Channels are compared by their integer IPC key, not by name string.
    if (_yggChannelNames[i] == atoi(comm->address)) {
    /* if (0 == strcmp(_yggChannelNames[i], key)) { */
      ygglog_error("Attempt to re-use channel: name=%s, key=%s, i=%d",
		   comm->name, key, i);
      error_code = -1;
      break;
    }
  }
  // Fail if > _yggTrackChannels channels used
  if ((!error_code) && (_yggChannelsUsed >= _yggTrackChannels)) {
    ygglog_error("Too many channels in use, max: %d", _yggTrackChannels);
    error_code = -1;
  }
#ifdef _OPENMP
  }
#endif
  return error_code;
};

/*! @brief Add a new channel to the list of existing channels.
  @param[in] comm const comm_t* Comm whose address (IPC key) is recorded.
  NOTE(review): no capacity check here — callers are expected to have
  called check_channels() first, which enforces _yggTrackChannels. */
static inline void add_channel(const comm_t* comm) {
#ifdef _OPENMP
#pragma omp critical (ipc)
  {
#endif
  // printf("add_channel(%s): %d, %s\n", comm->name, _yggChannelsUsed, comm->address);
  _yggChannelNames[_yggChannelsUsed++] = atoi(comm->address);
#ifdef _OPENMP
  }
#endif
};

/*! @brief Remove a channel.
  @param[in] comm comm_t* Comm with channel that should be removed.
  @param[in] close_comm int If 1, the queue will be closed, otherwise it will
  just be removed from the register and it is assumed that another process
  will close it.
  @returns int -1 if removal not successful.
*/
static inline int remove_comm(const comm_t* comm, const int close_comm) {
  int ret;
  if (close_comm) {
    // Destroy the System V queue itself; errors are deliberately ignored
    // (see the commented-out check below).
    ret = msgctl(((int*)(comm->handle))[0], IPC_RMID, NULL);
    /* if (ret < 0) { */
    /*   ygglog_error("remove_comm(%s): Could not close comm.", comm->name); */
    /*   return ret; */
    /* } */
  }
  ret = -1;
  unsigned i;
  int ich = atoi(comm->address);
#ifdef _OPENMP
#pragma omp critical (ipc)
  {
#endif
  for (i = 0; i < _yggChannelsUsed; i++) {
    if (ich == _yggChannelNames[i]) {
      // Compact the registry over the removed slot.  The count uses the
      // full array size, which stays within bounds of _yggChannelNames
      // but also copies unused trailing entries.
      memmove(_yggChannelNames + i, _yggChannelNames + i + 1,
	      (_yggTrackChannels - (i + 1))*sizeof(int));
      _yggChannelsUsed--;
      ret = 0;
      break;
    }
  }
  if (ret < 0) {
    ygglog_error("remove_comm(%s): Could not locate comm in register.", comm->name);
  }
  /* if ((ret != -1) && (ich == (int)(_yggChannelsUsed - 1))) { */
  /*   /\* memmove(_yggChannelNames + ich, _yggChannelNames + ich + 1, *\/ */
  /*   /\* 	    (_yggTrackChannels - (ich + 1))*sizeof(char*)); *\/ */
  /*   _yggChannelsUsed--; */
  /* } */
#ifdef _OPENMP
  }
#endif
  return ret;
};

/*! @brief Create a new channel.
  @param[in] comm comm_t * Comm structure initialized with new_comm_base.
  @returns int -1 if the address could not be created.
*/ static inline int new_ipc_address(comm_t *comm) { int ret; // TODO: small chance of reusing same number int key = 0; #ifdef _OPENMP #pragma omp critical (ipc) { #endif if (!(_ipc_rand_seeded)) { srand(ptr2seed(comm)); _ipc_rand_seeded = 1; } #ifdef _OPENMP } #endif while (key == 0) { key = rand(); } if (strlen(comm->name) == 0) { sprintf(comm->name, "tempnewIPC.%d", key); } else { ret = check_channels(comm); if (ret < 0) return ret; } sprintf(comm->address, "%d", key); int *fid = (int*)malloc(sizeof(int)); if (fid == NULL) { ygglog_error("new_ipc_address: Could not malloc queue fid."); return -1; } fid[0] = msgget(key, (IPC_CREAT | 0777)); if (fid[0] < 0) { ygglog_error("new_ipc_address: msgget(%d, %d | 0777) ret(%d), errno(%d): %s", key, IPC_CREAT, fid[0], errno, strerror(errno)); return -1; } comm->handle = (void*)fid; add_channel(comm); return 0; }; /*! @brief Initialize a sysv_ipc communicator. @param[in] comm comm_t * Comm structure initialized with init_comm_base. @returns int -1 if the comm could not be initialized. */ static inline int init_ipc_comm(comm_t *comm) { if (!(comm->flags & COMM_FLAG_VALID)) return -1; if (strlen(comm->name) == 0) { sprintf(comm->name, "tempinitIPC.%s", comm->address); } else { int ret = check_channels(comm); if (ret < 0) return ret; } add_channel(comm); int qkey = atoi(comm->address); int *fid = (int *)malloc(sizeof(int)); if (fid == NULL) { ygglog_error("init_ipc_comm: Could not malloc queue fid."); return -1; } fid[0] = msgget(qkey, 0600); if (fid[0] < 0) { ygglog_error("init_ipc_address: msgget(%d, 0600) ret(%d), errno(%d): %s", qkey, fid[0], errno, strerror(errno)); return -1; } comm->handle = (void*)fid; return 0; }; /*! @brief Perform deallocation for basic communicator. @param[in] x comm_t* Pointer to communicator to deallocate. @returns int 1 if there is an error, 0 otherwise. 
*/
static inline int free_ipc_comm(comm_t *x) {
  if (x->handle != NULL) {
    // Only the receiving side destroys the queue; the sending side just
    // unregisters it and assumes the receiver will close it.
    if (strcmp(x->direction, "recv") == 0) {
      remove_comm(x, 1);
    } else {
      remove_comm(x, 0);
    }
    free(x->handle);
    x->handle = NULL;
  }
  // NOTE(review): always returns 0 even though the doc comment above allows
  // 1 on error — the result of remove_comm is ignored.  Confirm intended.
  return 0;
};

/*! @brief Get number of messages in the comm.
  @param[in] x comm_t* Communicator to check.
  @returns int Number of messages. -1 indicates an error.
*/
static inline int ipc_comm_nmsg(const comm_t *x) {
  struct msqid_ds buf;
  if (x->handle == NULL) {
    ygglog_error("ipc_comm_nmsg: Queue handle is NULL.");
    return -1;
  }
  int rc = msgctl(((int*)x->handle)[0], IPC_STAT, &buf);
  if (rc != 0) {
    /* ygglog_error("ipc_comm_nmsg: Could not access queue."); */
    // Deliberate best-effort: a stat failure is reported as "no messages"
    // rather than as an error (error logging above is commented out).
    return 0;
  }
  // msg_qnum is the current number of messages on the queue.
  int ret = buf.msg_qnum;
  return ret;
};

/*! @brief Send a message to the comm.
  Send a message smaller than YGG_MSG_MAX bytes to an output comm. If the
  message is larger, it will not be sent.
  @param[in] x comm_t* structure that comm should be sent to.
  @param[in] data character pointer to message that should be sent.
  @param[in] len size_t length of message to be sent.
  @returns int 0 if send successful, -1 if send unsuccessful.
*/
static inline int ipc_comm_send(const comm_t *x, const char *data, const size_t len) {
  ygglog_debug("ipc_comm_send(%s): %d bytes", x->name, (int)len);
  if (comm_base_send(x, data, len) == -1)
    return -1;
  // BUGFIX: enforce the documented contract ("if the message is larger, it
  // will not be sent").  Previously an oversized len overflowed t.data,
  // which is only YGG_MSG_MAX bytes.
  if (len > YGG_MSG_MAX) {
    ygglog_error("ipc_comm_send(%s): message (%d bytes) exceeds YGG_MSG_MAX (%d bytes).",
		 x->name, (int)len, (int)YGG_MSG_MAX);
    return -1;
  }
  msgbuf_t t;
  t.mtype = 1;
  memcpy(t.data, data, len);
  int ret = -1;
  int handle = ((int*)(x->handle))[0];
  // Retry loop: IPC_NOWAIT makes msgsnd fail instead of block, so back off
  // and retry while the queue is full.
  while (1) {
    ret = msgsnd(handle, &t, len, IPC_NOWAIT);
    ygglog_debug("ipc_comm_send(%s): msgsnd returned %d", x->name, ret);
    if (ret == 0)
      break;
    if ((ret == -1) && (errno == EAGAIN)) {
      ygglog_debug("ipc_comm_send(%s): msgsnd, sleep", x->name);
      usleep(YGG_SLEEP_TIME);
    } else {
      struct msqid_ds buf;
      int rtrn = msgctl(handle, IPC_STAT, &buf);
      // Heuristic fullness check.  NOTE(review): msg_qnum counts messages
      // while msg_qbytes is a byte limit, so qnum + len > qbytes mixes
      // units — confirm whether __msg_cbytes was intended here.
      if ((rtrn == 0) && ((buf.msg_qnum + len) > buf.msg_qbytes)) {
	ygglog_debug("ipc_comm_send(%s): msgsnd, queue full, sleep", x->name);
	usleep(YGG_SLEEP_TIME);
      } else {
	ygglog_error("ipc_comm_send: msgsend(%d, %p, %d, IPC_NOWAIT) ret(%d), errno(%d): %s",
		     ((int*)(x->handle))[0], &t, (int)len, ret, errno, strerror(errno));
	ret = -1;
	break;
      }
    }
  }
  ygglog_debug("ipc_comm_send(%s): returning %d", x->name, ret);
  return ret;
};

/*! @brief Receive a message from an input comm.
  Receive a message smaller than YGG_MSG_MAX bytes from an input comm.
  @param[in] x comm_t* structure that message should be sent to.
  @param[out] data char ** pointer to allocated buffer where the message
  should be saved. This should be a malloc'd buffer if allow_realloc is 1.
  @param[in] len const size_t length of the allocated message buffer in bytes.
  @param[in] allow_realloc const int If 1, the buffer will be realloced if it
  is not large enought. Otherwise an error will be returned.
  @returns int -1 if message could not be received. Length of the received
  message if message was received.
*/
static inline int ipc_comm_recv(const comm_t *x, char **data, const size_t len,
				const int allow_realloc) {
  ygglog_debug("ipc_comm_recv(%s)", x->name);
  msgbuf_t t;
  t.mtype = 1;
  int ret = -1;
  int len_recv = -1;
  // Poll with IPC_NOWAIT, sleeping between attempts, until a message
  // arrives or msgrcv fails with something other than ENOMSG.
  while (1) {
    ret = msgrcv(((int*)(x->handle))[0], &t, YGG_MSG_MAX, 0, IPC_NOWAIT);
    if (ret == -1 && errno == ENOMSG) {
      ygglog_debug("ipc_comm_recv(%s): no input, sleep", x->name);
      usleep(YGG_SLEEP_TIME);
    } else {
      // BUGFIX: the received size is ret; the previous strlen(t.data) read
      // a buffer that msgrcv does not NUL-terminate.
      ygglog_debug("ipc_comm_recv(%s): received input: %d bytes, ret=%d",
		   x->name, ret, ret);
      break;
    }
  }
  if (ret <= 0) {
    // BUGFIX: pass the queue id for %d instead of a pointer.
    ygglog_debug("ipc_comm_recv: msgrecv(%d, %p, %d, 0, IPC_NOWAIT): %s",
		 ((int*)(x->handle))[0], &t, (int)YGG_MSG_MAX, strerror(errno));
    return -1;
  }
  // One extra byte for the terminating NUL appended below.
  len_recv = ret + 1;
  if (len_recv > (int)len) {
    if (allow_realloc) {
      ygglog_debug("ipc_comm_recv(%s): reallocating buffer from %d to %d bytes.",
		   x->name, (int)len, len_recv);
      // BUGFIX: assign realloc to a temporary so the caller's buffer is not
      // leaked (and not replaced by NULL) when realloc fails.
      char *new_data = (char*)realloc(*data, len_recv);
      if (new_data == NULL) {
	ygglog_error("ipc_comm_recv(%s): failed to realloc buffer.", x->name);
	return -1;
      }
      (*data) = new_data;
    } else {
      ygglog_error("ipc_comm_recv(%s): buffer (%d bytes) is not large enough for message (%d bytes)",
		   x->name, (int)len, len_recv);
      return -(len_recv - 1);
    }
  }
  // BUGFIX: copy only the ret bytes actually received (the old code copied
  // ret + 1 bytes, reading one uninitialized byte), then NUL-terminate.
  memcpy(*data, t.data, ret);
  (*data)[ret] = '\0';
  ret = len_recv - 1;
  ygglog_debug("ipc_comm_recv(%s): returns %d bytes", x->name, ret);
  return ret;
};

/*! @brief Send a large message to an output comm.
  Send a message larger than YGG_MSG_MAX bytes to an output comm by breaking
  it up between several smaller messages and sending initial message with the
  size of the message that should be expected. Must be partnered with
  ipc_comm_recv_nolimit for communication to make sense.
  @param[in] x comm_t* structure that message should be sent to.
  @param[in] data character pointer to message that should be sent.
  @param[in] len size_t length of message to be sent.
  @returns int 0 if send successful, -1 if send unsuccessful.
*/
static inline int ipc_comm_send_nolimit(const comm_t *x, const char *data, const size_t len){
  ygglog_debug("ipc_comm_send_nolimit(%s): %d bytes", x->name, len);
  int ret = -1;
  size_t msgsiz = 0;
  char msg[YGG_MSG_MAX];
  // Protocol: first send the total payload size as an ASCII decimal header
  // message, then the payload itself in chunks of at most YGG_MSG_MAX.
  sprintf(msg, "%ld", (long)(len));
  ret = ipc_comm_send(x, msg, strlen(msg));
  if (ret != 0) {
    ygglog_debug("ipc_comm_send_nolimit(%s): sending size of payload failed.", x->name);
    return ret;
  }
  size_t prev = 0;
  while (prev < len) {
    if ((len - prev) > YGG_MSG_MAX)
      msgsiz = YGG_MSG_MAX;
    else
      msgsiz = len - prev;
    ret = ipc_comm_send(x, data + prev, msgsiz);
    if (ret != 0) {
      ygglog_debug("ipc_comm_send_nolimit(%s): send interupted at %d of %d bytes.",
		   x->name, (int)prev, (int)len);
      break;
    }
    prev += msgsiz;
    ygglog_debug("ipc_comm_send_nolimit(%s): %d of %d bytes sent", x->name, prev, len);
  }
  if (ret == 0)
    ygglog_debug("ipc_comm_send_nolimit(%s): %d bytes completed", x->name, len);
  return ret;
};

// Definitions in the case where IPC libraries not installed
#else /*IPCINSTALLED*/

/*! @brief Print error message about IPC library not being installed. */
static inline void ipc_install_error() {
  ygglog_error("Compiler flag 'IPCINSTALLED' not defined so IPC bindings are disabled.");
};

/*! @brief Perform deallocation for basic communicator (no-IPC stub).
  @param[in] x comm_t* Pointer to communicator to deallocate.
  @returns int 1 if there is an error, 0 otherwise.  Always errors here. */
static inline int free_ipc_comm(comm_t *x) {
  // Prevent C4100 warning on windows by referencing param
#ifdef _WIN32
  UNUSED(x);
#endif
  ipc_install_error();
  return 1;
};

/*! @brief Create a new channel (no-IPC stub).
  @param[in] comm comm_t * Comm structure initialized with new_comm_base.
  @returns int -1 if the address could not be created.  Always -1 here. */
static inline int new_ipc_address(comm_t *comm) {
  // Prevent C4100 warning on windows by referencing param
#ifdef _WIN32
  UNUSED(comm);
#endif
  ipc_install_error();
  return -1;
};

/*! @brief Initialize a sysv_ipc communicator (no-IPC stub).
  @param[in] comm comm_t * Comm structure initialized with init_comm_base.
  @returns int -1 if the comm could not be initialized.  Always -1 here. */
static inline int init_ipc_comm(comm_t *comm) {
  // Prevent C4100 warning on windows by referencing param
#ifdef _WIN32
  UNUSED(comm);
#endif
  ipc_install_error();
  return -1;
};

/*! @brief Get number of messages in the comm (no-IPC stub).
  @param[in] x comm_t* Communicator to check.
  @returns int Number of messages. -1 indicates an error.  Always -1 here. */
static inline int ipc_comm_nmsg(const comm_t *x) {
  // Prevent C4100 warning on windows by referencing param
#ifdef _WIN32
  UNUSED(x);
#endif
  ipc_install_error();
  return -1;
};

/*! @brief Send a message to the comm (no-IPC stub).
  Send a message smaller than YGG_MSG_MAX bytes to an output comm. If the
  message is larger, it will not be sent.
  @param[in] x comm_t* structure that comm should be sent to.
  @param[in] data character pointer to message that should be sent.
  @param[in] len size_t length of message to be sent.
  @returns int 0 if send successful, -1 if send unsuccessful.  Always -1. */
static inline int ipc_comm_send(const comm_t *x, const char *data, const size_t len) {
  // Prevent C4100 warning on windows by referencing param
#ifdef _WIN32
  UNUSED(x);
  UNUSED(data);
  UNUSED(len);
#endif
  ipc_install_error();
  return -1;
};

/*! @brief Receive a message from an input comm (no-IPC stub).
  Receive a message smaller than YGG_MSG_MAX bytes from an input comm.
  @param[in] x comm_t* structure that message should be sent to.
  @param[out] data char ** pointer to allocated buffer where the message
  should be saved. This should be a malloc'd buffer if allow_realloc is 1.
  @param[in] len const size_t length of the allocated message buffer in bytes.
  @param[in] allow_realloc const int If 1, the buffer will be realloced if it
  is not large enought. Otherwise an error will be returned.
  @returns int -1 if message could not be received. Length of the received
  message if message was received.  Always -1 here. */
static inline int ipc_comm_recv(const comm_t *x, char **data, const size_t len,
				const int allow_realloc) {
  // Prevent C4100 warning on windows by referencing param
#ifdef _WIN32
  UNUSED(x);
  UNUSED(data);
  UNUSED(len);
  UNUSED(allow_realloc);
#endif
  ipc_install_error();
  return -1;
};

#endif /*IPCINSTALLED*/

#ifdef __cplusplus /* If this is a C++ compiler, end C linkage */
}
#endif

#endif /*YGGIPCCOMM_H_*/
cifradoHill.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

/* Hill cipher over a 3x3 key matrix: the input file is mapped to values
 * 0..25 (A..Z / a..z; a pair of negative bytes, i.e. a 2-byte UTF-8 char
 * such as 'ñ', maps to 13), queued, and encrypted three values at a time. */

/* A block of text read from the input file. */
typedef struct text {
    int size;    /* number of bytes in body */
    char *body;  /* raw file contents (not NUL-terminated) */
} Text;

/* Doubly linked FIFO node. */
typedef struct node {
    int data;
    struct node *next;
    struct node *prev;
} Node;

/* FIFO queue of plaintext values awaiting encryption. */
typedef struct queue {
    int size;
    Node *bottom;  /* dequeue end */
    Node *top;     /* enqueue end */
} Queue;

static Queue *queue = NULL;

/* Hill cipher key matrix. */
static int key[3][3] = {
    {1, 2, 3},
    {0, 4, 5},
    {1, 0, 6}
};

Queue *createStack();
void enqueue(int);
int dequeue();
Text *readFile(FILE *);
void charToInt(Text *);
int *cipherText();

int main(int argc, char *argv[])
{
    if (argc == 2) {
        /* BUGFIX: use the file named on the command line ("lorem.txt" was
         * hard-coded despite the argc check) and fail cleanly if it cannot
         * be opened instead of dereferencing a NULL FILE*. */
        FILE *fp = fopen(argv[1], "rb");
        if (fp == NULL) {
            printf("No se pudo abrir el archivo\n");
            return EXIT_FAILURE;
        }
        Text *ptr = readFile(fp);
        queue = createStack();
        charToInt(ptr);
        while (queue->size > 0) {
            int *block = cipherText();
            for (int idx = 0; idx < 3; idx++)
                printf("%c", (char)(block[idx]) + 65);  /* 0..25 -> 'A'..'Z' */
            free(block);
        }
        printf("\n");
        free(ptr->body);
        free(ptr);
        free(queue);  /* BUGFIX: the Queue struct itself was leaked */
        return EXIT_SUCCESS;
    } else {
        printf("Especifica el archivo a cifrar\n");
        return EXIT_FAILURE;
    }
}

/* Read the whole file into a freshly allocated Text block and close it. */
Text *readFile(FILE *ap)
{
    Text *block = (Text *)calloc(1, sizeof(Text));
    fseek(ap, 0, SEEK_END);
    block->size = (int)ftell(ap);
    rewind(ap);
    block->body = (char *)calloc(block->size, sizeof(char));
    fread(block->body, sizeof(char), block->size, ap);
    fclose(ap);
    return block;
}

/* Map file bytes to 0..25 and enqueue them; non-letter bytes are dropped. */
void charToInt(Text *block)
{
    for (int idx = 0; idx < block->size; idx++) {
        /* BUGFIX: guard idx + 1 so the last byte no longer triggers a read
         * one past the end of body. */
        if (idx + 1 < block->size &&
            block->body[idx] < 0 && block->body[idx + 1] < 0)
            enqueue(13);  /* two negative bytes: 2-byte UTF-8 char -> 'N' slot */
        else if (block->body[idx] > 64 && block->body[idx] < 91)
            enqueue((int)(block->body[idx] - 65));  /* 'A'..'Z' */
        else if (block->body[idx] > 96 && block->body[idx] < 123)
            enqueue((int)(block->body[idx] - 97));  /* 'a'..'z' */
    }
}

Queue *createStack()
{
    return (Queue *)calloc(1, sizeof(Queue));
}

/* Append a value at the top of the FIFO. */
void enqueue(int val)
{
    Node *node = (Node *)calloc(1, sizeof(Node));
    node->data = val;
    if (!queue->bottom) {
        queue->bottom = node;
        queue->top = node;
    } else {
        node->prev = queue->top;
        queue->top->next = node;
        queue->top = node;
    }
    queue->size++;
}

/* Remove and return the oldest value; an empty queue pads with 23 ('X'). */
int dequeue()
{
    if (queue->size > 0) {
        int val = queue->bottom->data;
        Node *tmp = queue->bottom;
        queue->bottom = queue->bottom->next;
        queue->size--;
        free(tmp);
        return val;
    } else
        return 23;
}

/* Encrypt the next 3 queued values: ret = (key . tmp) mod 26. */
int *cipherText()
{
    int tmp[3] = {0, 0, 0};
    int *ret = (int *)calloc(3, sizeof(int));
    for (int idx = 0; idx < 3; idx++)
        tmp[idx] = dequeue();
    /* BUGFIX: the original parallelized the inner jdx loop with
     * '#pragma omp for', so all threads accumulated into the same ret[idx]
     * concurrently (a data race).  Parallelizing over rows gives each
     * thread exclusive ownership of its ret[idx]; the hard-coded
     * omp_set_num_threads(3) is no longer needed. */
#pragma omp parallel for
    for (int idx = 0; idx < 3; idx++) {
        int acc = 0;
        for (int jdx = 0; jdx < 3; jdx++)
            acc += key[idx][jdx] * tmp[jdx];
        ret[idx] = acc % 26;
    }
    return ret;
}
O3Indirect2D.c
#include <mpi.h>
/* Auto-generated-looking halo-exchange kernel.  Per-rank prefix counts
 * (ec_c / ec_crem) and flattened index maps (ecell_map / ecell_maprem)
 * describe which edge->cell values each neighbor rank needs; the *_buf /
 * *_bufrem arrays are the per-rank staging buffers. */
extern int *cn_c;
extern int *ce_c;
extern int *ec_c;
extern int *cn_crem;
extern int *ce_crem;
extern int *ec_crem;
extern int *neighbor_map;
extern int *cedge_map;
extern int *ecell_map;
extern int *neighbor_maprem;
extern int *cedge_maprem;
extern int *ecell_maprem;
extern GVAL **neighbor_2Dbuf;
extern GVAL **neighbor_3Dbuf;
extern GVAL **cedge_2Dbuf;
extern GVAL **cedge_3Dbuf;
extern GVAL **ecell_2Dbuf;
extern GVAL **ecell_3Dbuf;
extern GVAL **neighbor_2Dbufrem;
extern GVAL **neighbor_3Dbufrem;
extern GVAL **cedge_2Dbufrem;
extern GVAL **cedge_3Dbufrem;
extern GVAL **ecell_2Dbufrem;
extern GVAL **ecell_3Dbufrem;
extern MPI_Request *mpi_send_requests;
extern MPI_Request *mpi_recv_requests;
/* Monotonically increasing MPI tag; bumped once per exchange phase so
 * messages from the two phases below cannot be confused. */
extern int comm_tag;
#include "grid.h"
/* Grid-variable descriptors: data_pointer.p2 is a 2D field indexed as
 * [block][element], p3 a 3D field indexed as [block][level][element].
 * NOTE(review): the same anonymous struct type is spelled out three times;
 * generated code — confirm all three stay in sync with the generator. */
extern struct {
    char *name;
    int loc;
    int dim;
    union {
	GVAL *restrict * restrict p2;
	GVAL *restrict * restrict * restrict p3;
    } data_pointer;
} *gv_temp;
extern struct {
    char *name;
    int loc;
    int dim;
    union {
	GVAL *restrict * restrict p2;
	GVAL *restrict * restrict * restrict p3;
    } data_pointer;
} *gv_ind2Dparam;
extern struct {
    char *name;
    int loc;
    int dim;
    union {
	GVAL *restrict * restrict p2;
	GVAL *restrict * restrict * restrict p3;
    } data_pointer;
} *gv_ind2Dvar;

void O3Indirect2D(GRID * g)
{
    {
	/* Phase 1: exchange the 2D field gv_ind2Dparam.  Remote maps hold
	 * (block, index) pairs (stride 2); local maps hold 5-tuples whose
	 * entries 3 and 4 are the destination (block, index). */
	{
	    comm_tag++;
	    for (int pn = 0; pn < g->mpi_world_size; pn++) {
		if (pn != g->mpi_rank) {
		    /* Pack the values rank pn needs from us. */
		    for (int i = 0; i < (ec_crem[pn] - (pn ? ec_crem[pn - 1] : 0)); i++) {
			ecell_2Dbufrem[pn][i] = gv_ind2Dparam->data_pointer.p2[ecell_maprem[(pn ? ec_crem[pn - 1] * 2 : 0) + 2 * i]][ecell_maprem[(pn ? ec_crem[pn - 1] * 2 : 0) + 2 * i + 1]];
		    }
		    MPI_Isend(ecell_2Dbufrem[pn], (ec_crem[pn] - (pn ? ec_crem[pn - 1] : 0)), MPI_FLOAT, pn, comm_tag, MPI_COMM_WORLD, &mpi_send_requests[pn]);
		    MPI_Irecv(ecell_2Dbuf[pn], (ec_c[pn] - (pn ? ec_c[pn - 1] : 0)), MPI_FLOAT, pn, comm_tag, MPI_COMM_WORLD, &mpi_recv_requests[pn]);
		}
	    }
	    /* NOTE(review): waits on 2 * world_size requests starting at
	     * mpi_send_requests — this assumes mpi_recv_requests is laid
	     * out contiguously after mpi_send_requests and that the
	     * pn == rank slots are initialized (e.g. MPI_REQUEST_NULL);
	     * confirm against the allocation site. */
	    MPI_Waitall(g->mpi_world_size * 2, mpi_send_requests, MPI_STATUSES_IGNORE);
	    /* Unpack received values into the local halo entries. */
	    for (int pn = 0; pn < g->mpi_world_size; pn++) {
		if (pn != g->mpi_rank) {
		    for (int i = 0; i < (ec_c[pn] - (pn ? ec_c[pn - 1] : 0)); i++) {
			gv_ind2Dparam->data_pointer.p2[ecell_map[(pn ? ec_c[pn - 1] * 5 : 0) + 5 * i + 3]][ecell_map[(pn ? ec_c[pn - 1] * 5 : 0) + 5 * i + 4]] = ecell_2Dbuf[pn][i];
		    }
		}
	    }
	}
	/* Phase 2: same exchange for the 3D field gv_temp, with g->height
	 * vertical levels packed contiguously per map entry. */
	{
	    comm_tag++;
	    for (int pn = 0; pn < g->mpi_world_size; pn++) {
		if (pn != g->mpi_rank) {
		    for (int i = 0; i < (ec_crem[pn] - (pn ? ec_crem[pn - 1] : 0)); i++) {
			for (int k = 0; k < g->height; k++)
			    ecell_3Dbufrem[pn][g->height * i + k] = gv_temp->data_pointer.p3[ecell_maprem[(pn ? ec_crem[pn - 1] * 2 : 0) + 2 * i]][k][ecell_maprem[(pn ? ec_crem[pn - 1] * 2 : 0) + 2 * i + 1]];
		    }
		    MPI_Isend(ecell_3Dbufrem[pn], (ec_crem[pn] - (pn ? ec_crem[pn - 1] : 0)) * g->height, MPI_FLOAT, pn, comm_tag, MPI_COMM_WORLD, &mpi_send_requests[pn]);
		    MPI_Irecv(ecell_3Dbuf[pn], (ec_c[pn] - (pn ? ec_c[pn - 1] : 0)) * g->height, MPI_FLOAT, pn, comm_tag, MPI_COMM_WORLD, &mpi_recv_requests[pn]);
		}
	    }
	    MPI_Waitall(g->mpi_world_size * 2, mpi_send_requests, MPI_STATUSES_IGNORE);
	    for (int pn = 0; pn < g->mpi_world_size; pn++) {
		if (pn != g->mpi_rank) {
		    for (int i = 0; i < (ec_c[pn] - (pn ? ec_c[pn - 1] : 0)); i++) {
			for (int k = 0; k < g->height; k++)
			    gv_temp->data_pointer.p3[ecell_map[(pn ? ec_c[pn - 1] * 5 : 0) + 5 * i + 3]][k][ecell_map[(pn ? ec_c[pn - 1] * 5 : 0) + 5 * i + 4]] = ecell_3Dbuf[pn][g->height * i + k];
		    }
		}
	    }
	}
	/* Select this rank's slice of edge blocks.  The repeated
	 * subexpression is the ceiling-divided blocks-per-rank count;
	 * min_block/max_block bound the locally owned block range. */
	size_t min_block = g->mpi_rank == (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : 0;
	size_t max_block = g->mpi_rank < (0) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) || g->mpi_rank > (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? 0 : g->mpi_rank == (g->eBlkCnt - 1) / (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) ? g->eBlkCnt % (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size) : (((g->eBlkCnt) + g->mpi_world_size - 1) / g->mpi_world_size);
	/* Per-edge stencil: difference of param * temp taken at the two
	 * cells adjacent to each edge (eCellBlk/eCellIdx give the cell's
	 * block and in-block index).  Blocks are independent, so the outer
	 * loop parallelizes safely. */
#pragma omp parallel for
	for (size_t block_index = (min_block); block_index < (max_block); block_index++) {
	    for (size_t height_index = (0); height_index < (g->height); height_index++) {
		for (size_t edge_index = (0); edge_index < (g->blkSize); edge_index++) {
		    gv_ind2Dvar->data_pointer.p3[(block_index)][(height_index)][(edge_index)] = gv_ind2Dparam->data_pointer.p2[(g->eCellBlk[(0)]->data_pointer.p2[(block_index)][(edge_index)])][(g->eCellIdx[(0)]->data_pointer.p2[(block_index)][(edge_index)])] * gv_temp->data_pointer.p3[(g->eCellBlk[(0)]->data_pointer.p2[(block_index)][(edge_index)])][(height_index)][(g->eCellIdx[(0)]->data_pointer.p2[(block_index)][(edge_index)])] - gv_ind2Dparam->data_pointer.p2[(g->eCellBlk[(1)]->data_pointer.p2[(block_index)][(edge_index)])][(g->eCellIdx[(1)]->data_pointer.p2[(block_index)][(edge_index)])] * gv_temp->data_pointer.p3[(g->eCellBlk[(1)]->data_pointer.p2[(block_index)][(edge_index)])][(height_index)][(g->eCellIdx[(1)]->data_pointer.p2[(block_index)][(edge_index)])];
		}
	    }
	}
    }
}
fixit.c
/******************************************************************************
 * FILE: fixit.c
 * This very simple program contains errors. Find them and fix.
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define N 500

int main (int argc, char *argv[])
{
  int nthreads, tid, i, j;
  double a[N][N];

  /* Fork a team of threads.
   * (BUGFIX: the original comment opened with a stray doubled slash-star.)
   * nthreads is shared (written only by thread 0); i, j, tid and the work
   * array a are private so each thread fills its own copy.
   * NOTE(review): a private double[500][500] is ~2 MB per thread stack —
   * raise OMP_STACKSIZE if worker threads crash. */
#pragma omp parallel shared(nthreads) private(i,j,tid,a)
  {
    /* Obtain/print thread info */
    tid = omp_get_thread_num();
    if (tid == 0)
      {
	nthreads = omp_get_num_threads();
	printf("Number of threads = %d\n", nthreads);
      }
    printf("Thread %d starting...\n", tid);

    /* Each thread works on its own private copy of the array */
    for (i=0; i<N; i++)
      for (j=0; j<N; j++)
	a[i][j] = tid + i + j;

    /* For confirmation */
    printf("Thread %d done. Last element= %lf\n",tid,a[N-1][N-1]);
    /* %d, %lf - print a decimal number and a long floating (double) number
       repectively */

  }  /* All threads join master thread and disband */

  /* BUGFIX: main now returns an explicit success status. */
  return 0;
}
openmp-ex06.c
#include <stdio.h>
#include <unistd.h>
#include <omp.h>

/* Demonstrates that omp_get_num_threads() / omp_get_thread_num() are only
 * meaningful inside a parallel region: outside one, the "team" consists of
 * the single initial thread. */
int main(void)
{
    int team_size, rank;

    /* Serial region: exactly one thread, id 0. */
    team_size = omp_get_num_threads();
    rank = omp_get_thread_num();
    printf ("\"You're all individuals!\" said %d of %d.\n", rank, team_size);

    /* You can also declare private variable(s) that shadow existing variables
     * with the private() clause of the openmp directive */
#pragma omp parallel private(team_size,rank)
    {
	rank = omp_get_thread_num();
	team_size = omp_get_num_threads();
	sleep(1);
	printf("\"Yes, we're all individuals!\" replied %d of %d, sleepily.\n",
	       rank, team_size);
    }

    /* Back in the serial region: a single thread again. */
    team_size = omp_get_num_threads();
    rank = omp_get_thread_num();
    printf ("\"I'm not,\" said %d of %d.\n", rank, team_size);

    return 0;
}
DRB064-outeronly2-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* Only the outmost loop can be parallelized.
The inner loop has loop carried true data dependence.
However, the loop is not parallelized so no race condition.
*/
#include "omprace.h"
#include <omp.h>

int n=100, m=100;
double b[100][100];

/* Row-wise sweep: b[i][j] = b[i][j-1] carries a true dependence along j, so
 * the inner loop must stay serial.  Rows are independent of one another, so
 * parallelizing only the outer i loop is race-free.  private(j) is essential:
 * without it the inner index would be shared among threads. */
void foo()
{
  int i,j;
#pragma omp parallel for private(j)
  for (i=0;i<n;i++)
    for (j=1;j<m;j++)  // Be careful about bounds of j
      b[i][j]=b[i][j-1];
}

/* Driver: omprace_init/fini bracket the kernel for the race-detection
 * harness (declared in omprace.h). */
int main()
{
  omprace_init();
  foo();
  omprace_fini();
  return 0;
}
dynamic_smagorinsky_utilities.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Jordi Cotela // // System includes #include <vector> #include <map> // External includes // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" #include "includes/element.h" #include "utilities/openmp_utils.h" #include "utilities/parallel_utilities.h" #include "utilities/geometry_utilities.h" #include "includes/cfd_variables.h" #include "fluid_dynamics_application_variables.h" #include "includes/global_pointer_variables.h" #ifndef KRATOS_DYNAMIC_SMAGORINSKY_UTILITIES_H_INCLUDED #define KRATOS_DYNAMIC_SMAGORINSKY_UTILITIES_H_INCLUDED namespace Kratos { ///@addtogroup FluidDynamicsApplication ///@{ ///@name Kratos Classes ///@{ /// Helper class to dynamically determine a value for the Smagorinsly parameter. /** This class uses the Variational Germano Identity to determine a value for the Smagorinsky parameter. This value is stored in the elemental variable C_SMAGORINSKY, the element implementation is responsible for using it. The ability to assign different values to different patches of elements (identified by the PATCH_INDEX variable) is supported, although it tends to produce unreliable results due to a 0/0 indetermination in patches with smooth velocity fields. This class is based in Oberai, A.A. and Wanderer, J., Variational formulation of the Germano identity for the Navier Stokes equations, Journal of Turbulence, 2005, vol 6. Note that the formulation described there requires a nested mesh. It takes the model part containing a coarse mesh as input and assumes that all elements will be subdivided before CalculateC() is called. Remember to call StoreCoarseMesh before refining the element, otherwise the coarse mesh will be lost. @see VMS for an element implementation that uses the Smagorinsky model. 
@see Local_Refine_Triangle_Mesh,Local_Refine_Tetrahedra_Mesh for the element refinement process. */ class DynamicSmagorinskyUtils { public: ///@name Life Cycle ///@{ /// Constructor /** @param rModelPart Reference to the model part containing the coarse mesh @param DomainSize Spatial dimension (2 or 3) */ DynamicSmagorinskyUtils(ModelPart& rModelPart, unsigned int DomainSize): mrModelPart(rModelPart), mDomainSize(DomainSize), mCoarseMesh(), mPatchIndices() {} /// Destructor ~DynamicSmagorinskyUtils() {} ///@} ///@name Operations ///@{ /// Store current mesh as coarse mesh. Call before refining. /** If you are refining more than once, this only has to be called before last refinement. */ void StoreCoarseMesh() { // Clear existing mesh (if any) mCoarseMesh.clear(); // Store current mesh for( ModelPart::ElementsContainerType::ptr_iterator itpElem = mrModelPart.Elements().ptr_begin(); itpElem != mrModelPart.Elements().ptr_end(); ++itpElem) { // (*itpElem)->GetValue(C_SMAGORINSKY) = 0.0; // Set the Smagorinsky parameter to zero for the coarse mesh (do this once to reset any input values) mCoarseMesh.push_back(*itpElem); } // Count the number of patches in the model (in parallel) const int NumThreads = ParallelUtilities::GetNumThreads(); OpenMPUtils::PartitionVector ElementPartition; OpenMPUtils::DivideInPartitions(mCoarseMesh.size(),NumThreads,ElementPartition); std::vector< std::vector<int> > LocalIndices(NumThreads); #pragma omp parallel { int k = OpenMPUtils::ThisThread(); ModelPart::ElementsContainerType::iterator ElemBegin = mCoarseMesh.begin() + ElementPartition[k]; ModelPart::ElementsContainerType::iterator ElemEnd = mCoarseMesh.begin() + ElementPartition[k+1]; for( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { this->AddNewIndex(LocalIndices[k],itElem->GetValue(PATCH_INDEX)); } } // Combine the partial lists and create a map for PATCH_INDEX -> Vector position unsigned int Counter = 0; std::pair<int, unsigned int> NewVal; std::pair< 
std::map<int, unsigned int>::iterator, bool > Result; for( std::vector< std::vector<int> >::iterator itList = LocalIndices.begin(); itList != LocalIndices.end(); ++itList ) { for( std::vector<int>::iterator itIndex = itList->begin(); itIndex != itList->end(); ++itIndex) { // Note that instering in map already sorts and checks for uniqueness NewVal.first = *itIndex; NewVal.second = Counter; Result = mPatchIndices.insert(NewVal); if (Result.second) ++Counter; } } } /// Provide a value for the Smagorinsky coefficient using the Variational Germano Identity void CalculateC() { // Update the velocity values for the terms that belong to the coarse mesh this->SetCoarseVel(); // Partitioning const int NumThreads = ParallelUtilities::GetNumThreads(); OpenMPUtils::PartitionVector CoarseElementPartition,FineElementPartition; OpenMPUtils::DivideInPartitions(mCoarseMesh.size(),NumThreads,CoarseElementPartition); OpenMPUtils::DivideInPartitions(mrModelPart.Elements().size(),NumThreads,FineElementPartition); // Initialize temporary containers unsigned int PatchNumber = mPatchIndices.size(); std::vector< std::vector<double> > GlobalPatchNum(NumThreads); // Numerator on each patch std::vector< std::vector<double> > GlobalPatchDen(NumThreads); // Denominator on each patch const double EnergyTol = 0.005; double TotalDissipation = 0; #pragma omp parallel reduction(+:TotalDissipation) { int k = OpenMPUtils::ThisThread(); // Initialize the iterator boundaries for this thread ModelPart::ElementsContainerType::iterator CoarseElemBegin = mCoarseMesh.begin() + CoarseElementPartition[k]; ModelPart::ElementsContainerType::iterator CoarseElemEnd = mCoarseMesh.begin() + CoarseElementPartition[k+1]; ModelPart::ElementsContainerType::iterator FineElemBegin = mrModelPart.ElementsBegin() + FineElementPartition[k]; ModelPart::ElementsContainerType::iterator FineElemEnd = mrModelPart.ElementsBegin() + FineElementPartition[k+1]; // Initialize some thread-local variables Vector LocalValues, 
LocalCoarseVel; Matrix LocalMassMatrix; ProcessInfo& rProcessInfo = mrModelPart.GetProcessInfo(); double Residual,Model; unsigned int PatchPosition; // Thread-local containers for the values in each patch std::vector<double>& rPatchNum = GlobalPatchNum[k]; std::vector<double>& rPatchDen = GlobalPatchDen[k]; rPatchNum.resize(PatchNumber,0.0);// Fill with zeros rPatchDen.resize(PatchNumber,0.0); if (mDomainSize == 2) { LocalValues.resize(9); LocalCoarseVel.resize(9); LocalMassMatrix.resize(9,9,false); array_1d<double,3> N; BoundedMatrix<double,3,2> DN_DX; BoundedMatrix<double,2,2> dv_dx; // Evaluate the N-S and model terms in each coarse element for( ModelPart::ElementsContainerType::iterator itElem = CoarseElemBegin; itElem != CoarseElemEnd; ++itElem) { PatchPosition = mPatchIndices[ itElem->GetValue(PATCH_INDEX) ]; this->GermanoTerms2D(*itElem,N,DN_DX,dv_dx,LocalValues,LocalCoarseVel,LocalMassMatrix,rProcessInfo,Residual,Model); rPatchNum[PatchPosition] += Residual; rPatchDen[PatchPosition] += Model; TotalDissipation += Residual; } // Now evaluate the corresponding terms in the fine mesh for( ModelPart::ElementsContainerType::iterator itElem = FineElemBegin; itElem != FineElemEnd; ++itElem) { // Deactivate Smagorinsky to compute the residual of galerkin+stabilization terms only itElem->GetValue(C_SMAGORINSKY) = 0.0; PatchPosition = mPatchIndices[ itElem->GetValue(PATCH_INDEX) ]; this->GermanoTerms2D(*itElem,N,DN_DX,dv_dx,LocalValues,LocalCoarseVel,LocalMassMatrix,rProcessInfo,Residual,Model); rPatchNum[PatchPosition] -= Residual; rPatchDen[PatchPosition] -= Model; } } else // mDomainSize == 3 { LocalValues.resize(16); LocalCoarseVel.resize(16); LocalMassMatrix.resize(16,16,false); array_1d<double,4> N; BoundedMatrix<double,4,3> DN_DX; BoundedMatrix<double,3,3> dv_dx; // Evaluate the N-S and model terms in each coarse element for( ModelPart::ElementsContainerType::iterator itElem = CoarseElemBegin; itElem != CoarseElemEnd; ++itElem) { PatchPosition = mPatchIndices[ 
itElem->GetValue(PATCH_INDEX) ]; this->GermanoTerms3D(*itElem,N,DN_DX,dv_dx,LocalValues,LocalCoarseVel,LocalMassMatrix,rProcessInfo,Residual,Model); rPatchNum[PatchPosition] += Residual; rPatchDen[PatchPosition] += Model; TotalDissipation += Residual; } // Now evaluate the corresponding terms in the fine mesh for( ModelPart::ElementsContainerType::iterator itElem = FineElemBegin; itElem != FineElemEnd; ++itElem) { // Deactivate Smagorinsky to compute the residual of galerkin+stabilization terms only itElem->GetValue(C_SMAGORINSKY) = 0.0; PatchPosition = mPatchIndices[ itElem->GetValue(PATCH_INDEX) ]; this->GermanoTerms3D(*itElem,N,DN_DX,dv_dx,LocalValues,LocalCoarseVel,LocalMassMatrix,rProcessInfo,Residual,Model); rPatchNum[PatchPosition] -= Residual; rPatchDen[PatchPosition] -= Model; } } } // Combine the results of each thread in position 0 for( std::vector< std::vector<double> >::iterator itNum = GlobalPatchNum.begin()+1, itDen = GlobalPatchDen.begin()+1; itNum != GlobalPatchNum.end(); ++itNum, ++itDen) { for( std::vector<double>::iterator TotalNum = GlobalPatchNum[0].begin(), LocalNum = itNum->begin(), TotalDen = GlobalPatchDen[0].begin(), LocalDen = itDen->begin(); TotalNum != GlobalPatchNum[0].end(); ++TotalNum,++LocalNum,++TotalDen,++LocalDen) { *TotalNum += *LocalNum; *TotalDen += *LocalDen; } } // Compute the smagorinsky coefficient for each patch by combining the values from each thread std::vector<double> PatchC(PatchNumber); double NumTol = EnergyTol * fabs(TotalDissipation); for( std::vector<double>::iterator itNum = GlobalPatchNum[0].begin(), itDen = GlobalPatchDen[0].begin(), itC = PatchC.begin(); itC != PatchC.end(); ++itNum, ++itDen, ++itC) { // If the dissipation we are "missing" by not considering Smagorinsky is small, do not use Smagorinsky (this avoids a division by ~0, as the denominator should go to zero too) if ( (fabs(*itNum) < NumTol) )//|| (fabs(*itDen) < 1.0e-12) ) *itC = 0.0; else *itC = sqrt( 0.5 * fabs( *itNum / *itDen ) ); } // 
Finally, assign each element its new smagorinsky value #pragma omp parallel { int k = OpenMPUtils::ThisThread(); ModelPart::ElementsContainerType::iterator ElemBegin = mrModelPart.ElementsBegin() + FineElementPartition[k]; ModelPart::ElementsContainerType::iterator ElemEnd = mrModelPart.ElementsBegin() + FineElementPartition[k+1]; unsigned int PatchPosition; for( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { PatchPosition = mPatchIndices[ itElem->GetValue(PATCH_INDEX) ]; itElem->GetValue(C_SMAGORINSKY) = PatchC[PatchPosition]; } } } /// For the bridge analysis problem, correct the boundary flag after the refinement. /** Remember to run this AFTER EACH REFINEMENT STEP Possible values for the variable: 1.0 inlet, 2.0 bridge surface, 3.0 outlet, 0.0 otherwise @param rThisVariable The Kratos variable used to identify the boundary */ void CorrectFlagValues(Variable<double>& rThisVariable = FLAG_VARIABLE) { // Loop over coarse mesh to evaluate all terms that do not involve the fine mesh const int NumThreads = ParallelUtilities::GetNumThreads(); OpenMPUtils::PartitionVector NodePartition; OpenMPUtils::DivideInPartitions(mrModelPart.NumberOfNodes(),NumThreads,NodePartition); #pragma omp parallel { int k = OpenMPUtils::ThisThread(); ModelPart::NodeIterator NodesBegin = mrModelPart.NodesBegin() + NodePartition[k]; ModelPart::NodeIterator NodesEnd = mrModelPart.NodesBegin() + NodePartition[k+1]; double Value0, Value1; for( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { if( itNode->GetValue(FATHER_NODES).size() == 2 ) // If the node is refined { Value0 = itNode->GetValue(FATHER_NODES)[0].FastGetSolutionStepValue(rThisVariable); Value1 = itNode->GetValue(FATHER_NODES)[1].FastGetSolutionStepValue(rThisVariable); if( Value0 != Value1 ) // If this node is problematic { if ( Value0 == 0.0 || Value1 == 0.0 ) { // if either of the parents is not on the boundary, this node is not on the boundary 
itNode->FastGetSolutionStepValue(rThisVariable) = 0.0; } /* All remaining cases are unlikely in well-posed problems, I'm arbitrarily giving priority to the outlet, so that the node is only inlet or bridge surface if both parents are */ else if( Value0 == 3.0 ) { itNode->FastGetSolutionStepValue(rThisVariable) = Value0; } else if( Value1 == 3.0 ) { // The node is only bridge surface if both parents are itNode->FastGetSolutionStepValue(rThisVariable) = Value1; } else // Default behaviour: Parent 0 takes precedence { itNode->FastGetSolutionStepValue(rThisVariable) = Value0; } } } } } } ///@} private: ///@name Member Variables ///@{ /// ModelPart of the fluid problem ModelPart& mrModelPart; /// Spatial dimenstion unsigned int mDomainSize; /// Container for the coarse mesh (the fine mesh is stored by the model part) ModelPart::ElementsContainerType mCoarseMesh; /// A map relating patch indices to positions in the internal storage arrays std::map<int, unsigned int> mPatchIndices; ///@name Private Operations ///@{ /// Calculate the "Coarse Mesh" velocity /** The operations on the coarse mesh are evaluated on the fine mesh, but using an averaged velocity on the nodes that only exist on the fine mesh. Velocity gradients calculated on the fine mesh using this average velocity will be equal to those that would be obtained using the coarse mesh. This function assigns the "coarse" velocity value to all nodes */ void SetCoarseVel() { /* Note: This loop can't be parallelized, as we are relying on the fact that refined nodes are at the end of the list and their parents will be updated before the refined nodes are reached. 
There is an alternative solution (always calculate the coarse mesh velocity from the historic database) which can be parallelized but won't work for multiple levels of refinement */ for( ModelPart::NodeIterator itNode = mrModelPart.NodesBegin(); itNode != mrModelPart.NodesEnd(); ++itNode) { if( itNode->GetValue(FATHER_NODES).size() == 2 ) { Node<3>& rParent1 = itNode->GetValue(FATHER_NODES)[0]; Node<3>& rParent2 = itNode->GetValue(FATHER_NODES)[1]; itNode->GetValue(COARSE_VELOCITY) = 0.5 * ( rParent1.FastGetSolutionStepValue(VELOCITY) + rParent2.FastGetSolutionStepValue(VELOCITY) ); } else { itNode->GetValue(COARSE_VELOCITY) = itNode->FastGetSolutionStepValue(VELOCITY); } } } /// Return the Galerkin (+stabilization) and Model terms for this element (2D version) void GermanoTerms2D(Element& rElem, array_1d<double,3>& rShapeFunc, BoundedMatrix<double,3,2>& rShapeDeriv, BoundedMatrix<double,2,2>& rGradient, Vector& rNodalResidualContainer, Vector& rNodalVelocityContainer, Matrix& rMassMatrix, ProcessInfo& rProcessInfo, double& rResidual, double& rModel) { const double Dim = 2; const double NumNodes = 3; // Initialize double Area; double Density = 0.0; rGradient = ZeroMatrix(Dim,Dim); rResidual = 0.0; rModel = 0.0; // Calculate the residual this->CalculateResidual(rElem,rMassMatrix,rNodalVelocityContainer,rNodalResidualContainer,rProcessInfo); // We use rNodalVelocityContainer as an auxiliaty variable this->GetCoarseVelocity2D(rElem,rNodalVelocityContainer); for( Vector::iterator itRHS = rNodalResidualContainer.begin(), itVel = rNodalVelocityContainer.begin(); itRHS != rNodalResidualContainer.end(); ++itRHS, ++itVel) rResidual += (*itVel) * (*itRHS); // Calculate the model term GeometryUtils::CalculateGeometryData( rElem.GetGeometry(), rShapeDeriv, rShapeFunc, Area); // Compute Grad(u), Density and < Grad(w), Grad(u) > for (unsigned int j = 0; j < NumNodes; ++j) // Columns of <Grad(Ni),Grad(Nj)> { Density += rShapeFunc[j] * 
rElem.GetGeometry()[j].FastGetSolutionStepValue(DENSITY); const array_1d< double,3 >& rNodeVel = rElem.GetGeometry()[j].FastGetSolutionStepValue(VELOCITY); // Nodal velocity for (unsigned int i = 0; i < NumNodes; ++i) // Rows of <Grad(Ni),Grad(Nj)> { const array_1d< double,3 >& rNodeTest = rElem.GetGeometry()[i].GetValue(COARSE_VELOCITY); // Test function (particularized to coarse velocity) for (unsigned int k = 0; k < Dim; ++k) // Space Dimensions rModel += rNodeTest[k] * rShapeDeriv(i,k) * rShapeDeriv(j,k) * rNodeVel[k]; } for (unsigned int m = 0; m < Dim; ++m) // Calculate symmetric gradient { for (unsigned int n = 0; n < m; ++n) // Off-diagonal rGradient(m,n) += 0.5 * (rShapeDeriv(j,n) * rNodeVel[m] + rShapeDeriv(j,m) * rNodeVel[n]); // Symmetric gradient, only lower half is written rGradient(m,m) += rShapeDeriv(j,m) * rNodeVel[m]; // Diagonal } } rModel *= Area; // To this point, rModel contains the integral over the element of Grad(U_coarse):Grad(U) // Norm[ Grad(u) ] double SqNorm = 0.0; for (unsigned int i = 0; i < Dim; ++i) { for (unsigned int j = 0; j < i; ++j) SqNorm += 2.0 * rGradient(i,j) * rGradient(i,j); // Adding off-diagonal terms (twice, as matrix is symmetric) SqNorm += rGradient(i,i) * rGradient(i,i); // Diagonal terms } // "Fixed" part of Smagorinsky viscosity: Density * FilterWidth^2 * Norm(SymmetricGrad(U)). 
2*C^2 is accounted for in the caller function const double sqH = 2*Area; rModel *= Density * sqH * sqrt(SqNorm); } /// Return the Galerkin (+stabilization) and Model terms for this element (3D version) void GermanoTerms3D(Element& rElem, array_1d<double,4>& rShapeFunc, BoundedMatrix<double,4,3>& rShapeDeriv, BoundedMatrix<double,3,3>& rGradient, Vector& rNodalResidualContainer, Vector& rNodalVelocityContainer, Matrix& rMassMatrix, ProcessInfo& rProcessInfo, double& rResidual, double& rModel) { const double Dim = 3; const double NumNodes = 4; // Initialize double Volume; double Density = 0.0; rGradient = ZeroMatrix(Dim,Dim); rResidual = 0.0; rModel = 0.0; // Calculate the residual this->CalculateResidual(rElem,rMassMatrix,rNodalVelocityContainer,rNodalResidualContainer,rProcessInfo); // We use rNodalVelocityContainer as an auxiliaty variable this->GetCoarseVelocity3D(rElem,rNodalVelocityContainer); for( Vector::iterator itRHS = rNodalResidualContainer.begin(), itVel = rNodalVelocityContainer.begin(); itRHS != rNodalResidualContainer.end(); ++itRHS, ++itVel) rResidual += (*itVel) * (*itRHS); // Calculate the model term GeometryUtils::CalculateGeometryData( rElem.GetGeometry(), rShapeDeriv, rShapeFunc, Volume); // Compute Grad(u), Density and < Grad(w), Grad(u) > for (unsigned int j = 0; j < NumNodes; ++j) // Columns of <Grad(Ni),Grad(Nj)> { Density += rShapeFunc[j] * rElem.GetGeometry()[j].FastGetSolutionStepValue(DENSITY); const array_1d< double,3 >& rNodeVel = rElem.GetGeometry()[j].FastGetSolutionStepValue(VELOCITY); // Nodal velocity for (unsigned int i = 0; i < NumNodes; ++i) // Rows of <Grad(Ni),Grad(Nj)> { const array_1d< double,3 >& rNodeTest = rElem.GetGeometry()[i].GetValue(COARSE_VELOCITY); // Test function (particularized to coarse velocity) for (unsigned int k = 0; k < Dim; ++k) // Space Dimensions rModel += rNodeTest[k] * rShapeDeriv(i,k) * rShapeDeriv(j,k) * rNodeVel[k]; } for (unsigned int m = 0; m < Dim; ++m) // Calculate symmetric gradient { for 
(unsigned int n = 0; n < m; ++n) // Off-diagonal rGradient(m,n) += 0.5 * (rShapeDeriv(j,n) * rNodeVel[m] + rShapeDeriv(j,m) * rNodeVel[n]); // Symmetric gradient, only lower half is written rGradient(m,m) += rShapeDeriv(j,m) * rNodeVel[m]; // Diagonal } } rModel *= Volume; // To this point, rModel contains the integral over the element of Grad(U_coarse):Grad(U) // Norm[ Symmetric Grad(u) ] = ( 2 * Sij * Sij )^(1/2), we compute the Sij * Sij part in the following loop: double SqNorm = 0.0; for (unsigned int i = 0; i < Dim; ++i) { for (unsigned int j = 0; j < i; ++j) SqNorm += 2.0 * rGradient(i,j) * rGradient(i,j); // Adding off-diagonal terms (twice, as matrix is symmetric) SqNorm += rGradient(i,i) * rGradient(i,i); // Diagonal terms } const double cubeH = 6*Volume; rModel *= Density * pow(cubeH, 2.0/3.0) * sqrt(2.0 * SqNorm); } /// Equivalent to VMS2DSmagorinsky::GetFirstDerivativesVector(), using the velocity evaluated on the coarse mesh void GetCoarseVelocity2D(Element& rElement, Vector& rVar) { unsigned int LocalIndex = 0; const Element::GeometryType& rGeom = rElement.GetGeometry(); for (unsigned int itNode = 0; itNode < 3; ++itNode) { const array_1d< double,3>& rCoarseVel = rGeom[itNode].GetValue(COARSE_VELOCITY); rVar[LocalIndex++] = rCoarseVel[0]; rVar[LocalIndex++] = rCoarseVel[1]; rVar[LocalIndex++] = 0.0; // Pressure Dof } } /// Equivalent to VMS3DSmagorinsky::GetFirstDerivativesVector(), using the velocity evaluated on the coarse mesh void GetCoarseVelocity3D(Element& rElement, Vector& rVar) { unsigned int LocalIndex = 0; const Element::GeometryType& rGeom = rElement.GetGeometry(); for (unsigned int itNode = 0; itNode < 4; ++itNode) { const array_1d< double,3>& rCoarseVel = rGeom[itNode].GetValue(COARSE_VELOCITY); rVar[LocalIndex++] = rCoarseVel[0]; rVar[LocalIndex++] = rCoarseVel[1]; rVar[LocalIndex++] = rCoarseVel[2]; rVar[LocalIndex++] = 0.0; // Pressure Dof } } /// Call the element's member functions to obtain its residual void 
CalculateResidual(Element& rElement, Matrix& rMassMatrix, ///@todo This matrix and the next vector should be transformed to static members once we find a threadsafe way to do so Vector& rAuxVector, Vector& rResidual, const ProcessInfo& rCurrentProcessInfo) { const auto& r_const_elem_ref = rElement; rElement.InitializeNonLinearIteration(rCurrentProcessInfo); // Dynamic stabilization terms rElement.CalculateRightHandSide(rResidual,rCurrentProcessInfo); // Dynamic Terms rElement.CalculateMassMatrix(rMassMatrix,rCurrentProcessInfo); r_const_elem_ref.GetSecondDerivativesVector(rAuxVector,0); noalias(rResidual) -= prod(rMassMatrix,rAuxVector); // Velocity Terms rElement.CalculateLocalVelocityContribution(rMassMatrix,rResidual,rCurrentProcessInfo); // Note that once we are here, we no longer need the mass matrix } /// Check if a patch index is known void AddNewIndex( std::vector<int>& rIndices, int ThisIndex ) { bool IsNew = true; for( std::vector<int>::iterator itIndex = rIndices.begin(); itIndex != rIndices.end(); ++itIndex) { if( ThisIndex == *itIndex) { IsNew = false; break; } } if (IsNew) rIndices.push_back(ThisIndex); } ///@} // Private operations }; ///@} Kratos classes ///@} Application group } // namespace Kratos #endif /* KRATOS_DYNAMIC_SMAGORINSKY_UTILITIES_H_INCLUDED */
fixed_version2.c
#include<math.h> #include <stdio.h> #include <stdlib.h> int main(){ int sum = 0; int DATA_MAG = 100; int H[100]; int LUT[100]; int scale_factor = 10; // Gets the number of OpenMP threads used char *str_omp_threads = getenv("OMP_NUM_THREADS"); int omp_threads = atoi(str_omp_threads); #pragma omp parallel for for (int i =0; i < DATA_MAG;i++) { H[i] = i; } // Creates an array equal to the number of threads int *thread_base_sum = (int*) malloc(omp_threads*sizeof(sum)); for (int j=0; j < omp_threads; j++) { // Gauss' addition formula int final_N = j*(DATA_MAG/omp_threads); // Removes the number itself thread_base_sum[j] = final_N*(final_N + 1)/2 - final_N; } #pragma omp parallel for // Divides the work to each thread for (int j=0; j < omp_threads; j++) { int sum = thread_base_sum[j]; // Last thread also adds the last elements if (j == (omp_threads-1)) { for (int k=j*(DATA_MAG/omp_threads); k < DATA_MAG; k++) { //printf("%d\n", k); sum += H[k]; LUT[k] = sum*scale_factor; } } else { for (int k=j*(DATA_MAG/omp_threads); k < (j+1)*(DATA_MAG/omp_threads); k++) { //printf("%d\n", k); sum += H[k]; LUT[k] = sum*scale_factor; } } } for (int i = 0; i < 100; i++) { printf("%d\n",LUT[i]); } free(thread_base_sum); return 0; }
if-clause.c
/* * if-clause.c * * Created on: 28/04/2014 * Author: Carlos de la Torre */ #include <stdio.h> #include <stdlib.h> #ifdef _OPENMP #include <omp.h> #else #define omp_get_thread_num() 0 #endif int main(int argc, char **argv) { int i, n = 20, tid; int a[n], suma = 0, sumalocal; if (argc < 2) { fprintf(stderr, "[ERROR]-Falta iteraciones\n"); exit(-1); } n = atoi(argv[1]); if (n > 20) n = 20; for (i = 0; i < n; i++) { a[i] = i; } #pragma omp parallel if(omp_get_thread_num()>4) default(none) private(sumalocal,tid) shared(a,suma,n) { sumalocal = 0; tid = omp_get_thread_num(); #pragma omp for private(i) schedule(static) nowait for (i = 0; i < n; i++) { sumalocal += a[i]; printf(" thread %d suma de a[%d]=%d sumalocal=%d \n", tid, i, a[i], sumalocal); } #pragma omp atomic suma += sumalocal; #pragma omp barrier #pragma omp master printf(" thread master=%d imprime suma=%d\n", tid, suma); } return 0; }
GB_unop__identity_fc32_uint32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__identity_fc32_uint32)
// op(A') function: GB (_unop_tran__identity_fc32_uint32)

// C type: GxB_FC32_t
// A type: uint32_t
// cast: GxB_FC32_t cij = GxB_CMPLXF ((float) (aij), 0)
// unaryop: cij = aij

// Although the operator is "identity", applying it here still typecasts each
// uint32_t entry of A to a single-precision complex entry of C (imaginary
// part zero).

#define GB_ATYPE \
    uint32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FC32 || GxB_NO_UINT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_fc32_uint32)
(
    GxB_FC32_t *Cx,             // Cx and Ax may be aliased
    const uint32_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // dense/sparse case: every one of the anz entries is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint32_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip entries not present in the bitmap
            if (!Ab [p]) continue ;
            uint32_t aij = Ax [p] ;
            GxB_FC32_t z = GxB_CMPLXF ((float) (aij), 0) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_fc32_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template does the work, using the macros above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
expected_output.c
#include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> #include <polybench.h> #include "gemm.h" /** * This version is stamped on May 10, 2016 * * Contact: * Louis-Noel Pouchet <pouchet.ohio-state.edu> * Tomofumi Yuki <tomofumi.yuki.fr> * * Web address: http://polybench.sourceforge.net */ /*gemm.c: this file is part of PolyBench/C*/ /*Include polybench common header.*/ /*Include benchmark-specific header.*/ /*Array initialization.*/ static void init_array(int ni, int nj, int nk, double *alpha, double *beta, double C[1000][1100], double A[1000][1200], double B[1200][1100]) { int i, j; *alpha = 1.5; *beta = 1.2; for(i = 0; i < ni; i++) for(j = 0; j < nj; j++) C[i][j] = (double) ((i * j + 1) % ni) / ni; for(i = 0; i < ni; i++) for(j = 0; j < nk; j++) A[i][j] = (double) (i * (j + 1) % nk) / nk; for(i = 0; i < nk; i++) for(j = 0; j < nj; j++) B[i][j] = (double) (i * (j + 2) % nj) / nj; } /*DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output.*/ static void print_array(int ni, int nj, double C[1000][1100]) { int i, j; fprintf(stderr, "==BEGIN DUMP_ARRAYS==\n"); fprintf(stderr, "begin dump: %s", "C"); for(i = 0; i < ni; i++) for(j = 0; j < nj; j++) { if((i * ni + j) % 20 == 0) fprintf(stderr, "\n"); fprintf(stderr, "%0.2lf ", C[i][j]); } fprintf(stderr, "\nend dump: %s\n", "C"); fprintf(stderr, "==END DUMP_ARRAYS==\n"); } /*Main computational kernel. 
The whole function will be timed, including the call and return.*/ static void kernel_gemm(int ni, int nj, int nk, double alpha, double beta, double C[1000][1100], double A[1000][1200], double B[1200][1100]) { int i, j, k; #pragma omp parallel for default(shared) private(i, j, k) firstprivate(ni, nj, beta, nk, alpha, A, B) for(i = 0; i < ni; i++) { // #pragma omp parallel for default(shared) private(j) firstprivate(nj, i, beta) for(j = 0; j < nj; j++) C[i][j] *= beta; // #pragma omp parallel for default(shared) private(k, j) firstprivate(nk, nj, alpha, i, A, B) for(k = 0; k < nk; k++) { // #pragma omp parallel for default(shared) private(j) firstprivate(nj, alpha, i, k, A, B) for(j = 0; j < nj; j++) C[i][j] += alpha * A[i][k] * B[k][j]; } } } int main(int argc, char **argv) { /*Retrieve problem size.*/ int ni = 1000; int nj = 1100; int nk = 1200; /*Variable declaration/allocation.*/ double alpha; double beta; double (*C)[1000][1100]; C = (double (*)[1000][1100]) polybench_alloc_data((1000 + 0) * (1100 + 0), sizeof(double)); ; double (*A)[1000][1200]; A = (double (*)[1000][1200]) polybench_alloc_data((1000 + 0) * (1200 + 0), sizeof(double)); ; double (*B)[1200][1100]; B = (double (*)[1200][1100]) polybench_alloc_data((1200 + 0) * (1100 + 0), sizeof(double)); ; /*Initialize array(s).*/ init_array(ni, nj, nk, &alpha, &beta, *C, *A, *B); /*Start timer.*/ ; /*Run kernel.*/ kernel_gemm(ni, nj, nk, alpha, beta, *C, *A, *B); /*Stop and print timer.*/ ; ; /*Prevent dead-code elimination. All live-out data must be printed by the function call in argument.*/ if(argc > 42 && !strcmp(argv[0], "")) print_array(ni, nj, *C); /*Be clean.*/ free((void *) C); ; free((void *) A); ; free((void *) B); ; return 0; }
GB_AxB_flopcount.c
//------------------------------------------------------------------------------
// GB_AxB_flopcount: compute flops for C=A*B, C<M>=A*B, or C<!M>=A*B
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// On input, A, B, and M (optional) are matrices for C=A*B, C<M>=A*B, or
// C<!M>=A*B.  The flop count for each B(:,j) is computed, and returned as a
// cumulative sum.  This function is CSR/CSC agnostic, but for simplicity of
// this description, assume A and B are both CSC matrices, so that ncols(A) ==
// nrows(B).  For both CSR and CSC, A->vdim == B->vlen holds.  A and/or B may
// be hypersparse, in any combination.

// Bflops has size (B->nvec)+1, for both standard and hypersparse B.  Let
// n=B->vdim be the column dimension of B (that is, B is m-by-n).

// If B is a standard CSC matrix then Bflops has size n+1 == B->nvec+1, and on
// output, Bflops [j] is the # of flops required to compute C (:, 0:j-1).  B->h
// is NULL, and is implicitly the vector 0:(n-1).

// If B is hypersparse, then let Bh = B->h.  Its size is B->nvec, and j = Bh
// [kk] is the (kk)th column in the data structure for B.  C will also be
// hypersparse, and only C(:,Bh) will be computed (C may have fewer non-empty
// columns than B).  On output, Bflops [kk] is the number of needed flops to
// compute C (:, Bh [0:kk-1]).

// In both cases, Bflops [0] = 0, and Bflops [B->nvec] = total number of flops.
// The size of Bflops is B->nvec+1 so that it has the same size as B->p.  The
// first entry of B->p and Bflops are both zero.  This allows B to be sliced
// either by # of entries in B (by slicing B->p) or by the flop count required
// (by slicing Bflops).

// This algorithm does not look at the values of M, A, or B, just their
// patterns.  The flop count of C=A*B, C<M>=A*B, or C<!M>=A*B is computed for a
// saxpy-based method; the work for A'*B for the dot product method is not
// computed.

// The algorithm scans all nonzeros in B.  It only scans at most the min and
// max (first and last) row indices in A and M (if M is present).  If A and M
// are not hypersparse, the time taken is O(nnz(B)+n).  If all matrices are
// hypersparse, the time is O(nnz(B)*log(h)) where h = max # of vectors present
// in A and M.  In pseudo-MATLAB, and assuming B is in standard (not
// hypersparse) form:

/*
   [m n] = size (B) ;
   Bflops = zeros (1,n+1) ;        % (set to zero in the caller)
   Mwork = 0 ;
   for each column j in B:
       if (B (:,j) is empty) continue ;
       mjnz = nnz (M (:,j))
       if (M is present, not complemented, and M (:,j) is empty) continue ;
       im_first = min row index of nonzeros in M(:,j)
       im_last = max row index of nonzeros in M(:,j)
       Bflops (j) = mjnz if M present, to scatter M(:,j) (M or !M case)
       Mwork += mjnz
       for each k where B (k,j) is nonzero:
           aknz = nnz (A (:,k))
           if (aknz == 0) continue ;
           alo = min row index of nonzeros in A(:,k)
           ahi = max row index of nonzeros in A(:,k)
           if (M is present and not complemented)
               if (intersection (alo:ahi, im_first:im_last) empty) continue
           end
           % numerical phase will compute: C(:,j)<#M(:,j)> += A(:,k)*B(k,j)
           % where #M is no mask, M, or !M.  This typically takes aknz flops,
           % or with a binary search if nnz(M(:,j)) << nnz(A(:,k)).
           Bflops (j) += aknz
       end
   end
*/

#include "GB_mxm.h"
#include "GB_ek_slice.h"
#include "GB_bracket.h"

// Free the task slicing and the per-task workspace; safe to call with any of
// the pointers still NULL (used on both error and success paths).
#define GB_FREE_WORK                                                        \
{                                                                           \
    GB_ek_slice_free (&pstart_slice, &kfirst_slice, &klast_slice, ntasks) ; \
    GB_FREE_MEMORY (Wfirst, ntasks, sizeof (int64_t)) ;                     \
    GB_FREE_MEMORY (Wlast, ntasks, sizeof (int64_t)) ;                      \
}

GrB_Info GB_AxB_flopcount
(
    int64_t *Mwork,             // amount of work to handle the mask M
    int64_t *Bflops,            // size B->nvec+1 and all zero
    const GrB_Matrix M,         // optional mask matrix
    const bool Mask_comp,       // if true, mask is complemented
    const GrB_Matrix A,
    const GrB_Matrix B,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT_MATRIX_OK_OR_NULL (M, "M for flop count A*B", GB0) ;
    ASSERT_MATRIX_OK (A, "A for flop count A*B", GB0) ;
    ASSERT_MATRIX_OK (B, "B for flop count A*B", GB0) ;
    ASSERT (!GB_PENDING (M)) ; ASSERT (!GB_ZOMBIES (M)) ;
    ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;
    ASSERT (!GB_PENDING (B)) ; ASSERT (!GB_ZOMBIES (B)) ;
    ASSERT (A->vdim == B->vlen) ;
    ASSERT (Bflops != NULL) ;
    ASSERT (Mwork != NULL) ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    int64_t bnz = GB_NNZ (B) ;
    int64_t bnvec = B->nvec ;

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (bnz + bnvec, chunk, nthreads_max) ;

    #ifdef GB_DEBUG
    // Bflops must be set to zero in the caller
    for (int64_t kk = 0 ; kk <= bnvec ; kk++)
    {
        ASSERT (Bflops [kk] == 0) ;
    }
    #endif

    //--------------------------------------------------------------------------
    // get the mask, if present
    //--------------------------------------------------------------------------

    bool mask_is_M = (M != NULL && !Mask_comp) ;
    const int64_t *GB_RESTRICT Mh = NULL ;
    const int64_t *GB_RESTRICT Mp = NULL ;
    const int64_t *GB_RESTRICT Mi = NULL ;
    int64_t mnvec = 0 ;
    bool M_is_hyper = GB_IS_HYPER (M) ;
    if (M != NULL)
    {
        Mh = M->h ;
        Mp = M->p ;
        Mi = M->i ;
        mnvec = M->nvec ;
    }

    //--------------------------------------------------------------------------
    // get A and B
    //--------------------------------------------------------------------------

    const int64_t *GB_RESTRICT Ah = A->h ;
    const int64_t *GB_RESTRICT Ap = A->p ;
    const int64_t *GB_RESTRICT Ai = A->i ;
    int64_t anvec = A->nvec ;
    bool A_is_hyper = GB_IS_HYPER (A) ;

    const int64_t *GB_RESTRICT Bh = B->h ;
    const int64_t *GB_RESTRICT Bp = B->p ;
    const int64_t *GB_RESTRICT Bi = B->i ;
    bool B_is_hyper = GB_IS_HYPER (B) ;

    //--------------------------------------------------------------------------
    // construct the parallel tasks
    //--------------------------------------------------------------------------

    // taskid does entries pstart_slice [taskid] to pstart_slice [taskid+1]-1
    // and vectors kfirst_slice [taskid] to klast_slice [taskid].  The first
    // and last vectors may be shared with prior slices and subsequent slices.

    // Wfirst [taskid] / Wlast [taskid] hold this task's partial flop counts
    // for its (possibly shared) first and last vectors; they are combined
    // into Bflops in the reduction pass below.
    int64_t *GB_RESTRICT Wfirst = NULL ;    // size ntasks
    int64_t *GB_RESTRICT Wlast = NULL ;     // size ntasks

    int ntasks = (nthreads == 1) ? 1 : (64 * nthreads) ;
    ntasks = GB_IMIN (ntasks, bnz) ;
    ntasks = GB_IMAX (ntasks, 1) ;
    int64_t *pstart_slice, *kfirst_slice, *klast_slice ;
    if (!GB_ek_slice (&pstart_slice, &kfirst_slice, &klast_slice, B, ntasks))
    {
        // out of memory
        GB_FREE_WORK ;
        return (GB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    GB_MALLOC_MEMORY (Wfirst, ntasks, sizeof (int64_t)) ;
    GB_MALLOC_MEMORY (Wlast, ntasks, sizeof (int64_t)) ;
    if (Wfirst == NULL || Wlast == NULL)
    {
        // out of memory
        GB_FREE_WORK ;
        return (GB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // compute flop counts for C=A*B, C<M>=A*B, or C<!M>=A*B
    //--------------------------------------------------------------------------

    int64_t total_Mwork = 0 ;
    int taskid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:total_Mwork)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        int64_t kfirst = kfirst_slice [taskid] ;
        int64_t klast = klast_slice [taskid] ;
        Wfirst [taskid] = 0 ;
        Wlast [taskid] = 0 ;
        int64_t mpleft = 0 ;    // for GB_lookup of the mask M
        int64_t task_Mwork = 0 ;

        //----------------------------------------------------------------------
        // count flops for vectors kfirst to klast of B
        //----------------------------------------------------------------------

        for (int64_t kk = kfirst ; kk <= klast ; kk++)
        {
            // nnz (B (:,j)), for all tasks
            int64_t bjnz = Bp [kk+1] - Bp [kk] ;
            // C(:,j) is empty if the entire vector B(:,j) is empty
            if (bjnz == 0) continue ;

            //------------------------------------------------------------------
            // find the part of B(:,j) to be computed by this task
            //------------------------------------------------------------------

            int64_t pB, pB_end ;
            GB_get_pA_and_pC (&pB, &pB_end, NULL,
                taskid, kk, kfirst, klast, pstart_slice, NULL, NULL, Bp) ;
            int64_t my_bjnz = pB_end - pB ;
            int64_t j = (B_is_hyper) ? Bh [kk] : kk ;

            //------------------------------------------------------------------
            // see if M(:,j) is present and non-empty
            //------------------------------------------------------------------

            int64_t bjflops = 0 ;
            int64_t im_first = -1, im_last = -1 ;
            int64_t mjnz = 0 ;
            if (M != NULL)
            {
                int64_t mpright = mnvec - 1 ;
                int64_t pM, pM_end ;
                GB_lookup (M_is_hyper, Mh, Mp, &mpleft, mpright, j,
                    &pM, &pM_end) ;
                mjnz = pM_end - pM ;
                // If M not complemented: C(:,j) is empty if M(:,j) is empty.
                if (mjnz == 0 && !Mask_comp) continue ;
                if (mjnz > 0)
                {
                    // M(:,j) not empty; get 1st and last index in M(:,j)
                    im_first = Mi [pM] ;
                    im_last = Mi [pM_end-1] ;
                    if (pB == Bp [kk])
                    {
                        // this task owns the top part of B(:,j), so it can
                        // account for the work to access M(:,j), without the
                        // work being duplicated by other tasks working on
                        // B(:,j)
                        bjflops = mjnz ;
                        // keep track of total work spent examining the mask.
                        // If any B(:,j) is empty, M(:,j) can be ignored.  So
                        // total_Mwork will be <= nnz (M).
                        task_Mwork += mjnz ;
                    }
                }
            }
            // threshold: when nnz(A(:,k)) greatly exceeds 64*nnz(M(:,j)), the
            // numerical phase scans M(:,j) with a binary search into A instead
            int64_t mjnz_much = 64 * mjnz ;

            //------------------------------------------------------------------
            // trim Ah on right
            //------------------------------------------------------------------

            // Ah [0..A->nvec-1] holds the set of non-empty vectors of A, but
            // only vectors k corresponding to nonzero entries B(k,j) are
            // accessed for this vector B(:,j).  If nnz (B(:,j)) > 2, prune the
            // search space on the right, so the remaining calls to GB_lookup
            // will only need to search Ah [pleft...pright-1].  pright does not
            // change.  pleft is advanced as B(:,j) is traversed, since the
            // indices in B(:,j) are sorted in ascending order.

            int64_t pleft = 0 ;
            int64_t pright = anvec-1 ;
            if (A_is_hyper && my_bjnz > 2)
            {
                // trim Ah [0..pright] to remove any entries past last B(:,j)
                GB_bracket_right (Bi [pB_end-1], Ah, 0, &pright) ;
            }

            //------------------------------------------------------------------
            // count the flops to compute C(:,j)<#M(:,j)> = A*B(:,j)
            //------------------------------------------------------------------

            // where #M is either not present, M, or !M
            for ( ; pB < pB_end ; pB++)
            {
                // B(k,j) is nonzero
                int64_t k = Bi [pB] ;

                // find A(:,k), reusing pleft since Bi [...] is sorted
                int64_t pA, pA_end ;
                GB_lookup (A_is_hyper, Ah, Ap, &pleft, pright, k,
                    &pA, &pA_end);

                // skip if A(:,k) empty
                int64_t aknz = pA_end - pA ;
                if (aknz == 0) continue ;

                double bkjflops ;

                // skip if intersection of A(:,k) and M(:,j) is empty
                // and mask is not complemented (C<M>=A*B)
                if (mask_is_M)
                {
                    // A(:,k) is non-empty; get first and last index of A(:,k)
                    int64_t alo = Ai [pA] ;
                    int64_t ahi = Ai [pA_end-1] ;
                    if (ahi < im_first || alo > im_last) continue ;
                    if (aknz > 256 && mjnz_much < aknz)
                    {
                        // scan M(:j), and do binary search for A(i,j)
                        bkjflops = mjnz * (1 + 4 * log2 ((double) aknz)) ;
                    }
                    else
                    {
                        // scan A(:k), and lookup M(i,j)
                        bkjflops = aknz ;
                    }
                }
                else
                {
                    // A(:,k)*B(k,j) requires aknz flops
                    bkjflops = aknz ;
                }

                // increment by flops for the single entry B(k,j)
                // C(:,j)<#M(:,j)> += A(:,k)*B(k,j).
                // (double estimate truncated to int64_t on accumulation)
                bjflops += bkjflops ;
            }

            //------------------------------------------------------------------
            // log the flops for B(:,j)
            //------------------------------------------------------------------

            // partial counts for the first/last (possibly shared) vectors are
            // kept per-task and merged in the reduction pass below
            if (kk == kfirst)
            {
                Wfirst [taskid] = bjflops ;
            }
            else if (kk == klast)
            {
                Wlast [taskid] = bjflops ;
            }
            else
            {
                Bflops [kk] = bjflops ;
            }
        }

        // compute the total work to access the mask, which is <= nnz (M)
        total_Mwork += task_Mwork ;
    }

    //--------------------------------------------------------------------------
    // reduce the first and last vector of each slice
    //--------------------------------------------------------------------------

    // See also Template/GB_reduce_each_vector.c

    int64_t kprior = -1 ;

    for (int taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // sum up the partial flops that taskid computed for kfirst
        //----------------------------------------------------------------------

        int64_t kfirst = kfirst_slice [taskid] ;
        int64_t klast = klast_slice [taskid] ;

        if (kfirst <= klast)
        {
            int64_t pB = pstart_slice [taskid] ;
            int64_t pB_end = GB_IMIN (Bp [kfirst+1], pstart_slice [taskid+1]) ;
            if (pB < pB_end)
            {
                if (kprior < kfirst)
                {
                    // This task is the first one that did work on
                    // B(:,kfirst), so use it to start the reduction.
                    Bflops [kfirst] = Wfirst [taskid] ;
                }
                else
                {
                    // subsequent task for B(:,kfirst)
                    Bflops [kfirst] += Wfirst [taskid] ;
                }
                kprior = kfirst ;
            }
        }

        //----------------------------------------------------------------------
        // sum up the partial flops that taskid computed for klast
        //----------------------------------------------------------------------

        if (kfirst < klast)
        {
            int64_t pB = Bp [klast] ;
            int64_t pB_end = pstart_slice [taskid+1] ;
            if (pB < pB_end)
            {
                // NOTE(review): the original 'if (kprior < klast)' was
                // replaced by this assertion plus a bare block — when
                // kfirst < klast this task is provably the first to touch
                // B(:,klast), so the 'else' branch (kept below, commented
                // out) is dead code.
                /* if */ ASSERT (kprior < klast) ;
                {
                    // This task is the first one that did work on
                    // B(:,klast), so use it to start the reduction.
                    Bflops [klast] = Wlast [taskid] ;
                }
                /* else
                {
                    // If kfirst < klast and B(:,klast) is not empty,
                    // then this task is always the first one to do
                    // work on B(:,klast), so this case is never used.
                    ASSERT (GB_DEAD_CODE) ;
                    // subsequent task to work on B(:,klast)
                    Bflops [klast] += Wlast [taskid] ;
                } */
                kprior = klast ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // cumulative sum of Bflops
    //--------------------------------------------------------------------------

    // Bflops = cumsum ([0 Bflops]) ;
    ASSERT (Bflops [bnvec] == 0) ;
    GB_cumsum (Bflops, bnvec, NULL, nthreads) ;
    // Bflops [bnvec] is now the total flop count, including the time to
    // compute A*B and to handle the mask.  total_Mwork is part of this total
    // flop count, but is also returned separately.

    //--------------------------------------------------------------------------
    // free workspace and return result
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    (*Mwork) = total_Mwork ;
    return (GrB_SUCCESS) ;
}
3d25pt.c
/* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution 
- Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt; t++) { for (i = 4; i < Nz-4; i++) { for (j = 4; j < Ny-4; j++) { for (k = 4; k < Nx-4; k++) { A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*( coef0* A[t%2][i ][j ][k ] + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) ); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
muxers.c
/***************************************************************************** * muxers.c: h264 file i/o plugins ***************************************************************************** * Copyright (C) 2003-2008 x264 project * * Authors: Laurent Aimar <fenrir@via.ecp.fr> * Loren Merritt <lorenm@u.washington.edu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA. 
*****************************************************************************/ #include "common/common.h" #include <omp.h> #include "x264.h" #include "matroska.h" #include "muxers.h" #ifndef _MSC_VER #include "config.h" #endif #include <sys/types.h> #ifdef AVIS_INPUT #include <windows.h> #include <vfw.h> #endif #ifdef MP4_OUTPUT #include <gpac/isomedia.h> #endif static int64_t gcd( int64_t a, int64_t b ) { while (1) { int64_t c = a % b; if( !c ) return b; a = b; b = c; } } typedef struct { FILE *fh; int width, height; int next_frame; } yuv_input_t; /* raw 420 yuv file operation */ int open_file_yuv( char *psz_filename, hnd_t *p_handle, x264_param_t *p_param ) { yuv_input_t *h = malloc(sizeof(yuv_input_t)); h->width = p_param->i_width; h->height = p_param->i_height; h->next_frame = 0; if( !strcmp(psz_filename, "-") ) h->fh = stdin; else h->fh = fopen(psz_filename, "rb"); if( h->fh == NULL ) return -1; *p_handle = (hnd_t)h; return 0; } int get_frame_total_yuv( hnd_t handle ) { yuv_input_t *h = handle; int i_frame_total = 0; if( !fseek( h->fh, 0, SEEK_END ) ) { uint64_t i_size = ftell( h->fh ); fseek( h->fh, 0, SEEK_SET ); i_frame_total = (int)(i_size / ( h->width * h->height * 3 / 2 )); } return i_frame_total; } int read_frame_yuv( x264_picture_t *p_pic, hnd_t handle, int i_frame ) { yuv_input_t *h = handle; if( i_frame != h->next_frame ) if( fseek( h->fh, (uint64_t)i_frame * h->width * h->height * 3 / 2, SEEK_SET ) ) return -1; if( fread( p_pic->img.plane[0], 1, h->width * h->height, h->fh ) <= 0 || fread( p_pic->img.plane[1], 1, h->width * h->height / 4, h->fh ) <= 0 || fread( p_pic->img.plane[2], 1, h->width * h->height / 4, h->fh ) <= 0 ) return -1; h->next_frame = i_frame+1; return 0; } int close_file_yuv(hnd_t handle) { yuv_input_t *h = handle; if( !h || !h->fh ) return 0; fclose( h->fh ); free( h ); return 0; } /* YUV4MPEG2 raw 420 yuv file operation */ typedef struct { FILE *fh; int width, height; int next_frame; int seq_header_len, frame_header_len; int 
frame_size; } y4m_input_t; #define Y4M_MAGIC "YUV4MPEG2" #define MAX_YUV4_HEADER 80 #define Y4M_FRAME_MAGIC "FRAME" #define MAX_FRAME_HEADER 80 int open_file_y4m( char *psz_filename, hnd_t *p_handle, x264_param_t *p_param ) { int i, n, d; int interlaced; char header[MAX_YUV4_HEADER+10]; char *tokstart, *tokend, *header_end; y4m_input_t *h = malloc(sizeof(y4m_input_t)); h->next_frame = 0; if( !strcmp(psz_filename, "-") ) h->fh = stdin; else h->fh = fopen(psz_filename, "rb"); if( h->fh == NULL ) return -1; h->frame_header_len = strlen(Y4M_FRAME_MAGIC)+1; /* Read header */ for( i=0; i<MAX_YUV4_HEADER; i++ ) { header[i] = fgetc(h->fh); if( header[i] == '\n' ) { /* Add a space after last option. Makes parsing "444" vs "444alpha" easier. */ header[i+1] = 0x20; header[i+2] = 0; break; } } if( i == MAX_YUV4_HEADER || strncmp(header, Y4M_MAGIC, strlen(Y4M_MAGIC)) ) return -1; /* Scan properties */ header_end = &header[i+1]; /* Include space */ h->seq_header_len = i+1; for( tokstart = &header[strlen(Y4M_MAGIC)+1]; tokstart < header_end; tokstart++ ) { if(*tokstart==0x20) continue; switch(*tokstart++) { case 'W': /* Width. Required. */ h->width = p_param->i_width = strtol(tokstart, &tokend, 10); tokstart=tokend; break; case 'H': /* Height. Required. 
*/ h->height = p_param->i_height = strtol(tokstart, &tokend, 10); tokstart=tokend; break; case 'C': /* Color space */ if( strncmp("420", tokstart, 3) ) { fprintf(stderr, "Colorspace unhandled\n"); return -1; } tokstart = strchr(tokstart, 0x20); break; case 'I': /* Interlace type */ switch(*tokstart++) { case 'p': interlaced = 0; break; case '?': case 't': case 'b': case 'm': default: interlaced = 1; fprintf(stderr, "Warning, this sequence might be interlaced\n"); } break; case 'F': /* Frame rate - 0:0 if unknown */ if( sscanf(tokstart, "%d:%d", &n, &d) == 2 && n && d ) { x264_reduce_fraction( &n, &d ); p_param->i_fps_num = n; p_param->i_fps_den = d; } tokstart = strchr(tokstart, 0x20); break; case 'A': /* Pixel aspect - 0:0 if unknown */ /* Don't override the aspect ratio if sar has been explicitly set on the commandline. */ if( sscanf(tokstart, "%d:%d", &n, &d) == 2 && n && d && !p_param->vui.i_sar_width && !p_param->vui.i_sar_height ) { x264_reduce_fraction( &n, &d ); p_param->vui.i_sar_width = n; p_param->vui.i_sar_height = d; } tokstart = strchr(tokstart, 0x20); break; case 'X': /* Vendor extensions */ if( !strncmp("YSCSS=",tokstart,6) ) { /* Older nonstandard pixel format representation */ tokstart += 6; if( strncmp("420JPEG",tokstart,7) && strncmp("420MPEG2",tokstart,8) && strncmp("420PALDV",tokstart,8) ) { fprintf(stderr, "Unsupported extended colorspace\n"); return -1; } } tokstart = strchr(tokstart, 0x20); break; } } fprintf(stderr, "yuv4mpeg: %ix%i@%i/%ifps, %i:%i\n", h->width, h->height, p_param->i_fps_num, p_param->i_fps_den, p_param->vui.i_sar_width, p_param->vui.i_sar_height); *p_handle = (hnd_t)h; return 0; } /* Most common case: frame_header = "FRAME" */ int get_frame_total_y4m( hnd_t handle ) { y4m_input_t *h = handle; int i_frame_total = 0; uint64_t init_pos = ftell(h->fh); if( !fseek( h->fh, 0, SEEK_END ) ) { uint64_t i_size = ftell( h->fh ); fseek( h->fh, init_pos, SEEK_SET ); i_frame_total = (int)((i_size - h->seq_header_len) / 
(3*(h->width*h->height)/2+h->frame_header_len)); } return i_frame_total; } int read_frame_y4m( x264_picture_t *p_pic, hnd_t handle, int i_frame ) { int slen = strlen(Y4M_FRAME_MAGIC); int i = 0; char header[16]; y4m_input_t *h = handle; if( i_frame != h->next_frame ) { if (fseek(h->fh, (uint64_t)i_frame*(3*(h->width*h->height)/2+h->frame_header_len) + h->seq_header_len, SEEK_SET)) return -1; } /* Read frame header - without terminating '\n' */ if (fread(header, 1, slen, h->fh) != slen) return -1; header[slen] = 0; if (strncmp(header, Y4M_FRAME_MAGIC, slen)) { fprintf(stderr, "Bad header magic (%08X <=> %s)\n", *((uint32_t*)header), header); return -1; } /* Skip most of it */ while (i<MAX_FRAME_HEADER && fgetc(h->fh) != '\n') i++; if (i == MAX_FRAME_HEADER) { fprintf(stderr, "Bad frame header!\n"); return -1; } h->frame_header_len = i+slen+1; if( fread(p_pic->img.plane[0], 1, h->width*h->height, h->fh) <= 0 || fread(p_pic->img.plane[1], 1, h->width * h->height / 4, h->fh) <= 0 || fread(p_pic->img.plane[2], 1, h->width * h->height / 4, h->fh) <= 0) return -1; h->next_frame = i_frame+1; return 0; } int close_file_y4m(hnd_t handle) { y4m_input_t *h = handle; if( !h || !h->fh ) return 0; fclose( h->fh ); free( h ); return 0; } /* avs/avi input file support under cygwin */ #ifdef AVIS_INPUT typedef struct { PAVISTREAM p_avi; int width, height; } avis_input_t; int open_file_avis( char *psz_filename, hnd_t *p_handle, x264_param_t *p_param ) { avis_input_t *h = malloc(sizeof(avis_input_t)); AVISTREAMINFO info; int i; *p_handle = (hnd_t)h; AVIFileInit(); if( AVIStreamOpenFromFile( &h->p_avi, psz_filename, streamtypeVIDEO, 0, OF_READ, NULL ) ) { AVIFileExit(); return -1; } if( AVIStreamInfo(h->p_avi, &info, sizeof(AVISTREAMINFO)) ) { AVIStreamRelease(h->p_avi); AVIFileExit(); return -1; } // check input format if (info.fccHandler != MAKEFOURCC('Y', 'V', '1', '2')) { fprintf( stderr, "avis [error]: unsupported input format (%c%c%c%c)\n", (char)(info.fccHandler & 0xff), 
(char)((info.fccHandler >> 8) & 0xff), (char)((info.fccHandler >> 16) & 0xff), (char)((info.fccHandler >> 24)) ); AVIStreamRelease(h->p_avi); AVIFileExit(); return -1; } h->width = p_param->i_width = info.rcFrame.right - info.rcFrame.left; h->height = p_param->i_height = info.rcFrame.bottom - info.rcFrame.top; i = gcd(info.dwRate, info.dwScale); p_param->i_fps_den = info.dwScale / i; p_param->i_fps_num = info.dwRate / i; fprintf( stderr, "avis [info]: %dx%d @ %.2f fps (%d frames)\n", p_param->i_width, p_param->i_height, (double)p_param->i_fps_num / (double)p_param->i_fps_den, (int)info.dwLength ); return 0; } int get_frame_total_avis( hnd_t handle ) { avis_input_t *h = handle; AVISTREAMINFO info; if( AVIStreamInfo(h->p_avi, &info, sizeof(AVISTREAMINFO)) ) return -1; return info.dwLength; } int read_frame_avis( x264_picture_t *p_pic, hnd_t handle, int i_frame ) { avis_input_t *h = handle; p_pic->img.i_csp = X264_CSP_YV12; if( AVIStreamRead(h->p_avi, i_frame, 1, p_pic->img.plane[0], h->width * h->height * 3 / 2, NULL, NULL ) ) return -1; return 0; } int close_file_avis( hnd_t handle ) { avis_input_t *h = handle; AVIStreamRelease(h->p_avi); AVIFileExit(); free(h); return 0; } #endif #ifdef HAVE_PTHREAD typedef struct { int (*p_read_frame)( x264_picture_t *p_pic, hnd_t handle, int i_frame ); int (*p_close_infile)( hnd_t handle ); hnd_t p_handle; x264_picture_t pic; x264_pthread_t tid; int next_frame; int frame_total; int in_progress; struct thread_input_arg_t *next_args; } thread_input_t; typedef struct thread_input_arg_t { thread_input_t *h; x264_picture_t *pic; int i_frame; int status; } thread_input_arg_t; int open_file_thread( char *psz_filename, hnd_t *p_handle, x264_param_t *p_param ) { thread_input_t *h = malloc(sizeof(thread_input_t)); x264_picture_alloc( &h->pic, X264_CSP_I420, p_param->i_width, p_param->i_height ); h->p_read_frame = p_read_frame; h->p_close_infile = p_close_infile; h->p_handle = *p_handle; h->in_progress = 0; h->next_frame = -1; h->next_args 
= malloc(sizeof(thread_input_arg_t)); h->next_args->h = h; h->next_args->status = 0; h->frame_total = p_get_frame_total( h->p_handle ); *p_handle = (hnd_t)h; return 0; } int get_frame_total_thread( hnd_t handle ) { thread_input_t *h = handle; return h->frame_total; } static void read_frame_thread_int( thread_input_arg_t *i ) { i->status = i->h->p_read_frame( i->pic, i->h->p_handle, i->i_frame ); } int read_frame_thread( x264_picture_t *p_pic, hnd_t handle, int i_frame ) { thread_input_t *h = handle; UNUSED void *stuff; int ret = 0; if( h->next_frame >= 0 ) { #pragma omp taskwait ret |= h->next_args->status; h->in_progress = 0; } if( h->next_frame == i_frame ) { XCHG( x264_picture_t, *p_pic, h->pic ); } else { ret |= h->p_read_frame( p_pic, h->p_handle, i_frame ); } if( !h->frame_total || i_frame+1 < h->frame_total ) { h->next_frame = h->next_args->i_frame = i_frame+1; h->next_args->pic = &h->pic; #pragma omp taskout (*h) label ( read_frame_thread_int ) read_frame_thread_int (h-> next_args ); h->in_progress = 1; } else h->next_frame = -1; return ret; } int close_file_thread( hnd_t handle ) { thread_input_t *h = handle; h->p_close_infile( h->p_handle ); x264_picture_clean( &h->pic ); if( h->in_progress ) x264_pthread_join( h->tid, NULL ); free( h->next_args ); free( h ); return 0; } #endif int open_file_bsf( char *psz_filename, hnd_t *p_handle ) { if ((*p_handle = fopen(psz_filename, "w+b")) == NULL) return -1; return 0; } int set_param_bsf( hnd_t handle, x264_param_t *p_param ) { return 0; } int write_nalu_bsf( hnd_t handle, uint8_t *p_nalu, int i_size ) { if (fwrite(p_nalu, i_size, 1, (FILE *)handle) > 0) return i_size; return -1; } int set_eop_bsf( hnd_t handle, x264_picture_t *p_picture ) { return 0; } int close_file_bsf( hnd_t handle ) { if ((handle == NULL) || (handle == stdout)) return 0; return fclose((FILE *)handle); } /* -- mp4 muxing support ------------------------------------------------- */ #ifdef MP4_OUTPUT typedef struct { GF_ISOFile *p_file; 
GF_AVCConfig *p_config; GF_ISOSample *p_sample; int i_track; uint32_t i_descidx; int i_time_inc; int i_time_res; int i_numframe; int i_init_delay; uint8_t b_sps; uint8_t b_pps; } mp4_t; static void recompute_bitrate_mp4(GF_ISOFile *p_file, int i_track) { u32 i, count, di, timescale, time_wnd, rate; u64 offset; Double br; GF_ESD *esd; esd = gf_isom_get_esd(p_file, i_track, 1); if (!esd) return; esd->decoderConfig->avgBitrate = 0; esd->decoderConfig->maxBitrate = 0; rate = time_wnd = 0; timescale = gf_isom_get_media_timescale(p_file, i_track); count = gf_isom_get_sample_count(p_file, i_track); for (i=0; i<count; i++) { GF_ISOSample *samp = gf_isom_get_sample_info(p_file, i_track, i+1, &di, &offset); if (samp->dataLength>esd->decoderConfig->bufferSizeDB) esd->decoderConfig->bufferSizeDB = samp->dataLength; if (esd->decoderConfig->bufferSizeDB < samp->dataLength) esd->decoderConfig->bufferSizeDB = samp->dataLength; esd->decoderConfig->avgBitrate += samp->dataLength; rate += samp->dataLength; if (samp->DTS > time_wnd + timescale) { if (rate > esd->decoderConfig->maxBitrate) esd->decoderConfig->maxBitrate = rate; time_wnd = samp->DTS; rate = 0; } gf_isom_sample_del(&samp); } br = (Double) (s64) gf_isom_get_media_duration(p_file, i_track); br /= timescale; esd->decoderConfig->avgBitrate = (u32) (esd->decoderConfig->avgBitrate / br); /*move to bps*/ esd->decoderConfig->avgBitrate *= 8; esd->decoderConfig->maxBitrate *= 8; gf_isom_change_mpeg4_description(p_file, i_track, 1, esd); gf_odf_desc_del((GF_Descriptor *) esd); } int close_file_mp4( hnd_t handle ) { mp4_t *p_mp4 = (mp4_t *)handle; if (p_mp4 == NULL) return 0; if (p_mp4->p_config) gf_odf_avc_cfg_del(p_mp4->p_config); if (p_mp4->p_sample) { if (p_mp4->p_sample->data) free(p_mp4->p_sample->data); gf_isom_sample_del(&p_mp4->p_sample); } if (p_mp4->p_file) { recompute_bitrate_mp4(p_mp4->p_file, p_mp4->i_track); gf_isom_set_pl_indication(p_mp4->p_file, GF_ISOM_PL_VISUAL, 0x15); gf_isom_set_storage_mode(p_mp4->p_file, 
GF_ISOM_STORE_FLAT); gf_isom_close(p_mp4->p_file); } free(p_mp4); return 0; } int open_file_mp4( char *psz_filename, hnd_t *p_handle ) { mp4_t *p_mp4; *p_handle = NULL; if ((p_mp4 = (mp4_t *)malloc(sizeof(mp4_t))) == NULL) return -1; memset(p_mp4, 0, sizeof(mp4_t)); p_mp4->p_file = gf_isom_open(psz_filename, GF_ISOM_OPEN_WRITE, NULL); if ((p_mp4->p_sample = gf_isom_sample_new()) == NULL) { close_file_mp4( p_mp4 ); return -1; } gf_isom_set_brand_info(p_mp4->p_file, GF_ISOM_BRAND_AVC1, 0); *p_handle = p_mp4; return 0; } int set_param_mp4( hnd_t handle, x264_param_t *p_param ) { mp4_t *p_mp4 = (mp4_t *)handle; p_mp4->i_track = gf_isom_new_track(p_mp4->p_file, 0, GF_ISOM_MEDIA_VISUAL, p_param->i_fps_num); p_mp4->p_config = gf_odf_avc_cfg_new(); gf_isom_avc_config_new(p_mp4->p_file, p_mp4->i_track, p_mp4->p_config, NULL, NULL, &p_mp4->i_descidx); gf_isom_set_track_enabled(p_mp4->p_file, p_mp4->i_track, 1); gf_isom_set_visual_info(p_mp4->p_file, p_mp4->i_track, p_mp4->i_descidx, p_param->i_width, p_param->i_height); if( p_param->vui.i_sar_width && p_param->vui.i_sar_height ) { uint64_t dw = p_param->i_width << 16; uint64_t dh = p_param->i_height << 16; double sar = (double)p_param->vui.i_sar_width / p_param->vui.i_sar_height; if( sar > 1.0 ) dw *= sar ; else dh /= sar; gf_isom_set_track_layout_info( p_mp4->p_file, p_mp4->i_track, dw, dh, 0, 0, 0 ); } p_mp4->p_sample->data = (char *)malloc(p_param->i_width * p_param->i_height * 3 / 2); if (p_mp4->p_sample->data == NULL) return -1; p_mp4->i_time_res = p_param->i_fps_num; p_mp4->i_time_inc = p_param->i_fps_den; p_mp4->i_init_delay = p_param->i_bframe ? (p_param->b_bframe_pyramid ? 
2 : 1) : 0;   /* continuation: B-pyramid needs 2 frames of delay, plain B 1 */
    p_mp4->i_init_delay *= p_mp4->i_time_inc;
    fprintf(stderr, "mp4 [info]: initial delay %d (scale %d)\n",
            p_mp4->i_init_delay, p_mp4->i_time_res);

    return 0;
}

/* Routes one Annex-B NAL unit (4-byte start code assumed): SPS/PPS are
   recorded once each in the avcC configuration; slices and SEI are
   appended to the pending sample with the start code overwritten by a
   4-byte big-endian payload length. Returns i_size. */
int write_nalu_mp4( hnd_t handle, uint8_t *p_nalu, int i_size )
{
    mp4_t *p_mp4 = (mp4_t *)handle;
    GF_AVCConfigSlot *p_slot;
    uint8_t type = p_nalu[4] & 0x1f;   /* NAL type after the start code */
    int psize;

    switch(type)
    {
    // sps
    case 0x07:
        if (!p_mp4->b_sps)
        {
            /* profile/compat/level bytes come straight from the SPS */
            p_mp4->p_config->configurationVersion = 1;
            p_mp4->p_config->AVCProfileIndication = p_nalu[5];
            p_mp4->p_config->profile_compatibility = p_nalu[6];
            p_mp4->p_config->AVCLevelIndication = p_nalu[7];
            p_slot = (GF_AVCConfigSlot *)malloc(sizeof(GF_AVCConfigSlot));
            p_slot->size = i_size - 4;
            p_slot->data = (char *)malloc(p_slot->size);
            memcpy(p_slot->data, p_nalu + 4, i_size - 4);
            gf_list_add(p_mp4->p_config->sequenceParameterSets, p_slot);
            p_slot = NULL;
            p_mp4->b_sps = 1;
        }
        break;

    // pps
    case 0x08:
        if (!p_mp4->b_pps)
        {
            p_slot = (GF_AVCConfigSlot *)malloc(sizeof(GF_AVCConfigSlot));
            p_slot->size = i_size - 4;
            p_slot->data = (char *)malloc(p_slot->size);
            memcpy(p_slot->data, p_nalu + 4, i_size - 4);
            gf_list_add(p_mp4->p_config->pictureParameterSets, p_slot);
            p_slot = NULL;
            p_mp4->b_pps = 1;
            /* once both parameter sets are known, push the avcC box */
            if (p_mp4->b_sps)
                gf_isom_avc_config_update(p_mp4->p_file, p_mp4->i_track, 1,
                                          p_mp4->p_config);
        }
        break;

    // slice, sei
    case 0x1:
    case 0x5:
    case 0x6:
        psize = i_size - 4 ;
        memcpy(p_mp4->p_sample->data + p_mp4->p_sample->dataLength, p_nalu, i_size);
        /* replace the start code with the big-endian payload length */
        p_mp4->p_sample->data[p_mp4->p_sample->dataLength + 0] = (psize >> 24) & 0xff;
        p_mp4->p_sample->data[p_mp4->p_sample->dataLength + 1] = (psize >> 16) & 0xff;
        p_mp4->p_sample->data[p_mp4->p_sample->dataLength + 2] = (psize >> 8) & 0xff;
        p_mp4->p_sample->data[p_mp4->p_sample->dataLength + 3] = (psize >> 0) & 0xff;
        p_mp4->p_sample->dataLength += i_size;
        break;
    }

    return i_size;
}

/* Flushes the accumulated NAL data of one picture as a sample: computes
   DTS from the frame counter and the CTS offset from the picture's PTS. */
int set_eop_mp4( hnd_t handle, x264_picture_t *p_picture )
{
    mp4_t *p_mp4 = (mp4_t *)handle;
    uint64_t dts = (uint64_t)p_mp4->i_numframe * p_mp4->i_time_inc;
    uint64_t pts =
(uint64_t)p_picture->i_pts;
    int32_t offset = p_mp4->i_init_delay + pts - dts;

    p_mp4->p_sample->IsRAP = p_picture->i_type == X264_TYPE_IDR ? 1 : 0;
    p_mp4->p_sample->DTS = dts;
    p_mp4->p_sample->CTS_Offset = offset;
    gf_isom_add_sample(p_mp4->p_file, p_mp4->i_track, p_mp4->i_descidx, p_mp4->p_sample);

    p_mp4->p_sample->dataLength = 0;
    p_mp4->i_numframe++;

    return 0;
}
#endif


/* -- mkv muxing support ------------------------------------------------- */
typedef struct
{
    mk_Writer *w;                       /* matroska writer backend */
    uint8_t *sps, *pps;                 /* saved parameter sets for avcC */
    int sps_len, pps_len;
    int width, height, d_width, d_height;   /* coded and display sizes */
    int64_t frame_duration;             /* in nanoseconds */
    int fps_num;
    int b_header_written;
    char b_writing_frame;               /* a frame is currently open */
} mkv_t;

/* Builds the AVCDecoderConfigurationRecord (avcC) from the saved SPS/PPS
   and writes the matroska segment header. Returns 0/negative on error. */
static int write_header_mkv( mkv_t *p_mkv )
{
    int ret;
    uint8_t *avcC;
    int avcC_len;

    /* all of these must have been collected via set_param/write_nalu first */
    if( p_mkv->sps == NULL || p_mkv->pps == NULL ||
        p_mkv->width == 0 || p_mkv->height == 0 ||
        p_mkv->d_width == 0 || p_mkv->d_height == 0)
        return -1;

    /* 5 fixed bytes + sps count/len + sps + pps count/len + pps */
    avcC_len = 5 + 1 + 2 + p_mkv->sps_len + 1 + 2 + p_mkv->pps_len;
    avcC = malloc(avcC_len);
    if (avcC == NULL)
        return -1;

    avcC[0] = 1;
    avcC[1] = p_mkv->sps[1];    /* profile */
    avcC[2] = p_mkv->sps[2];    /* profile compatibility */
    avcC[3] = p_mkv->sps[3];    /* level */
    avcC[4] = 0xff; // nalu size length is four bytes
    avcC[5] = 0xe1; // one sps

    avcC[6] = p_mkv->sps_len >> 8;
    avcC[7] = p_mkv->sps_len;

    memcpy(avcC+8, p_mkv->sps, p_mkv->sps_len);

    avcC[8+p_mkv->sps_len] = 1; // one pps
    avcC[9+p_mkv->sps_len] = p_mkv->pps_len >> 8;
    avcC[10+p_mkv->sps_len] = p_mkv->pps_len;

    memcpy( avcC+11+p_mkv->sps_len, p_mkv->pps, p_mkv->pps_len );

    ret = mk_writeHeader( p_mkv->w, "x264", "V_MPEG4/ISO/AVC",
                          avcC, avcC_len, p_mkv->frame_duration, 50000,
                          p_mkv->width, p_mkv->height,
                          p_mkv->d_width, p_mkv->d_height );

    free( avcC );
    p_mkv->b_header_written = 1;

    return ret;
}

/* Allocates the muxer state and creates the matroska writer.
   Returns 0 on success, -1 on failure. */
int open_file_mkv( char *psz_filename, hnd_t *p_handle )
{
    mkv_t *p_mkv;

    *p_handle = NULL;

    p_mkv = malloc(sizeof(*p_mkv));
    if (p_mkv == NULL)
        return -1;

    memset(p_mkv, 0, sizeof(*p_mkv));

    p_mkv->w = mk_createWriter(psz_filename);
    if (p_mkv->w == NULL)
    {
        free(p_mkv);
        return -1;
    }

    *p_handle = p_mkv;

    return 0;
}

int
set_param_mkv( hnd_t handle, x264_param_t *p_param )
{
    /* Records frame rate, coded size and display size (SAR applied,
       reduced by the gcd) for the later header write. Always returns 0. */
    mkv_t *p_mkv = handle;
    int64_t dw, dh;

    if( p_param->i_fps_num > 0 )
    {
        /* per-frame duration in nanoseconds */
        p_mkv->frame_duration = (int64_t)p_param->i_fps_den *
                                (int64_t)1000000000 / p_param->i_fps_num;
        p_mkv->fps_num = p_param->i_fps_num;
    }
    else
    {
        p_mkv->frame_duration = 0;
        p_mkv->fps_num = 1;
    }

    p_mkv->width = p_param->i_width;
    p_mkv->height = p_param->i_height;

    if( p_param->vui.i_sar_width && p_param->vui.i_sar_height )
    {
        dw = (int64_t)p_param->i_width  * p_param->vui.i_sar_width;
        dh = (int64_t)p_param->i_height * p_param->vui.i_sar_height;
    }
    else
    {
        dw = p_param->i_width;
        dh = p_param->i_height;
    }

    if( dw > 0 && dh > 0 )
    {
        int64_t x = gcd( dw, dh );
        dw /= x;
        dh /= x;
    }

    p_mkv->d_width = (int)dw;
    p_mkv->d_height = (int)dh;

    return 0;
}

/* Routes one Annex-B NAL unit: the first SPS and PPS are saved for the
   avcC header; slices/SEI are appended to the open frame with a 4-byte
   big-endian length prefix. Writes the header once both parameter sets
   are available. Returns i_size, or -1 on error. */
int write_nalu_mkv( hnd_t handle, uint8_t *p_nalu, int i_size )
{
    mkv_t *p_mkv = handle;
    uint8_t type = p_nalu[4] & 0x1f;   /* NAL type after the start code */
    uint8_t dsize[4];
    int psize;

    switch( type )
    {
    // sps
    case 0x07:
        if( !p_mkv->sps )
        {
            p_mkv->sps = malloc(i_size - 4);
            if (p_mkv->sps == NULL)
                return -1;
            p_mkv->sps_len = i_size - 4;
            memcpy(p_mkv->sps, p_nalu + 4, i_size - 4);
        }
        break;

    // pps
    case 0x08:
        if( !p_mkv->pps )
        {
            p_mkv->pps = malloc(i_size - 4);
            if (p_mkv->pps == NULL)
                return -1;
            p_mkv->pps_len = i_size - 4;
            memcpy(p_mkv->pps, p_nalu + 4, i_size - 4);
        }
        break;

    // slice, sei
    case 0x1:
    case 0x5:
    case 0x6:
        if( !p_mkv->b_writing_frame )
        {
            if( mk_startFrame(p_mkv->w) < 0 )
                return -1;
            p_mkv->b_writing_frame = 1;
        }
        /* length-prefixed NAL: 4-byte big-endian size, then the payload */
        psize = i_size - 4 ;
        dsize[0] = psize >> 24;
        dsize[1] = psize >> 16;
        dsize[2] = psize >> 8;
        dsize[3] = psize;
        if( mk_addFrameData(p_mkv->w, dsize, 4) < 0 ||
            mk_addFrameData(p_mkv->w, p_nalu + 4, i_size - 4) < 0 )
            return -1;
        break;

    default:
        break;
    }

    if( !p_mkv->b_header_written && p_mkv->pps && p_mkv->sps &&
        write_header_mkv(p_mkv) < 0 )
        return -1;

    return i_size;
}

/* Closes the current frame: stamps it with the picture's PTS (converted
   to nanoseconds) and flags keyframes. */
int set_eop_mkv( hnd_t handle, x264_picture_t *p_picture )
{
    mkv_t *p_mkv = handle;
    int64_t i_stamp = (int64_t)(p_picture->i_pts * 1e9 / p_mkv->fps_num);

    p_mkv->b_writing_frame
= 0;   /* continuation of set_eop_mkv() */

    return mk_setFrameFlags( p_mkv->w, i_stamp,
                             p_picture->i_type == X264_TYPE_IDR );
}

/* Frees the saved parameter sets and finalizes the matroska file. */
int close_file_mkv( hnd_t handle )
{
    mkv_t *p_mkv = handle;
    int ret;

    if( p_mkv->sps )
        free( p_mkv->sps );
    if( p_mkv->pps )
        free( p_mkv->pps );

    ret = mk_close(p_mkv->w);

    free( p_mkv );

    return ret;
}
flog.c
/*
Support for logging to file. Logging is made to a file called
matilda_YYMMDD_XXXXXX.log where YYMMDD is the date and XXXXXX is a random
string. When logging, a mask of log categories specifies the types of
messages to be written to file. Having a very high degree of detail in very
fast matches actively hurts the performance.

Writing to files is synchronous (with fsync) to avoid loss of data in case
of crashes, but it is impossible to guarantee this in all cases.
*/

#include "config.h"

#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h> /* localtime */

#include "alloc.h"
#include "amaf_rave.h"
#include "engine.h"
#include "file_io.h"
#include "flog.h"
#include "game_record.h"
#include "mcts.h"
#include "pat3.h"
#include "playout.h"
#include "scoring.h"
#include "stringm.h"
#include "time_ctrl.h"
#include "timem.h"
#include "version.h"

/* Open log file descriptor; -1 while no log file has been created. */
static int log_file = -1;

/*
By default print everything to standard output. Only the main matilda
executable changes this by default.
*/
static u16 log_mode = (LOG_MODE_ERROR | LOG_MODE_WARN | LOG_MODE_PROT |
    LOG_MODE_INFO | LOG_MODE_DEBUG);
static u16 log_dest = LOG_DEST_STDF;

/* For non-default values for build_info */
extern u64 max_size_in_mbs;
extern double prior_stone_scale_factor;
extern u16 prior_even;
extern u16 prior_nakade;
extern u16 prior_self_atari;
extern u16 prior_attack;
extern u16 prior_defend;
extern u16 prior_pat3;
extern u16 prior_near_last;
extern u16 prior_line2;
extern u16 prior_line3;
extern u16 prior_empty;
extern u16 prior_corner;
extern double rave_equiv;
extern u16 pl_skip_saving;
extern u16 pl_skip_nakade;
extern u16 pl_skip_pattern;
extern u16 pl_skip_capture;
extern u16 pl_ban_self_atari;
extern d16 komi;

static void open_log_file();
static void flog(
    const char * severity,
    const char * context,
    const char * msg
);

/*
Sets the logging messages that are written to file based on a mask of the
combination of available message types. See flog.h for more information.
*/
void flog_config_modes(
    u16 new_mode
) {
    if (new_mode == log_mode) {
        return;
    }

    if (new_mode != 0) {
        /* Enabling / changing the mask: if a log file is already open,
           record the change in it and return. */
        if (log_file != -1) {
            log_mode = new_mode;

            char * s = alloc();
            u32 idx = 0;

            idx += snprintf(s + idx, MAX_PAGE_SIZ - idx, "log mask changed: ");
            if (log_mode == 0) {
                snprintf(s + idx, MAX_PAGE_SIZ - idx, "none");
            } else {
                if (log_mode & LOG_MODE_ERROR) {
                    idx += snprintf(s + idx, MAX_PAGE_SIZ - idx, "crit,");
                }
                if (log_mode & LOG_MODE_WARN) {
                    idx += snprintf(s + idx, MAX_PAGE_SIZ - idx, "warn,");
                }
                if (log_mode & LOG_MODE_PROT) {
                    idx += snprintf(s + idx, MAX_PAGE_SIZ - idx, "prot,");
                }
                if (log_mode & LOG_MODE_INFO) {
                    idx += snprintf(s + idx, MAX_PAGE_SIZ - idx, "info,");
                }
                if (log_mode & LOG_MODE_DEBUG) {
                    idx += snprintf(s + idx, MAX_PAGE_SIZ - idx, "dbug,");
                }
                /* drop the trailing comma */
                s[idx - 1] = 0;
            }

            flog(NULL, NULL, s);
            release(s);
            return;
        }
    } else {
        /* Disabling all logging: announce it and close the file. */
        if (log_file != -1) {
            flog(NULL, NULL, "logging disabled");
            close(log_file);
        }
        log_file = -1;
    }

    log_mode = new_mode;
}

/*
Define the destinations for logging.
*/
void flog_config_destinations(
    u16 new_dest
) {
    log_dest = new_dest;
}

/* True if s is non-empty and its last character is a newline. */
static bool ends_in_new_line(
    const char * s
) {
    u32 l = strlen(s);
    return (l > 0) && (s[l - 1] == '\n');
}

/* True if s contains a newline anywhere but as its final character. */
static bool multiline(
    const char * s
) {
    char * t = strchr(s, '\n');
    return !(t == NULL || t == s + (strlen(s) - 1));
}

/*
Formats one record ("timestamp | severity | context | message") and writes
it to the configured destinations (file and/or stderr). Multi-line
messages are bracketed. NULL severity/context print as blanks.
*/
static void flog(
    const char * restrict severity,
    const char * restrict context,
    const char * restrict msg
) {
    if (!log_dest) {
        return;
    }

    /* Prepare payload */
    char * s = alloc();
    char * ts = alloc();
    timestamp(ts);

    if (severity == NULL) {
        severity = "    ";
    }

    if (context == NULL) {
        context = "    ";
    }

    if (multiline(msg)) {
        snprintf(s, MAX_PAGE_SIZ, "%s | %4s | %4s | [\n%s%s]\n", ts, severity,
            context, msg, ends_in_new_line(msg) ? "" : "\n");
    } else {
        snprintf(s, MAX_PAGE_SIZ, "%s | %4s | %4s | %s%s", ts, severity,
            context, msg, ends_in_new_line(msg) ?
"" : "\n");
    }

    if (log_dest & LOG_DEST_FILE) {
        open_log_file();
        u32 len = strlen(s);
        /* NOTE(review): write() return value (partial write / error) is
           ignored here; best-effort by design, fsync follows. */
        write(log_file, s, len);
        fsync(log_file);
    }

    if (log_dest & LOG_DEST_STDF) {
        fprintf(stderr, "%s", s);
    }

    release(ts);
    release(s);
}

/*
Lazily creates the log file on first use; on failure disables the file
destination. On success logs the active mode mask.
*/
static void open_log_file() {
    if (log_file == -1) {
        char * log_filename = alloc();
        log_file = create_and_open_file(log_filename, MAX_PAGE_SIZ, "matilda",
            "log");
        release(log_filename);

        if (log_file == -1) {
            fprintf(stderr, "Failed to create log file.\n");
            log_dest &= ~LOG_DEST_FILE;
            return;
        }

        char * s = alloc();
        u32 idx = 0;

        idx += snprintf(s + idx, MAX_PAGE_SIZ - idx,
            "logging enabled with mask: ");
        if (log_mode == 0) {
            snprintf(s + idx, MAX_PAGE_SIZ - idx, "none");
        } else {
            if (log_mode & LOG_MODE_ERROR) {
                idx += snprintf(s + idx, MAX_PAGE_SIZ - idx, "crit,");
            }
            if (log_mode & LOG_MODE_WARN) {
                idx += snprintf(s + idx, MAX_PAGE_SIZ - idx, "warn,");
            }
            if (log_mode & LOG_MODE_PROT) {
                idx += snprintf(s + idx, MAX_PAGE_SIZ - idx, "prot,");
            }
            if (log_mode & LOG_MODE_INFO) {
                idx += snprintf(s + idx, MAX_PAGE_SIZ - idx, "info,");
            }
            if (log_mode & LOG_MODE_DEBUG) {
                idx += snprintf(s + idx, MAX_PAGE_SIZ - idx, "dbug,");
            }
            /* drop the trailing comma */
            s[idx - 1] = 0;
        }

        flog(NULL, NULL, s);
        release(s);
    }
}

/*
Obtain a textual description of the capabilities and configuration options
of matilda. This mostly concerns compile time constants.
RETURNS string with build information
*/
void build_info(
    char * dst
) {
    /* Appends one "name: value" line per setting into dst (caller-provided
       buffer of at least MAX_PAGE_SIZ bytes). */
    u32 idx = 0;

    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        "Matilda build information\n");
    if (MATILDA_RELEASE_MODE) {
        idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
            "Compiled for: release\n");
    } else {
        idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
            "Compiled for: debugging\n");
    }
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx, "Version: %s\n",
        MATILDA_VERSION);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx, "Data folder: %s\n",
        data_folder());
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx, "Board size: %ux%u\n",
        BOARD_SIZ, BOARD_SIZ);

    char * kstr = alloc();
    komi_to_string(kstr, komi);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx, "Komidashi: %s stones\n",
        kstr);
    release(kstr);

    /* sic: "bellow" typo is in the original output string; a doc-only
       pass must not alter runtime strings. */
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        "Resign/pass bellow win rate: %.2f\n", UCT_RESIGN_WINRATE);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        " Minimum simulations: %u\n", UCT_RESIGN_PLAYOUTS);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        "Can stop MCTS early: %s\n", YN(UCT_CAN_STOP_EARLY));
    if (UCT_CAN_STOP_EARLY) {
        idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
            " At win rate: %.2f\n", UCT_EARLY_WINRATE);
    }
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        "Winrate for passing always: %.2f\n", JUST_PASS_WINRATE);

    char * s = alloc();
    format_mem_size(s, max_size_in_mbs * 1048576);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        "Transpositions table memory: %s\n", s);
    release(s);

    /* Playout policy tunables (only printed when non-zero). */
    if (pl_skip_saving) {
        idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
            " Chance of skipping save: %u/128\n", pl_skip_saving);
    }
    if (pl_skip_capture) {
        idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
            " Chance of skipping capture: %u/128\n", pl_skip_capture);
    }
    if (pl_skip_pattern) {
        idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
            " Chance of skipping pattern: %u/128\n", pl_skip_pattern);
    }
    if (pl_skip_nakade) {
        idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
            " Chance of skipping nakade: %u/128\n", pl_skip_nakade);
    }
    if (pl_ban_self_atari) {
        idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
            " Chance of prohibiting self-atari: %u/128\n", pl_ban_self_atari);
    }
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        " Use pattern weights: %s\n", YN(USE_PATTERN_WEIGHTS));

    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        "Use AMAF/RAVE: %s\n", YN(USE_AMAF_RAVE));
    if (USE_AMAF_RAVE) {
        idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
            " MSE equiv: %.2f\n", rave_equiv);
        idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
            " Criticality threshold: %u\n", CRITICALITY_THRESHOLD);
    }

    /* UCT prior weights. */
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        " Stone value scale factor: %.1f\n", prior_stone_scale_factor);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        " Even: %u (x2)\n", prior_even);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        " Nakade: %u\n", prior_nakade);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        " Self-atari: -%u\n", prior_self_atari);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        " Attack 1/2 lib group: %u\n", prior_attack);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        " Defend 1/2 lib group: %u\n", prior_defend);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        " MoGo patterns: %u\n", prior_pat3);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        " Near last play: %u\n", prior_near_last);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        " Empty L2/3/other: -%u/%u/%u\n", prior_line2, prior_line3,
        prior_empty);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        " Corners: -%u\n", prior_corner);

    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        "Max UCT depth: %u\n", MAX_UCT_DEPTH);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        "UCT expansion delay: %u\n", UCT_EXPANSION_DELAY);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        "Playout depth over number of empty points: %u\n",
        MAX_PLAYOUT_DEPTH_OVER_EMPTY);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        "Mercy threshold: %u stones\n", MERCY_THRESHOLD);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        "Constant latency compensation: %u ms\n", LATENCY_COMPENSATION);
    idx += snprintf(dst + idx, MAX_PAGE_SIZ
- idx, "Time allotment factor: %.2f\n", TIME_ALLOT_FACTOR);

    /* Query the actual OpenMP thread count from inside a parallel region. */
    u32 num_threads;
#pragma omp parallel
#pragma omp master
    {
        num_threads = omp_get_num_threads();
    }

    if (DEFAULT_NUM_THREADS == 0) {
        idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
            "Default number of threads: automatic (%u)\n", num_threads);
    } else {
        idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
            "Default number of threads: %u (%u)\n", DEFAULT_NUM_THREADS,
            num_threads);
    }
    idx += snprintf(dst + idx, MAX_PAGE_SIZ - idx,
        "Maximum number of threads: %u\n", MAXIMUM_NUM_THREADS);

    snprintf(dst + idx, MAX_PAGE_SIZ - idx, "\n");
}

/*
Log a message with verbosity level critical. Also terminates the program.
*/
void flog_crit(
    const char * restrict ctx,
    const char * restrict msg
) {
    if ((log_mode & LOG_MODE_ERROR) != 0) {
        flog("crit", ctx, msg);
        flog(NULL, NULL, "execution aborted due to program panic");
    } else {
        fprintf(stderr, "execution aborted due to program panic\n");
    }
    exit(EXIT_FAILURE);
}

/*
Log a message with verbosity level warning.
*/
void flog_warn(
    const char * restrict ctx,
    const char * restrict msg
) {
    if ((log_mode & LOG_MODE_WARN) != 0) {
        flog("warn", ctx, msg);
    }
}

/*
Log a message with verbosity level communication protocol.
*/
void flog_prot(
    const char * restrict ctx,
    const char * restrict msg
) {
    if ((log_mode & LOG_MODE_PROT) != 0) {
        flog("prot", ctx, msg);
    }
}

/*
Log a message with verbosity level informational.
*/
void flog_info(
    const char * restrict ctx,
    const char * restrict msg
) {
    if ((log_mode & LOG_MODE_INFO) != 0) {
        flog("info", ctx, msg);
    }
}

/*
Log a message with verbosity level debug.
*/
void flog_debug(
    const char * restrict ctx,
    const char * restrict msg
) {
    if ((log_mode & LOG_MODE_DEBUG) != 0) {
        flog("dbug", ctx, msg);
    }
}
a.33.2.c
/* { dg-do compile } */
#include <stdio.h>
#include <stdlib.h>

/* OpenMP example (A.33.2): called from inside a parallel region, reads a
   single float from stdin once and returns the same value to every thread.
   The `single copyprivate(tmp)` broadcasts the buffer pointer to all
   threads; the master fills it; the barriers sequence fill -> read -> free. */
float read_next ()
{
  float *tmp;
  float return_val;

#pragma omp single copyprivate(tmp)
  {
    tmp = (float *) malloc (sizeof (float));
  } /* copies the pointer only */

#pragma omp master
  {
    scanf ("%f", tmp);
  }

  /* wait until the master has written *tmp before any thread reads it */
#pragma omp barrier
  return_val = *tmp;
  /* wait until every thread has read *tmp before one thread frees it */
#pragma omp barrier

#pragma omp single nowait
  {
    free (tmp);
  }

  return return_val;
}
blas_dh.c
/******************************************************************************
 * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

#include "_hypre_Euclid.h"
/* #include "blas_dh.h" */

#undef __FUNC__
#define __FUNC__ "matvec_euclid_seq"
/* y = A*x for a CSR matrix (rp = row pointers, cval = column indices,
   aval = values). Sequential only: errors out when np_dh > 1.
   Rows are distributed across OpenMP threads. */
void matvec_euclid_seq(HYPRE_Int n, HYPRE_Int *rp, HYPRE_Int *cval, HYPRE_Real *aval, HYPRE_Real *x, HYPRE_Real *y)
{
  START_FUNC_DH
  HYPRE_Int i, j;
  HYPRE_Int from, to, col;
  HYPRE_Real sum;

  if (np_dh > 1) SET_V_ERROR("only for sequential case!\n");

#ifdef USING_OPENMP_DH
#pragma omp parallel private(j, col, sum, from, to) \
                     default(shared) \
                     firstprivate(n, rp, cval, aval, x, y)
#endif
  {
#ifdef USING_OPENMP_DH
#pragma omp for schedule(static)
#endif
    /* dot product of row i with x */
    for (i=0; i<n; ++i) {
      sum = 0.0;
      from = rp[i];
      to = rp[i+1];
      for (j=from; j<to; ++j) {
        col = cval[j];
        sum += (aval[j]*x[col]);
      }
      y[i] = sum;
    }
  }
  END_FUNC_DH
}

#undef __FUNC__
#define __FUNC__ "Axpy"
/* y := alpha*x + y (element-wise, OpenMP-parallel). */
void Axpy(HYPRE_Int n, HYPRE_Real alpha, HYPRE_Real *x, HYPRE_Real *y)
{
  START_FUNC_DH
  HYPRE_Int i;

#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(alpha, x, y) \
                     private(i)
#endif
  for (i=0; i<n; ++i) {
    y[i] = alpha*x[i] + y[i];
  }
  END_FUNC_DH
}

#undef __FUNC__
#define __FUNC__ "CopyVec"
/* yOUT := xIN (element-wise copy, OpenMP-parallel). */
void CopyVec(HYPRE_Int n, HYPRE_Real *xIN, HYPRE_Real *yOUT)
{
  START_FUNC_DH
  HYPRE_Int i;

#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(yOUT, xIN) \
                     private(i)
#endif
  for (i=0; i<n; ++i) {
    yOUT[i] = xIN[i];
  }
  END_FUNC_DH
}

#undef __FUNC__
#define __FUNC__ "ScaleVec"
/* x := alpha*x (in place, OpenMP-parallel). */
void ScaleVec(HYPRE_Int n, HYPRE_Real alpha, HYPRE_Real *x)
{
  START_FUNC_DH
  HYPRE_Int i;

#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(alpha, x) \
                     private(i)
#endif
  for (i=0; i<n; ++i) {
    x[i] *= alpha;
  }
  END_FUNC_DH
}

#undef __FUNC__
#define __FUNC__ "InnerProd"
/* Returns <x, y>. The local OpenMP reduction is combined across MPI ranks
   with an Allreduce when running on more than one process. */
HYPRE_Real InnerProd(HYPRE_Int n, HYPRE_Real *x, HYPRE_Real *y)
{
  START_FUNC_DH
  HYPRE_Real result, local_result = 0.0;

  HYPRE_Int i;

#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(x, y) \
                     private(i) \
                     reduction(+:local_result)
#endif
  for (i=0; i<n; ++i) {
    local_result += x[i] * y[i];
  }

  if (np_dh > 1) {
    hypre_MPI_Allreduce(&local_result, &result, 1, hypre_MPI_REAL,
                        hypre_MPI_SUM, comm_dh);
  } else {
    result = local_result;
  }

  END_FUNC_VAL(result)
}

#undef __FUNC__
#define __FUNC__ "Norm2"
/* Returns the Euclidean norm ||x||_2, reduced across MPI ranks. */
HYPRE_Real Norm2(HYPRE_Int n, HYPRE_Real *x)
{
  START_FUNC_DH
  HYPRE_Real result, local_result = 0.0;
  HYPRE_Int i;

#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(x) \
                     private(i) \
                     reduction(+:local_result)
#endif
  for (i=0; i<n; ++i) {
    local_result += (x[i]*x[i]);
  }

  if (np_dh > 1) {
    hypre_MPI_Allreduce(&local_result, &result, 1, hypre_MPI_REAL,
                        hypre_MPI_SUM, comm_dh);
  } else {
    result = local_result;
  }
  result = sqrt(result);
  END_FUNC_VAL(result)
}
matrixstrassen.h
/** * @file matrixstrassen.h matrix strassen operations. * @author TPOC: contact@palisade-crypto.org * * @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT) * All rights reserved. * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #ifndef LBCRYPTO_MATH_MATRIXSTRASSEN_H #define LBCRYPTO_MATH_MATRIXSTRASSEN_H #include <assert.h> #include "matrix.h" namespace lbcrypto { template<class Element> class MatrixStrassen { // FIXME : public Serializable { public: typedef vector<vector<Element>> data_t; typedef vector<Element> lineardata_t; typedef typename vector<Element>::iterator it_lineardata_t; typedef std::function<Element(void)> alloc_func; /** * Constructor that initializes matrix values using a zero allocator * * @param &allocZero lambda function for zero initialization. * @param &rows number of rows. * @param &rows number of columns. */ MatrixStrassen(alloc_func allocZero, size_t rows, size_t cols) : data(), rows(rows), cols(cols), allocZero(allocZero) { data.resize(rows); for (auto row = data.begin(); row != data.end(); ++row) { for (size_t col = 0; col < cols; ++col) { row->push_back(allocZero()); } } } /** * Constructor that initializes matrix values using a distribution generation allocator * * @param &allocZero lambda function for zero initialization (used for initializing derived matrix objects) * @param &rows number of rows. * @param &rows number of columns. * @param &allocGen lambda function for intialization using a distribution generator. */ MatrixStrassen(alloc_func allocZero, size_t rows, size_t cols, alloc_func allocGen); /** * Constructor of an empty matrix; SetSize must be called on this matrix to use it * Basically this exists to support deserializing * * @param &allocZero lambda function for zero initialization. 
*/ MatrixStrassen(alloc_func allocZero) : data(), rows(0), cols(0), allocZero(allocZero) {} void SetSize(size_t rows, size_t cols) { if( this->rows != 0 || this->cols != 0 ) throw std::logic_error("You cannot SetSize on a non-empty matrix"); this->rows = rows; this->cols = cols; data.resize(rows); for (auto row = data.begin(); row != data.end(); ++row) { for (size_t col = 0; col < cols; ++col) { row->push_back(allocZero()); } } } /** * Copy constructor * * @param &other the matrix object to be copied */ MatrixStrassen(const MatrixStrassen<Element>& other) : data(), rows(other.rows), cols(other.cols), allocZero(other.allocZero) { deepCopyData(other.data); } /** * Assignment operator * * @param &other the matrix object whose values are to be copied * @return the resulting matrix */ inline MatrixStrassen<Element>& operator=(const MatrixStrassen<Element>& other); /** * In-place change of the current matrix to a matrix of all ones * * @return the resulting matrix */ inline MatrixStrassen<Element>& Ones(); /** * Fill matrix using the same element * * @param &val the element the matrix is filled by * * @return the resulting matrix */ inline MatrixStrassen<Element>& Fill(const Element &val); /** * In-place change of the current matrix to Identity matrix * * @return the resulting matrix */ inline MatrixStrassen<Element>& Identity(); /** * Sets the first row to be powers of two * * @return the resulting matrix */ inline MatrixStrassen<Element> GadgetVector(int32_t base = 2) const; /** * Computes the infinity norm * * @return the norm in double format */ inline double Norm() const; /** * Operator for matrix multiplication * * @param &other the multiplier matrix * @return the result of multiplication */ inline MatrixStrassen<Element> operator*(MatrixStrassen<Element> const& other) const { return Mult(other); } /** * Multiplication of matrix by a scalar * * @param &other the multiplier element * @return the result of multiplication */ inline MatrixStrassen<Element> 
ScalarMult(Element const& other) const { MatrixStrassen<Element> result(*this); #pragma omp parallel for for (int32_t col = 0; col < result.cols; ++col) { for (int32_t row = 0; row < result.rows; ++row) { *result.data[row][col] = *result.data[row][col] * other; } } return result; } /** * Operator for scalar multiplication * * @param &other the multiplier element * @return the result of multiplication */ inline MatrixStrassen<Element> operator*(Element const& other) const { return ScalarMult(other); } /** * Equality check * * @param &other the matrix object to compare to * @return the boolean result */ inline bool Equal(MatrixStrassen<Element> const& other) const { if (rows != other.rows || cols != other.cols) { return false; } for (size_t i = 0; i < rows; ++i) { for (size_t j = 0; j < cols; ++j) { if (data[i][j] != other.data[i][j]) { return false; } } } return true; } /** * Operator for equality check * * @param &other the matrix object to compare to * @return the boolean result */ inline bool operator==(MatrixStrassen<Element> const& other) const { return Equal(other); } /** * Operator for non-equality check * * @param &other the matrix object to compare to * @return the boolean result */ inline bool operator!=(MatrixStrassen<Element> const& other) const { return !Equal(other); } /** * Get property to access the data as a vector of vectors * * @return the data as vector of vectors */ const data_t& GetData() const { return data; } /** * Get property to access the number of rows in the matrix * * @return the number of rows */ size_t GetRows() const { return rows; } /** * Get property to access the number of columns in the matrix * * @return the number of columns */ size_t GetCols() const { return cols; } /** * Get property to access the zero allocator for the matrix * * @return the lambda function corresponding to the element zero allocator */ alloc_func GetAllocator() const { return allocZero; } /** * Sets the evaluation or coefficient representation for all ring 
elements that support the SetFormat method * * @param &format the enum value corresponding to coefficient or evaluation representation */ void SetFormat(Format format); /** * MatrixStrassen addition * * @param &other the matrix to be added * @return the resulting matrix */ inline MatrixStrassen<Element> Add(MatrixStrassen<Element> const& other) const { if (rows != other.rows || cols != other.cols) { throw invalid_argument("Addition operands have incompatible dimensions"); } MatrixStrassen<Element> result(*this); #pragma omp parallel for for (int32_t j = 0; j < cols; ++j) { for (int32_t i = 0; i < rows; ++i) { *result.data[i][j] += *other.data[i][j]; } } return result; } /** * Operator for matrix addition * * @param &other the matrix to be added * @return the resulting matrix */ inline MatrixStrassen<Element> operator+(MatrixStrassen<Element> const& other) const { return this->Add(other); } /** * Operator for in-place addition * * @param &other the matrix to be added * @return the resulting matrix (same object) */ inline MatrixStrassen<Element>& operator+=(MatrixStrassen<Element> const& other); /** * MatrixStrassen substraction * * @param &other the matrix to be substracted * @return the resulting matrix */ inline MatrixStrassen<Element> Sub(MatrixStrassen<Element> const& other) const { if (rows != other.rows || cols != other.cols) { throw invalid_argument("Subtraction operands have incompatible dimensions"); } MatrixStrassen<Element> result(allocZero, rows, other.cols); #pragma omp parallel for for (int32_t j = 0; j < cols; ++j) { for (int32_t i = 0; i < rows; ++i) { *result.data[i][j] = *data[i][j] - *other.data[i][j]; } } return result; } /** * Operator for matrix substraction * * @param &other the matrix to be substracted * @return the resulting matrix */ inline MatrixStrassen<Element> operator-(MatrixStrassen<Element> const& other) const { return this->Sub(other); } /** * Operator for in-place matrix substraction * * @param &other the matrix to be substracted * 
@return the resulting matrix (same object) */ inline MatrixStrassen<Element>& operator-=(MatrixStrassen<Element> const& other); /** * MatrixStrassen transposition * * @return the resulting matrix */ inline MatrixStrassen<Element> Transpose() const; // YSP The signature of this method needs to be changed in the future /** * MatrixStrassen determinant - found using Laplace formula with complexity O(d!), where d is the dimension * * @param *result where the result is stored */ inline void Determinant(Element *result) const; /** * Cofactor matrix - the matrix of determinants of the minors A_{ij} multiplied by -1^{i+j} * * @return the cofactor matrix for the given matrix */ inline MatrixStrassen<Element> CofactorMatrixStrassen() const; /** * Add rows to bottom of the matrix * * @param &other the matrix to be added to the bottom of current matrix * @return the resulting matrix */ inline MatrixStrassen<Element>& VStack(MatrixStrassen<Element> const& other); /** * Add columns the right of the matrix * * @param &other the matrix to be added to the right of current matrix * @return the resulting matrix */ inline MatrixStrassen<Element>& HStack(MatrixStrassen<Element> const& other); /** * MatrixStrassen indexing operator - writeable instance of the element * * @param &row row index * @param &col column index * @return the element at the index */ inline Element& operator()(size_t row, size_t col) { return data[row][col]; } /** * MatrixStrassen indexing operator - read-only instance of the element * * @param &row row index * @param &col column index * @return the element at the index */ inline Element const& operator()(size_t row, size_t col) const { return data[row][col]; } /** * MatrixStrassen row extractor * * @param &row row index * @return the row at the index */ inline MatrixStrassen<Element> ExtractRow(size_t row) const { MatrixStrassen<Element> result(this->allocZero,1,this->cols); int i = 0; for (auto elem = this->GetData()[row].begin(); elem != 
    this->GetData()[row].end(); ++elem) {
            result(0,i) = **elem;
            i++;
        }
        return result;
        //return *this;
    }

    /**
    * Call switch format for each (ring) element
    *
    */
    inline void SwitchFormat();

    /**
    * MatrixStrassen multiplication
    *
    * @param &other the multiplier matrix
    * @param nrec recursion-depth control for the Strassen recursion (default 0)
    * @param pad padding control (default -1; exact semantics defined in the implementation — confirm)
    * @return the result of multiplication
    */
    MatrixStrassen<Element> Mult(const MatrixStrassen<Element>& other, int nrec=0, int pad = -1) const;

    /*
    * Multiply the matrix by a vector whose elements are all 1's. This causes the elements of each
    * row of the matrix to be added and placed into the corresponding position in the output vector.
    */
    MatrixStrassen<Element> MultByUnityVector() const;

    /*
    * Multiply the matrix by a vector of random 1's and 0's, which is the same as adding select
    * elements in each row together.
    * Return a vector that is a rows x 1 matrix.
    */
    MatrixStrassen<Element> MultByRandomVector(std::vector<int> ranvec) const;

private:
    // Shape/layout descriptor passed through the *CAPS multiplication helpers below.
    struct MatDescriptor {
        int lda;
        int nrec;
        int nproc;
        int nprocr;
        int nprocc;
        int nproc_summa;
        int bs;
    };

    const int DESC_SIZE = 7; // number of ints that make up a MatDescriptor

    const int rank=0, base=0;
    mutable data_t data;               // element storage; entries are held behind pointers
    size_t rows;
    mutable int rowpad = 0;            // presumably row padding for the recursion — confirm
    size_t cols;
    mutable int colpad = 0;            // presumably column padding for the recursion — confirm
    alloc_func allocZero;              // allocator producing zero-valued elements
    mutable char *pattern = NULL;
    mutable int numAdd = 0;            // operation counters
    mutable int numMult = 0;
    mutable int numSub = 0;
    mutable MatDescriptor desc;
    mutable Element zeroUniquePtr = allocZero();
    mutable int NUM_THREADS = 1;

    // Internal Strassen multiplication over linearized data.
    void multiplyInternalCAPS( it_lineardata_t A, it_lineardata_t B, it_lineardata_t C, MatDescriptor desc, it_lineardata_t work ) const;
    void strassenDFSCAPS( it_lineardata_t A, it_lineardata_t B, it_lineardata_t C, MatDescriptor desc, it_lineardata_t workPassThrough ) const;
    void block_multiplyCAPS( it_lineardata_t A, it_lineardata_t B, it_lineardata_t C, MatDescriptor d, it_lineardata_t workPassThrough ) const;
    // Conversion between the 2-D element storage and the linearized layout.
    void LinearizeDataCAPS(lineardata_t *lineardataPtr) const;
    void UnlinearizeDataCAPS(lineardata_t *lineardataPtr) const;
    int getRank() const;
    void
    verifyDescriptor( MatDescriptor desc );
    long long numEntriesPerProc( MatDescriptor desc ) const;

    //deep copy of data - used for copy constructor
    void deepCopyData(data_t const& src);

    void getData(const data_t &Adata, const data_t &Bdata, const data_t &Cdata, int row, int inner, int col) const;

    // Element-wise add/subtract helpers over linearized data used by the
    // Strassen (*CAPS) multiplication path.
    void smartSubtractionCAPS(it_lineardata_t result, it_lineardata_t A, it_lineardata_t B) const;
    void smartAdditionCAPS(it_lineardata_t result, it_lineardata_t A, it_lineardata_t B) const;
    void addMatricesCAPS( int numEntries, it_lineardata_t C, it_lineardata_t A, it_lineardata_t B ) const;
    void addSubMatricesCAPS(int numEntries, it_lineardata_t T1, it_lineardata_t S11, it_lineardata_t S12,
                            it_lineardata_t T2, it_lineardata_t S21, it_lineardata_t S22 ) const;
    void subMatricesCAPS( int numEntries, it_lineardata_t C, it_lineardata_t A, it_lineardata_t B ) const;
    void tripleAddMatricesCAPS(int numEntries, it_lineardata_t T1, it_lineardata_t S11, it_lineardata_t S12,
                               it_lineardata_t T2, it_lineardata_t S21, it_lineardata_t S22,
                               it_lineardata_t T3, it_lineardata_t S31, it_lineardata_t S32) const;
    void tripleSubMatricesCAPS(int numEntries, it_lineardata_t T1, it_lineardata_t S11, it_lineardata_t S12,
                               it_lineardata_t T2, it_lineardata_t S21, it_lineardata_t S22,
                               it_lineardata_t T3, it_lineardata_t S31, it_lineardata_t S32) const ;

    // Distribution/collection of linearized data between one process and the group.
    void distributeFrom1ProcCAPS( MatDescriptor desc, it_lineardata_t O, it_lineardata_t I ) const;
    void collectTo1ProcCAPS( MatDescriptor desc, it_lineardata_t O, it_lineardata_t I ) const;
    void sendBlockCAPS( int rank, int target, it_lineardata_t O, int bs, int source, it_lineardata_t I, int ldi ) const;
    void receiveBlockCAPS( int rank, int target, it_lineardata_t O, int bs, int source, it_lineardata_t I, int ldo ) const;
    void distributeFrom1ProcRecCAPS( MatDescriptor desc, it_lineardata_t O, it_lineardata_t I, int ldi ) const;
    void collectTo1ProcRecCAPS( MatDescriptor desc, it_lineardata_t O, it_lineardata_t I, int ldo ) const;
};

/**
* Operator for scalar
multiplication of matrix
*
* @param &e element
* @param &M matrix
* @return the resulting matrix
*/
template<class Element>
inline MatrixStrassen<Element> operator*(Element const& e, MatrixStrassen<Element> const& M) {
    return M.ScalarMult(e);
}

/**
* Generates a matrix of rotations. See pages 7-8 of https://eprint.iacr.org/2013/297
*
* @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
* @return the resulting matrix of big binary integers
*/
inline MatrixStrassen<BigInteger> Rotate(MatrixStrassen<Poly> const& inMat);

/**
* Each element becomes a square matrix with columns of that element's
* rotations in coefficient form. See pages 7-8 of https://eprint.iacr.org/2013/297
*
* @param &inMat the matrix of power-of-2 cyclotomic ring elements to be rotated
* @return the resulting matrix of big binary integers
*/
inline MatrixStrassen<BigVector> RotateVecResult(MatrixStrassen<Poly> const& inMat);

/**
* Stream output operator
*
* @param &os stream
* @param &m matrix to be outputted
* @return the chained stream
*/
template<class Element>
inline std::ostream& operator<<(std::ostream& os, const MatrixStrassen<Element>& m);

/**
* Gives the Cholesky decomposition of the input matrix.
* The assumption is that covariance matrix does not have large coefficients because it is formed by
* discrete gaussians e and s; this implies int32_t can be used
* This algorithm can be further improved - see the Darmstadt paper section 4.4
* http://eprint.iacr.org/2013/297.pdf
*
* @param &input the matrix for which the Cholesky decomposition is to be computed
* @return the resulting matrix of floating-point numbers
*/
inline MatrixStrassen<double> Cholesky(const MatrixStrassen<int32_t> &input);

/**
* Convert a matrix of integers from BigInteger to int32_t
* Convert from Z_q to [-q/2, q/2]
*
* @param &input the input matrix
* @param &modulus the ring modulus
* @return the resulting matrix of int32_t
*/
inline MatrixStrassen<int32_t> ConvertToInt32(const MatrixStrassen<BigInteger> &input, const BigInteger& modulus);

/**
* Convert a matrix of BigVector to int32_t
* Convert from Z_q to [-q/2, q/2]
*
* @param &input the input matrix
* @param &modulus the ring modulus
* @return the resulting matrix of int32_t
*/
inline MatrixStrassen<int32_t> ConvertToInt32(const MatrixStrassen<BigVector> &input, const BigInteger& modulus);

/**
* Split a vector of int32_t into a vector of ring elements with ring dimension n
*
* @param &other the input matrix
* @param &n the ring dimension
* @param &params Poly element params
* @return the resulting matrix of Poly
*/
inline MatrixStrassen<Poly> SplitInt32IntoPolyElements(MatrixStrassen<int32_t> const& other, size_t n, const shared_ptr<ILParams> params);

/**
* Another method for splitting a vector of int32_t into a vector of ring elements with ring dimension n
*
* @param &other the input matrix
* @param &n the ring dimension
* @param &params Poly element params
* @return the resulting matrix of Poly
*/
inline MatrixStrassen<Poly> SplitInt32AltIntoPolyElements(MatrixStrassen<int32_t> const& other, size_t n, const shared_ptr<ILParams> params);

} // closes the namespace opened earlier in this header

#endif // LBCRYPTO_MATH_MATRIXSTRASSEN_H
par_add_cycle.c
/*BHEADER**********************************************************************
 * Copyright (c) 2017, Lawrence Livermore National Security, LLC.
 * Produced at the Lawrence Livermore National Laboratory.
 * Written by Ulrike Yang (yang11@llnl.gov) et al. CODE-LLNL-738-322.
 * This file is part of AMG. See files README and COPYRIGHT for details.
 *
 * AMG is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License (as published by the Free
 * Software Foundation) version 2.1 dated February 1999.
 *
 * This software is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTIBILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the terms and conditions of the
 * GNU General Public License for more details.
 *
 ***********************************************************************EHEADER*/

/******************************************************************************
 *
 * ParAMG cycling routine
 *
 *****************************************************************************/

#include "_hypre_parcsr_ls.h"
#include "par_amg.h"

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCycle
 *--------------------------------------------------------------------------*/

/* Performs one V-cycle of the additive AMG variants on the data in amg_vdata.
   Levels outside [addlvl, add_end] are handled multiplicatively; levels inside
   are handled additively through the Lambda/Atilde operators and the composite
   vectors Xtilde/Rtilde. */
HYPRE_Int
hypre_BoomerAMGAdditiveCycle( void *amg_vdata)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;

   /* Data Structure variables */
   hypre_ParCSRMatrix **A_array;
   hypre_ParCSRMatrix **P_array;
   hypre_ParCSRMatrix **R_array;
   hypre_ParCSRMatrix *Lambda;
   hypre_ParCSRMatrix *Atilde;
   hypre_ParVector **F_array;
   hypre_ParVector **U_array;
   hypre_ParVector *Vtemp;
   hypre_ParVector *Ztemp;
   hypre_ParVector *Xtilde, *Rtilde;
   HYPRE_Int **CF_marker_array;
   HYPRE_Int num_levels;
   HYPRE_Int addlvl, add_end;
   HYPRE_Int additive;
   HYPRE_Int mult_additive;
   HYPRE_Int simple;
   HYPRE_Int add_last_lvl;
   HYPRE_Int i, j, num_rows;
   HYPRE_Int n_global;
   HYPRE_Int rlx_order;

   /* Local
   variables */
   HYPRE_Int Solve_err_flag = 0;
   HYPRE_Int level;
   HYPRE_Int coarse_grid;
   HYPRE_Int fine_grid;
   HYPRE_Int rlx_down;
   HYPRE_Int rlx_up;
   HYPRE_Int rlx_coarse;
   HYPRE_Int *grid_relax_type;
   HYPRE_Int *num_grid_sweeps;
   HYPRE_Real **l1_norms;
   HYPRE_Real alpha, beta;
   HYPRE_Real *u_data;
   HYPRE_Real *v_data;
   HYPRE_Real *l1_norms_lvl;
   HYPRE_Real *D_inv;
   HYPRE_Real *x_global;
   HYPRE_Real *r_global;
   HYPRE_Real *relax_weight;
   HYPRE_Real *omega;
#if 0
   HYPRE_Real *D_mat;
   HYPRE_Real *S_vec;
#endif

   /* Acquire data and allocate storage */
   A_array = hypre_ParAMGDataAArray(amg_data);
   F_array = hypre_ParAMGDataFArray(amg_data);
   U_array = hypre_ParAMGDataUArray(amg_data);
   P_array = hypre_ParAMGDataPArray(amg_data);
   R_array = hypre_ParAMGDataRArray(amg_data);
   CF_marker_array = hypre_ParAMGDataCFMarkerArray(amg_data);
   Vtemp = hypre_ParAMGDataVtemp(amg_data);
   Ztemp = hypre_ParAMGDataZtemp(amg_data);
   num_levels = hypre_ParAMGDataNumLevels(amg_data);
   additive = hypre_ParAMGDataAdditive(amg_data);
   mult_additive = hypre_ParAMGDataMultAdditive(amg_data);
   simple = hypre_ParAMGDataSimple(amg_data);
   add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
   grid_relax_type = hypre_ParAMGDataGridRelaxType(amg_data);
   Lambda = hypre_ParAMGDataLambda(amg_data);
   Atilde = hypre_ParAMGDataAtilde(amg_data);
   Xtilde = hypre_ParAMGDataXtilde(amg_data);
   Rtilde = hypre_ParAMGDataRtilde(amg_data);
   l1_norms = hypre_ParAMGDataL1Norms(amg_data);
   D_inv = hypre_ParAMGDataDinv(amg_data);
   relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
   omega = hypre_ParAMGDataOmega(amg_data);
   rlx_order = hypre_ParAMGDataRelaxOrder(amg_data);
   num_grid_sweeps = hypre_ParAMGDataNumGridSweeps(amg_data);

   /* Initialize */
   /* First additive level: the largest of the three additive-variant settings
      (presumably the variants not in use are negative — confirm). */
   addlvl = hypre_max(additive, mult_additive);
   addlvl = hypre_max(addlvl, simple);
   if (add_last_lvl == -1 ) add_end = num_levels-1;
   else add_end = add_last_lvl;
   Solve_err_flag = 0;

   /*---------------------------------------------------------------------
    * Main loop of cycling --- multiplicative version --- V-cycle
    *--------------------------------------------------------------------*/

   /* down cycle */
   rlx_down = grid_relax_type[1];
   rlx_up = grid_relax_type[2];
   rlx_coarse = grid_relax_type[3];
   for (level = 0; level < num_levels-1; level++)
   {
      fine_grid = level;
      coarse_grid = level + 1;

      u_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[fine_grid]));
      v_data = hypre_VectorData(hypre_ParVectorLocalVector(Vtemp));
      l1_norms_lvl = l1_norms[level];

      hypre_ParVectorSetConstantValues(U_array[coarse_grid], 0.0);

      if (level < addlvl || level > add_end) /* multiplicative version */
      {
         /* smoothing step */
         if (rlx_down == 0)
         {
            /* Jacobi-style update scaled by the diagonal of A:
               u is overwritten with relax_weight * f / a_ii each sweep
               (Vtemp holds a copy of f). */
            HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            HYPRE_Int *A_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            for (j=0; j < num_grid_sweeps[1]; j++)
            {
               hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
               for (i = 0; i < num_rows; i++)
                  u_data[i] = relax_weight[level]*v_data[i] / A_data[A_i[i]];
            }
         }
         else if (rlx_down != 18)
         {
            /*hypre_BoomerAMGRelax(A_array[fine_grid],F_array[fine_grid],NULL,rlx_down,0,*/
            for (j=0; j < num_grid_sweeps[1]; j++)
            {
               hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
                                      CF_marker_array[fine_grid], rlx_down,rlx_order,1,
                                      relax_weight[fine_grid], omega[fine_grid],
                                      l1_norms[level], U_array[fine_grid], Vtemp, Ztemp);
               hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
            }
         }
         else
         {
            /* rlx_down == 18: u += f ./ l1_norms (Vtemp is re-copied from f
               each sweep; no residual recomputation between sweeps). */
            num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            for (j=0; j < num_grid_sweeps[1]; j++)
            {
               hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
               for (i = 0; i < num_rows; i++)
                  u_data[i] += v_data[i] / l1_norms_lvl[i];
            }
         }

         /* residual: Vtemp = f - A*u (restricted to the coarse grid just below) */
         alpha = -1.0;
         beta = 1.0;
         hypre_ParCSRMatrixMatvec(alpha, A_array[fine_grid], U_array[fine_grid],
                                  beta, Vtemp);
         alpha =
         1.0;
         beta = 0.0;
         hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp,
                                   beta,F_array[coarse_grid]);
      }
      else /* additive version */
      {
         hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
         if (level == 0) /* compute residual */
         {
            hypre_ParVectorCopy(Vtemp, Rtilde);
            hypre_ParVectorCopy(U_array[fine_grid],Xtilde);
         }
         alpha = 1.0;
         beta = 0.0;
         hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp,
                                   beta,F_array[coarse_grid]);
      }
   }

   /* additive smoothing and solve coarse grid */
   if (addlvl < num_levels)
   {
      if (simple > -1)
      {
         /* "simple" variant: one diagonally scaled correction x += D_inv .* r */
         x_global = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde));
         r_global = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde));
         n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Xtilde));
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < n_global; i++)
            x_global[i] += D_inv[i]*r_global[i];
      }
      else
      {
         if (num_grid_sweeps[1] > 1)
         {
            /* When more than one sweep is requested, first transform
               Rtilde <- 2*Rtilde - Atilde*(Lambda*Rtilde) via the temporary
               Tmptilde, then apply the Lambda correction below. */
            n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Rtilde));
            hypre_ParVector *Tmptilde = hypre_CTAlloc(hypre_ParVector, 1);
            hypre_Vector *Tmptilde_local = hypre_SeqVectorCreate(n_global);
            hypre_SeqVectorInitialize(Tmptilde_local);
            hypre_ParVectorLocalVector(Tmptilde) = Tmptilde_local;
            hypre_ParVectorOwnsData(Tmptilde) = 1;
            hypre_ParCSRMatrixMatvec(1.0, Lambda, Rtilde, 0.0, Tmptilde);
            hypre_ParVectorScale(2.0,Rtilde);
            hypre_ParCSRMatrixMatvec(-1.0, Atilde, Tmptilde, 1.0, Rtilde);
            hypre_ParVectorDestroy(Tmptilde);
         }
         hypre_ParCSRMatrixMatvec(1.0, Lambda, Rtilde, 1.0, Xtilde);
      }
      if (addlvl == 0) hypre_ParVectorCopy(Xtilde, U_array[0]);
   }

   /* relax on the coarsest grid when the additive range ends above it */
   if (add_end < num_levels -1)
   {
      fine_grid = num_levels -1;
      for (j=0; j < num_grid_sweeps[3]; j++)
         if (rlx_coarse == 18)
            hypre_ParCSRRelax(A_array[fine_grid], F_array[fine_grid],
                              1, 1, l1_norms[fine_grid],
                              1.0, 1.0 ,0,0,0,0,
                              U_array[fine_grid], Vtemp, Ztemp);
         else
            hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
                                   NULL, rlx_coarse,0,0,
                                   relax_weight[fine_grid], omega[fine_grid],
                                   l1_norms[fine_grid], U_array[fine_grid], Vtemp, Ztemp);
   }
   /* up cycle */
   for (level = num_levels-1; level > 0; level--)
   {
      fine_grid = level - 1;
      coarse_grid = level;

      if (level <= addlvl || level > add_end+1) /* multiplicative version */
      {
         /* interpolate the coarse correction: u_fine += P * u_coarse */
         alpha = 1.0;
         beta = 1.0;
         hypre_ParCSRMatrixMatvec(alpha, P_array[fine_grid], U_array[coarse_grid],
                                  beta, U_array[fine_grid]);
         if (rlx_up != 18)
            /*hypre_BoomerAMGRelax(A_array[fine_grid],F_array[fine_grid],NULL,rlx_up,0,*/
            for (j=0; j < num_grid_sweeps[2]; j++)
               hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
                                      CF_marker_array[fine_grid],
                                      rlx_up,rlx_order,2,
                                      relax_weight[fine_grid], omega[fine_grid],
                                      l1_norms[fine_grid], U_array[fine_grid], Vtemp, Ztemp);
         else if (rlx_order)
         {
            /* ordered l1-Jacobi over the two CF point classes (-1 then 1) */
            HYPRE_Int loc_relax_points[2];
            loc_relax_points[0] = -1;
            loc_relax_points[1] = 1;
            for (j=0; j < num_grid_sweeps[2]; j++)
               for (i=0; i < 2; i++)
                  hypre_ParCSRRelax_L1_Jacobi(A_array[fine_grid],F_array[fine_grid],
                                              CF_marker_array[fine_grid],
                                              loc_relax_points[i],
                                              1.0, l1_norms[fine_grid],
                                              U_array[fine_grid], Vtemp);
         }
         else
            for (j=0; j < num_grid_sweeps[2]; j++)
               hypre_ParCSRRelax(A_array[fine_grid], F_array[fine_grid],
                                 1, 1, l1_norms[fine_grid],
                                 1.0, 1.0 ,0,0,0,0,
                                 U_array[fine_grid], Vtemp, Ztemp);
      }
      else /* additive version */
      {
         /* additive levels only interpolate; their smoothing was applied
            globally through Lambda above */
         alpha = 1.0;
         beta = 1.0;
         hypre_ParCSRMatrixMatvec(alpha, P_array[fine_grid], U_array[coarse_grid],
                                  beta, U_array[fine_grid]);
      }
   }

   return(Solve_err_flag);
}

/* Builds the combined smoother operator Lambda (and, when multiple sweeps are
   requested, Atilde) spanning all additive levels, together with the composite
   vectors Xtilde/Rtilde used by hypre_BoomerAMGAdditiveCycle. */
HYPRE_Int hypre_CreateLambda(void *amg_vdata)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;

   /* Data Structure variables */
   MPI_Comm comm;
   hypre_ParCSRMatrix **A_array;
   hypre_ParVector **F_array;
   hypre_ParVector **U_array;
   hypre_ParCSRMatrix *A_tmp;
   hypre_ParCSRMatrix *Lambda;
   hypre_CSRMatrix *L_diag;
   hypre_CSRMatrix *L_offd;
   hypre_ParCSRMatrix *Atilde;
   hypre_CSRMatrix *Atilde_diag;
   hypre_CSRMatrix *Atilde_offd;
   HYPRE_Real *Atilde_diag_data;
   HYPRE_Real *Atilde_offd_data;
   hypre_CSRMatrix *A_tmp_diag;
   hypre_CSRMatrix *A_tmp_offd;
   hypre_ParVector *Xtilde;
   hypre_ParVector *Rtilde;
   hypre_Vector *Xtilde_local;
   hypre_Vector
*Rtilde_local; hypre_ParCSRCommPkg *comm_pkg; hypre_ParCSRCommPkg *L_comm_pkg = NULL; hypre_ParCSRCommHandle *comm_handle; HYPRE_Real *L_diag_data; HYPRE_Real *L_offd_data; HYPRE_Real *buf_data = NULL; HYPRE_Real *tmp_data; HYPRE_Real *x_data; HYPRE_Real *r_data; HYPRE_Real *l1_norms; HYPRE_Real *A_tmp_diag_data; HYPRE_Real *A_tmp_offd_data; HYPRE_Real *D_data = NULL; HYPRE_Real *D_data_offd = NULL; HYPRE_Int *L_diag_i; HYPRE_Int *L_diag_j; HYPRE_Int *L_offd_i; HYPRE_Int *L_offd_j; HYPRE_Int *Atilde_diag_i; HYPRE_Int *Atilde_diag_j; HYPRE_Int *Atilde_offd_i; HYPRE_Int *Atilde_offd_j; HYPRE_Int *A_tmp_diag_i; HYPRE_Int *A_tmp_offd_i; HYPRE_Int *A_tmp_diag_j; HYPRE_Int *A_tmp_offd_j; HYPRE_Int *L_recv_ptr = NULL; HYPRE_Int *L_send_ptr = NULL; HYPRE_Int *L_recv_procs = NULL; HYPRE_Int *L_send_procs = NULL; HYPRE_Int *L_send_map_elmts = NULL; HYPRE_Int *recv_procs; HYPRE_Int *send_procs; HYPRE_Int *send_map_elmts; HYPRE_Int *send_map_starts; HYPRE_Int *recv_vec_starts; HYPRE_Int *all_send_procs = NULL; HYPRE_Int *all_recv_procs = NULL; HYPRE_Int *remap = NULL; HYPRE_Int *level_start; HYPRE_Int addlvl; HYPRE_Int additive; HYPRE_Int mult_additive; HYPRE_Int num_levels; HYPRE_Int num_add_lvls; HYPRE_Int num_procs; HYPRE_Int num_sends, num_recvs; HYPRE_Int num_sends_L = 0; HYPRE_Int num_recvs_L = 0; HYPRE_Int send_data_L = 0; HYPRE_Int num_rows_L = 0; HYPRE_Int num_rows_tmp = 0; HYPRE_Int num_cols_offd_L = 0; HYPRE_Int num_cols_offd = 0; HYPRE_Int level, i, j, k; HYPRE_Int this_proc, cnt, cnt_diag, cnt_offd; HYPRE_Int A_cnt_diag, A_cnt_offd; HYPRE_Int cnt_recv, cnt_send, cnt_row, row_start; HYPRE_Int start_diag, start_offd, indx, cnt_map; HYPRE_Int start, j_indx, index, cnt_level; HYPRE_Int max_sends, max_recvs; HYPRE_Int ns; /* Local variables */ HYPRE_Int Solve_err_flag = 0; HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd; HYPRE_Real **l1_norms_ptr = NULL; /*HYPRE_Real *relax_weight = NULL; HYPRE_Int relax_type; */ HYPRE_Int add_rlx; HYPRE_Int add_last_lvl, 
add_end; HYPRE_Real add_rlx_wt; /* Acquire data and allocate storage */ A_array = hypre_ParAMGDataAArray(amg_data); F_array = hypre_ParAMGDataFArray(amg_data); U_array = hypre_ParAMGDataUArray(amg_data); additive = hypre_ParAMGDataAdditive(amg_data); mult_additive = hypre_ParAMGDataMultAdditive(amg_data); add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data); num_levels = hypre_ParAMGDataNumLevels(amg_data); /*relax_weight = hypre_ParAMGDataRelaxWeight(amg_data); relax_type = hypre_ParAMGDataGridRelaxType(amg_data)[1];*/ comm = hypre_ParCSRMatrixComm(A_array[0]); add_rlx = hypre_ParAMGDataAddRelaxType(amg_data); add_rlx_wt = hypre_ParAMGDataAddRelaxWt(amg_data); ns = hypre_ParAMGDataNumGridSweeps(amg_data)[1]; hypre_MPI_Comm_size(comm,&num_procs); l1_norms_ptr = hypre_ParAMGDataL1Norms(amg_data); addlvl = hypre_max(additive, mult_additive); if (add_last_lvl != -1) add_end = add_last_lvl+1; else add_end = num_levels; num_add_lvls = add_end+1-addlvl; level_start = hypre_CTAlloc(HYPRE_Int, num_add_lvls+1); send_data_L = 0; num_rows_L = 0; num_cols_offd_L = 0; num_nonzeros_diag = 0; num_nonzeros_offd = 0; level_start[0] = 0; cnt = 1; max_sends = 0; max_recvs = 0; for (i=addlvl; i < add_end; i++) { A_tmp = A_array[i]; A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp); A_tmp_offd = hypre_ParCSRMatrixOffd(A_tmp); A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag); A_tmp_offd_i = hypre_CSRMatrixI(A_tmp_offd); num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag); num_cols_offd = hypre_CSRMatrixNumCols(A_tmp_offd); num_rows_L += num_rows_tmp; level_start[cnt] = level_start[cnt-1] + num_rows_tmp; cnt++; num_cols_offd_L += num_cols_offd; num_nonzeros_diag += A_tmp_diag_i[num_rows_tmp]; num_nonzeros_offd += A_tmp_offd_i[num_rows_tmp]; comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); max_sends += num_sends; if (num_sends) send_data_L += hypre_ParCSRCommPkgSendMapStart(comm_pkg,num_sends); max_recvs += 
hypre_ParCSRCommPkgNumRecvs(comm_pkg); } } if (max_sends >= num_procs ||max_recvs >= num_procs) { max_sends = num_procs; max_recvs = num_procs; } if (max_sends) all_send_procs = hypre_CTAlloc(HYPRE_Int, max_sends); if (max_recvs) all_recv_procs = hypre_CTAlloc(HYPRE_Int, max_recvs); cnt_send = 0; cnt_recv = 0; if (max_sends || max_recvs) { if (max_sends < num_procs && max_recvs < num_procs) { for (i=addlvl; i < add_end; i++) { A_tmp = A_array[i]; comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); for (j = 0; j < num_sends; j++) all_send_procs[cnt_send++] = send_procs[j]; for (j = 0; j < num_recvs; j++) all_recv_procs[cnt_recv++] = recv_procs[j]; } } if (max_sends) { hypre_qsort0(all_send_procs, 0, max_sends-1); num_sends_L = 1; this_proc = all_send_procs[0]; for (i=1; i < max_sends; i++) { if (all_send_procs[i] > this_proc) { this_proc = all_send_procs[i]; all_send_procs[num_sends_L++] = this_proc; } } L_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L); for (j=0; j < num_sends_L; j++) L_send_procs[j] = all_send_procs[j]; hypre_TFree(all_send_procs); } if (max_recvs) { hypre_qsort0(all_recv_procs, 0, max_recvs-1); num_recvs_L = 1; this_proc = all_recv_procs[0]; for (i=1; i < max_recvs; i++) { if (all_recv_procs[i] > this_proc) { this_proc = all_recv_procs[i]; all_recv_procs[num_recvs_L++] = this_proc; } } L_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L); for (j=0; j < num_recvs_L; j++) L_recv_procs[j] = all_recv_procs[j]; hypre_TFree(all_recv_procs); } L_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs_L+1); L_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends_L+1); for (i=addlvl; i < add_end; i++) { A_tmp = A_array[i]; comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = 
hypre_ParCSRCommPkgNumRecvs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); } else { num_sends = 0; num_recvs = 0; } for (k = 0; k < num_sends; k++) { this_proc = hypre_BinarySearch(L_send_procs,send_procs[k],num_sends_L); L_send_ptr[this_proc+1] += send_map_starts[k+1]-send_map_starts[k]; } for (k = 0; k < num_recvs; k++) { this_proc = hypre_BinarySearch(L_recv_procs,recv_procs[k],num_recvs_L); L_recv_ptr[this_proc+1] += recv_vec_starts[k+1]-recv_vec_starts[k]; } } L_recv_ptr[0] = 0; for (i=1; i < num_recvs_L; i++) L_recv_ptr[i+1] += L_recv_ptr[i]; L_send_ptr[0] = 0; for (i=1; i < num_sends_L; i++) L_send_ptr[i+1] += L_send_ptr[i]; } else { num_recvs_L = 0; num_sends_L = 0; for (i=addlvl; i < add_end; i++) { A_tmp = A_array[i]; comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); for (j = 0; j < num_sends; j++) { this_proc = send_procs[j]; if (all_send_procs[this_proc] == 0) num_sends_L++; all_send_procs[this_proc] += send_map_starts[j+1]-send_map_starts[j]; } for (j = 0; j < num_recvs; j++) { this_proc = recv_procs[j]; if (all_recv_procs[this_proc] == 0) num_recvs_L++; all_recv_procs[this_proc] += recv_vec_starts[j+1]-recv_vec_starts[j]; } } } if (max_sends) { L_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L); L_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends_L+1); num_sends_L = 0; for (j=0; j < num_procs; j++) { this_proc = all_send_procs[j]; if (this_proc) { L_send_procs[num_sends_L++] = j; L_send_ptr[num_sends_L] = this_proc + 
L_send_ptr[num_sends_L-1]; } } } if (max_recvs) { L_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L); L_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs_L+1); num_recvs_L = 0; for (j=0; j < num_procs; j++) { this_proc = all_recv_procs[j]; if (this_proc) { L_recv_procs[num_recvs_L++] = j; L_recv_ptr[num_recvs_L] = this_proc + L_recv_ptr[num_recvs_L-1]; } } } } } if (max_sends) hypre_TFree(all_send_procs); if (max_recvs) hypre_TFree(all_recv_procs); L_diag = hypre_CSRMatrixCreate(num_rows_L, num_rows_L, num_nonzeros_diag); L_offd = hypre_CSRMatrixCreate(num_rows_L, num_cols_offd_L, num_nonzeros_offd); hypre_CSRMatrixInitialize(L_diag); hypre_CSRMatrixInitialize(L_offd); if (num_nonzeros_diag) { L_diag_data = hypre_CSRMatrixData(L_diag); L_diag_j = hypre_CSRMatrixJ(L_diag); } L_diag_i = hypre_CSRMatrixI(L_diag); if (num_nonzeros_offd) { L_offd_data = hypre_CSRMatrixData(L_offd); L_offd_j = hypre_CSRMatrixJ(L_offd); } L_offd_i = hypre_CSRMatrixI(L_offd); if (ns > 1) { Atilde_diag = hypre_CSRMatrixCreate(num_rows_L, num_rows_L, num_nonzeros_diag); Atilde_offd = hypre_CSRMatrixCreate(num_rows_L, num_cols_offd_L, num_nonzeros_offd); hypre_CSRMatrixInitialize(Atilde_diag); hypre_CSRMatrixInitialize(Atilde_offd); if (num_nonzeros_diag) { Atilde_diag_data = hypre_CSRMatrixData(Atilde_diag); Atilde_diag_j = hypre_CSRMatrixJ(Atilde_diag); } Atilde_diag_i = hypre_CSRMatrixI(Atilde_diag); if (num_nonzeros_offd) { Atilde_offd_data = hypre_CSRMatrixData(Atilde_offd); Atilde_offd_j = hypre_CSRMatrixJ(Atilde_offd); } Atilde_offd_i = hypre_CSRMatrixI(Atilde_offd); } if (num_rows_L) D_data = hypre_CTAlloc(HYPRE_Real,num_rows_L); if (send_data_L) { L_send_map_elmts = hypre_CTAlloc(HYPRE_Int, send_data_L); buf_data = hypre_CTAlloc(HYPRE_Real,send_data_L); } if (num_cols_offd_L) { D_data_offd = hypre_CTAlloc(HYPRE_Real,num_cols_offd_L); /*L_col_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_L);*/ remap = hypre_CTAlloc(HYPRE_Int, num_cols_offd_L); } Rtilde = 
hypre_CTAlloc(hypre_ParVector, 1); Rtilde_local = hypre_SeqVectorCreate(num_rows_L); hypre_SeqVectorInitialize(Rtilde_local); hypre_ParVectorLocalVector(Rtilde) = Rtilde_local; hypre_ParVectorOwnsData(Rtilde) = 1; Xtilde = hypre_CTAlloc(hypre_ParVector, 1); Xtilde_local = hypre_SeqVectorCreate(num_rows_L); hypre_SeqVectorInitialize(Xtilde_local); hypre_ParVectorLocalVector(Xtilde) = Xtilde_local; hypre_ParVectorOwnsData(Xtilde) = 1; x_data = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde)); r_data = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde)); cnt = 0; cnt_level = 0; cnt_diag = 0; cnt_offd = 0; cnt_row = 1; L_diag_i[0] = 0; L_offd_i[0] = 0; if (ns > 1) { A_cnt_diag = 0; A_cnt_offd = 0; Atilde_diag_i[0] = 0; Atilde_offd_i[0] = 0; } for (level=addlvl; level < add_end; level++) { row_start = level_start[cnt_level]; if (level != 0) { tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])); if (tmp_data) hypre_TFree(tmp_data); hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])) = &r_data[row_start]; hypre_VectorOwnsData(hypre_ParVectorLocalVector(F_array[level])) = 0; tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])); if (tmp_data) hypre_TFree(tmp_data); hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])) = &x_data[row_start]; hypre_VectorOwnsData(hypre_ParVectorLocalVector(U_array[level])) = 0; } cnt_level++; start_diag = L_diag_i[cnt_row-1]; start_offd = L_offd_i[cnt_row-1]; A_tmp = A_array[level]; A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp); A_tmp_offd = hypre_ParCSRMatrixOffd(A_tmp); comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag); A_tmp_offd_i = hypre_CSRMatrixI(A_tmp_offd); A_tmp_diag_j = hypre_CSRMatrixJ(A_tmp_diag); A_tmp_offd_j = hypre_CSRMatrixJ(A_tmp_offd); A_tmp_diag_data = hypre_CSRMatrixData(A_tmp_diag); A_tmp_offd_data = hypre_CSRMatrixData(A_tmp_offd); num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag); if (comm_pkg) { num_sends = 
hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); } else { num_sends = 0; num_recvs = 0; } for (i=0; i < num_sends; i++) { this_proc = hypre_BinarySearch(L_send_procs,send_procs[i],num_sends_L); indx = L_send_ptr[this_proc]; for (j=send_map_starts[i]; j < send_map_starts[i+1]; j++) { L_send_map_elmts[indx++] = row_start + send_map_elmts[j]; } L_send_ptr[this_proc] = indx; } cnt_map = 0; for (i = 0; i < num_recvs; i++) { this_proc = hypre_BinarySearch(L_recv_procs,recv_procs[i],num_recvs_L); indx = L_recv_ptr[this_proc]; for (j=recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++) { remap[cnt_map++] = indx++; } L_recv_ptr[this_proc] = indx; } /* Compute Lambda */ if (add_rlx == 0) { /*HYPRE_Real rlx_wt = relax_weight[level];*/ #ifdef HYPRE_USING_OPENMP #pragma omp for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_rows_tmp; i++) { D_data[i] = add_rlx_wt/A_tmp_diag_data[A_tmp_diag_i[i]]; L_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1]; L_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1]; } if (ns > 1) for (i=0; i < num_rows_tmp; i++) { Atilde_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1]; Atilde_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1]; } } else { l1_norms = l1_norms_ptr[level]; #ifdef HYPRE_USING_OPENMP #pragma omp for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_rows_tmp; i++) { D_data[i] = 1.0/l1_norms[i]; L_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1]; L_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1]; } if (ns > 1) for (i=0; i < num_rows_tmp; i++) { Atilde_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1]; Atilde_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1]; } } if (num_procs > 1) { 
index = 0; for (i=0; i < num_sends; i++) { start = send_map_starts[i]; for (j=start; j < send_map_starts[i+1]; j++) buf_data[index++] = D_data[send_map_elmts[j]]; } comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, buf_data, D_data_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } for (i = 0; i < num_rows_tmp; i++) { j_indx = A_tmp_diag_i[i]; if (ns > 1) { Atilde_diag_data[A_cnt_diag] = A_tmp_diag_data[j_indx]; Atilde_diag_j[A_cnt_diag++] = i+row_start; } L_diag_data[cnt_diag] = (2.0 - A_tmp_diag_data[j_indx]*D_data[i])*D_data[i]; L_diag_j[cnt_diag++] = i+row_start; for (j=A_tmp_diag_i[i]+1; j < A_tmp_diag_i[i+1]; j++) { j_indx = A_tmp_diag_j[j]; L_diag_data[cnt_diag] = (- A_tmp_diag_data[j]*D_data[j_indx])*D_data[i]; L_diag_j[cnt_diag++] = j_indx+row_start; } for (j=A_tmp_offd_i[i]; j < A_tmp_offd_i[i+1]; j++) { j_indx = A_tmp_offd_j[j]; L_offd_data[cnt_offd] = (- A_tmp_offd_data[j]*D_data_offd[j_indx])*D_data[i]; L_offd_j[cnt_offd++] = remap[j_indx]; } if (ns > 1) { for (j=A_tmp_diag_i[i]+1; j < A_tmp_diag_i[i+1]; j++) { j_indx = A_tmp_diag_j[j]; Atilde_diag_data[A_cnt_diag] = A_tmp_diag_data[j]; Atilde_diag_j[A_cnt_diag++] = j_indx+row_start; } for (j=A_tmp_offd_i[i]; j < A_tmp_offd_i[i+1]; j++) { j_indx = A_tmp_offd_j[j]; Atilde_offd_data[A_cnt_offd] = A_tmp_offd_data[j]; Atilde_offd_j[A_cnt_offd++] = remap[j_indx]; } } } cnt_row += num_rows_tmp; } if (L_send_ptr) { for (i=num_sends_L-1; i > 0; i--) L_send_ptr[i] = L_send_ptr[i-1]; L_send_ptr[0] = 0; } else L_send_ptr = hypre_CTAlloc(HYPRE_Int,1); if (L_recv_ptr) { for (i=num_recvs_L-1; i > 0; i--) L_recv_ptr[i] = L_recv_ptr[i-1]; L_recv_ptr[0] = 0; } else L_recv_ptr = hypre_CTAlloc(HYPRE_Int,1); L_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg,1); hypre_ParCSRCommPkgNumRecvs(L_comm_pkg) = num_recvs_L; hypre_ParCSRCommPkgNumSends(L_comm_pkg) = num_sends_L; hypre_ParCSRCommPkgRecvProcs(L_comm_pkg) = L_recv_procs; hypre_ParCSRCommPkgSendProcs(L_comm_pkg) = L_send_procs; 
hypre_ParCSRCommPkgRecvVecStarts(L_comm_pkg) = L_recv_ptr; hypre_ParCSRCommPkgSendMapStarts(L_comm_pkg) = L_send_ptr; hypre_ParCSRCommPkgSendMapElmts(L_comm_pkg) = L_send_map_elmts; hypre_ParCSRCommPkgComm(L_comm_pkg) = comm; Lambda = hypre_CTAlloc(hypre_ParCSRMatrix, 1); hypre_ParCSRMatrixDiag(Lambda) = L_diag; hypre_ParCSRMatrixOffd(Lambda) = L_offd; hypre_ParCSRMatrixCommPkg(Lambda) = L_comm_pkg; hypre_ParCSRMatrixComm(Lambda) = comm; hypre_ParCSRMatrixOwnsData(Lambda) = 1; if (ns > 1) { /*hypre_ParCSRCommPkg *A_comm_pkg = NULL; HYPRE_Int *A_recv_ptr = NULL; HYPRE_Int *A_send_ptr = NULL; HYPRE_Int *A_recv_procs = NULL; HYPRE_Int *A_send_procs = NULL; HYPRE_Int *A_send_map_elmts = NULL; A_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg,1); A_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs+1); A_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends+1); A_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L); A_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L); A_send_map_elmts = hypre_CTAlloc(HYPRE_Int, L_send_ptr[num_sends_L]); for (i=0; i<num_recvs_L+1; i++) A_recv_ptr[i] = L_recv_ptr[i]; for (i=0; i<num_sends_L+1; i++) A_send_ptr[i] = L_send_ptr[i]; for (i=0; i<num_recvs_L; i++) A_recv_procs[i] = L_recv_procs[i]; for (i=0; i<num_sends_L; i++) A_send_procs[i] = L_send_procs[i]; for (i=0; i < L_send_ptr[num_sends_L]; i++) A_send_map_elmts[i] = L_send_map_elmts[i]; hypre_ParCSRCommPkgNumRecvs(A_comm_pkg) = num_recvs_L; hypre_ParCSRCommPkgNumSends(A_comm_pkg) = num_sends_L; hypre_ParCSRCommPkgRecvProcs(A_comm_pkg) = A_recv_procs; hypre_ParCSRCommPkgSendProcs(A_comm_pkg) = A_send_procs; hypre_ParCSRCommPkgRecvVecStarts(A_comm_pkg) = A_recv_ptr; hypre_ParCSRCommPkgSendMapStarts(A_comm_pkg) = A_send_ptr; hypre_ParCSRCommPkgSendMapElmts(A_comm_pkg) = A_send_map_elmts; hypre_ParCSRCommPkgComm(A_comm_pkg) = comm; */ Atilde = hypre_CTAlloc(hypre_ParCSRMatrix, 1); hypre_ParCSRMatrixDiag(Atilde) = Atilde_diag; hypre_ParCSRMatrixOffd(Atilde) = Atilde_offd; hypre_ParCSRMatrixCommPkg(Atilde) 
= L_comm_pkg;   /* completes the assignment begun on the previous line:
                 * hypre_ParCSRMatrixCommPkg(Atilde) = L_comm_pkg;
                 * i.e. Atilde shares Lambda's communication package. */
      hypre_ParCSRMatrixComm(Atilde) = comm;
      hypre_ParCSRMatrixOwnsData(Atilde) = 1;
      hypre_ParAMGDataAtilde(amg_data) = Atilde;
   } /* end if (ns > 1) */

   /* Publish the assembled additive-cycle objects on the AMG data struct. */
   hypre_ParAMGDataLambda(amg_data) = Lambda;
   hypre_ParAMGDataRtilde(amg_data) = Rtilde;
   hypre_ParAMGDataXtilde(amg_data) = Xtilde;

   /* Release local scratch storage. */
   hypre_TFree(D_data_offd);
   hypre_TFree(D_data);
   if (num_procs > 1) hypre_TFree(buf_data);
   hypre_TFree(remap);
   /* NOTE(review): buf_data is freed a second time here (after the
    * conditional free above).  This is presumably harmless if hypre_TFree
    * NULLs its argument after freeing -- TODO confirm against the
    * hypre_TFree macro definition; otherwise this is a double free when
    * num_procs > 1. */
   hypre_TFree(buf_data);
   hypre_TFree(level_start);

   return Solve_err_flag;
}

/*--------------------------------------------------------------------------
 * hypre_CreateDinv
 *
 * Builds the per-row inverse-diagonal scaling vector D_inv used by the
 * simple additive AMG variant, covering every level in [addlvl, add_end):
 *
 *   - add_rlx == 0 : D_inv[row] = add_rlx_wt / a_ii   (weighted Jacobi;
 *                    the diagonal is taken from the first entry of each
 *                    CSR row, per hypre's diag-first convention --
 *                    TODO confirm for this matrix source)
 *   - otherwise    : D_inv[row] = 1 / l1_norm[row]    (l1-scaled variant)
 *
 * It also allocates the composite vectors Rtilde/Xtilde spanning all
 * participating levels and redirects each level's F_array/U_array local
 * data to alias slices of them (freeing the old data first and clearing
 * OwnsData so the slices are not freed twice).  Results are stored on
 * amg_data (Dinv, Rtilde, Xtilde).
 *
 * Parameter: amg_vdata - opaque pointer to the hypre_ParAMGData object.
 * Returns:   Solve_err_flag (always 0 in the visible code).
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_CreateDinv(void *amg_vdata)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;

   /* Data Structure variables */
   hypre_ParCSRMatrix **A_array;
   hypre_ParVector    **F_array;
   hypre_ParVector    **U_array;
   hypre_ParCSRMatrix *A_tmp;
   hypre_CSRMatrix    *A_tmp_diag;
   hypre_ParVector    *Xtilde;
   hypre_ParVector    *Rtilde;
   hypre_Vector       *Xtilde_local;
   hypre_Vector       *Rtilde_local;
   HYPRE_Real         *x_data;
   HYPRE_Real         *r_data;
   HYPRE_Real         *tmp_data;
   HYPRE_Real         *D_inv = NULL;
   /*HYPRE_Real *relax_weight = NULL; HYPRE_Real relax_type;*/
   HYPRE_Int           addlvl;        /* first level participating in the additive cycle */
   HYPRE_Int           num_levels;
   HYPRE_Int           num_rows_L;    /* total rows over all participating levels */
   HYPRE_Int           num_rows_tmp;
   HYPRE_Int           level, i;
   HYPRE_Int           add_rlx;       /* relaxation type selector (0 => weighted Jacobi) */
   HYPRE_Real          add_rlx_wt;
   HYPRE_Int           add_last_lvl, add_end;

   /* Local variables */
   HYPRE_Int           Solve_err_flag = 0;
   HYPRE_Real        **l1_norms_ptr = NULL;
   HYPRE_Real         *l1_norms;
   HYPRE_Int           l1_start;     /* offset of the current level in D_inv/x_data/r_data */

   /* Acquire data and allocate storage */
   A_array      = hypre_ParAMGDataAArray(amg_data);
   F_array      = hypre_ParAMGDataFArray(amg_data);
   U_array      = hypre_ParAMGDataUArray(amg_data);
   addlvl       = hypre_ParAMGDataSimple(amg_data);
   num_levels   = hypre_ParAMGDataNumLevels(amg_data);
   add_rlx_wt   = hypre_ParAMGDataAddRelaxWt(amg_data);
   add_rlx      = hypre_ParAMGDataAddRelaxType(amg_data);
   add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
   /*relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
   relax_type = hypre_ParAMGDataGridRelaxType(amg_data)[1];*/
   l1_norms_ptr = hypre_ParAMGDataL1Norms(amg_data);
   /* smooth_option = hypre_ParAMGDataSmoothOption(amg_data); */

   /* -1 means "use all remaining levels". */
   if (add_last_lvl == -1 ) add_end = num_levels;
   else add_end = add_last_lvl;

   /* Count total local rows across the participating levels. */
   num_rows_L = 0;
   for (i=addlvl; i < add_end; i++)
   {
      A_tmp = A_array[i];
      A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
      num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
      num_rows_L += num_rows_tmp;
   }

   /* Composite residual/solution vectors spanning all participating levels. */
   Rtilde = hypre_CTAlloc(hypre_ParVector, 1);
   Rtilde_local = hypre_SeqVectorCreate(num_rows_L);
   hypre_SeqVectorInitialize(Rtilde_local);
   hypre_ParVectorLocalVector(Rtilde) = Rtilde_local;
   hypre_ParVectorOwnsData(Rtilde) = 1;

   Xtilde = hypre_CTAlloc(hypre_ParVector, 1);
   Xtilde_local = hypre_SeqVectorCreate(num_rows_L);
   hypre_SeqVectorInitialize(Xtilde_local);
   hypre_ParVectorLocalVector(Xtilde) = Xtilde_local;
   hypre_ParVectorOwnsData(Xtilde) = 1;

   x_data = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde));
   r_data = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde));

   D_inv = hypre_CTAlloc(HYPRE_Real, num_rows_L);

   l1_start = 0;
   for (level=addlvl; level < add_end; level++)
   {
      if (level != 0)
      {
         /* Re-point this level's F/U vectors at slices of r_data/x_data.
          * The old data is freed and OwnsData cleared so the aliased
          * slices are not freed through the level vectors later. */
         tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(F_array[level]));
         if (tmp_data) hypre_TFree(tmp_data);
         hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])) = &r_data[l1_start];
         hypre_VectorOwnsData(hypre_ParVectorLocalVector(F_array[level])) = 0;
         tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[level]));
         if (tmp_data) hypre_TFree(tmp_data);
         hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])) = &x_data[l1_start];
         hypre_VectorOwnsData(hypre_ParVectorLocalVector(U_array[level])) = 0;
      }
      A_tmp = A_array[level];
      A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
      num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
      if (add_rlx == 0)
      {
         /*HYPRE_Real rlx_wt = relax_weight[level];*/
         HYPRE_Int *A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag);
         HYPRE_Real *A_tmp_diag_data = hypre_CSRMatrixData(A_tmp_diag);
         /* NOTE(review): orphaned "omp for" -- no enclosing "omp parallel"
          * is visible here; this runs serially unless the caller is inside
          * a parallel region.  TODO confirm intent. */
#ifdef HYPRE_USING_OPENMP
#pragma omp for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < num_rows_tmp; i++)
            D_inv[l1_start+i] = add_rlx_wt/A_tmp_diag_data[A_tmp_diag_i[i]];
      }
      else
      {
         l1_norms = l1_norms_ptr[level];
#ifdef HYPRE_USING_OPENMP
#pragma omp for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < num_rows_tmp; i++)
            D_inv[l1_start+i] = 1.0/l1_norms[i];
      }
      l1_start += num_rows_tmp;
   }

   /* Hand ownership of the new objects to the AMG data structure. */
   hypre_ParAMGDataDinv(amg_data) = D_inv;
   hypre_ParAMGDataRtilde(amg_data) = Rtilde;
   hypre_ParAMGDataXtilde(amg_data) = Xtilde;

   return Solve_err_flag;
}
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/LocInfoType.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> // HLSL Change Starts #include "llvm/Support/OacrIgnoreCond.h" // HLSL Change - all sema use is heavily language-dependant namespace hlsl { struct UnusualAnnotation; } // HLSL Change Ends 
namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; class InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class AttributeList; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class ExternalSemaSource; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class 
ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPClause; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; class CXXThisExpr; // HLSL Change namespace sema { class AccessedEntity; class BlockScopeInfo; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. 
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// Which kind of pointer declarator we saw.
  uint8_t PointerKind;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};

/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// A mapping from file IDs to the nullability information for each file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// A single-element cache based on the file ID.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  /// Returns a mutable reference to the nullability record for \p file,
  /// keeping the most recently accessed file's record in a one-entry cache.
  FileNullability &operator[](FileID file) {
    // Check the single-element cache.
    if (file == Cache.File)
      return Cache.Nullability;

    // It's not in the single-element cache; flush the cache if we have one.
    if (!Cache.File.isInvalid()) {
      Map[Cache.File] = Cache.Nullability;
    }

    // Pull this entry into the cache.  Note that DenseMap::operator[]
    // default-constructs an entry for a FileID not seen before, so a lookup
    // of a new file yields a default-initialized FileNullability.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};

/// Sema - This implements semantic analysis and AST building for C.
class Sema {
  // Non-copyable.
  Sema(const Sema &) = delete;
  void operator=(const Sema &) = delete;

  ///\brief Source of additional semantic information.
  ExternalSemaSource *ExternalSource;

  ///\brief Whether Sema has generated a multiplexer and has to delete it.
  bool isMultiplexExternalSource;

  static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);

  bool isVisibleSlow(const NamedDecl *D);

  bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                    const NamedDecl *New) {
    // We are about to link these. It is now safe to compute the linkage of
    // the new decl. If the new decl has external linkage, we will
    // link it with the hidden decl (which also has external linkage) and
    // it will keep having external linkage. If it has internal linkage, we
    // will not link it. Since it has no previous decls, it will remain
    // with internal linkage.
    if (getLangOpts().ModulesHideInternalLinkage)
      return isVisible(Old) || New->isExternallyVisible();
    return true;
  }

public:
  typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
  typedef OpaquePtr<TemplateName> TemplateTy;
  typedef OpaquePtr<QualType> TypeTy;

  OpenCLOptions OpenCLFeatures;
  FPOptions FPFeatures;

  const LangOptions &LangOpts;
  Preprocessor &PP;
  ASTContext &Context;
  ASTConsumer &Consumer;
  DiagnosticsEngine &Diags;
  SourceManager &SourceMgr;

  /// \brief Flag indicating whether or not to collect detailed statistics.
  bool CollectStats;

  /// \brief Code-completion consumer.
  CodeCompleteConsumer *CodeCompleter;

  /// CurContext - This is the current declaration context of parsing.
  DeclContext *CurContext;

  /// \brief Generally null except when we temporarily switch decl contexts,
  /// like in \see ActOnObjCTemporaryExitContainerContext.
  DeclContext *OriginalLexicalContext;

  /// VAListTagName - The declaration name corresponding to __va_list_tag.
  /// This is used as part of a hack to omit that class from ADL results.
  DeclarationName VAListTagName;

  /// PackContext - Manages the stack for \#pragma pack. An alignment
  /// of 0 indicates default alignment.
  void *PackContext; // Really a "PragmaPackStack*"

  bool MSStructPragmaOn; // True when \#pragma ms_struct on

  /// \brief Controls member pointer representation format under the MS ABI.
  LangOptions::PragmaMSPointersToMembersKind
      MSPointerToMemberRepresentationMethod;

  // HLSL Change Begin
  // The HLSL rewriter doesn't define a default matrix pack,
  // so we must preserve the lack of annotations to avoid changing semantics.
  bool HasDefaultMatrixPack = false;
  // Uses of #pragma pack_matrix change the default pack.
  bool DefaultMatrixPackRowMajor = false;
  // HLSL Change End.
enum PragmaVtorDispKind {
    PVDK_Push,  ///< #pragma vtordisp(push, mode)
    PVDK_Set,   ///< #pragma vtordisp(mode)
    PVDK_Pop,   ///< #pragma vtordisp(pop)
    PVDK_Reset  ///< #pragma vtordisp()
  };

  /// Operations that a push/pop-style Microsoft pragma can request
  /// (see PragmaStack::Act below).
  enum PragmaMsStackAction {
    PSK_Reset,    // #pragma ()
    PSK_Set,      // #pragma ("name")
    PSK_Push,     // #pragma (push[, id])
    PSK_Push_Set, // #pragma (push[, id], "name")
    PSK_Pop,      // #pragma (pop[, id])
    PSK_Pop_Set,  // #pragma (pop[, id], "name")
  };

  /// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft
  /// C++ ABI.  Possible values are 0, 1, and 2, which mean:
  ///
  /// 0: Suppress all vtordisps
  /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
  ///    structors
  /// 2: Always insert vtordisps to support RTTI on partially constructed
  ///    objects
  ///
  /// The stack always has at least one element in it.
  SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack;

  /// Stack of active SEH __finally scopes.  Can be empty.
  SmallVector<Scope*, 2> CurrentSEHFinally;

  /// \brief Source location for newly created implicit MSInheritanceAttrs
  SourceLocation ImplicitMSInheritanceAttrLoc;

  /// Generic stack for push/pop-style pragma state: holds the current value
  /// plus a stack of saved (optionally labeled) values.
  template<typename ValueType>
  struct PragmaStack {
    /// One saved value together with its optional label and the location of
    /// the pragma that pushed it.
    struct Slot {
      llvm::StringRef StackSlotLabel;
      ValueType Value;
      SourceLocation PragmaLocation;
      Slot(llvm::StringRef StackSlotLabel,
           ValueType Value,
           SourceLocation PragmaLocation)
        : StackSlotLabel(StackSlotLabel), Value(Value),
          PragmaLocation(PragmaLocation) {}
    };
    /// Apply \p Action (a PragmaMsStackAction) to the stack; defined
    /// out-of-line.
    void Act(SourceLocation PragmaLocation,
             PragmaMsStackAction Action,
             llvm::StringRef StackSlotLabel,
             ValueType Value);
    explicit PragmaStack(const ValueType &Value)
      : CurrentValue(Value) {}
    SmallVector<Slot, 2> Stack;
    ValueType CurrentValue;
    SourceLocation CurrentPragmaLocation;
  };
  // FIXME: We should serialize / deserialize these if they occur in a PCH (but
  // we shouldn't do so if they're in a module).
PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// \brief This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// \brief Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// ExprNeedsCleanups - True if the current evaluation context /// requires cleanups to be run at its conclusion. bool ExprNeedsCleanups; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// \brief Store a list of either DeclRefExprs or MemberExprs /// that contain a reference to a variable (constant) that may or may not /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue /// and discarded value conversions have been applied to all subexpressions /// of the enclosing full expression. This is cleared at the end of each /// full expression. llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; /// \brief Stack containing information about each of the nested /// function, block, and method scopes that are currently active. /// /// This array is never empty. 
Clients should ignore the first /// element, which is used to cache a single FunctionScopeInfo /// that's used to parse every top-level function. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType; /// \brief Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// \brief Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// \brief Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. 
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// \brief Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// \brief All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// \brief The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// \brief All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// \brief All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedExceptionSpecChecks; /// \brief All the members seen during a class definition which were both /// explicitly defaulted and had explicitly-specified exception /// specifications, along with the function type containing their /// user-specified exception specification. Those exception specifications /// were overridden with the default specifications, but we still need to /// check whether they are compatible with the default specification, and /// we can't do that until the nesting set of class definitions is complete. 
SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2> DelayedDefaultedMemberExceptionSpecs; typedef llvm::MapVector<const FunctionDecl *, LateParsedTemplate *> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// \brief Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// \brief The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. 
/// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// \brief RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated); } ~SynthesizedFunctionScope() { S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. 
may alias another /// identifier, declared or undeclared llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// \brief Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// \brief The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// \brief The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// \brief The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// \brief Caches identifiers/selectors for NSFoundation APIs. // std::unique_ptr<NSAPI> NSAPIObj; // HLSL Change /// \brief The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// \brief The declaration of the Objective-C NSValue class. 
ObjCInterfaceDecl *NSValueDecl; /// \brief Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// \brief Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// \brief The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// \brief The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// \brief Pointer to NSString type (NSString *). QualType NSStringPointer; /// \brief The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// \brief The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// \brief The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// \brief The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// \brief The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// \brief id<NSCopying> type. QualType QIDNSCopying; /// \brief will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// \brief counter for internal MS Asm label names. unsigned MSAsmLabelNameCounter; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// \brief Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. 
enum ExpressionEvaluationContext {
    /// \brief The current expression and its subexpressions occur within an
    /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
    /// \c sizeof, where the type of the expression may be significant but
    /// no code will be generated to evaluate the value of the expression at
    /// run time.
    Unevaluated,

    /// \brief The current expression occurs within an unevaluated
    /// operand that unconditionally permits abstract references to
    /// fields, such as a SIZE operator in MS-style inline assembly.
    UnevaluatedAbstract,

    /// \brief The current context is "potentially evaluated" in C++11 terms,
    /// but the expression is evaluated at compile-time (like the values of
    /// cases in a switch statement).
    ConstantEvaluated,

    /// \brief The current expression is potentially evaluated at run time,
    /// which means that code may be generated to evaluate the value of the
    /// expression at run time.
    PotentiallyEvaluated,

    /// \brief The current expression is potentially evaluated, but any
    /// declarations referenced inside that expression are only used if
    /// in fact the current expression is used.
    ///
    /// This value is used when parsing default function arguments, for which
    /// we would like to provide diagnostics (e.g., passing non-POD arguments
    /// through varargs) but do not want to mark declarations as "referenced"
    /// until the default argument is used.
    PotentiallyEvaluatedIfUsed
  };

  /// \brief Data structure used to record current or nested
  /// expression evaluation contexts.
  struct ExpressionEvaluationContextRecord {
    /// \brief The expression evaluation context.
    ExpressionEvaluationContext Context;

    /// \brief Whether the enclosing context needed a cleanup.
    bool ParentNeedsCleanups;

    /// \brief Whether we are in a decltype expression.
    bool IsDecltype;

    /// \brief The number of active cleanup objects when we entered
    /// this expression evaluation context.
    unsigned NumCleanupObjects;

    /// \brief The number of typos encountered during this expression evaluation
    /// context (i.e. the number of TypoExprs created).
    unsigned NumTypos;

    /// Expressions whose potential ODR-use was deferred when this context
    /// was entered (presumably restored when the context is popped --
    /// confirm against the corresponding push/pop code in Sema).
    llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs;

    /// \brief The lambdas that are present within this context, if it
    /// is indeed an unevaluated context.
    SmallVector<LambdaExpr *, 2> Lambdas;

    /// \brief The declaration that provides context for lambda expressions
    /// and block literals if the normal declaration context does not
    /// suffice, e.g., in a default function argument.
    Decl *ManglingContextDecl;

    /// \brief The context information used to mangle lambda expressions
    /// and block literals within this context.
    ///
    /// This mangling information is allocated lazily, since most contexts
    /// do not have lambda expressions or block literals.
    IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering;

    /// \brief If we are processing a decltype type, a set of call expressions
    /// for which we have deferred checking the completeness of the return type.
    SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

    /// \brief If we are processing a decltype type, a set of temporary binding
    /// expressions for which we have deferred checking the destructor.
    SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

    ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                      unsigned NumCleanupObjects,
                                      bool ParentNeedsCleanups,
                                      Decl *ManglingContextDecl,
                                      bool IsDecltype)
      : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
        IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
        NumTypos(0),
        ManglingContextDecl(ManglingContextDecl), MangleNumbering() { }

    /// \brief Retrieve the mangling numbering context, used to consistently
    /// number constructs like lambdas for mangling.
    MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);

    /// True for the two unevaluated-operand contexts (Unevaluated and
    /// UnevaluatedAbstract).
    bool isUnevaluated() const {
      return Context == Unevaluated || Context == UnevaluatedAbstract;
    }
  };

  /// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// \brief Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; /// \brief A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache; /// \brief The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// \brief The number of SFINAE diagnostics that have been trapped. 
unsigned NumSFINAEErrors;

  typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
      UnparsedDefaultArgInstantiationsMap;

  /// \brief A mapping from parameters with unparsed default arguments to the
  /// set of instantiations of each parameter.
  ///
  /// This mapping is a temporary data structure used when parsing
  /// nested class templates or nested classes of class templates,
  /// where we might end up instantiating an inner class before the
  /// default arguments of its methods have been parsed.
  UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

  // Contains the locations of the beginning of unparsed default
  // argument locations.
  llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

  /// UndefinedInternals - all the used, undefined objects which require a
  /// definition in this translation unit.
  llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;

  /// Obtain a sorted list of functions that are undefined but ODR-used.
  void getUndefinedButUsed(
      SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

  /// Retrieves list of suspicious delete-expressions that will be checked at
  /// the end of translation unit.
  const llvm::MapVector<FieldDecl *, DeleteLocs> &
  getMismatchingDeleteExpressions() const;

  typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
  typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

  /// Method Pool - allows efficient lookup when typechecking messages to "id".
  /// We need to maintain a list, since selectors can have differing signatures
  /// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
  /// of selectors are "overloaded").
  /// At the head of the list it is recorded whether there were 0, 1, or >= 2
  /// methods inside categories with a particular selector.
  GlobalMethodPool MethodPool;

  /// Method selectors used in a \@selector expression. Used for implementation
  /// of -Wselector.
  llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

  /// Kinds of C++ special members.
  enum CXXSpecialMember {
    CXXDefaultConstructor,
    CXXCopyConstructor,
    CXXMoveConstructor,
    CXXCopyAssignment,
    CXXMoveAssignment,
    CXXDestructor,
    CXXInvalid
  };

  typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;

  /// The C++ special members which we are currently in the process of
  /// declaring. If this process recursively triggers the declaration of the
  /// same special member, we should act as if it is not yet declared.
  llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

  void ReadMethodPool(Selector Sel);

  /// Private Helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// \brief Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);

  /// Records and restores the FP_CONTRACT state on entry/exit of compound
  /// statements (RAII: the saved state is reinstated in the destructor).
  class FPContractStateRAII {
  public:
    FPContractStateRAII(Sema& S)
      : S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
    ~FPContractStateRAII() {
      S.FPFeatures.fp_contract = OldFPContractState;
    }
  private:
    Sema& S;
    bool OldFPContractState : 1;
  };

  void addImplicitTypedef(StringRef Name, QualType T);

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// \brief Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
void Initialize();

  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getFPOptions() { return FPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource* getExternalSource() const { return ExternalSource; }

  ///\brief Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  ///\param[in] E - A non-null external sema source.
  ///
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// \brief Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. SemaDiagnosticBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class SemaDiagnosticBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
      : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

    ~SemaDiagnosticBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive()) return;

      // Otherwise, we need to emit the diagnostic. First flush the underlying
      // DiagnosticBuilder data, and clear the diagnostic builder itself so it
      // won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor will
      // do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      FlushCounts();
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template<typename T>
    friend const SemaDiagnosticBuilder &operator<<(
        const SemaDiagnosticBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }
  };

  /// \brief Emit a diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
    DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
    return SemaDiagnosticBuilder(DB, *this, DiagID);
  }

  /// \brief Emit a partial diagnostic.
  SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

  /// \brief Build a partial diagnostic.
  PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

  bool findMacroSpelling(SourceLocation &loc, StringRef name);

  /// \brief Get a string to suggest for zero-initialization of a type.
  std::string getFixItZeroInitializerForType(QualType T,
                                             SourceLocation Loc) const;
  std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;

  /// \brief Calls \c Lexer::getLocForEndOfToken()
  SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);

  /// \brief Retrieve the module loader associated with the preprocessor.
  ModuleLoader &getModuleLoader() const;

  void emitAndClearUnusedLocalTypedefWarnings();

  void ActOnEndOfTranslationUnit();

  void CheckDelegatingCtorCycles();

  Scope *getScopeForContext(DeclContext *Ctx);

  void PushFunctionScope();
  void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
  sema::LambdaScopeInfo *PushLambdaScope();

  /// \brief This is used to inform Sema what the current TemplateParameterDepth
  /// is during Parsing. Currently it is used to pass on the depth
  /// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, const BlockExpr *blkExpr = nullptr); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const { if (FunctionScopes.empty()) return nullptr; for (int e = FunctionScopes.size()-1; e >= 0; --e) { if (isa<sema::BlockScopeInfo>(FunctionScopes[e])) continue; return FunctionScopes[e]; } return nullptr; } template <typename ExprT> void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) { if (!isUnevaluatedContext()) getCurFunction()->recordUseOfWeak(E, IsRead); } void PushCompoundScope(); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// \brief Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// \brief Retrieve the current lambda scope info, if any. sema::LambdaScopeInfo *getCurLambda(); /// \brief Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// \brief Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. 
// QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); unsigned deduceWeakPropertyFromType(QualType T) { if ((getLangOpts().getGC() != LangOptions::NonGC && T.isObjCGCWeak()) || (getLangOpts().ObjCAutoRefCount && T.getObjCLifetime() == Qualifiers::OCL_Weak)) return ObjCDeclSpec::DQ_PR_weak; return 0; } /// \brief Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. 
/// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. // HLSL Change - FIX - We should move param mods to parameter QualTypes QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI, ArrayRef<hlsl::ParameterModifier> ParamMods); // HLSL Change - End QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T, TypeSourceInfo *ReturnTypeInfo); /// \brief Package the given type and TSI into a ParsedType. 
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc, bool *MissingExceptionSpecification = nullptr, bool *MissingEmptyExceptionSpecification = nullptr, bool AllowNoexceptAllMatchWithNoSpec = false, bool IsOperatorNew = false); bool CheckExceptionSpecSubset( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// \brief The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// \brief Abstract class used to diagnose incomplete types. 
struct TypeDiagnoser {
    /// When true, diagnose() is expected to emit nothing.
    bool Suppressed;

    TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { }

    virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
    virtual ~TypeDiagnoser() {}
  };

  // Identity/adapter overloads so arbitrary argument types can be streamed
  // into a SemaDiagnosticBuilder by BoundTypeDiagnoser below.
  static int getPrintable(int I) { return I; }
  static unsigned getPrintable(unsigned I) { return I; }
  static bool getPrintable(bool B) { return B; }
  static const char * getPrintable(const char *S) { return S; }
  static StringRef getPrintable(StringRef S) { return S; }
  static const std::string &getPrintable(const std::string &S) { return S; }
  static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
    return II;
  }
  static DeclarationName getPrintable(DeclarationName N) { return N; }
  static QualType getPrintable(QualType T) { return T; }
  static SourceRange getPrintable(SourceRange R) { return R; }
  static SourceRange getPrintable(SourceLocation L) { return L; }
  static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
  static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}

  /// TypeDiagnoser that carries a diagnostic ID plus bound arguments which
  /// are streamed into the diagnostic (before the offending type) when
  /// diagnose() fires. A DiagID of 0 suppresses the diagnostic.
  template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
    unsigned DiagID;
    // Note: arguments are held by reference; callers must keep them alive.
    std::tuple<const Ts &...> Args;

    template <std::size_t... Is>
    void emit(const SemaDiagnosticBuilder &DB,
              llvm::index_sequence<Is...>) const {
      // Apply all tuple elements to the builder in order.
      bool Dummy[] = {(DB << getPrintable(std::get<Is>(Args)))...};
      (void)Dummy;
    }

  public:
    BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
        : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Args(Args...) {}

    void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
      if (Suppressed)
        return;
      const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
      emit(DB, llvm::index_sequence_for<Ts...>());
      DB << T;
    }
  };

private:
  bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
                               TypeDiagnoser &Diagnoser);

  VisibleModuleSet VisibleModules;
  llvm::SmallVector<VisibleModuleSet, 16> VisibleModulesStack;

  Module *CachedFakeTopLevelModule;

public:
  /// \brief Get the module owning an entity.
  Module *getOwningModule(Decl *Entity);

  /// \brief Make a merged definition of an existing hidden definition \p ND
  /// visible at the specified location.
  void makeMergedDefinitionVisible(NamedDecl *ND, SourceLocation Loc);

  bool isModuleVisible(Module *M) { return VisibleModules.isVisible(M); }

  /// Determine whether a declaration is visible to name lookup.
  bool isVisible(const NamedDecl *D) {
    // Fast path: non-hidden declarations are always visible.
    return !D->isHidden() || isVisibleSlow(D);
  }
  bool hasVisibleMergedDefinition(NamedDecl *Def);

  /// Determine if \p D has a visible definition. If not, suggest a declaration
  /// that should be made visible to expose the definition.
  bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
                            bool OnlyNeedComplete = false);
  bool hasVisibleDefinition(const NamedDecl *D) {
    NamedDecl *Hidden;
    return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
  }

  /// Determine if the template parameter \p D has a visible default argument.
  bool hasVisibleDefaultArgument(
      const NamedDecl *D,
      llvm::SmallVectorImpl<Module *> *Modules = nullptr);

  bool RequireCompleteType(SourceLocation Loc, QualType T,
                           TypeDiagnoser &Diagnoser);
  bool RequireCompleteType(SourceLocation Loc, QualType T,
                           unsigned DiagID);

  template <typename... Ts>
  bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
                           const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteType(Loc, T, Diagnoser);
  }

  bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
  bool RequireCompleteExprType(Expr *E, unsigned DiagID);

  template <typename... Ts>
  bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireCompleteExprType(E, Diagnoser);
  }

  bool RequireLiteralType(SourceLocation Loc, QualType T,
                          TypeDiagnoser &Diagnoser);
  bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);

  template <typename... Ts>
  bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
                          const Ts &...Args) {
    BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
    return RequireLiteralType(Loc, T, Diagnoser);
  }

  QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
                             const CXXScopeSpec &SS, QualType T);

  QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
  /// If AsUnevaluated is false, E is treated as though it were an evaluated
  /// context, such as when building a type for decltype(auto).
  QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
                             bool AsUnevaluated = true);
  QualType BuildUnaryTransformType(QualType BaseType,
                                   UnaryTransformType::UTTKind UKind,
                                   SourceLocation Loc);

  //===--------------------------------------------------------------------===//
  // Symbol table / Decl tracking callbacks: SemaDecl.cpp.
  //

  /// List of decls defined in a function prototype. This contains EnumConstants
  /// that incorrectly end up in translation unit scope because there is no
  /// function to pin them on. ActOnFunctionDeclarator reads this list and patches
  /// them into the FunctionDecl.
std::vector<NamedDecl*> DeclsInPrototypeScope; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = ParsedType(), bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool AllowClassTemplates = false); /// \brief For compatibility with MSVC, we delay parsing of some default /// template type arguments until instantiation time. Emits a warning and /// returns a synthesized DependentNameType that isn't really dependent on any /// other template arguments. ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II, SourceLocation NameLoc); /// \brief Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
enum NameClassificationKind {
    NC_Unknown,
    NC_Error,
    NC_Keyword,
    NC_Type,
    NC_Expression,
    NC_NestedNameSpecifier,
    NC_TypeTemplate,
    NC_VarTemplate,
    NC_FunctionTemplate
  };

  /// Tagged-union result of ClassifyName(): exactly one of the payload
  /// members below is meaningful, selected by Kind (asserted in accessors).
  class NameClassification {
    NameClassificationKind Kind;
    ExprResult Expr;
    TemplateName Template;
    ParsedType Type;

    explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

  public:
    NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

    NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

    NameClassification(const IdentifierInfo *) : Kind(NC_Keyword) { }

    static NameClassification Error() {
      return NameClassification(NC_Error);
    }

    static NameClassification Unknown() {
      return NameClassification(NC_Unknown);
    }

    static NameClassification NestedNameSpecifier() {
      return NameClassification(NC_NestedNameSpecifier);
    }

    static NameClassification TypeTemplate(TemplateName Name) {
      NameClassification Result(NC_TypeTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification VarTemplate(TemplateName Name) {
      NameClassification Result(NC_VarTemplate);
      Result.Template = Name;
      return Result;
    }

    static NameClassification FunctionTemplate(TemplateName Name) {
      NameClassification Result(NC_FunctionTemplate);
      Result.Template = Name;
      return Result;
    }

    NameClassificationKind getKind() const { return Kind; }

    ParsedType getType() const {
      assert(Kind == NC_Type);
      return Type;
    }

    ExprResult getExpression() const {
      assert(Kind == NC_Expression);
      return Expr;
    }

    TemplateName getTemplateName() const {
      assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
             Kind == NC_VarTemplate);
      return Template;
    }

    TemplateNameKind getTemplateNameKind() const {
      switch (Kind) {
      case NC_TypeTemplate:
        return TNK_Type_template;
      case NC_FunctionTemplate:
        return TNK_Function_template;
      case NC_VarTemplate:
        return TNK_Var_template;
      default:
        llvm_unreachable("unsupported name classification.");
      }
    }
  };

  /// \brief Perform name lookup on the given name, classifying it based on
  /// the results of name lookup and the following token.
  ///
  /// This routine is used by the parser to resolve identifiers and help direct
  /// parsing. When the identifier cannot be found, this routine will attempt
  /// to correct the typo and classify based on the resulting name.
  ///
  /// \param S The scope in which we're performing name lookup.
  ///
  /// \param SS The nested-name-specifier that precedes the name.
  ///
  /// \param Name The identifier. If typo correction finds an alternative name,
  /// this pointer parameter will be updated accordingly.
  ///
  /// \param NameLoc The location of the identifier.
  ///
  /// \param NextToken The token following the identifier. Used to help
  /// disambiguate the name.
  ///
  /// \param IsAddressOfOperand True if this name is the operand of a unary
  /// address of ('&') expression, assuming it is classified as an
  /// expression.
  ///
  /// \param CCC The correction callback, if typo correction is desired.
  NameClassification
  ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name,
               SourceLocation NameLoc, const Token &NextToken,
               bool IsAddressOfOperand,
               std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr);

  Decl *ActOnDeclarator(Scope *S, Declarator &D);

  NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
                              MultiTemplateParamsArg TemplateParameterLists);
  void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
  bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
  bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
                                    DeclarationName Name,
                                    SourceLocation Loc);
  void
  diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
                            SourceLocation FallbackLoc,
                            SourceLocation ConstQualLoc = SourceLocation(),
                            SourceLocation VolatileQualLoc = SourceLocation(),
                            SourceLocation RestrictQualLoc = SourceLocation(),
                            SourceLocation AtomicQualLoc = SourceLocation());

  static bool adjustContextForLocalExternDecl(DeclContext *&DC);
  void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
  void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R);
  void CheckShadow(Scope *S, VarDecl *D);
  void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
  void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
  void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
                                    TypedefNameDecl *NewTD);
  void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
  NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                    TypeSourceInfo *TInfo,
                                    LookupResult &Previous);
  NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
                                  LookupResult &Previous, bool &Redeclaration);

  // HLSL Change Starts
  // This enumeration is used to determine whether a variable declaration
  // should shadow a prior declaration rather than merging.
  enum ShadowMergeState {
    ShadowMergeState_Disallowed, // shadowing is not allowed
    ShadowMergeState_Possible,   // shadowing is possible (but may not occur)
    ShadowMergeState_Effective   // the declaration should shadow a prior one
  };
  // HLSL Change Ends

  NamedDecl *ActOnVariableDeclarator(
      Scope *S, Declarator &D, DeclContext *DC,
      TypeSourceInfo *TInfo,
      LookupResult &Previous,
      MultiTemplateParamsArg TemplateParamLists,
      bool &AddToScope,
      ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state
  // Returns true if the variable declaration is a redeclaration
  bool CheckVariableDeclaration(
      VarDecl *NewVD, LookupResult &Previous,
      ShadowMergeState MergeState = ShadowMergeState_Disallowed); // HLSL Change - add merge state
  void CheckVariableDeclarationType(VarDecl *NewVD);
  void CheckCompleteVariableDeclaration(VarDecl *var);
  void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);

  NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
                                     TypeSourceInfo *TInfo,
                                     LookupResult &Previous,
                                     MultiTemplateParamsArg TemplateParamLists,
                                     bool &AddToScope);
  bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);

  bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
  bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);

  void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
  void FindHiddenVirtualMethods(
      CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
  void NoteHiddenVirtualMethods(
      CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
  // Returns true if the function declaration is a redeclaration
  bool CheckFunctionDeclaration(Scope *S,
                                FunctionDecl *NewFD, LookupResult &Previous,
                                bool IsExplicitSpecialization);
  void CheckMain(FunctionDecl *FD, const DeclSpec &D);
  void CheckMSVCRTEntryPoint(FunctionDecl *FD);
  Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
  ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
                                          SourceLocation Loc,
                                          QualType T);
  ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
                              SourceLocation NameLoc, IdentifierInfo *Name,
                              QualType T, TypeSourceInfo *TSInfo,
                              StorageClass SCm,
                              hlsl::ParameterModifier ParamMod); // HLSL Change
  void ActOnParamDefaultArgument(Decl *param,
                                 SourceLocation EqualLoc,
                                 Expr *defarg);
  void ActOnParamUnparsedDefaultArgument(Decl *param,
                                         SourceLocation EqualLoc,
                                         SourceLocation ArgLoc);
  void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
  bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
                               SourceLocation EqualLoc);

  void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
                            bool TypeMayContainAuto);
  void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
  void ActOnInitializerError(Decl *Dcl);
  void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
  void ActOnCXXForRangeDecl(Decl *D);
  StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
                                        IdentifierInfo *Ident,
                                        ParsedAttributes &Attrs,
                                        SourceLocation AttrEnd);
  void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
  void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
  void FinalizeDeclaration(Decl *D);
  DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
                                         ArrayRef<Decl *> Group);
  DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group,
                                      bool TypeMayContainAuto = true);

  /// Should be called on all declarations that might have attached
  /// documentation comments.
  void ActOnDocumentableDecl(Decl *D);
  void ActOnDocumentableDecls(ArrayRef<Decl *> Group);

  void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
                                       SourceLocation LocAfterDecls);
  void CheckForFunctionRedefinition(
      FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr);
  Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D);
  Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D);
  void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
  /// True when \p D is a non-null Objective-C method declaration.
  bool isObjCMethodDecl(Decl *D) {
    return D && isa<ObjCMethodDecl>(D);
  }

  /// \brief Determine whether we can delay parsing the body of a function or
  /// function template until it is used, assuming we don't care about emitting
  /// code for that function.
  ///
  /// This will be \c false if we may need the body of the function in the
  /// middle of parsing an expression (where it's impractical to switch to
  /// parsing a different function), for instance, if it's constexpr in C++11
  /// or has an 'auto' return type in C++14. These cases are essentially bugs.
  bool canDelayFunctionBody(const Declarator &D);

  /// \brief Determine whether we can skip parsing the body of a function
  /// definition, assuming we don't care about analyzing its body or emitting
  /// code for that function.
  ///
  /// This will be \c false only if we may need the body of the function in
  /// order to parse the rest of the program (for instance, if it is
  /// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);

void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineMethodDef(CXXMethodDecl *D);

/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);

/// \brief Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ParmVarDecl * const *Begin,
                              ParmVarDecl * const *End);

/// \brief Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin,
                                            ParmVarDecl * const *End,
                                            QualType ReturnTy,
                                            NamedDecl *D);

void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
                            SourceLocation AsmLoc,
                            SourceLocation RParenLoc);

/// \brief Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S,
                            AttributeList *AttrList,
                            SourceLocation SemiLoc);

/// \brief The parser has processed a module import declaration.
///
/// \param AtLoc The location of the '@' symbol, if any.
///
/// \param ImportLoc The location of the 'import' keyword.
///
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
                             ModuleIdPath Path);

/// \brief The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);

/// \brief The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);

/// \brief The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);

/// \brief Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
                                                Module *Mod);

/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
  Declaration,
  Definition,
  DefaultArgument
};

/// \brief Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           bool NeedDefinition, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
                           SourceLocation DeclLoc, ArrayRef<Module *> Modules,
                           MissingImportKind MIK, bool Recover);

/// \brief Retrieve a suitable printing policy.
PrintingPolicy getPrintingPolicy() const {
  return getPrintingPolicy(Context, PP);
}

/// \brief Retrieve a suitable printing policy.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
                                        const Preprocessor &PP);

/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);

Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
                                 DeclSpec &DS);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
                                 DeclSpec &DS,
                                 MultiTemplateParamsArg TemplateParams,
                                 bool IsExplicitInstantiation = false);

Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
                                  AccessSpecifier AS,
                                  RecordDecl *Record,
                                  const PrintingPolicy &Policy);

Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
                                     RecordDecl *Record);

bool isAcceptableTagRedeclaration(const TagDecl *Previous,
                                  TagTypeKind NewTag, bool isDefinition,
                                  SourceLocation NewTagLoc,
                                  const IdentifierInfo *Name);

enum TagUseKind {
  TUK_Reference,   // Reference to a tag:  'struct foo *X;'
  TUK_Declaration, // Fwd decl of a tag:   'struct foo;'
  TUK_Definition,  // Definition of a tag: 'struct foo { int X; } Y;'
  TUK_Friend       // Friend declaration:  'friend struct foo;'
};

// Result holder telling the parser whether a tag body may be skipped, and
// which previous declaration made the skip possible.
struct SkipBodyInfo {
  SkipBodyInfo() : ShouldSkip(false), Previous(nullptr) {}
  bool ShouldSkip;
  NamedDecl *Previous;
};

Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
               SourceLocation KWLoc, CXXScopeSpec &SS,
               IdentifierInfo *Name, SourceLocation NameLoc,
               AttributeList *Attr, AccessSpecifier AS,
               SourceLocation ModulePrivateLoc,
               MultiTemplateParamsArg TemplateParameterLists,
               bool &OwnedDecl, bool &IsDependent,
               SourceLocation ScopedEnumKWLoc,
               bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
               bool IsTypeSpecifier, SkipBodyInfo *SkipBody = nullptr);

Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
                              unsigned TagSpec, SourceLocation TagLoc,
                              CXXScopeSpec &SS,
                              IdentifierInfo *Name, SourceLocation NameLoc,
                              AttributeList *Attr,
                              MultiTemplateParamsArg TempParamLists);

TypeResult ActOnDependentTag(Scope *S,
                             unsigned TagSpec,
                             TagUseKind TUK,
                             const CXXScopeSpec &SS,
                             IdentifierInfo *Name,
                             SourceLocation TagLoc,
                             SourceLocation NameLoc);

void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
               IdentifierInfo *ClassName,
               SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
                 Declarator &D, Expr *BitfieldWidth);

FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
                       Declarator &D, Expr *BitfieldWidth,
                       InClassInitStyle InitStyle,
                       AccessSpecifier AS);

MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
                                 SourceLocation DeclStart,
                                 Declarator &D, Expr *BitfieldWidth,
                                 InClassInitStyle InitStyle,
                                 AccessSpecifier AS,
                                 AttributeList *MSPropertyAttr);

FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
                          TypeSourceInfo *TInfo,
                          RecordDecl *Record, SourceLocation Loc,
                          bool Mutable, Expr *BitfieldWidth,
                          InClassInitStyle InitStyle,
                          SourceLocation TSSL,
                          AccessSpecifier AS, NamedDecl *PrevDecl,
                          Declarator *D = nullptr);

bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
                            bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);

void ActOnLastBitfield(SourceLocation DeclStart,
                       SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
                Declarator &D, Expr *BitfieldWidth,
                tok::ObjCKeywordKind visibility);

// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
                 ArrayRef<Decl *> Fields,
                 SourceLocation LBrac, SourceLocation RBrac,
                 AttributeList *AttrList);

/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);

// Opaque cookie returned by ActOnTagStartSkippedDefinition and consumed by
// ActOnTagFinishSkippedDefinition.
typedef void *SkippedDefinitionContext;

/// \brief Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);

Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);

/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
                                     SourceLocation FinalLoc,
                                     bool IsFinalSpelledSealed,
                                     SourceLocation LBraceLoc);

/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
                              SourceLocation RBraceLoc);

void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);

void ActOnObjCContainerFinishDefinition();

/// \brief Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);

/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);

EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
                                    EnumConstantDecl *LastEnumConst,
                                    SourceLocation IdLoc,
                                    IdentifierInfo *Id,
                                    Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
                            QualType EnumUnderlyingTy, const EnumDecl *Prev);

/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
                                    SourceLocation IILoc);

Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
                        SourceLocation IdLoc, IdentifierInfo *Id,
                        AttributeList *Attrs,
                        SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
                   SourceLocation RBraceLoc, Decl *EnumDecl,
                   ArrayRef<Decl *> Elements,
                   Scope *S, AttributeList *Attr);

DeclContext *getContainingDC(DeclContext *DC);

/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();

/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);

/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();

DeclContext *getFunctionLevelDeclContext();

/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed.  If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();

/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed.  If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();

/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null.  If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();

/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);

/// \brief Make the given externally-produced declaration visible at the
/// top level scope.
///
/// \param D The externally-produced declaration to push.
///
/// \param Name The name of the externally-produced declaration.
void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);

/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
///        enclosing namespace set of the context, rather than contained
///        directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
                   bool AllowInlineNamespace = false);

/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope.  Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);

/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
                              TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);

/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range,
                                        IdentifierInfo *Platform,
                                        VersionTuple Introduced,
                                        VersionTuple Deprecated,
                                        VersionTuple Obsoleted,
                                        bool IsUnavailable,
                                        StringRef Message,
                                        bool Override,
                                        unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
                                       TypeVisibilityAttr::VisibilityType Vis,
                                       unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
                                    VisibilityAttr::VisibilityType Vis,
                                    unsigned AttrSpellingListIndex);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
                                  unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
                                  unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
                       unsigned AttrSpellingListIndex,
                       MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
                            IdentifierInfo *Format, int FormatIdx,
                            int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
                              unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
                                        IdentifierInfo *Ident,
                                        unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
                              unsigned AttrSpellingListIndex);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
                                        unsigned AttrSpellingListIndex);

/// \brief Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
  /// \brief Don't merge availability attributes at all.
  AMK_None,
  /// \brief Merge availability attributes for a redeclaration, which requires
  /// an exact match.
  AMK_Redeclaration,
  /// \brief Merge availability attributes for an override, which requires
  /// an exact match or a weakening of constraints.
  AMK_Override
};

void mergeDeclAttributes(NamedDecl *New, Decl *Old,
                         AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
                       bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
                                  Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous,
                  ShadowMergeState& MergeState); // HLSL Change - add merge state
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld,
                       ShadowMergeState& MergeState); // HLSL Change - add merge state
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);

// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
  AA_Assigning,
  AA_Passing,
  AA_Returning,
  AA_Converting,
  AA_Initializing,
  AA_Sending,
  AA_Casting,
  AA_Passing_CFAudited
};

/// C++ Overloading.
enum OverloadKind {
  /// This is a legitimate overload: the existing declarations are
  /// functions or function templates with different signatures.
  Ovl_Overload,

  /// This is not an overload because the signature exactly matches
  /// an existing declaration.
  Ovl_Match,

  /// This is not an overload because the lookup results contain a
  /// non-function.
  Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
                           FunctionDecl *New,
                           const LookupResult &OldDecls,
                           NamedDecl *&OldDecl,
                           bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);

/// \brief Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is
/// ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);

ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
                      bool SuppressUserConversions,
                      bool AllowExplicit,
                      bool InOverloadResolution,
                      bool CStyle,
                      bool AllowObjCWritebackConversion);

bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
                         bool InOverloadResolution,
                         QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
                             QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
                               QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
                              QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
                                const FunctionProtoType *NewType,
                                unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
                                QualType FromType, QualType ToType);

void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
                            CastKind &Kind,
                            CXXCastPath& BasePath,
                            bool IgnoreBaseAccess);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
                               bool InOverloadResolution,
                               QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
                                  CastKind &Kind,
                                  CXXCastPath &BasePath,
                                  bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
                               bool CStyle, bool &ObjCLifetimeConversion);
bool IsNoReturnConversion(QualType FromType, QualType ToType,
                          QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);

ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
                                           const VarDecl *NRVOCandidate,
                                           QualType ResultType,
                                           Expr *Value,
                                           bool AllowNRVO = true);

bool CanPerformCopyInitialization(const InitializedEntity &Entity,
                                  ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
                                     SourceLocation EqualLoc,
                                     ExprResult Init,
                                     bool TopLevelOfInitList = false,
                                     bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
                                               NestedNameSpecifier *Qualifier,
                                               NamedDecl *FoundDecl,
                                               CXXMethodDecl *Method);

ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);

/// Contexts in which a converted constant expression is required.
enum CCEKind {
  CCEK_CaseValue,   ///< Expression in a case label.
  CCEK_Enumerator,  ///< Enumerator value with fixed underlying type.
  CCEK_TemplateArg, ///< Value of a non-type template parameter.
  CCEK_NewExpr      ///< Constant expression in a noptr-new-declarator.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
                                            APValue &Value, CCEKind CCE);

/// \brief Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
  bool Suppress;
  bool SuppressConversion;

  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

  /// \brief Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;

  /// \brief Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc,
                                                QualType T) = 0;

  /// \brief Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S,
                                                   SourceLocation Loc,
                                                   QualType T) = 0;

  /// \brief Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(Sema &S,
                                                     SourceLocation Loc,
                                                     QualType T,
                                                     QualType ConvTy) = 0;

  /// \brief Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S,
                                                 CXXConversionDecl *Conv,
                                                 QualType ConvTy) = 0;

  /// \brief Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc,
                                                  QualType T) = 0;

  /// \brief Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S,
                                              CXXConversionDecl *Conv,
                                              QualType ConvTy) = 0;

  /// \brief Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(Sema &S,
                                                   SourceLocation Loc,
                                                   QualType T,
                                                   QualType ConvTy) = 0;

  virtual ~ContextualImplicitConverter() {}
};

// Converter specialized for contexts that require an integral or (optionally
// scoped) enumeration type (integral constant expressions).
class ICEConvertDiagnoser : public ContextualImplicitConverter {
  bool AllowScopedEnumerations;

public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations,
                      bool Suppress, bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}

  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;

  SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
    return diagnoseNotInt(S, Loc, T);
  }

  /// \brief Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
                                               QualType T) = 0;
};

/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

enum ObjCSubscriptKind {
  OS_Array,
  OS_Dictionary,
  OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);

ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);

// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;

void AddOverloadCandidate(FunctionDecl *Function,
                          DeclAccessPair FoundDecl,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet& CandidateSet,
                          bool SuppressUserConversions = false,
                          bool PartialOverloading = false,
                          bool AllowExplicit = false);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
                      ArrayRef<Expr *> Args,
                      OverloadCandidateSet &CandidateSet,
                      TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                      bool SuppressUserConversions = false,
                      bool PartialOverloading = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
                        QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet& CandidateSet,
                        bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
                        DeclAccessPair FoundDecl,
                        CXXRecordDecl *ActingContext, QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet& CandidateSet,
                        bool SuppressUserConversions = false,
                        bool PartialOverloading = false);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
                                DeclAccessPair FoundDecl,
                                CXXRecordDecl *ActingContext,
                                TemplateArgumentListInfo *ExplicitTemplateArgs,
                                QualType ObjectType,
                                Expr::Classification ObjectClassification,
                                ArrayRef<Expr *> Args,
                                OverloadCandidateSet& CandidateSet,
                                bool SuppressUserConversions = false,
                                bool PartialOverloading = false);
void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
                                  DeclAccessPair FoundDecl,
                                  TemplateArgumentListInfo *ExplicitTemplateArgs,
                                  ArrayRef<Expr *> Args,
                                  OverloadCandidateSet& CandidateSet,
                                  bool SuppressUserConversions = false,
                                  bool PartialOverloading = false);
void AddConversionCandidate(CXXConversionDecl *Conversion,
                            DeclAccessPair FoundDecl,
                            CXXRecordDecl *ActingContext,
                            Expr *From, QualType ToType,
                            OverloadCandidateSet& CandidateSet,
                            bool AllowObjCConversionOnExplicit);
void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
                                    DeclAccessPair FoundDecl,
                                    CXXRecordDecl *ActingContext,
                                    Expr *From, QualType ToType,
                                    OverloadCandidateSet &CandidateSet,
                                    bool AllowObjCConversionOnExplicit);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
                           DeclAccessPair FoundDecl,
                           CXXRecordDecl *ActingContext,
                           const FunctionProtoType *Proto,
                           Expr *Object, ArrayRef<Expr *> Args,
                           OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
                                 SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                 OverloadCandidateSet& CandidateSet,
                                 SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
                         ArrayRef<Expr *> Args,
                         OverloadCandidateSet& CandidateSet,
                         bool IsAssignmentOperator = false,
                         unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
                                  SourceLocation OpLoc, ArrayRef<Expr *> Args,
                                  OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
                                  SourceLocation Loc,
                                  ArrayRef<Expr *> Args,
                                  TemplateArgumentListInfo *ExplicitTemplateArgs,
                                  OverloadCandidateSet& CandidateSet,
                                  bool PartialOverloading = false);

// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(FunctionDecl *Fn,
                           QualType DestType = QualType());

// Emit as a series of 'note's all template and non-templates
// identified by the expression Expr
void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType());

/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
                            bool MissingImplicitThis = false);

// [PossiblyAFunctionType]  -->   [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
                                   QualType TargetType,
                                   bool Complain,
                                   DeclAccessPair &Found,
                                   bool *pHadMultipleCandidates = nullptr);

FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
                                            bool Complain = false,
                                            DeclAccessPair *Found = nullptr);

// NOTE(review): upstream parameter name "DoFunctionPointerConverion" is
// misspelled; kept as-is since renaming it here would diverge from the
// out-of-line definition.
bool ResolveAndFixSingleFunctionTemplateSpecialization(
    ExprResult &SrcExpr,
    bool DoFunctionPointerConverion = false,
    bool Complain = false,
    const SourceRange& OpRangeForComplaining = SourceRange(),
    QualType DestTypeForComplaining = QualType(),
    unsigned DiagIDForComplaining = 0);

Expr *FixOverloadedFunctionReference(Expr *E,
                                     DeclAccessPair FoundDecl,
                                     FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
                                          DeclAccessPair FoundDecl,
                                          FunctionDecl *Fn);

void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                 ArrayRef<Expr *> Args,
                                 OverloadCandidateSet &CandidateSet,
                                 bool PartialOverloading = false);

// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
  FRS_Success,
  FRS_NoViableFunction,
  FRS_DiagnosticIssued
};

// An enum to represent whether something is dealing with a call to begin()
// or a call to end() in a range-based for loop.
enum BeginEndFunction {
  BEF_begin,
  BEF_end
};

ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc,
                                         SourceLocation RangeLoc,
                                         VarDecl *Decl,
                                         BeginEndFunction BEF,
                                         const DeclarationNameInfo &NameInfo,
                                         LookupResult &MemberLookup,
                                         OverloadCandidateSet *CandidateSet,
                                         Expr *Range, ExprResult *CallExpr);

ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
                                   UnresolvedLookupExpr *ULE,
                                   SourceLocation LParenLoc,
                                   MultiExprArg Args,
                                   SourceLocation RParenLoc,
                                   Expr *ExecConfig,
                                   bool AllowTypoCorrection=true);

bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                            MultiExprArg Args, SourceLocation RParenLoc,
                            OverloadCandidateSet *CandidateSet,
                            ExprResult *Result);

ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
                                   unsigned Opc,
                                   const UnresolvedSetImpl &Fns,
                                   Expr *input);

ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
                                 unsigned Opc,
                                 const UnresolvedSetImpl &Fns,
                                 Expr *LHS, Expr *RHS);

ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                              SourceLocation RLoc,
                                              Expr *Base,Expr *Idx);

ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                          SourceLocation LParenLoc,
                          MultiExprArg Args,
                          SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
                             MultiExprArg Args,
                             SourceLocation RParenLoc);

ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
                                    SourceLocation OpLoc,
                                    bool *NoArrowOperatorFound = nullptr);

/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
                         CallExpr *CE, FunctionDecl *FD);

/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param, ParmVarDecl *const *ParamEnd, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// @brief Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. 
LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// \brief Look up any declaration with any name. LookupAnyName }; /// \brief Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// \brief The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. 
NotForRedeclaration = 0, /// \brief The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists. ForRedeclaration }; /// \brief The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// \brief The lookup resulted in an error. LOLR_Error, /// \brief The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// \brief The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// \brief The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// \brief The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT; TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT; }; /// \brief The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// \brief Creates a new TypoExpr AST node. 
  /// \brief Creates a new TypoExpr AST node, registering its consumer and
  /// diagnostic/recovery callbacks in DelayedTypos for later resolution.
  TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                              TypoDiagnosticGenerator TDG,
                              TypoRecoveryCallback TRC);

  // \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
  //
  // The boolean value will be true to indicate that the namespace was loaded
  // from an AST/PCH file, or false otherwise.
  llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

  /// \brief Whether we have already loaded known namespaces from an external
  /// source.
  bool LoadedExternalKnownNamespaces;

  /// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
  /// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
  /// should be skipped entirely.
  std::unique_ptr<TypoCorrectionConsumer>
  makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                             Sema::LookupNameKind LookupKind, Scope *S,
                             CXXScopeSpec *SS,
                             std::unique_ptr<CorrectionCandidateCallback> CCC,
                             DeclContext *MemberContext, bool EnteringContext,
                             const ObjCObjectPointerType *OPT,
                             bool ErrorRecovery);

public:
  /// \brief Retrieve the recorded state (consumer and callbacks) for the
  /// given delayed TypoExpr.
  const TypoExprState &getTypoExprState(TypoExpr *TE) const;

  /// \brief Clears the state of the given TypoExpr.
  void clearDelayedTypo(TypoExpr *TE);

  /// \brief Look up a name, looking for a single declaration. Return
  /// null if the results were absent, ambiguous, or overloaded.
  ///
  /// It is preferable to use the elaborated form and explicitly handle
  /// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions, DeclAccessPair Operator, QualType T1, QualType T2); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate); bool isKnownName(StringRef name); void 
ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// \brief Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. 
ExprResult CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(E, nullptr, Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter) { return CorrectDelayedTyposInExpr(ER, nullptr, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. 
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const AttributeList *AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const AttributeList &attr, unsigned &value); bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckNoReturnAttr(const AttributeList &attr); bool checkStringLiteralArgumentAttr(const AttributeList &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkSectionName(SourceLocation LiteralLoc, StringRef Str); void checkTargetAttr(SourceLocation LiteralLoc, StringRef Str); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType &T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. 
const AttributedType *getCallingConvAttributedType(QualType T) const; /// Check whether a nullability type specifier can be added to the given /// type. /// /// \param type The type to which the nullability specifier will be /// added. On success, this type will be updated appropriately. /// /// \param nullability The nullability specifier to add. /// /// \param nullabilityLoc The location of the nullability specifier. /// /// \param isContextSensitive Whether this nullability specifier was /// written as a context-sensitive keyword (in an Objective-C /// method) or an Objective-C property attribute, rather than as an /// underscored type specifier. /// /// \returns true if nullability cannot be applied, false otherwise. bool checkNullabilityTypeSpecifier(QualType &type, NullabilityKind nullability, SourceLocation nullabilityLoc, bool isContextSensitive); /// \brief Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implelementation match those listed in the interface. 
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// Diagnose any null-resettable synthesized setters. void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl, ObjCInterfaceDecl *IDecl); void DefaultSynthesizeProperties(Scope *S, Decl *D); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. /// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. 
  ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                    SourceLocation AtLoc,
                    SourceLocation LParenLoc,
                    FieldDeclarator &FD,
                    Selector GetterSel, Selector SetterSel,
                    const bool isAssign,
                    const bool isReadWrite,
                    const unsigned Attributes,
                    const unsigned AttributesAsWritten,
                    bool *isOverridingProperty,
                    QualType T,
                    TypeSourceInfo *TSI,
                    tok::ObjCKeywordKind MethodImplKind);

  /// Called by ActOnProperty and HandlePropertyInClassExtension to
  /// handle creating the ObjcPropertyDecl for a category or \@interface.
  ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                       ObjCContainerDecl *CDecl,
                                       SourceLocation AtLoc,
                                       SourceLocation LParenLoc,
                                       FieldDeclarator &FD,
                                       Selector GetterSel,
                                       Selector SetterSel,
                                       const bool isAssign,
                                       const bool isReadWrite,
                                       const unsigned Attributes,
                                       const unsigned AttributesAsWritten,
                                       QualType T,
                                       TypeSourceInfo *TSI,
                                       tok::ObjCKeywordKind MethodImplKind,
                                       DeclContext *lexicalDC = nullptr);

  /// AtomicPropertySetterGetterRules - This routine enforces the rule (via
  /// warning) when atomic property has one but not the other user-declared
  /// setter or getter.
  void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                       ObjCContainerDecl* IDecl);

  void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

  void DiagnoseMissingDesignatedInitOverrides(
      const ObjCImplementationDecl *ImplD,
      const ObjCInterfaceDecl *IFD);

  /// Warn if an interface redeclares ivars already declared in a superclass.
  void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

  // How strictly two Objective-C method types must agree when matched.
  enum MethodMatchStrategy {
    MMS_loose,
    MMS_strict
  };

  /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
  /// true, or false, accordingly.
  bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                  const ObjCMethodDecl *PrevMethod,
                                  MethodMatchStrategy strategy = MMS_strict);

  /// MatchAllMethodDeclarations - Check methods declared in an interface
  /// or protocol against those declared in their implementations.
  void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                  const SelectorSet &ClsMap,
                                  SelectorSet &InsMapSeen,
                                  SelectorSet &ClsMapSeen,
                                  ObjCImplDecl* IMPDecl,
                                  ObjCContainerDecl* IDecl,
                                  bool &IncompleteImpl,
                                  bool ImmediateClass,
                                  bool WarnCategoryMethodImpl=false);

  /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
  /// a category match those implemented in its primary class and
  /// warns each time an exact match is found.
  void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

  /// \brief Add the given method to the list of globally-known methods.
  void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

private:
  /// AddMethodToGlobalPool - Add an instance or factory method to the global
  /// pool. See description of AddInstanceMethodToGlobalPool.
  void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

  /// LookupMethodInGlobalPool - Returns the instance or factory method and
  /// optionally warns if there are multiple signatures.
  ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                           bool receiverIdOrClass,
                                           bool instance);

public:
  /// \brief - Returns instance or factory methods in the global method pool
  /// for the given selector. If no such method, or only one method, is found,
  /// returns false; otherwise, it returns true.
  bool CollectMultipleMethodsInGlobalPool(Selector Sel,
                                          SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                          bool instance);

  bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                      SourceRange R, bool receiverIdOrClass);

  void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                          Selector Sel, SourceRange R,
                                          bool receiverIdOrClass);

private:
  /// \brief - Returns a selector which best matches the given argument list,
  /// or nullptr if none could be found.
  ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                   bool IsInstance);

  /// \brief Record the typo correction failure and return an empty correction.
  TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                  bool RecordFailure = true) {
    // Remember the (identifier, location) pair so the same failed typo is not
    // re-attempted later.
    if (RecordFailure)
      TypoCorrectionFailures[Typo].insert(TypoLoc);
    return TypoCorrection();
  }

public:
  /// AddInstanceMethodToGlobalPool - All instance methods in a translation
  /// unit are added to a global pool. This allows us to efficiently associate
  /// a selector with a method declaration for purposes of typechecking
  /// messages sent to "id" (where the class of the object is unknown).
  void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/true);
  }

  /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
  void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
    AddMethodToGlobalPool(Method, impl, /*instance*/false);
  }

  /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
  /// pool.
  void AddAnyMethodToGlobalPool(Decl *D);

  /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                   bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/true);
  }

  /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
  /// there are multiple signatures.
  ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                  bool receiverIdOrClass=false) {
    return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                    /*instance*/false);
  }

  const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                                                   QualType ObjectType=QualType());

  /// LookupImplementedMethodInGlobalPool - Returns the method which has an
  /// implementation.
  ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

  /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
  /// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg(ActOnFinishFullExpr(Arg, CC).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg); StmtResult ActOnExprStmtError(); StmtResult ActOnHlslDiscardStmt(SourceLocation Loc); // HLSL Change StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// \brief A RAII object to enter scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S): S(S) { S.ActOnStartOfCompoundStmt(); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops function a function scope on exit. 
struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal, SourceLocation DotDotDotLoc, Expr *RHSVal, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); StmtResult ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond, Decl *CondVar); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond, Decl *CondVar, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, FullExprArg Second, Decl *SecondVar, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. 
BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *BeginEndDecl, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, bool AllowFunctionParameters); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, bool AllowFunctionParameters); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool 
IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, llvm::InlineAsmIdentifierInfo &Info, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation 
CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// \brief If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. 
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable, AD_Partial }; void EmitAvailabilityWarning(AvailabilityDiagnostic AD, NamedDecl *D, StringRef Message, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass, const ObjCPropertyDecl *ObjCProperty, bool ObjCPropertyAccess); bool makeUnavailableInSystemHeader(SourceLocation loc, StringRef message); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. 
bool CanUseDecl(NamedDecl *D); bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass=nullptr, bool ObjCPropertyAccess=false); void NoteDeletedFunction(FunctionDecl *FD); std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, bool IsDecltype = false); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, bool IsDecltype = false); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool OdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E); void MarkMemberReferenced(MemberExpr *E); void UpdateMarkingForLValueToRValue(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// \brief Try to capture the given variable. 
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
                        SourceLocation EllipsisLoc, bool BuildAndDiagnose,
                        QualType &CaptureType,
                        QualType &DeclRefType,
                        const unsigned *const FunctionScopeIndexToStopAt);

/// \brief Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
                        TryCaptureKind Kind = TryCapture_Implicit,
                        SourceLocation EllipsisLoc = SourceLocation());

/// \brief Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);

/// \brief Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);

void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
                                      bool SkipLocalVariables = false);

/// \brief Try to recover by turning the given expression into a
/// call.  Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
                          bool ForceComplain = false,
                          bool (*IsPlausibleResult)(QualType) = nullptr);

/// \brief Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
                   UnresolvedSetImpl &NonTemplateOverloads);

/// \brief Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
                         const PartialDiagnostic &PD);

// Primary Expressions.
SourceRange getExprRange(Expr *E) const;

ExprResult ActOnIdExpression(
    Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
    std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr,
    bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);

void DecomposeUnqualifiedId(const UnqualifiedId &Id,
                            TemplateArgumentListInfo &Buffer,
                            DeclarationNameInfo &NameInfo,
                            const TemplateArgumentListInfo *&TemplateArgs);

bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
                    std::unique_ptr<CorrectionCandidateCallback> CCC,
                    TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                    ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);

ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
                              IdentifierInfo *II,
                              bool AllowBuiltinCreation=false);

ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
                                      SourceLocation TemplateKWLoc,
                                      const DeclarationNameInfo &NameInfo,
                                      bool isAddressOfOperand,
                                const TemplateArgumentListInfo *TemplateArgs);

// Builders for references to declarations (DeclRefExpr and friends).
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
                            ExprValueKind VK,
                            SourceLocation Loc,
                            const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
                 const DeclarationNameInfo &NameInfo,
                 const CXXScopeSpec *SS = nullptr,
                 NamedDecl *FoundD = nullptr,
                 const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
    const CXXScopeSpec &SS,
    SourceLocation nameLoc,
    IndirectFieldDecl *indirectField,
    DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
    Expr *baseObjectExpr = nullptr,
    SourceLocation opLoc = SourceLocation());

ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
                                           SourceLocation TemplateKWLoc,
                                           LookupResult &R,
                                const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
                                   SourceLocation TemplateKWLoc,
                                   LookupResult &R,
                                const TemplateArgumentListInfo *TemplateArgs,
                                   bool IsDefiniteInstance);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
                                const LookupResult &R,
                                bool HasTrailingLParen);

ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
                                  const DeclarationNameInfo &NameInfo,
                                  bool IsAddressOfOperand,
                                  TypeSourceInfo **RecoveryTSI = nullptr);

ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
                                     SourceLocation TemplateKWLoc,
                                     const DeclarationNameInfo &NameInfo,
                                const TemplateArgumentListInfo *TemplateArgs);

ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
                                    LookupResult &R,
                                    bool NeedsADL,
                                    bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
    const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
    NamedDecl *FoundD = nullptr,
    const TemplateArgumentListInfo *TemplateArgs = nullptr,
    bool AcceptInvalidDecl = false);

ExprResult BuildLiteralOperatorCall(LookupResult &R,
                      DeclarationNameInfo &SuffixInfo,
                      ArrayRef<Expr *> Args,
                      SourceLocation LitEndLoc,
                      TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);

ExprResult BuildPredefinedExpr(SourceLocation Loc,
                               PredefinedExpr::IdentType IT);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);

bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);

ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
                                  Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
                              SourceLocation R,
                              MultiExprArg Val);

/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
                              Scope *UDLScope = nullptr);

// C11 _Generic selection expressions.
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
                                     SourceLocation DefaultLoc,
                                     SourceLocation RParenLoc,
                                     Expr *ControllingExpr,
                                     ArrayRef<ParsedType> ArgTypes,
                                     ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
                                      SourceLocation DefaultLoc,
                                      SourceLocation RParenLoc,
                                      Expr *ControllingExpr,
                                      ArrayRef<TypeSourceInfo *> Types,
                                      ArrayRef<Expr *> Exprs);

// Binary/Unary Operators.  'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
                                Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
                        UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
                        tok::TokenKind Op, Expr *Input);

QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);

// sizeof/alignof/vec_step and friends (UnaryExprOrTypeTrait).
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
                                          SourceLocation OpLoc,
                                          UnaryExprOrTypeTrait ExprKind,
                                          SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
                                          UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
                              UnaryExprOrTypeTrait ExprKind,
                              bool IsType, void *TyOrEx,
                              const SourceRange &ArgRange);

ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);

// HLSL Change Begins
bool CheckHLSLUnaryExprOrTypeTraitOperand(QualType ExprType,
                                          SourceLocation Loc,
                                          UnaryExprOrTypeTrait ExprKind);
// HLSL Change Ends

bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
                                      SourceRange ExprRange,
                                      UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
                                        SourceLocation OpLoc,
                                        IdentifierInfo &Name,
                                        SourceLocation NameLoc,
                                        SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
                               tok::TokenKind Kind, Expr *Input);

ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
                                   Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
                                           Expr *Idx, SourceLocation RLoc);

// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
  Scope *S;
  UnqualifiedId &Id;
  Decl *ObjCImpDecl;
};

ExprResult BuildMemberReferenceExpr(
    Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
    CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
    NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
    const TemplateArgumentListInfo *TemplateArgs,
    ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);

ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
                         bool IsArrow, const CXXScopeSpec &SS,
                         SourceLocation TemplateKWLoc,
                         NamedDecl *FirstQualifierInScope, LookupResult &R,
                         const TemplateArgumentListInfo *TemplateArgs,
                         bool SuppressQualifierCheck = false,
                         ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);

ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);

bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
                                   const CXXScopeSpec &SS,
                                   const LookupResult &R);

ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
                                    bool IsArrow, SourceLocation OpLoc,
                                    const CXXScopeSpec &SS,
                                    SourceLocation TemplateKWLoc,
                                    NamedDecl *FirstQualifierInScope,
                             const DeclarationNameInfo &NameInfo,
                             const TemplateArgumentListInfo *TemplateArgs);

ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
                                 SourceLocation OpLoc,
                                 tok::TokenKind OpKind,
                                 CXXScopeSpec &SS,
                                 SourceLocation TemplateKWLoc,
                                 UnqualifiedId &Member,
                                 Decl *ObjCImpDecl);

void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
                             FunctionDecl *FDecl,
                             const FunctionProtoType *Proto,
                             ArrayRef<Expr *> Args,
                             SourceLocation RParenLoc,
                             bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
                              ParmVarDecl *Param,
                              const Expr *ArgExpr);

/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
                         MultiExprArg ArgExprs, SourceLocation RParenLoc,
                         Expr *ExecConfig = nullptr,
                         bool IsExecConfig = false);
ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
                                 SourceLocation LParenLoc,
                                 ArrayRef<Expr *> Arg,
                                 SourceLocation RParenLoc,
                                 Expr *Config = nullptr,
                                 bool IsExecConfig = false);

ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
                                   MultiExprArg ExecConfig,
                                   SourceLocation GGGLoc);

ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
                         Declarator &D, ParsedType &Ty,
                         SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
                               TypeSourceInfo *Ty,
                               SourceLocation RParenLoc,
                               Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);

/// \brief Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
                              SourceLocation RParenLoc, Expr *E,
                              TypeSourceInfo *TInfo);

ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);

ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
                                ParsedType Ty,
                                SourceLocation RParenLoc,
                                Expr *InitExpr);

ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
                                    TypeSourceInfo *TInfo,
                                    SourceLocation RParenLoc,
                                    Expr *LiteralExpr);

ExprResult ActOnInitList(SourceLocation LBraceLoc,
                         MultiExprArg InitArgList,
                         SourceLocation RBraceLoc);

ExprResult ActOnDesignatedInitializer(Designation &Desig,
                                      SourceLocation Loc,
                                      bool GNUSyntax,
                                      ExprResult Init);

private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);

public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
                      tok::TokenKind Kind,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
                      BinaryOperatorKind Opc,
                      Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                              Expr *LHSExpr, Expr *RHSExpr);

/// ActOnConditionalOp - Parse a ?: operation.  Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
                              SourceLocation ColonLoc,
                              Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);

/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
                          LabelDecl *TheDecl);

// GNU statement expressions: "({...})".
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
                         SourceLocation RPLoc); // "({..})"
void ActOnStmtExprError();

// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
  SourceLocation LocStart, LocEnd;
  bool isBrackets;  // true if [expr], false if .ident
  union {
    IdentifierInfo *IdentInfo;
    Expr *E;
  } U;
};

/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
                                TypeSourceInfo *TInfo,
                                OffsetOfComponent *CompPtr,
                                unsigned NumComponents,
                                SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
                                SourceLocation BuiltinLoc,
                                SourceLocation TypeLoc,
                                ParsedType ParsedArgTy,
                                OffsetOfComponent *CompPtr,
                                unsigned NumComponents,
                                SourceLocation RParenLoc);

// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
                           Expr *CondExpr, Expr *LHSExpr,
                           Expr *RHSExpr, SourceLocation RPLoc);

// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
                      SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
                          TypeSourceInfo *TInfo, SourceLocation RPLoc);

// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);

bool CheckCaseExpression(Expr *E);

/// \brief Describes the result of an "if-exists" condition check.
enum IfExistsResult {
  /// \brief The symbol exists.
  IER_Exists,

  /// \brief The symbol does not exist.
  IER_DoesNotExist,

  /// \brief The name is a dependent name, so the results will differ
  /// from one instantiation to the next.
  IER_Dependent,

  /// \brief An error occurred.
  IER_Error
};

IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
                             const DeclarationNameInfo &TargetNameInfo);

IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
                             bool IsIfExists, CXXScopeSpec &SS,
                             UnqualifiedId &Name);

StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
                                      bool IsIfExists,
                                      NestedNameSpecifierLoc QualifierLoc,
                                      DeclarationNameInfo NameInfo,
                                      Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
                                      bool IsIfExists,
                                      CXXScopeSpec &SS, UnqualifiedId &Name,
                                      Stmt *Nested);

//===------------------------- "Block" Extension ------------------------===//

/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);

/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
                         Scope *CurScope);

/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);

/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed.  ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
                              Scope *CurScope);

//===---------------------------- Clang Extensions ----------------------===//

/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
                                  SourceLocation BuiltinLoc,
                                  SourceLocation RParenLoc);

//===---------------------------- OpenCL Features -----------------------===//

/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); // HLSL Change Starts //===---------------------------- HLSL Features -------------------------===// /// cbuffer/tbuffer llvm::SmallVector<Decl*, 1> HLSLBuffers; Decl* ActOnStartHLSLBuffer(Scope* bufferScope, bool cbuffer, SourceLocation KwLoc, IdentifierInfo *Ident, SourceLocation IdentLoc, std::vector<hlsl::UnusualAnnotation *>& BufferAttributes, SourceLocation LBrace); void ActOnFinishHLSLBuffer(Decl *Dcl, SourceLocation RBrace); Decl* getActiveHLSLBuffer() const; void ActOnStartHLSLBufferView(); bool IsOnHLSLBufferView(); Decl *ActOnHLSLBufferView(Scope *bufferScope, SourceLocation KwLoc, DeclGroupPtrTy &dcl, bool iscbuf); // HLSL Change Ends //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, AttributeList *AttrList); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); CXXRecordDecl *getStdBadAlloc() const; /// \brief Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// \brief Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// \brief Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. 
bool isInitListConstructor(const CXXConstructorDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, AttributeList *AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, AttributeList *AttrList, bool IsInstantiation, bool HasTypenameKeyword, SourceLocation TypenameLoc); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, bool HasUsingKeyword, SourceLocation UsingLoc, CXXScopeSpec &SS, UnqualifiedId &Name, AttributeList *AttrList, bool HasTypenameKeyword, SourceLocation TypenameLoc); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, AttributeList *AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. 
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
                      CXXConstructorDecl *Constructor, MultiExprArg Exprs,
                      bool HadMultipleCandidates, bool IsListInitialization,
                      bool IsStdInitListInitialization,
                      bool RequiresZeroInit, unsigned ConstructKind,
                      SourceRange ParenRange);

// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
                      CXXConstructorDecl *Constructor, bool Elidable,
                      MultiExprArg Exprs, bool HadMultipleCandidates,
                      bool IsListInitialization,
                      bool IsStdInitListInitialization, bool RequiresZeroInit,
                      unsigned ConstructKind, SourceRange ParenRange);

ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);

/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
                                  FunctionDecl *FD,
                                  ParmVarDecl *Param);

/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);

/// \brief Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
  // Pointer to allow copying
  Sema *Self;
  // We order exception specifications thus:
  // noexcept is the most restrictive, but is only used in C++11.
  // throw() comes next.
  // Then a throw(collected exceptions)
  // Finally no specification, which is expressed as noexcept(false).
  // throw(...) is used instead if any called function uses it.
  ExceptionSpecificationType ComputedEST;
  llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
  SmallVector<QualType, 4> Exceptions;

  // Reset the collected exception set back to empty.
  void ClearExceptions() {
    ExceptionsSeen.clear();
    Exceptions.clear();
  }

public:
  explicit ImplicitExceptionSpecification(Sema &Self)
    : Self(&Self), ComputedEST(EST_BasicNoexcept) {
    if (!Self.getLangOpts().CPlusPlus11)
      ComputedEST = EST_DynamicNone;
  }

  /// \brief Get the computed exception specification type.
  ExceptionSpecificationType getExceptionSpecType() const {
    assert(ComputedEST != EST_ComputedNoexcept &&
           "noexcept(expr) should not be a possible result");
    return ComputedEST;
  }

  /// \brief The number of exceptions in the exception specification.
  unsigned size() const { return Exceptions.size(); }

  /// \brief The set of exceptions in the exception specification.
  const QualType *data() const { return Exceptions.data(); }

  /// \brief Integrate another called method into the collected data.
  void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);

  /// \brief Integrate an invoked expression into the collected data.
  void CalledExpr(Expr *E);

  /// \brief Overwrite an EPI's exception specification with this
  /// computed exception specification.
  FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
    FunctionProtoType::ExceptionSpecInfo ESI;
    ESI.Type = getExceptionSpecType();
    if (ESI.Type == EST_Dynamic) {
      ESI.Exceptions = Exceptions;
    } else if (ESI.Type == EST_None) {
      /// C++11 [except.spec]p14:
      ///   The exception-specification is noexcept(false) if the set of
      ///   potential exceptions of the special member function contains "any"
      ESI.Type = EST_ComputedNoexcept;
      ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
                                                   tok::kw_false).get();
    }
    return ESI;
  }
};

/// \brief Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                         CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);

/// \brief Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD);

/// \brief Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);

/// \brief Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
                                 ExceptionSpecificationType EST,
                                 ArrayRef<ParsedType> DynamicExceptions,
                                 ArrayRef<SourceRange> DynamicExceptionRanges,
                                 Expr *NoexceptExpr,
                                 SmallVectorImpl<QualType> &Exceptions,
                                 FunctionProtoType::ExceptionSpecInfo &ESI);

/// \brief Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);

/// \brief Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
       ExceptionSpecificationType EST,
       SourceRange SpecificationRange,
       ArrayRef<ParsedType> DynamicExceptions,
       ArrayRef<SourceRange> DynamicExceptionRanges,
       Expr *NoexceptExpr);

/// \brief Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
                               bool Diagnose = false);

/// \brief Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
                                                    CXXRecordDecl *ClassDecl);

/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
                                      CXXConstructorDecl *Constructor);

/// \brief Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// \brief Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor); /// \brief Declare all inheriting constructors for the given class. /// /// \param ClassDecl The class declaration into which the inheriting /// constructors will be added. void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl); /// \brief Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. 
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
                                   CXXConstructorDecl *Constructor);

/// \brief Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);

/// \brief Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// \brief Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);

/// \brief Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
                                  CXXMethodDecl *MethodDecl);

/// \brief Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);

/// \brief Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);

/// \brief Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);

/// \brief Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);

/// \brief Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). 
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// \brief Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// \brief Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// \brief When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// \brief RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// \brief Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers. /// along with the qualifiers placed on '*this'. 
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// \brief Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr); /// \brief Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. 
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer, bool TypeMayContainAuto = true); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, bool UseGlobal, QualType AllocType, bool IsArray, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete); bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range, DeclarationName Name, MultiExprArg Args, DeclContext *Ctx, bool AllowMissing, FunctionDecl *&Operator, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, QualType Param1, QualType Param2 = QualType(), bool addRestrictAttr = 
false);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
                              DeclarationName Name,
                              FunctionDecl* &Operator, bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
                                            bool CanProvideSize,
                                            DeclarationName Name);

/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
                          bool ArrayForm, Expr *Operand);

DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
                                  SourceLocation StmtLoc,
                                  bool ConvertToBoolean);

/// ActOnNoexceptExpr - Parsed a noexcept( expression ) operator.
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
                             Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
                                SourceLocation RParen);

/// \brief Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                          ArrayRef<ParsedType> Args,
                          SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
                          ArrayRef<TypeSourceInfo *> Args,
                          SourceLocation RParenLoc);

/// ActOnArrayTypeTrait - Parsed one of the array type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc,
                               ParsedType LhsTy, Expr *DimExpr,
                               SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc,
                               TypeSourceInfo *TSInfo, Expr *DimExpr,
                               SourceLocation RParen);

/// ActOnExpressionTrait - Parsed one of the expression trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); ExprResult ActOnFinishFullExpr(Expr *Expr) { return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc() : SourceLocation()); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue = false, bool IsConstexpr = false, bool IsLambdaInitCaptureInitializer = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. 
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// \brief The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// \brief The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation IdLoc, IdentifierInfo &II, ParsedType ObjectType); bool BuildCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, QualType ObjectType, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr); /// \brief The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param Identifier The identifier preceding the '::'. 
/// /// \param IdentifierLoc The location of the identifier. /// /// \param CCLoc The location of the '::'. /// /// \param ObjectType The type of the object, if we're parsing /// nested-name-specifier in a member access expression. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, ParsedType ObjectType, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation ColonLoc, ParsedType ObjectType, bool EnteringContext); /// \brief The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// \brief Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// \brief Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// \brief Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// \brief Start the definition of a lambda expression. 
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params); /// \brief Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// \brief Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. QualType performLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, Expr *&Init); /// \brief Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, Expr *Init); /// \brief Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// \brief Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief Introduce the lambda parameters into scope. void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope); /// \brief Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. 
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// \brief Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// \brief Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// \brief Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. 
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, Expr **Strings, unsigned NumStrings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type /// of ValueType, which is allowed to be a built-in numeric type, "char *", /// "const char *" or C structure with attribute 'objc_boxable'. ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, ObjCDictionaryElement *Elements, unsigned NumElements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for 
\@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, AttributeList *Attrs = nullptr); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); 
MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// \brief The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// \brief The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// \brief The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// \brief Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// \brief Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// \brief Mark the exception specifications of all virtual member functions /// in the given class as needed. 
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// \brief Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); void checkClassLevelDLLAttribute(CXXRecordDecl *Class); void propagateDLLAttrToBaseClassTemplate( CXXRecordDecl *Class, Attr *ClassAttr, ClassTemplateSpecializationDecl *BaseTemplateSpec, SourceLocation BaseLoc); void CheckCompletedCXXClass(CXXRecordDecl *Record); void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); void ActOnFinishCXXMemberDecls(); void ActOnFinishCXXMemberDefaultArgs(Decl *D); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr 
*AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, unsigned NumBases); void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases, unsigned NumBases); bool IsDerivedFrom(QualType Derived, QualType Base); bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like 
this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, const InitializedEntity &Entity, AccessSpecifier Access, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx); bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl, AccessSpecifier access, QualType objectType); void 
HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// \brief When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, AbstractDiagSelID SelID = AbstractNone); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool 
&MemberOfUnknownSpecialization); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); Decl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); Decl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, Decl **Params, unsigned NumParams, SourceLocation RAngleLoc); /// \brief The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false); /// \brief Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template); DeclResult ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, AttributeList *Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl 
*PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization(FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// \brief Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// \brief The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// \brief The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// \brief The template argument was deduced from an array bound /// via template argument deduction. 
CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// \brief Check that the given template arguments can be be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \returns true if an error occurred, false otherwise. 
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. 
/// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// \brief The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// \brief An arbitrary expression. UPPC_Expression = 0, /// \brief The base type of a class type. UPPC_BaseType, /// \brief The type of an arbitrary declaration. UPPC_DeclarationType, /// \brief The type of a data member. UPPC_DataMemberType, /// \brief The size of a bit-field. UPPC_BitFieldWidth, /// \brief The expression in a static assertion. 
UPPC_StaticAssertExpression, /// \brief The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// \brief The enumerator value. UPPC_EnumeratorValue, /// \brief A using declaration. UPPC_UsingDeclaration, /// \brief A friend declaration. UPPC_FriendDeclaration, /// \brief A declaration qualifier. UPPC_DeclarationQualifier, /// \brief An initializer. UPPC_Initializer, /// \brief A default argument. UPPC_DefaultArgument, /// \brief The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// \brief The type of an exception. UPPC_ExceptionType, /// \brief Partial specialization. UPPC_PartialSpecialization, /// \brief Microsoft __if_exists. UPPC_IfExists, /// \brief Microsoft __if_not_exists. UPPC_IfNotExists, /// \brief Lambda expression. UPPC_Lambda, /// \brief Block expression, UPPC_Block }; /// \brief Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// \brief If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostc should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// \brief If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. 
bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// \brief If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// \brief If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// \brief If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param SS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(CXXScopeSpec &SS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. 
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// \brief Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. 
/// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// \brief Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. 
Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// \brief Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType); /// \brief Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. 
enum TemplateDeductionResult {
  /// \brief Template argument deduction was successful.
  TDK_Success = 0,
  /// \brief The declaration was invalid; do nothing.
  TDK_Invalid,
  /// \brief Template argument deduction exceeded the maximum template
  /// instantiation depth (which has already been diagnosed).
  TDK_InstantiationDepth,
  /// \brief Template argument deduction did not deduce a value
  /// for every template parameter.
  TDK_Incomplete,
  /// \brief Template argument deduction produced inconsistent
  /// deduced values for the given template parameter.
  TDK_Inconsistent,
  /// \brief Template argument deduction failed due to inconsistent
  /// cv-qualifiers on a template parameter type that would
  /// otherwise be deduced, e.g., we tried to deduce T in "const T"
  /// but were given a non-const "X".
  TDK_Underqualified,
  /// \brief Substitution of the deduced template argument values
  /// resulted in an error.
  TDK_SubstitutionFailure,
  /// \brief A non-dependent component of the parameter did not match the
  /// corresponding component of the argument.
  TDK_NonDeducedMismatch,
  /// \brief When performing template argument deduction for a function
  /// template, there were too many call arguments.
  TDK_TooManyArguments,
  /// \brief When performing template argument deduction for a function
  /// template, there were too few call arguments.
  TDK_TooFewArguments,
  /// \brief The explicitly-specified template arguments were not valid
  /// template arguments for the given template.
  TDK_InvalidExplicitArguments,
  /// \brief The arguments included an overloaded function name that could
  /// not be resolved to a suitable function.
  TDK_FailedOverloadResolution,
  /// \brief Deduction failed; that's all we know.
  TDK_MiscellaneousDeductionFailure
};

TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                        const TemplateArgumentList &TemplateArgs,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult SubstituteExplicitTemplateArguments(
    FunctionTemplateDecl *FunctionTemplate,
    TemplateArgumentListInfo &ExplicitTemplateArgs,
    SmallVectorImpl<DeducedTemplateArgument> &Deduced,
    SmallVectorImpl<QualType> &ParamTypes,
    QualType *FunctionType,
    sema::TemplateDeductionInfo &Info);

/// \brief A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
  OriginalCallArg(QualType OriginalParamType,
                  unsigned ArgIdx,
                  QualType OriginalArgType)
    : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx),
      OriginalArgType(OriginalArgType) { }

  QualType OriginalParamType;
  unsigned ArgIdx;
  QualType OriginalArgType;
};

TemplateDeductionResult
FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
                      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
                      unsigned NumExplicitlySpecified,
                      FunctionDecl *&Specialization,
                      sema::TemplateDeductionInfo &Info,
      SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
                      bool PartialOverloading = false);

TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        TemplateArgumentListInfo *ExplicitTemplateArgs,
                        ArrayRef<Expr *> Args,
                        FunctionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info,
                        bool PartialOverloading = false);

TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        TemplateArgumentListInfo *ExplicitTemplateArgs,
                        QualType ArgFunctionType,
                        FunctionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info,
                        bool InOverloadResolution = false);

TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        QualType ToType,
                        CXXConversionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                        TemplateArgumentListInfo *ExplicitTemplateArgs,
                        FunctionDecl *&Specialization,
                        sema::TemplateDeductionInfo &Info,
                        bool InOverloadResolution = false);

/// \brief Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);

/// \brief Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
                                        QualType Replacement);

/// \brief Result type of DeduceAutoType.
enum DeduceAutoResult {
  DAR_Succeeded,
  DAR_Failed,
  DAR_FailedAlreadyDiagnosed
};

DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer,
                                QualType &Result);
DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer,
                                QualType &Result);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
                      bool Diagnose = true);

TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;

bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
                                      SourceLocation ReturnLoc,
                                      Expr *&RetExpr, AutoType *AT);

FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
                                                 FunctionTemplateDecl *FT2,
                                                 SourceLocation Loc,
                                           TemplatePartialOrderingContext TPOC,
                                                 unsigned NumCallArguments1,
                                                 unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
                   TemplateSpecCandidateSet &FailedCandidates,
                   SourceLocation Loc,
                   const PartialDiagnostic &NoneDiag,
                   const PartialDiagnostic &AmbigDiag,
                   const PartialDiagnostic &CandidateDiag,
                   bool Complain = true, QualType TargetType = QualType());

ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
                                  ClassTemplatePartialSpecializationDecl *PS1,
                                  ClassTemplatePartialSpecializationDecl *PS2,
                                  SourceLocation Loc);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
    VarTemplatePartialSpecializationDecl *PS1,
    VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);

void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
                                bool OnlyDeduced,
                                unsigned Depth,
                                llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
                                const FunctionTemplateDecl *FunctionTemplate,
                                llvm::SmallBitVector &Deduced) {
  return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
                                const FunctionTemplateDecl *FunctionTemplate,
                                llvm::SmallBitVector &Deduced);

//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//

MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
                             const TemplateArgumentList *Innermost = nullptr,
                             bool RelativeToPrimary = false,
                             const FunctionDecl *Pattern = nullptr);

/// \brief A template instantiation that is currently in progress.
struct ActiveTemplateInstantiation {
  /// \brief The kind of template instantiation we are performing
  enum InstantiationKind {
    /// We are instantiating a template declaration. The entity is
    /// the declaration we're instantiating (e.g., a CXXRecordDecl).
    TemplateInstantiation,

    /// We are instantiating a default argument for a template
    /// parameter. The Entity is the template, and
    /// TemplateArgs/NumTemplateArguments provides the template
    /// arguments as specified.
    /// FIXME: Use a TemplateArgumentList
    DefaultTemplateArgumentInstantiation,

    /// We are instantiating a default argument for a function.
    /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
    /// provides the template arguments as specified.
    DefaultFunctionArgumentInstantiation,

    /// We are substituting explicit template arguments provided for
    /// a function template. The entity is a FunctionTemplateDecl.
    ExplicitTemplateArgumentSubstitution,

    /// We are substituting template argument determined as part of
    /// template argument deduction for either a class template
    /// partial specialization or a function template. The
    /// Entity is either a ClassTemplatePartialSpecializationDecl or
    /// a FunctionTemplateDecl.
    DeducedTemplateArgumentSubstitution,

    /// We are substituting prior template arguments into a new
    /// template parameter. The template parameter itself is either a
    /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
    PriorTemplateArgumentSubstitution,

    /// We are checking the validity of a default template argument that
    /// has been used when naming a template-id.
    DefaultTemplateArgumentChecking,

    /// We are instantiating the exception specification for a function
    /// template which was deferred until it was needed.
    ExceptionSpecInstantiation
  } Kind;

  /// \brief The point of instantiation within the source code.
  SourceLocation PointOfInstantiation;

  /// \brief The template (or partial specialization) in which we are
  /// performing the instantiation, for substitutions of prior template
  /// arguments.
  NamedDecl *Template;

  /// \brief The entity that is being instantiated.
  Decl *Entity;

  /// \brief The list of template arguments we are substituting, if they
  /// are not part of the entity.
  const TemplateArgument *TemplateArgs;

  /// \brief The number of template arguments in TemplateArgs.
  unsigned NumTemplateArgs;

  /// \brief The template deduction info object associated with the
  /// substitution or checking of explicit or deduced template arguments.
  sema::TemplateDeductionInfo *DeductionInfo;

  /// \brief The source range that covers the construct that caused
  /// the instantiation, e.g., the template-id that causes a class
  /// template instantiation.
  SourceRange InstantiationRange;

  ActiveTemplateInstantiation()
    : Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr),
      TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {}

  /// \brief Determines whether this template is an actual instantiation
  /// that should be counted toward the maximum instantiation depth.
  bool isInstantiationRecord() const;

  friend bool operator==(const ActiveTemplateInstantiation &X,
                         const ActiveTemplateInstantiation &Y) {
    if (X.Kind != Y.Kind)
      return false;

    if (X.Entity != Y.Entity)
      return false;

    switch (X.Kind) {
      case TemplateInstantiation:
      case ExceptionSpecInstantiation:
        return true;

      case PriorTemplateArgumentSubstitution:
      case DefaultTemplateArgumentChecking:
        return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs;

      case DefaultTemplateArgumentInstantiation:
      case ExplicitTemplateArgumentSubstitution:
      case DeducedTemplateArgumentSubstitution:
      case DefaultFunctionArgumentInstantiation:
        return X.TemplateArgs == Y.TemplateArgs;
    }

    llvm_unreachable("Invalid InstantiationKind!");
  }

  friend bool operator!=(const ActiveTemplateInstantiation &X,
                         const ActiveTemplateInstantiation &Y) {
    return !(X == Y);
  }
};

/// \brief List of active template instantiations.
///
/// This vector is treated as a stack. As one template instantiation
/// requires another template instantiation, additional
/// instantiations are pushed onto the stack up to a
/// user-configurable limit LangOptions::InstantiationDepth.
SmallVector<ActiveTemplateInstantiation, 16>
  ActiveTemplateInstantiations;

/// \brief Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules;

/// \brief Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;

/// \brief Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();

/// \brief Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;

/// \brief The number of ActiveTemplateInstantiation entries in
/// \c ActiveTemplateInstantiations that are not actual instantiations and,
/// therefore, should not be counted as part of the instantiation depth.
unsigned NonInstantiationEntries;

/// \brief The last template from which a template instantiation
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant template
/// instantiation backtraces when there are multiple errors in the
/// same instantiation. FIXME: Does this belong in Sema? It's tough
/// to implement it anywhere else.
ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;

/// \brief The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;

/// \brief RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int OldSubstitutionIndex;

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
    : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
  }
};

// NOTE(review): this names 'ArgumentPackSubstitutionRAII', but the class
// declared just above is 'ArgumentPackSubstitutionIndexRAII' -- confirm
// whether this friend declaration refers to an out-of-view class or is stale.
friend class ArgumentPackSubstitutionRAII;

/// \brief The stack of call expressions undergoing template instantiation.
///
/// The top of this stack is used by a fixit instantiating unresolved
/// function calls to fix the AST to match the textual change it prints.
SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;

/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
  SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;

/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// \brief Note that we are instantiating a class template,
  /// function template, or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  // Tag type used to select the exception-specification constructor below.
  struct ExceptionSpecification {};
  /// \brief Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        ActiveTemplateInstantiation::InstantiationKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating a default argument for a function
  /// parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// \brief Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// \brief Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// \brief Note that we have finished instantiating this template.
  void Clear();

  ~InstantiatingTemplate() { Clear(); }

  /// \brief Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }

private:
  Sema &SemaRef;
  bool Invalid;
  bool SavedInNonInstantiationSFINAEContext;
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  // Common delegate constructor used by all of the public constructors above.
  InstantiatingTemplate(
      Sema &SemaRef, ActiveTemplateInstantiation::InstantiationKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = ArrayRef<TemplateArgument>(),
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  InstantiatingTemplate(const InstantiatingTemplate&) = delete;
  InstantiatingTemplate&
  operator=(const InstantiatingTemplate&) = delete;
};

void PrintInstantiationStack();

/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

/// \brief Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  return ExprEvalContexts.back().isUnevaluated();
}

/// \brief RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
  Sema &SemaRef;
  unsigned PrevSFINAEErrors;
  bool PrevInNonInstantiationSFINAEContext;
  bool PrevAccessCheckingSFINAE;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
    : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
      PrevInNonInstantiationSFINAEContext(
                                      SemaRef.InNonInstantiationSFINAEContext),
      PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE)
  {
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    // Restore all of the saved SFINAE state on scope exit.
    SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext
      = PrevInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
  }

  /// \brief Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
  }
};

/// \brief RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  bool PrevDisableTypoCorrection;
public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }
  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
  }
};

/// \brief The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;

/// \brief Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;

/// \brief The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;

typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

/// \brief A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;

/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;

/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;

/// \brief RAII object that, when \p Enabled, saves the sets of pending
/// implicit instantiations and vtable uses on construction (leaving both
/// empty) and restores them on destruction, asserting that the current
/// sets have been fully drained in between.
class SavePendingInstantiationsAndVTableUsesRAII {
public:
  SavePendingInstantiationsAndVTableUsesRAII(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;

    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  ~SavePendingInstantiationsAndVTableUsesRAII() {
    if (!Enabled) return;

    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    assert(S.PendingInstantiations.empty() &&
           "PendingInstantiations should be empty before it is discarded.");
    S.PendingInstantiations.swap(SavedPendingInstantiations);
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};

/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

/// \brief RAII object that saves the queue of pending local implicit
/// instantiations on construction (leaving it empty) and restores it on
/// destruction, asserting that the queue was fully drained in between.
class SavePendingLocalImplicitInstantiationsRAII {
public:
  SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) {
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  ~SavePendingLocalImplicitInstantiationsRAII() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
  SavedPendingLocalImplicitInstantiations;
};

void PerformPendingInstantiations(bool LocalOnly = false);

TypeSourceInfo *SubstType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity);

QualType SubstType(QualType T,
                   const MultiLevelTemplateArgumentList &TemplateArgs,
                   SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstType(TypeLoc TL,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc,
                          DeclarationName Entity,
                          CXXRecordDecl *ThisContext,
                          unsigned ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
                        const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
                        const MultiLevelTemplateArgumentList &TemplateArgs,
                              int indexAdjustment,
                              Optional<unsigned> NumExpansions,
                              bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc,
                    ParmVarDecl **Params, unsigned NumParams,
                    const MultiLevelTemplateArgumentList &TemplateArgs,
                    SmallVectorImpl<QualType> &ParamTypes,
                    SmallVectorImpl<ParmVarDecl *> *OutParams = nullptr);
ExprResult SubstExpr(Expr *E,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

/// \brief Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param NumExprs The number of expressions in \p Exprs.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall,
                const MultiLevelTemplateArgumentList &TemplateArgs,
                SmallVectorImpl<Expr *> &Outputs);

StmtResult SubstStmt(Stmt *S,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

Decl *SubstDecl(Decl *D, DeclContext *Owner,
                const MultiLevelTemplateArgumentList &TemplateArgs);

ExprResult SubstInitializer(Expr *E,
                     const MultiLevelTemplateArgumentList &TemplateArgs,
                     bool CXXDirectInit);

bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
                    CXXRecordDecl *Pattern,
                    const MultiLevelTemplateArgumentList &TemplateArgs);

bool
InstantiateClass(SourceLocation PointOfInstantiation,
                 CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
                 const MultiLevelTemplateArgumentList &TemplateArgs,
                 TemplateSpecializationKind TSK,
                 bool Complain = true);

bool InstantiateEnum(SourceLocation PointOfInstantiation,
                     EnumDecl *Instantiation, EnumDecl *Pattern,
                     const MultiLevelTemplateArgumentList &TemplateArgs,
                     TemplateSpecializationKind TSK);

bool InstantiateInClassInitializer(
    SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
    FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);

/// \brief An attribute from a pattern declaration whose instantiation was
/// deferred, together with the scope and declaration it should eventually
/// be instantiated into.
struct LateInstantiatedAttribute {
  const Attr *TmplAttr;
  LocalInstantiationScope *Scope;
  Decl *NewDecl;

  LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
                            Decl *D)
    : TmplAttr(A), Scope(S), NewDecl(D)
  { }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;

void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
                      const Decl *Pattern, Decl *Inst,
                      LateInstantiatedAttrVec *LateAttrs = nullptr,
                      LocalInstantiationScope *OuterMostScope = nullptr);

bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
                     ClassTemplateSpecializationDecl *ClassTemplateSpec,
                     TemplateSpecializationKind TSK,
                     bool Complain = true);

void InstantiateClassMembers(SourceLocation PointOfInstantiation,
                             CXXRecordDecl *Instantiation,
                        const MultiLevelTemplateArgumentList &TemplateArgs,
                             TemplateSpecializationKind TSK);

void InstantiateClassTemplateSpecializationMembers(
                                    SourceLocation PointOfInstantiation,
                       ClassTemplateSpecializationDecl *ClassTemplateSpec,
                                              TemplateSpecializationKind TSK);

NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
                        const MultiLevelTemplateArgumentList &TemplateArgs);

DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
                         const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
                  SourceLocation Loc,
                  const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
           TemplateArgumentListInfo &Result,
           const MultiLevelTemplateArgumentList &TemplateArgs);

void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
                              FunctionDecl *Function);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
                                   FunctionDecl *Function,
                                   bool Recursive = false,
                                   bool DefinitionRequired = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
    VarTemplateDecl *VarTemplate, VarDecl *FromVar,
    const TemplateArgumentList &TemplateArgList,
    const TemplateArgumentListInfo &TemplateArgsInfo,
    SmallVectorImpl<TemplateArgument> &Converted,
    SourceLocation PointOfInstantiation, void *InsertPos,
    LateInstantiatedAttrVec *LateAttrs = nullptr,
    LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
    VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
    const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
                           const MultiLevelTemplateArgumentList &TemplateArgs,
                           LateInstantiatedAttrVec *LateAttrs,
                           DeclContext *Owner,
                           LocalInstantiationScope *StartingScope,
                           bool InstantiatingVarTemplate = false);
void InstantiateVariableInitializer(
    VarDecl *Var, VarDecl *OldVar,
    const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
                                   VarDecl *Var, bool Recursive = false,
                                   bool DefinitionRequired = false);
void InstantiateStaticDataMemberDefinition(
                                     SourceLocation PointOfInstantiation,
                                     VarDecl *Var,
                                     bool Recursive = false,
                                     bool DefinitionRequired = false);

void InstantiateMemInitializers(CXXConstructorDecl *New,
                                const CXXConstructorDecl *Tmpl,
                      const MultiLevelTemplateArgumentList &TemplateArgs);

NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
                      const MultiLevelTemplateArgumentList &TemplateArgs);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
                      const MultiLevelTemplateArgumentList &TemplateArgs);

// Objective-C declarations.
enum ObjCContainerKind {
    OCK_None = -1,
    OCK_Interface = 0,
    OCK_Protocol,
    OCK_Category,
    OCK_ClassExtension,
    OCK_Implementation,
    OCK_CategoryImplementation
  };

  /// \brief Determine the kind of Objective-C container we are currently in,
  /// if any.
  ObjCContainerKind getObjCContainerKind() const;

  /// \brief Act on a single parsed Objective-C type parameter.
  DeclResult actOnObjCTypeParam(Scope *S,
                                ObjCTypeParamVariance variance,
                                SourceLocation varianceLoc,
                                unsigned index,
                                IdentifierInfo *paramName,
                                SourceLocation paramLoc,
                                SourceLocation colonLoc,
                                ParsedType typeBound);

  /// \brief Act on a parsed Objective-C type parameter list.
  ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
                                            ArrayRef<Decl *> typeParams,
                                            SourceLocation rAngleLoc);

  /// \brief Pop the given type parameter list out of scope.
  void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);

  /// \brief Begin an \@interface declaration.
  Decl *ActOnStartClassInterface(Scope *S,
                                 SourceLocation AtInterfaceLoc,
                                 IdentifierInfo *ClassName,
                                 SourceLocation ClassLoc,
                                 ObjCTypeParamList *typeParamList,
                                 IdentifierInfo *SuperName,
                                 SourceLocation SuperLoc,
                                 ArrayRef<ParsedType> SuperTypeArgs,
                                 SourceRange SuperTypeArgsRange,
                                 Decl * const *ProtoRefs,
                                 unsigned NumProtoRefs,
                                 const SourceLocation *ProtoLocs,
                                 SourceLocation EndProtoLoc,
                                 AttributeList *AttrList);

  /// \brief Check and attach the parsed superclass of an \@interface.
  void ActOnSuperClassOfClassInterface(Scope *S,
                                       SourceLocation AtInterfaceLoc,
                                       ObjCInterfaceDecl *IDecl,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *SuperName,
                                       SourceLocation SuperLoc,
                                       ArrayRef<ParsedType> SuperTypeArgs,
                                       SourceRange SuperTypeArgsRange);

  /// \brief Collect the protocols referenced by a typedef'd base name.
  void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                               IdentifierInfo *SuperName,
                               SourceLocation SuperLoc);

  /// \brief Handle an \@compatibility_alias declaration.
  Decl *ActOnCompatibilityAlias(
                    SourceLocation AtCompatibilityAliasLoc,
                    IdentifierInfo *AliasName, SourceLocation AliasLocation,
                    IdentifierInfo *ClassName, SourceLocation ClassLocation);

  /// \brief Diagnose circular protocol dependencies; returns true if a cycle
  /// was found.
  bool CheckForwardProtocolDeclarationForCircularDependency(
    IdentifierInfo *PName,
    SourceLocation &PLoc, SourceLocation PrevLoc,
    const ObjCList<ObjCProtocolDecl> &PList);

  /// \brief Begin an \@protocol declaration.
  Decl *ActOnStartProtocolInterface(
                    SourceLocation AtProtoInterfaceLoc,
                    IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
                    Decl * const *ProtoRefNames, unsigned NumProtoRefs,
                    const SourceLocation *ProtoLocs,
                    SourceLocation EndProtoLoc,
                    AttributeList *AttrList);

  /// \brief Begin an \@interface category declaration.
  Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
                                    IdentifierInfo *ClassName,
                                    SourceLocation ClassLoc,
                                    ObjCTypeParamList *typeParamList,
                                    IdentifierInfo *CategoryName,
                                    SourceLocation CategoryLoc,
                                    Decl * const *ProtoRefs,
                                    unsigned NumProtoRefs,
                                    const SourceLocation *ProtoLocs,
                                    SourceLocation EndProtoLoc);

  /// \brief Begin an \@implementation for a class.
  Decl *ActOnStartClassImplementation(
                    SourceLocation AtClassImplLoc,
                    IdentifierInfo *ClassName, SourceLocation ClassLoc,
                    IdentifierInfo *SuperClassname,
                    SourceLocation SuperClassLoc);

  /// \brief Begin an \@implementation for a category.
  Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                         IdentifierInfo *ClassName,
                                         SourceLocation ClassLoc,
                                         IdentifierInfo *CatName,
                                         SourceLocation CatLoc);

  /// \brief Finish an Objective-C \@implementation, grouping the given decls.
  DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                               ArrayRef<Decl *> Decls);

  /// \brief Handle a forward \@class declaration.
  DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
                   IdentifierInfo **IdentList,
                   SourceLocation *IdentLocs,
                   ArrayRef<ObjCTypeParamList *> TypeParamLists,
                   unsigned NumElts);

  /// \brief Handle a forward \@protocol declaration.
  DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
                                        const IdentifierLocPair *IdentList,
                                        unsigned NumElts,
                                        AttributeList *attrList);

  /// \brief Look up the protocols named in \p ProtocolId, appending the found
  /// declarations to \p Protocols.
  void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
                               const IdentifierLocPair *ProtocolId,
                               unsigned NumProtocols,
                               SmallVectorImpl<Decl *> &Protocols);

  /// Given a list of identifiers (and their locations), resolve the
  /// names to either Objective-C protocol qualifiers or type
  /// arguments, as appropriate.
  void actOnObjCTypeArgsOrProtocolQualifiers(
         Scope *S,
         ParsedType baseType,
         SourceLocation lAngleLoc,
         ArrayRef<IdentifierInfo *> identifiers,
         ArrayRef<SourceLocation> identifierLocs,
         SourceLocation rAngleLoc,
         SourceLocation &typeArgsLAngleLoc,
         SmallVectorImpl<ParsedType> &typeArgs,
         SourceLocation &typeArgsRAngleLoc,
         SourceLocation &protocolLAngleLoc,
         SmallVectorImpl<Decl *> &protocols,
         SourceLocation &protocolRAngleLoc,
         bool warnOnIncompleteProtocols);

  /// Build an Objective-C protocol-qualified 'id' type where no
  /// base type was specified.
  TypeResult actOnObjCProtocolQualifierType(
               SourceLocation lAngleLoc,
               ArrayRef<Decl *> protocols,
               ArrayRef<SourceLocation> protocolLocs,
               SourceLocation rAngleLoc);

  /// Build a specialized and/or protocol-qualified Objective-C type.
  TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
               Scope *S,
               SourceLocation Loc,
               ParsedType BaseType,
               SourceLocation TypeArgsLAngleLoc,
               ArrayRef<ParsedType> TypeArgs,
               SourceLocation TypeArgsRAngleLoc,
               SourceLocation ProtocolLAngleLoc,
               ArrayRef<Decl *> Protocols,
               ArrayRef<SourceLocation> ProtocolLocs,
               SourceLocation ProtocolRAngleLoc);

  /// Build an Objective-C object pointer type.
  QualType BuildObjCObjectType(QualType BaseType,
                               SourceLocation Loc,
                               SourceLocation TypeArgsLAngleLoc,
                               ArrayRef<TypeSourceInfo *> TypeArgs,
                               SourceLocation TypeArgsRAngleLoc,
                               SourceLocation ProtocolLAngleLoc,
                               ArrayRef<ObjCProtocolDecl *> Protocols,
                               ArrayRef<SourceLocation> ProtocolLocs,
                               SourceLocation ProtocolRAngleLoc,
                               bool FailOnError = false);

  /// Check the application of the Objective-C '__kindof' qualifier to
  /// the given type.
  bool checkObjCKindOfType(QualType &type, SourceLocation loc);

  /// Ensure attributes are consistent with type.
  /// \param [in, out] Attributes The attributes to check; they will
  /// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
                                   SourceLocation Loc,
                                   unsigned &Attributes,
                                   bool propertyInPrimaryClass);

  /// Process the specified property declaration and create decls for the
  /// setters and getters as needed.
  /// \param property The property declaration being processed
  /// \param CD The semantic container for the property
  /// \param redeclaredProperty Declaration for property if redeclared
  ///        in class extension.
  /// \param lexicalDC Container for redeclaredProperty.
  void ProcessPropertyDecl(ObjCPropertyDecl *property,
                           ObjCContainerDecl *CD,
                           ObjCPropertyDecl *redeclaredProperty = nullptr,
                           ObjCContainerDecl *lexicalDC = nullptr);

  /// \brief Diagnose a mismatch between \p Property and the property it
  /// overrides (\p SuperProperty).
  void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
                                ObjCPropertyDecl *SuperProperty,
                                const IdentifierInfo *Name,
                                bool OverridingProtocolProperty);

  /// \brief Diagnose methods in a class extension that duplicate methods in
  /// the interface.
  void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
                                        ObjCInterfaceDecl *ID);

  /// \brief Handle \@end of an Objective-C container.
  Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
                   ArrayRef<Decl *> allMethods = None,
                   ArrayRef<DeclGroupPtrTy> allTUVars = None);

  /// \brief Handle an \@property declaration.
  Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
                      SourceLocation LParenLoc,
                      FieldDeclarator &FD, ObjCDeclSpec &ODS,
                      Selector GetterSel, Selector SetterSel,
                      bool *OverridingProperty,
                      tok::ObjCKeywordKind MethodImplKind,
                      DeclContext *lexicalDC = nullptr);

  /// \brief Handle an \@synthesize/\@dynamic property implementation.
  Decl *ActOnPropertyImplDecl(Scope *S,
                              SourceLocation AtLoc,
                              SourceLocation PropertyLoc,
                              bool ImplKind,
                              IdentifierInfo *PropertyId,
                              IdentifierInfo *PropertyIvar,
                              SourceLocation PropertyIvarLoc);

  enum ObjCSpecialMethodKind {
    OSMK_None,
    OSMK_Alloc,
    OSMK_New,
    OSMK_Copy,
    OSMK_RetainingInit,
    OSMK_NonRetainingInit
  };

  /// \brief Per-argument information for a parsed Objective-C method.
  struct ObjCArgInfo {
    IdentifierInfo *Name;
    SourceLocation NameLoc;
    // The Type is null if no type was specified, and the DeclSpec is invalid
    // in this case.
    ParsedType Type;
    ObjCDeclSpec DeclSpec;

    /// ArgAttrs - Attribute list for this argument.
    AttributeList *ArgAttrs;
  };

  /// \brief Handle a parsed Objective-C method declaration.
  Decl *ActOnMethodDeclaration(
    Scope *S,
    SourceLocation BeginLoc, // location of the + or -.
    SourceLocation EndLoc,   // location of the ; or {.
    tok::TokenKind MethodType,
    ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
    ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
    // optional arguments. The number of types/arguments is obtained
    // from the Sel.getNumArgs().
    ObjCArgInfo *ArgInfo,
    DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
    AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
    bool isVariadic, bool MethodDefinition);

  /// \brief Look up a method with the given selector in a protocol-qualified
  /// object pointer type.
  ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
                                              const ObjCObjectPointerType *OPT,
                                              bool IsInstance);

  /// \brief Look up a method with the given selector in the given object type.
  ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
                                           bool IsInstance);

  /// \brief Check an Objective-C method declaration under ARC rules; returns
  /// true on error.
  bool CheckARCMethodDecl(ObjCMethodDecl *method);

  /// \brief Infer an ARC lifetime qualifier for the given declaration.
  bool inferObjCARCLifetime(ValueDecl *decl);

  /// \brief Build a property reference expression for 'expr.prop'.
  ExprResult
  HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
                            Expr *BaseExpr,
                            SourceLocation OpLoc,
                            DeclarationName MemberName,
                            SourceLocation MemberLoc,
                            SourceLocation SuperLoc, QualType SuperType,
                            bool Super);

  /// \brief Build a class property reference for 'ClassName.prop'.
  ExprResult
  ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
                            IdentifierInfo &propertyName,
                            SourceLocation receiverNameLoc,
                            SourceLocation propertyNameLoc);

  /// \brief Try to capture an implicit 'self' for use at the given location.
  ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);

  /// \brief Describes the kind of message expression indicated by a message
  /// send that starts with an identifier.
  enum ObjCMessageKind {
    /// \brief The message is sent to 'super'.
    ObjCSuperMessage,
    /// \brief The message is an instance message.
    ObjCInstanceMessage,
    /// \brief The message is a class message, and the identifier is a type
    /// name.
    ObjCClassMessage
  };

  /// \brief Classify a message send beginning with the given identifier.
  ObjCMessageKind getObjCMessageKind(Scope *S,
                                     IdentifierInfo *Name,
                                     SourceLocation NameLoc,
                                     bool IsSuper,
                                     bool HasTrailingDot,
                                     ParsedType &ReceiverType);

  /// \brief Handle a message send to 'super'.
  ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
                               Selector Sel,
                               SourceLocation LBracLoc,
                               ArrayRef<SourceLocation> SelectorLocs,
                               SourceLocation RBracLoc,
                               MultiExprArg Args);

  /// \brief Build a class message send expression.
  ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
                               QualType ReceiverType,
                               SourceLocation SuperLoc,
                               Selector Sel,
                               ObjCMethodDecl *Method,
                               SourceLocation LBracLoc,
                               ArrayRef<SourceLocation> SelectorLocs,
                               SourceLocation RBracLoc,
                               MultiExprArg Args,
                               bool isImplicit = false);

  /// \brief Build an implicit class message send.
  ExprResult BuildClassMessageImplicit(QualType ReceiverType,
                                       bool isSuperReceiver,
                                       SourceLocation Loc,
                                       Selector Sel,
                                       ObjCMethodDecl *Method,
                                       MultiExprArg Args);

  /// \brief Handle a parsed class message send.
  ExprResult ActOnClassMessage(Scope *S,
                               ParsedType Receiver,
                               Selector Sel,
                               SourceLocation LBracLoc,
                               ArrayRef<SourceLocation> SelectorLocs,
                               SourceLocation RBracLoc,
                               MultiExprArg Args);

  /// \brief Build an instance message send expression.
  ExprResult BuildInstanceMessage(Expr *Receiver,
                                  QualType ReceiverType,
                                  SourceLocation SuperLoc,
                                  Selector Sel,
                                  ObjCMethodDecl *Method,
                                  SourceLocation LBracLoc,
                                  ArrayRef<SourceLocation> SelectorLocs,
                                  SourceLocation RBracLoc,
                                  MultiExprArg Args,
                                  bool isImplicit = false);

  /// \brief Build an implicit instance message send.
  ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
                                          QualType ReceiverType,
                                          SourceLocation Loc,
                                          Selector Sel,
                                          ObjCMethodDecl *Method,
                                          MultiExprArg Args);

  /// \brief Handle a parsed instance message send.
  ExprResult ActOnInstanceMessage(Scope *S,
                                  Expr *Receiver,
                                  Selector Sel,
                                  SourceLocation LBracLoc,
                                  ArrayRef<SourceLocation> SelectorLocs,
                                  SourceLocation RBracLoc,
                                  MultiExprArg Args);

  /// \brief Build an ARC bridged cast expression.
  ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
                                  ObjCBridgeCastKind Kind,
                                  SourceLocation BridgeKeywordLoc,
                                  TypeSourceInfo *TSInfo,
                                  Expr *SubExpr);

  /// \brief Handle a parsed ARC bridged cast.
  ExprResult ActOnObjCBridgedCast(Scope *S,
                                  SourceLocation LParenLoc,
                                  ObjCBridgeCastKind Kind,
                                  SourceLocation BridgeKeywordLoc,
                                  ParsedType Type,
                                  SourceLocation RParenLoc,
                                  Expr *SubExpr);

  /// \brief Check a toll-free-bridge cast for validity.
  void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);

  /// \brief Check a toll-free-bridge cast performed via static_cast; returns
  /// true if it is valid and sets \p Kind.
  bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
                                     CastKind &Kind);

  /// \brief Look up the components of an objc_bridge_related conversion
  /// between \p SrcType and \p DestType.
  bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
                                        QualType DestType, QualType SrcType,
                                        ObjCInterfaceDecl *&RelatedClass,
                                        ObjCMethodDecl *&ClassMethod,
                                        ObjCMethodDecl *&InstanceMethod,
                                        TypedefNameDecl *&TDNDecl,
                                        bool CfToNs);

  /// \brief Check (and possibly rewrite) an objc_bridge_related conversion.
  bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
                                         QualType DestType, QualType SrcType,
                                         Expr *&SrcExpr);

  /// \brief Check a conversion to an Objective-C string literal type.
  bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr);

  /// \brief Check the declaration of an init-family method.
  bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);

  /// \brief Check whether the given new method is a valid override of the
  /// given overridden method, and set any properties that should be inherited.
  void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
                               const ObjCMethodDecl *Overridden);

  /// \brief Describes the compatibility of a result type with its method.
  enum ResultTypeCompatibilityKind {
    RTC_Compatible,
    RTC_Incompatible,
    RTC_Unknown
  };

  /// \brief Check all overrides of the given method against the methods it
  /// overrides in \p CurrentClass and its protocols/superclasses.
  void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
                                ObjCInterfaceDecl *CurrentClass,
                                ResultTypeCompatibilityKind RTC);

  enum PragmaOptionsAlignKind {
    POAK_Native,  // #pragma options align=native
    POAK_Natural, // #pragma options align=natural
    POAK_Packed,  // #pragma options align=packed
    POAK_Power,   // #pragma options align=power
    POAK_Mac68k,  // #pragma options align=mac68k
    POAK_Reset    // #pragma options align=reset
  };

  /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
  void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                               SourceLocation PragmaLoc);

  enum PragmaPackKind {
    PPK_Default, // #pragma pack([n])
    PPK_Show,    // #pragma pack(show), only supported by MSVC.
    PPK_Push,    // #pragma pack(push, [identifier], [n])
    PPK_Pop      // #pragma pack(pop, [identifier], [n])
  };

  enum PragmaMSStructKind {
    PMSST_OFF, // #pragma ms_struct off
    PMSST_ON   // #pragma ms_struct on
  };

  enum PragmaMSCommentKind {
    PCK_Unknown,
    PCK_Linker,   // #pragma comment(linker, ...)
    PCK_Lib,      // #pragma comment(lib, ...)
    PCK_Compiler, // #pragma comment(compiler, ...)
    PCK_ExeStr,   // #pragma comment(exestr, ...)
    PCK_User      // #pragma comment(user, ...)
  };

  /// ActOnPragmaPack - Called on well formed \#pragma pack(...).
  void ActOnPragmaPack(PragmaPackKind Kind,
                       IdentifierInfo *Name,
                       Expr *Alignment,
                       SourceLocation PragmaLoc,
                       SourceLocation LParenLoc,
                       SourceLocation RParenLoc);

  /// ActOnPragmaPackMatrix - Called on well formed \#pragma pack_matrix(...).
  void ActOnPragmaPackMatrix(bool bRowMajor,
                             SourceLocation PragmaLoc);

  /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
  void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

  /// ActOnPragmaMSComment - Called on well formed
  /// \#pragma comment(kind, "arg").
  void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);

  /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
  /// pointers_to_members(representation method[, general purpose
  /// representation]).
  void ActOnPragmaMSPointersToMembers(
      LangOptions::PragmaMSPointersToMembersKind Kind,
      SourceLocation PragmaLoc);

  /// \brief Called on well formed \#pragma vtordisp().
  void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind,
                             SourceLocation PragmaLoc,
                             MSVtorDispAttr::Mode Value);

  enum PragmaSectionKind {
    PSK_DataSeg,
    PSK_BSSSeg,
    PSK_ConstSeg,
    PSK_CodeSeg,
  };

  /// \brief Check that \p TheDecl's section agrees with any previous use of
  /// \p SectionName; returns true on conflict.
  bool UnifySection(StringRef SectionName,
                    int SectionFlags,
                    DeclaratorDecl *TheDecl);

  /// \brief Record (or check) a \#pragma section use of \p SectionName.
  bool UnifySection(StringRef SectionName,
                    int SectionFlags,
                    SourceLocation PragmaSectionLocation);

  /// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                        PragmaMsStackAction Action,
                        llvm::StringRef StackSlotLabel,
                        StringLiteral *SegmentName,
                        llvm::StringRef PragmaName);

  /// \brief Called on well formed \#pragma section().
  void ActOnPragmaMSSection(SourceLocation PragmaLocation,
                            int SectionFlags, StringLiteral *SegmentName);

  /// \brief Called on well-formed \#pragma init_seg().
  void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                            StringLiteral *SegmentName);

  /// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch
  void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value);

  /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
  void ActOnPragmaUnused(const Token &Identifier,
                         Scope *curScope,
                         SourceLocation PragmaLoc);

  /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
  void ActOnPragmaVisibility(const IdentifierInfo* VisType,
                             SourceLocation PragmaLoc);

  /// \brief Clone \p ND under the alias \p II for \#pragma weak handling.
  NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                                 SourceLocation Loc);

  /// \brief Apply recorded \#pragma weak info \p W to the declaration \p ND.
  void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

  /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
  void ActOnPragmaWeakID(IdentifierInfo* WeakName,
                         SourceLocation PragmaLoc,
                         SourceLocation WeakNameLoc);

  /// ActOnPragmaRedefineExtname - Called on well formed
  /// \#pragma redefine_extname oldname newname.
  void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
                                  IdentifierInfo* AliasName,
                                  SourceLocation PragmaLoc,
                                  SourceLocation WeakNameLoc,
                                  SourceLocation AliasNameLoc);

  /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
  void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
                            IdentifierInfo* AliasName,
                            SourceLocation PragmaLoc,
                            SourceLocation WeakNameLoc,
                            SourceLocation AliasNameLoc);

  /// ActOnPragmaFPContract - Called on well formed
  /// \#pragma {STDC,OPENCL} FP_CONTRACT
  void ActOnPragmaFPContract(tok::OnOffSwitch OOS);

  /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
  /// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
  void AddAlignmentAttributesForRecord(RecordDecl *RD);

  /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
  void AddMsStructLayoutForRecord(RecordDecl *RD);

  /// FreePackedContext - Deallocate and null out PackContext.
  void FreePackedContext();

  /// PushNamespaceVisibilityAttr - Note that we've entered a
  /// namespace with a visibility attribute.
  void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                   SourceLocation Loc);

  /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
  /// add an appropriate visibility attribute.
  void AddPushedVisibilityAttribute(Decl *RD);

  /// PopPragmaVisibility - Pop the top element of the visibility stack; used
  /// for '\#pragma GCC visibility' and visibility attributes on namespaces.
  void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

  /// FreeVisContext - Deallocate and null out VisContext.
  void FreeVisContext();

  /// AddCFAuditedAttribute - Check whether we're currently within
  /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
  /// the appropriate attribute.
  void AddCFAuditedAttribute(Decl *D);

  /// \brief Called on well formed \#pragma clang optimize.
  void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

  /// \brief Get the location for the currently active "\#pragma clang optimize
  /// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
    return OptimizeOffPragmaLocation;
  }

  /// \brief Only called on function definitions; if there is a pragma in scope
  /// with the effect of a range-based optnone, consider marking the function
  /// with attribute optnone.
  void AddRangeBasedOptnone(FunctionDecl *FD);

  /// \brief Adds the 'optnone' attribute to the function declaration if there
  /// are no conflicts; Loc represents the location causing the 'optnone'
  /// attribute to be added (usually because of a pragma).
  void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

  /// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
  void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
                      unsigned SpellingListIndex, bool IsPackExpansion);
  void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
                      unsigned SpellingListIndex, bool IsPackExpansion);

  /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
  /// declaration.
  void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
                            unsigned SpellingListIndex);

  /// AddAlignValueAttr - Adds an align_value attribute to a particular
  /// declaration.
  void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
                         unsigned SpellingListIndex);

  /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
  /// declaration.
  void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
                           Expr *MinBlocks, unsigned SpellingListIndex);

  // OpenMP directives and clauses.
private:
  void *VarDataSharingAttributesStack;
  /// \brief Initialization of data-sharing attributes stack.
  void InitDataSharingAttributesStack();
  void DestroyDataSharingAttributesStack();
  ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op,
                                                   OpenMPClauseKind CKind);
public:
  /// \brief Checks if the specified variable is captured by the enclosing
  /// OpenMP region.
  bool IsOpenMPCapturedVar(VarDecl *VD);

  /// \brief Checks if the specified variable is used in one of the private
  /// clauses in OpenMP constructs.
  /// \param Level Relative level of nested OpenMP construct for that the check
  /// is performed.
  bool isOpenMPPrivateVar(VarDecl *VD, unsigned Level);

  /// \brief Perform the OpenMP implicit integer conversion on \p Op.
  ExprResult
  PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op);

  /// \brief Called on start of new data sharing attribute block.
  void StartOpenMPDSABlock(OpenMPDirectiveKind K,
                           const DeclarationNameInfo &DirName, Scope *CurScope,
                           SourceLocation Loc);
  /// \brief Start analysis of clauses.
  void StartOpenMPClause(OpenMPClauseKind K);
  /// \brief End analysis of clauses.
  void EndOpenMPClause();
  /// \brief Called on end of data sharing attribute block.
  void EndOpenMPDSABlock(Stmt *CurDirective);

  /// \brief Check if the current region is an OpenMP loop region and if it is,
  /// mark loop control variable, used in \p Init for loop initialization, as
  /// private by default.
  /// \param Init First part of the for loop.
  void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

  // OpenMP directives and clauses.
  /// \brief Called on correct id-expression from the '#pragma omp
  /// threadprivate'.
  ExprResult ActOnOpenMPIdExpression(Scope *CurScope,
                                     CXXScopeSpec &ScopeSpec,
                                     const DeclarationNameInfo &Id);
  /// \brief Called on well-formed '#pragma omp threadprivate'.
  DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
                                     SourceLocation Loc,
                                     ArrayRef<Expr *> VarList);
  /// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness.
  OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(
                                     SourceLocation Loc,
                                     ArrayRef<Expr *> VarList);

  /// \brief Initialization of captured region for OpenMP region.
  void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
  /// \brief End of OpenMP region.
  ///
  /// \param S Statement associated with the current OpenMP region.
  /// \param Clauses List of clauses for the current OpenMP region.
  ///
  /// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);

  /// \brief Dispatch an OpenMP executable directive to the directive-specific
  /// ActOn* handler below.
  StmtResult ActOnOpenMPExecutableDirective(
      OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
      OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
      Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp parallel' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt,
                                          SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp simd' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp for' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp for simd' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp sections' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp section' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp single' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp master' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp critical' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp parallel for' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp parallel for simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc,
      llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA);
  /// \brief Called on well-formed '\#pragma omp parallel sections' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                                  Stmt *AStmt,
                                                  SourceLocation StartLoc,
                                                  SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp task' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp taskyield'.
  StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp barrier'.
  StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp taskwait'.
  StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp taskgroup'.
  StmtResult ActOnOpenMPTaskgroupDirective(Stmt *AStmt,
                                           SourceLocation StartLoc,
                                           SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp flush'.
  StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp ordered' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPOrderedDirective(Stmt *AStmt, SourceLocation StartLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp atomic' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp target' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp teams' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                       Stmt *AStmt, SourceLocation StartLoc,
                                       SourceLocation EndLoc);
  /// \brief Called on well-formed '\#pragma omp cancellation point'.
  StmtResult
  ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        OpenMPDirectiveKind CancelRegion);
  /// \brief Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        OpenMPDirectiveKind CancelRegion);

  /// \brief Dispatch a single-expression OpenMP clause to its specific
  /// ActOn* handler below.
  OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
                                         Expr *Expr,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed 'if' clause.
  OMPClause *ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc);
  /// \brief Called on well-formed 'final' clause.
  OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
  /// \brief Called on well-formed 'num_threads' clause.
  OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
  /// \brief Called on well-formed 'safelen' clause.
  OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'collapse' clause.
  OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  /// \brief Dispatch a simple (argument-kind) OpenMP clause to its specific
  /// ActOn* handler below.
  OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
                                     unsigned Argument,
                                     SourceLocation ArgumentLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'default' clause.
  OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
                                      SourceLocation KindLoc,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'proc_bind' clause.
  OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
                                       SourceLocation KindLoc,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  /// \brief Dispatch an OpenMP clause carrying one expression plus an
  /// argument kind to its specific ActOn* handler below.
  OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
                                                unsigned Argument, Expr *Expr,
                                                SourceLocation StartLoc,
                                                SourceLocation LParenLoc,
                                                SourceLocation ArgumentLoc,
                                                SourceLocation CommaLoc,
                                                SourceLocation EndLoc);
  /// \brief Called on well-formed 'schedule' clause.
  OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind,
                                       Expr *ChunkSize, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation KindLoc,
                                       SourceLocation CommaLoc,
                                       SourceLocation EndLoc);

  /// \brief Dispatch an argument-less OpenMP clause to its specific ActOn*
  /// handler below.
  OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
                               SourceLocation EndLoc);
  /// \brief Called on well-formed 'ordered' clause.
  OMPClause *ActOnOpenMPOrderedClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'nowait' clause.
  OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'untied' clause.
  OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'mergeable' clause.
  OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                        SourceLocation EndLoc);
  /// \brief Called on well-formed 'read' clause.
  OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);
  /// \brief Called on well-formed 'write' clause.
  OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
  /// \brief Called on well-formed 'update' clause.
  OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'capture' clause.
  OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'seq_cst' clause.
  OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);

  /// \brief Dispatch a variable-list OpenMP clause to its specific ActOn*
  /// handler below.
  OMPClause *ActOnOpenMPVarListClause(
      OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation ColonLoc, SourceLocation EndLoc,
      CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId, OpenMPDependClauseKind DepKind,
      SourceLocation DepLoc);
  /// \brief Called on well-formed 'private' clause.
  OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'firstprivate' clause.
  OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                           SourceLocation StartLoc,
                                           SourceLocation LParenLoc,
                                           SourceLocation EndLoc);
  /// \brief Called on well-formed 'lastprivate' clause.
  OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);
  /// \brief Called on well-formed 'shared' clause.
  OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'reduction' clause.
  OMPClause *
  ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc,
                             SourceLocation LParenLoc, SourceLocation ColonLoc,
                             SourceLocation EndLoc,
                             CXXScopeSpec &ReductionIdScopeSpec,
                             const DeclarationNameInfo &ReductionId);
  /// \brief Called on well-formed 'linear' clause.
  OMPClause *ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation ColonLoc,
                                     SourceLocation EndLoc);
  /// \brief Called on well-formed 'aligned' clause.
  OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
                                      Expr *Alignment,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation ColonLoc,
                                      SourceLocation EndLoc);
  /// \brief Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'depend' clause. OMPClause * ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief The kind of conversion being performed. enum CheckedConversionKind { /// \brief An implicit conversion. CCK_ImplicitConversion, /// \brief A C-style cast. CCK_CStyleCast, /// \brief A functional-style cast. CCK_FunctionalCast, /// \brief A cast other than a C-style cast. CCK_OtherCast }; /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. 
ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. 
void checkVariadicArgument(const Expr *E, VariadicCallType CT);

/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);

/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                            const FunctionProtoType *Proto,
                            unsigned FirstParam, ArrayRef<Expr *> Args,
                            SmallVectorImpl<Expr *> &AllArgs,
                            VariadicCallType CallType = VariadicDoesNotApply,
                            bool AllowExplicit = false,
                            bool IsListInitialization = false);

// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                            FunctionDecl *FDecl);

// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                    bool IsCompAssign = false);

/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,

  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,

  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,

  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,

  /// IncompatiblePointer - The assignment is between two pointers types that
  /// are not compatible, but we accept them as an extension.
  IncompatiblePointer,

  /// IncompatiblePointerSign - The assignment is between two pointers types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's by
  /// far the most common case of incompatible pointers.
  IncompatiblePointerSign,

  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,

  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,

  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ e.g. char ** -> const char **, but we accept them as an
  /// extension.
  IncompatibleNestedPointerQualifiers,

  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,

  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,

  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointers types that are not compatible.
  IncompatibleBlockPointer,

  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For example,
  /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
  IncompatibleObjCQualifiedId,

  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
  IncompatibleObjCWeakRef,

  /// Incompatible - We reject this conversion outright, it is invalid to
  /// represent it in the AST.
  Incompatible
};

/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
                              SourceLocation Loc,
                              QualType DstType, QualType SrcType,
                              Expr *SrcExpr, AssignmentAction Action,
                              bool *Complained = nullptr);

/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                       bool AllowMask) const;

/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                            Expr *SrcExpr);

/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                             QualType LHSType,
                                             QualType RHSType);

/// Check assignment constraints and prepare for a conversion of the
/// RHS to the LHS type.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                             ExprResult &RHS,
                                             CastKind &Kind);

// CheckSingleAssignmentConstraints - Currently used by
// CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
// this routine performs the default function/array conversions.
AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType,
                                                   ExprResult &RHS,
                                                   bool Diagnose = true,
                                                   bool DiagnoseCFAudited = false);

// \brief If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
                                                           ExprResult &RHS);

// Returns true when the conversion is the (deprecated-in-C++) binding of a
// string literal to a pointer to non-const character.
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);

// Checks that the exception specifications involved in the conversion from
// \p From to \p ToType are compatible.
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);

/// Perform an implicit conversion of \p From to \p ToType; the overloads
/// below differ in whether the conversion sequence is computed here or
/// supplied by the caller.
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     AssignmentAction Action,
                                     bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     AssignmentAction Action,
                                     bool AllowExplicit,
                                     ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     const ImplicitConversionSequence& ICS,
                                     AssignmentAction Action,
                                     CheckedConversionKind CCK
                                       = CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     const StandardConversionSequence& SCS,
                                     AssignmentAction Action,
                                     CheckedConversionKind CCK);

/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).

/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
                         ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
  ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
  SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  bool IsCompAssign, bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  unsigned Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  unsigned Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  unsigned OpaqueOpc, bool isRelational);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  bool IsCompAssign = false);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
  ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
  unsigned Opc);

// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
  Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc,
  QualType CompoundType);

// Pseudo-object (e.g. ObjC property / MSVC __declspec(property)) operation
// checking.
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
                                   UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
                                       BinaryOperatorKind Opcode,
                                       Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);

QualType CheckConditionalOperands( // C99 6.5.15
  ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
  ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
  ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
  ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);

QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
                                  bool *NonStandardCompositeType = nullptr);
// Convenience wrapper over the Expr*& overload for ExprResult operands;
// writes the possibly-adjusted expressions back into E1/E2.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool *NonStandardCompositeType = nullptr) {
  Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
  QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp,
                                                NonStandardCompositeType);
  E1 = E1Tmp;
  E2 = E2Tmp;
  return Composite;
}

QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation QuestionLoc);

bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                SourceLocation QuestionLoc);

void DiagnoseAlwaysNonNullPointer(Expr *E,
                                  Expr::NullPointerConstantKind NullType,
                                  bool IsEqual, SourceRange Range);

/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                             SourceLocation Loc, bool IsCompAssign,
                             bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc, bool isRelational);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc);

// Returns true when a "lax" (bitcast-style) conversion between the two
// vector types is permitted.
bool isLaxVectorConversion(QualType srcType, QualType destType);

/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);

// type checking C++ declaration initializers (C++ [dcl.init]).

/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
  /// Ref_Incompatible - The two types are incompatible, so direct
  /// reference binding is not possible.
  Ref_Incompatible = 0,
  /// Ref_Related - The two types are reference-related, which means
  /// that their unqualified forms (T1 and T2) are either the same
  /// or T1 is a base class of T2.
  Ref_Related,
  /// Ref_Compatible_With_Added_Qualification - The two types are
  /// reference-compatible with added qualification, meaning that
  /// they are reference-compatible and the qualifiers on T1 (cv1)
  /// are greater than the qualifiers on T2 (cv2).
  Ref_Compatible_With_Added_Qualification,
  /// Ref_Compatible - The two types are reference-compatible and
  /// have equivalent qualifiers (cv1 == cv2).
  Ref_Compatible
};

ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
                                                    QualType T1, QualType T2,
                                                    bool &DerivedToBase,
                                                    bool &ObjCConversion,
                                                    bool &ObjCLifetimeConversion);

ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                               Expr *CastExpr, CastKind &CastKind,
                               ExprValueKind &VK, CXXCastPath &Path);

/// \brief Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);

/// \brief Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
                              Expr *result, QualType &paramType);

// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                     CastKind &Kind);

// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                              CastKind &Kind);

ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
                                      SourceLocation LParenLoc,
                                      Expr *CastExpr,
                                      SourceLocation RParenLoc);

enum ARCConversionResult { ACR_okay, ACR_unbridged };

/// \brief Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds.
ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
                                           QualType castType, Expr *&op,
                                           CheckedConversionKind CCK,
                                           bool DiagnoseCFAudited = false,
                                           BinaryOperatorKind Opc = BO_PtrMemD
                                           );

Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);

bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                           QualType ExprType);

/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);

/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);

/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);

/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(QualType ReceiverType,
                               MultiExprArg Args, Selector Sel,
                               ArrayRef<SourceLocation> SelectorLocs,
                               ObjCMethodDecl *Method, bool isClassMessage,
                               bool isSuperMessage,
                               SourceLocation lbrac, SourceLocation rbrac,
                               SourceRange RecRange,
                               QualType &ReturnType, ExprValueKind &VK);

/// \brief Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(QualType ReceiverType,
                                  ObjCMethodDecl *Method,
                                  bool isClassMessage, bool isSuperMessage);

/// \brief If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);

/// \brief Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);

/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc);

ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc,
                                 Expr *SubExpr);

/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);

/// \brief Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);

/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr);

/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
                                        unsigned NewWidth, bool NewSign,
                                        SourceLocation Loc, unsigned DiagID);

/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);

/// \brief Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
  // When true, suppress the "not an ICE" diagnostic entirely.
  bool Suppress;

  VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }

  // Emitted when the expression is not an ICE at all.
  virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
  // Emitted when the expression is not a strict ICE but could be folded.
  virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
  virtual ~VerifyICEDiagnoser() { }
};

/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           VerifyICEDiagnoser &Diagnoser,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
                                           unsigned DiagID,
                                           bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
                                           llvm::APSInt *Result = nullptr);

/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
                          QualType FieldTy, bool IsMsStruct,
                          Expr *BitWidth, bool *ZeroWidth = nullptr);

/// The CUDA target a function compiles for (host, device, both, or invalid).
enum CUDAFunctionTarget {
  CFT_Device,
  CFT_Global,
  CFT_Host,
  CFT_HostDevice,
  CFT_InvalidTarget
};

CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);

bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee);

/// Given a implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
///        its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
                                             CXXSpecialMember CSM,
                                             CXXMethodDecl *MemberDecl,
                                             bool ConstRHS,
                                             bool Diagnose);

/// \name Code completion
//@{
/// \brief Describes the context in which code completion occurs.
enum ParserCompletionContext {
  /// \brief Code completion occurs at top-level or namespace context.
  PCC_Namespace,
  /// \brief Code completion occurs within a class, struct, or union.
  PCC_Class,
  /// \brief Code completion occurs within an Objective-C interface, protocol,
  /// or category.
  PCC_ObjCInterface,
  /// \brief Code completion occurs within an Objective-C implementation or
  /// category implementation
  PCC_ObjCImplementation,
  /// \brief Code completion occurs within the list of instance variables
  /// in an Objective-C interface, protocol, category, or implementation.
  PCC_ObjCInstanceVariableList,
  /// \brief Code completion occurs following one or more template
  /// headers.
  PCC_Template,
  /// \brief Code completion occurs following one or more template
  /// headers within a class.
  PCC_MemberTemplate,
  /// \brief Code completion occurs within an expression.
  PCC_Expression,
  /// \brief Code completion occurs within a statement, which may
  /// also be an expression or a declaration.
  PCC_Statement,
  /// \brief Code completion occurs at the beginning of the
  /// initialization statement (or expression) in a for loop.
  PCC_ForInit,
  /// \brief Code completion occurs within the condition of an if,
  /// while, switch, or for statement.
  PCC_Condition,
  /// \brief Code completion occurs within the body of a function on a
  /// recovery path, where we do not have a specific handle on our position
  /// in the grammar.
  PCC_RecoveryInFunction,
  /// \brief Code completion occurs where only a type is permitted.
  PCC_Type,
  /// \brief Code completion occurs in a parenthesized expression, which
  /// might also be a type cast.
  PCC_ParenthesizedExpression,
  /// \brief Code completion occurs within a sequence of declaration
  /// specifiers within a function, method, or block.
  PCC_LocalDeclarationSpecifiers
};

void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
                              ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
                          bool AllowNonIdentifiers,
                          bool AllowNestedNameSpecifiers);

struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
                            const CodeCompleteExpressionData &Data);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc,
                                     bool IsArrow);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteCase(Scope *S);
void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args);
void CodeCompleteConstructor(Scope *S, QualType Type,
                             SourceLocation Loc, ArrayRef<Expr *> Args);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteReturn(Scope *S);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);

void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
                             bool EnteringContext);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
                              Decl *Constructor,
                              ArrayRef<CXXCtorInitializer *> Initializers);

void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
                                  bool AfterAmpersand);

void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
                                 bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
                                  ArrayRef<IdentifierInfo *> SelIdents,
                                  bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
                                  ArrayRef<IdentifierInfo *> SelIdents,
                                  bool AtArgumentExpression,
                                  bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
                                     ArrayRef<IdentifierInfo *> SelIdents,
                                     bool AtArgumentExpression,
                                     ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
                                   DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
                              ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
                                        unsigned NumProtocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
                                IdentifierInfo *ClassName,
                                SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
                                            IdentifierInfo *ClassName,
                                            SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
                                            IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S,
                                bool IsInstanceMethod,
                                ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
                                        bool IsInstanceMethod,
                                        bool AtParameterName,
                                        ParsedType ReturnType,
                                        ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
                                           IdentifierInfo *Macro,
                                           MacroInfo *MacroInfo,
                                           unsigned Argument);
void CodeCompleteNaturalLanguage();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
                                 CodeCompletionTUInfo &CCTUInfo,
                  SmallVectorImpl<CodeCompletionResult> &Results);
//@}

//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system

public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
                                              unsigned ByteNo) const;

private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
                      const ArraySubscriptExpr *ASE=nullptr,
                      bool AllowOnePastEnd=true, bool IndexNegated=false);
// HLSL Change Starts - checking array subscript access to vector or matrix member
void CheckHLSLArrayAccess(const Expr *expr);
// HLSL Change ends
void CheckArrayAccess(const Expr *E);

// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
  unsigned FormatIdx;    // index of the format-string argument
  unsigned FirstDataArg; // index of the first data argument
  bool HasVAListArg;     // true when the callee takes a va_list
};

bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
                         FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
                       const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
                         ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
                      const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
                          ArrayRef<const Expr *> Args,
                          const FunctionProtoType *Proto,
                          SourceLocation Loc);

void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
               ArrayRef<const Expr *> Args, bool IsMemberFunction,
               SourceLocation Loc, SourceRange Range,
               VariadicCallType CallType);

bool CheckObjCString(Expr *Arg);

ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
                                    unsigned BuiltinID, CallExpr *TheCall);

bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
                                  unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);

bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);

bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinVAStartARM(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);

public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
                                 SourceLocation BuiltinLoc,
                                 SourceLocation RParenLoc);

private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
                                   AtomicExpr::AtomicOp Op);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
                            llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
                                 int Low, int High);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
                              int ArgNum, unsigned ExpectedFieldNum,
                              bool AllowName);
bool SemaBuiltinCpuSupports(CallExpr *TheCall);

public:
// The family of format string a FormatAttr refers to.
enum FormatStringType {
  FST_Scanf,
  FST_Printf,
  FST_NSString,
  FST_Strftime,
  FST_Strfmon,
  FST_Kprintf,
  FST_FreeBSDKPrintf,
  FST_OSTrace,
  FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);

void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr,
                       ArrayRef<const Expr *> Args, bool HasVAListArg,
                       unsigned format_idx, unsigned firstDataArg,
                       FormatStringType Type, bool inFunctionCall,
                       VariadicCallType CallType,
                       llvm::SmallBitVector &CheckedVarArgs);

bool FormatStringHasSArg(const StringLiteral *FExpr);

bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);

private:
bool CheckFormatArguments(const FormatAttr *Format,
                          ArrayRef<const Expr *> Args,
                          bool IsCXXMember,
                          VariadicCallType CallType,
                          SourceLocation Loc, SourceRange Range,
                          llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
                          bool HasVAListArg, unsigned format_idx,
                          unsigned firstDataArg, FormatStringType Type,
                          VariadicCallType CallType,
                          SourceLocation Loc, SourceRange range,
                          llvm::SmallBitVector &CheckedVarArgs);

void CheckAbsoluteValueFunction(const CallExpr *Call,
                                const FunctionDecl *FDecl,
                                IdentifierInfo *FnInfo);

void CheckMemaccessArguments(const CallExpr *Call,
                             unsigned BId,
                             IdentifierInfo *FnName);

void CheckStrlcpycatArguments(const CallExpr *Call,
                              IdentifierInfo *FnName);

void CheckStrncatArguments(const CallExpr *Call,
                           IdentifierInfo *FnName);

void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
                        SourceLocation ReturnLoc,
                        bool isObjCMethod = false,
                        const AttrVec *Attrs = nullptr,
                        const FunctionDecl *FD = nullptr);

void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);

/// \brief Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
                        bool IsConstexpr = false);

void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
                                 Expr *Init);

/// \brief Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E); /// \brief Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// \brief Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// \brief A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// \brief Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const Expr * const *ExprArgs); /// \brief The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; // HLSL Change Starts bool DiagnoseHLSLDecl(Declarator& D, DeclContext* DC, Expr *BitWidth, TypeSourceInfo* TInfo, bool isParameter); void TransferUnusualAttributes(Declarator& D, NamedDecl* NewDecl); // HLSL Change Ends /// Nullability type specifiers. 
  // Cached nullability-specifier identifiers; lazily initialized (nullptr
  // until first requested via getNullabilityKeyword / getNSErrorIdent).
  IdentifierInfo *Ident__Nonnull = nullptr;
  IdentifierInfo *Ident__Nullable = nullptr;
  IdentifierInfo *Ident__Null_unspecified = nullptr;
  IdentifierInfo *Ident_NSError = nullptr;

protected:
  friend class Parser;
  friend class InitializationSequence;
  friend class ASTReader;
  friend class ASTDeclReader;
  friend class ASTWriter;

public:
  /// Retrieve the keyword associated with the given nullability kind
  /// (e.g. _Nonnull), caching the IdentifierInfo on first use.
  IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);

  /// The struct behind the CFErrorRef pointer.
  RecordDecl *CFError = nullptr;

  /// Retrieve the identifier "NSError".
  IdentifierInfo *getNSErrorIdent();

  /// \brief Retrieve the parser's current scope.
  ///
  /// This routine must only be used when it is certain that semantic analysis
  /// and the parser are in precisely the same context, which is not the case
  /// when, e.g., we are performing any kind of template instantiation.
  /// Therefore, the only safe places to use this scope are in the parser
  /// itself and in routines directly invoked from the parser and *never* from
  /// template substitution or instantiation.
  Scope *getCurScope() const { return CurScope; }

  /// Forward the MS mangling-number bump to the parser's current scope.
  void incrementMSManglingNumber() const {
    return CurScope->incrementMSManglingNumber();
  }

  IdentifierInfo *getSuperIdentifier() const;
  IdentifierInfo *getFloat128Identifier() const;

  Decl *getObjCDeclContext() const;

  /// Returns the lexical context being analyzed; OriginalLexicalContext
  /// overrides CurContext when set (e.g. during delayed analysis).
  DeclContext *getCurLexicalContext() const {
    return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
  }

  AvailabilityResult getCurContextAvailability() const;

  /// Like getCurLexicalContext, but maps an ObjC category to the interface
  /// it extends, since a category implicitly carries the interface's
  /// attributes.
  const DeclContext *getCurObjCLexicalContext() const {
    const DeclContext *DC = getCurLexicalContext();
    // A category implicitly has the attribute of the interface.
    if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
      DC = CatD->getClassInterface();
    return DC;
  }

  /// \brief To be used for checking whether the arguments being passed to
  /// function exceeds the number of parameters expected for it.
  /// Returns true when NumArgs exceeds NumParams. During code completion
  /// (PartialOverloading) a trailing comma implies one more argument is
  /// coming, so the check is shifted by one.
  static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                               bool PartialOverloading = false) {
    // We check whether we're just after a comma in code-completion.
    if (NumArgs > 0 && PartialOverloading)
      return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
    return NumArgs > NumParams;
  }

  // HLSL Change Begin - adjust this from T* to T&-like
  // NOTE(review): "genereate" is a long-standing typo in this API name; it
  // cannot be renamed here without breaking callers.
  CXXThisExpr *genereateHLSLThis(SourceLocation Loc, QualType ThisType,
                                 bool isImplicit);
  ClassTemplateSpecializationDecl *
  getHLSLDefaultSpecialization(ClassTemplateDecl *Decl);
  // HLSL Change End - adjust this from T* to T&-like
};

/// \brief RAII object that enters a new expression evaluation context.
/// Pushes the context in the constructor and is guaranteed to pop it in the
/// destructor, even on early return or exception paths.
class EnterExpressionEvaluationContext {
  Sema &Actions;

public:
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Decl *LambdaContextDecl = nullptr,
                                   bool IsDecltype = false)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
                                            IsDecltype);
  }
  /// Overload that reuses the enclosing lambda context declaration instead of
  /// supplying a new one (tag-dispatched via ReuseLambdaContextDecl_t).
  EnterExpressionEvaluationContext(Sema &Actions,
                                   Sema::ExpressionEvaluationContext NewContext,
                                   Sema::ReuseLambdaContextDecl_t,
                                   bool IsDecltype = false)
      : Actions(Actions) {
    Actions.PushExpressionEvaluationContext(NewContext,
                                            Sema::ReuseLambdaContextDecl,
                                            IsDecltype);
  }

  ~EnterExpressionEvaluationContext() {
    Actions.PopExpressionEvaluationContext();
  }
};

DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
                         sema::TemplateDeductionInfo &Info);

/// \brief Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
  CachedTokens Toks;
  /// \brief The template function declaration to be late parsed.
  Decl *D;
};

} // end namespace clang

#endif
GB_unaryop__identity_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_fp64_fp64
// op(A') function:  GB_tran__identity_fp64_fp64

// C type:   double
// A type:   double
// cast:     double cij = (double) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: output equals input)
#define GB_OP(z, x) \
    z = x ;

// casting (fp64 -> fp64, a no-op cast here)
#define GB_CASTING(z, x) \
    double z = (double) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */                  \
    GB_GETA (aij, Ax, pA) ;              \
    /* Cx [pC] = op (cast (aij)) */      \
    GB_CASTING (x, aij) ;                \
    GB_OP (GB_CX (pC), x) ;              \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the identity operator entry-by-entry over the anz values of Ax,
// writing into Cx, parallelized with a static OpenMP schedule (each of the
// anz iterations is independent).
GrB_Info GB_unop__identity_fp64_fp64
(
    double *restrict Cx,        // output array, anz entries
    const double *restrict Ax,  // input array, anz entries
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unaryop_transpose.c, which is
// textually included below and specialized by the GB_* macros defined above.
GrB_Info GB_tran__identity_fp64_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,    // per-slice row counts workspace
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice, // slice boundaries over A
    int naslice                      // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
dcrtpoly.h
/** * @file dcrtpoly.h Represents integer lattice elements with double-CRT * @author TPOC: contact@palisade-crypto.org * * @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT) * All rights reserved. * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. THIS SOFTWARE IS * PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* */ #ifndef LBCRYPTO_LATTICE_DCRTPOLY_H #define LBCRYPTO_LATTICE_DCRTPOLY_H #include <vector> #include <string> #include "../math/backend.h" #include "../utils/inttypes.h" #include "../utils/exception.h" #include "../lattice/elemparams.h" #include "../lattice/ilparams.h" #include "../lattice/ildcrtparams.h" #include "../lattice/ilelement.h" #include "../lattice/poly.h" #include "../math/nbtheory.h" #include "../math/transfrm.h" #include "../math/distrgen.h" #include "../math/quadfloat.h" namespace lbcrypto { /** * @brief Ideal lattice for the double-CRT representation. * The implementation contains a vector of underlying native-integer lattices * The double-CRT representation of polynomials is a common optimization for * lattice encryption operations. Basically, it allows large-modulus polynamials * to be represented as multiple smaller-modulus polynomials. The double-CRT * representations are discussed theoretically here: * - Gentry C., Halevi S., Smart N.P. (2012) Homomorphic Evaluation of the AES * Circuit. In: Safavi-Naini R., Canetti R. (eds) Advances in Cryptology – * CRYPTO 2012. Lecture Notes in Computer Science, vol 7417. 
Springer, Berlin, * Heidelberg */ template <typename VecType> class DCRTPolyImpl : public ILElement<DCRTPolyImpl<VecType>, VecType> { public: using Integer = typename VecType::Integer; using Params = ILDCRTParams<Integer>; typedef VecType Vector; typedef DCRTPolyImpl<VecType> DCRTPolyType; typedef DiscreteGaussianGeneratorImpl<NativeVector> DggType; typedef DiscreteUniformGeneratorImpl<NativeVector> DugType; typedef TernaryUniformGeneratorImpl<NativeVector> TugType; typedef BinaryUniformGeneratorImpl<NativeVector> BugType; // this class contains an array of these: using PolyType = PolyImpl<NativeVector>; // the composed polynomial type typedef PolyImpl<VecType> PolyLargeType; static const std::string GetElementName() { return "DCRTPolyImpl"; } // CONSTRUCTORS /** * @brief Constructor that initialized m_format to EVALUATION and calls * m_params to nothing */ DCRTPolyImpl(); /** * Constructor that initializes parameters. * *@param params parameter set required for DCRTPoly. *@param format the input format fixed to EVALUATION. Format is a enum type *that indicates if the polynomial is in Evaluation representation or *Coefficient representation. It is defined in inttypes.h. *@param initializeElementToZero */ DCRTPolyImpl(const shared_ptr<Params> params, Format format = EVALUATION, bool initializeElementToZero = false); const DCRTPolyType &operator=(const PolyLargeType &element); const DCRTPolyType &operator=(const NativePoly &element); /** * @brief Constructor based on discrete Gaussian generator. * * @param &dgg the input discrete Gaussian generator. The dgg will be the seed * to populate the towers of the DCRTPoly with random numbers. * @param params parameter set required for DCRTPoly. * @param format the input format fixed to EVALUATION. Format is a enum type * that indicates if the polynomial is in Evaluation representation or * Coefficient representation. It is defined in inttypes.h. 
*/ DCRTPolyImpl(const DggType &dgg, const shared_ptr<Params> params, Format format = EVALUATION); /** * @brief Constructor based on binary distribution generator. This is not * implemented. Will throw an error. * * @param &bug the input binary uniform generator. The bug will be the seed to * populate the towers of the DCRTPoly with random numbers. * @param params parameter set required for DCRTPoly. * @param format the input format fixed to EVALUATION. Format is a enum type * that indicates if the polynomial is in Evaluation representation or * Coefficient representation. It is defined in inttypes.h. */ DCRTPolyImpl(const BugType &bug, const shared_ptr<Params> params, Format format = EVALUATION); /** * @brief Constructor based on ternary distribution generator. * * @param &tug the input ternary uniform generator. The bug will be the seed * to populate the towers of the DCRTPoly with random numbers. * @param params parameter set required for DCRTPoly. * @param format the input format fixed to EVALUATION. Format is a enum type * that indicates if the polynomial is in Evaluation representation or * Coefficient representation. It is defined in inttypes.h. * @param h - Hamming weight for sparse ternary distribution (by default, when * h = 0, the distribution is NOT sparse) */ DCRTPolyImpl(const TugType &tug, const shared_ptr<Params> params, Format format = EVALUATION, uint32_t h = 0); /** * @brief Constructor based on discrete uniform generator. * * @param &dug the input discrete Uniform Generator. * @param params the input params. * @param &format the input format fixed to EVALUATION. Format is a enum type * that indicates if the polynomial is in Evaluation representation or * Coefficient representation. It is defined in inttypes.h. */ DCRTPolyImpl(DugType &dug, const shared_ptr<Params> params, Format format = EVALUATION); /** * @brief Construct using a single Poly. The Poly is copied into every tower. 
* Each tower will be reduced to it's corresponding modulus via GetModuli(at * tower index). The format is derived from the passed in Poly. * * @param &element Poly to build other towers from. * @param params parameter set required for DCRTPoly. */ DCRTPolyImpl(const PolyLargeType &element, const shared_ptr<Params> params); /** * @brief Construct using a single NativePoly. The NativePoly is copied into * every tower. Each tower will be reduced to it's corresponding modulus via * GetModuli(at tower index). The format is derived from the passed in * NativePoly. * * @param &element Poly to build other towers from. * @param params parameter set required for DCRTPoly. */ DCRTPolyImpl(const NativePoly &element, const shared_ptr<Params> params); /** * @brief Construct using an tower of ILVectro2ns. The params and format for * the DCRTPoly will be derived from the towers. * * @param &towers vector of Polys which correspond to each tower of DCRTPoly. */ DCRTPolyImpl(const std::vector<PolyType> &elements); /** * @brief Create lambda that allocates a zeroed element for the case when it * is called from a templated class * @param params the params to use. * @param format - EVALUATION or COEFFICIENT */ inline static function<DCRTPolyType()> Allocator( const shared_ptr<Params> params, Format format) { return [=]() { return DCRTPolyType(params, format, true); }; } /** * @brief Allocator for discrete uniform distribution. * * @param params Params instance that is is passed. * @param resultFormat resultFormat for the polynomials generated. * @param stddev standard deviation for the discrete gaussian generator. * @return the resulting vector. */ inline static function<DCRTPolyType()> MakeDiscreteGaussianCoefficientAllocator(shared_ptr<Params> params, Format resultFormat, double stddev) { return [=]() { DggType dgg(stddev); DCRTPolyType ilvec(dgg, params, COEFFICIENT); ilvec.SetFormat(resultFormat); return ilvec; }; } /** * @brief Allocator for discrete uniform distribution. 
* * @param params Params instance that is is passed. * @param format format for the polynomials generated. * @return the resulting vector. */ inline static function<DCRTPolyType()> MakeDiscreteUniformAllocator( shared_ptr<Params> params, Format format) { return [=]() { DugType dug; return DCRTPolyType(dug, params, format); }; } /** * @brief Copy constructor. * * @param &element DCRTPoly to copy from */ DCRTPolyImpl(const DCRTPolyType &element); /** * @brief Move constructor. * * @param &&element DCRTPoly to move from */ DCRTPolyImpl(const DCRTPolyType &&element); // CLONE OPERATIONS /** * @brief Clone the object by making a copy of it and returning the copy * @return new Element */ DCRTPolyType Clone() const { return std::move(DCRTPolyImpl(*this)); } /** * @brief Makes a copy of the DCRTPoly, but it includes only a sequential * subset of the towers that the original holds. * * @param startTower The index number of the first tower to clone * @param endTower The index number of the last tower to clone * @return new Element */ DCRTPolyType CloneTowers(uint32_t startTower, uint32_t endTower) const { vector<NativeInteger> moduli(endTower - startTower + 1); vector<NativeInteger> roots(endTower - startTower + 1); for (uint32_t i = startTower; i <= endTower; i++) { moduli[i - startTower] = this->GetParams()->GetParams()[i]->GetModulus(); roots[i - startTower] = this->GetParams()->GetParams()[i]->GetRootOfUnity(); } auto params = DCRTPolyImpl::Params(this->GetCyclotomicOrder(), moduli, roots, {}, {}, 0); auto res = DCRTPolyImpl(std::make_shared<typename DCRTPolyImpl::Params>(params), EVALUATION, false); for (uint32_t i = startTower; i <= endTower; i++) { res.SetElementAtIndex(i - startTower, this->GetElementAtIndex(i)); } return std::move(res); } /** * @brief Clone the object, but have it contain nothing * @return new Element */ DCRTPolyType CloneEmpty() const { return std::move(DCRTPolyImpl()); } /** * @brief Clone method creates a new DCRTPoly and clones only the params. 
The * tower values are empty. The tower values can be filled by another * process/function or initializer list. */ DCRTPolyType CloneParametersOnly() const; /** * @brief Clone with noise. This method creates a new DCRTPoly and clones the * params. The tower values will be filled up with noise based on the discrete * gaussian. * * @param &dgg the input discrete Gaussian generator. The dgg will be the seed * to populate the towers of the DCRTPoly with random numbers. * @param format the input format fixed to EVALUATION. Format is a enum type * that indicates if the polynomial is in Evaluation representation or * Coefficient representation. It is defined in inttypes.h. */ DCRTPolyType CloneWithNoise(const DiscreteGaussianGeneratorImpl<VecType> &dgg, Format format = EVALUATION) const; /** * @brief Destructor. */ ~DCRTPolyImpl(); // GETTERS /** * @brief returns the parameters of the element. * @return the element parameter set. */ const shared_ptr<Params> GetParams() const { return m_params; } /** * @brief returns the element's cyclotomic order * @return returns the cyclotomic order of the element. */ const usint GetCyclotomicOrder() const { return m_params->GetCyclotomicOrder(); } /** * @brief returns the element's ring dimension * @return returns the ring dimension of the element. */ const usint GetRingDimension() const { return m_params->GetRingDimension(); } /** * @brief returns the element's modulus * @return returns the modulus of the element. */ const Integer &GetModulus() const { return m_params->GetModulus(); } /** * @brief returns the element's original modulus, derived from Poly * @return returns the modulus of the element. */ const Integer &GetOriginalModulus() const { return m_params->GetOriginalModulus(); } /** * @brief returns the element's root of unity. * @return the element's root of unity. */ const Integer &GetRootOfUnity() const { static Integer t(0); return t; } /** * @brief Get method for length of each component element. 
* NOTE assumes all components are the same size. * * @return length of the component element */ usint GetLength() const { if (m_vectors.size() == 0) return 0; return m_vectors[0].GetValues().GetLength(); } /** * @brief Get interpolated value of elements at all tower index i. * Note this operation is computationally intense. * @return interpolated value at index i. */ Integer &at(usint i); const Integer &at(usint i) const; /** * @brief Get interpolated value of element at index i. * Note this operation is computationally intense. * @return interpolated value at index i. */ Integer &operator[](usint i); const Integer &operator[](usint i) const; /** * @brief Get method of individual tower of elements. * Note this behavior is different than poly * @param i index of tower to be returned. * @returns a reference to the returned tower */ const PolyType &GetElementAtIndex(usint i) const; /** * @brief Get method of the number of component elements, also known as the * number of towers. * * @return the number of component elements. */ usint GetNumOfElements() const; /** * @brief Get method that returns a vector of all component elements. * * @returns a vector of the component elements. */ const std::vector<PolyType> &GetAllElements() const; /** * @brief Get method of the format. * * @return the format, either COEFFICIENT or EVALUATION */ Format GetFormat() const; /** * @brief Write the element as \f$ \sum\limits{i=0}^{\lfloor {\log q/base} * \rfloor} {(base^i u_i)} \f$ and return the vector of \f$ \left\{u_0, * u_1,...,u_{\lfloor {\log q/base} \rfloor} \right\} \in R_{{base}^{\lceil * {\log q/base} \rceil}} \f$; This is used as a subroutine in the * relinearization procedure. * * @param baseBits is the number of bits in the base, i.e., \f$ base = * 2^{baseBits} \f$. 
* @return is the pointer where the base decomposition vector is stored */ std::vector<DCRTPolyType> BaseDecompose(usint baseBits, bool evalModeAnswer = true) const; /** * @brief Generate a vector of PolyImpl's as \f$ \left\{x, {base}*x, * {base}^2*x, ..., {base}^{\lfloor {\log q/{base}} \rfloor} \right\}*x \f$, * where \f$ x \f$ is the current PolyImpl object; * used as a subroutine in the relinearization procedure to get powers of a * certain "base" for the secret key element. * * @param baseBits is the number of bits in the base, i.e., \f$ base = * 2^{baseBits} \f$. * @return is the pointer where the base decomposition vector is stored */ std::vector<DCRTPolyType> PowersOfBase(usint baseBits) const; /** * CRT basis decomposition of c as [c qi/q]_qi * * @param &baseBits bits in the base for additional digit decomposition if * base > 0 * @return is the pointer where the resulting vector is stored */ std::vector<DCRTPolyType> CRTDecompose(uint32_t baseBits = 0) const; // VECTOR OPERATIONS /** * @brief Assignment Operator. * * @param &rhs the copied element. * @return the resulting element. */ const DCRTPolyType &operator=(const DCRTPolyType &rhs); /** * @brief Move Assignment Operator. * * @param &rhs the copied element. * @return the resulting element. */ const DCRTPolyType &operator=(DCRTPolyType &&rhs); /** * @brief Initalizer list * * @param &rhs the list to initalized the element. * @return the resulting element. */ DCRTPolyType &operator=(std::initializer_list<uint64_t> rhs); /** * @brief Assignment Operator. The usint val will be set at index zero and all * other indices will be set to zero. * * @param val is the usint to assign to index zero. * @return the resulting vector. */ DCRTPolyType &operator=(uint64_t val); /** * @brief Creates a Poly from a vector of signed integers (used for trapdoor * sampling) * * @param &rhs the vector to set the PolyImpl to. * @return the resulting PolyImpl. 
*/ DCRTPolyType &operator=(std::vector<int64_t> rhs); /** * @brief Creates a Poly from a vector of signed integers (used for trapdoor * sampling) * * @param &rhs the vector to set the PolyImpl to. * @return the resulting PolyImpl. */ DCRTPolyType &operator=(std::vector<int32_t> rhs); /** * @brief Initalizer list * * @param &rhs the list to set the PolyImpl to. * @return the resulting PolyImpl. */ DCRTPolyType &operator=(std::initializer_list<std::string> rhs); /** * @brief Unary minus on a element. * @return additive inverse of the an element. */ DCRTPolyType operator-() const { DCRTPolyType all0(this->GetParams(), this->GetFormat(), true); return all0 - *this; } /** * @brief Equality operator. * * @param &rhs is the specified element to be compared with this element. * @return true if this element represents the same values as the specified * element, false otherwise */ bool operator==(const DCRTPolyType &rhs) const; /** * @brief Performs an entry-wise addition over all elements of each tower with * the towers of the element on the right hand side. * * @param &rhs is the element to add with. * @return is the result of the addition. */ const DCRTPolyType &operator+=(const DCRTPolyType &rhs); /** * @brief Performs an entry-wise subtraction over all elements of each tower * with the towers of the element on the right hand side. * * @param &rhs is the element to subtract from. * @return is the result of the addition. */ const DCRTPolyType &operator-=(const DCRTPolyType &rhs); /** * @brief Permutes coefficients in a polynomial. Moves the ith index to the * first one, it only supports odd indices. * * @param &i is the element to perform the automorphism transform with. * @return is the result of the automorphism transform. 
   */
#if 1
  // Active implementation: applies the automorphism index-map i to every
  // CRT tower independently and returns the permuted copy.
  DCRTPolyType AutomorphismTransform(const usint &i) const {
    DCRTPolyType result(*this);
    for (usint k = 0; k < m_vectors.size(); k++) {
      result.m_vectors[k] = m_vectors[k].AutomorphismTransform(i);
    }
    return result;
  }

  // Precomputes the coefficient permutation induced by the automorphism
  // X -> X^k (k odd), writing it into perm (resized to the ring dimension).
  // Indices are mapped through bit-reversal because the towers are stored
  // in bit-reversed (evaluation) order.
  void GenAutormophTable(const usint &k, std::vector<usint> &perm) const {
    usint n = GetRingDimension();
    usint m = n << 1;           // cyclotomic order m = 2n (power of two)
    usint logn = log2(n);
    usint logm = logn + 1;
    perm.resize(n);
    for (usint j = 1; j < m; j += 2) {
      // idx = (j * k) mod m, computed by masking off the high bits.
      usint idx = (j * k) - (((j * k) >> logm) << logm);
      usint jrev = ReverseBits(j >> 1, logn);
      usint idxrev = ReverseBits(idx >> 1, logn);
      // result.m_values->operator[](jrev) = GetValues().operator[](idxrev);
      perm[idxrev] = jrev;
    }
  }

  // Applies a precomputed permutation (e.g. from GenAutormophTable) to each
  // tower; towers are independent, so they are permuted in parallel.
  // NOTE(review): the loop index is usint (unsigned); some OpenMP
  // implementations require a signed index for `parallel for` — confirm
  // against the project's minimum OpenMP version.
  DCRTPolyType Permute(const std::vector<usint> &perm) const {
    DCRTPolyType result(*this);
#pragma omp parallel for
    for (usint k = 0; k < m_vectors.size(); k++) {
      result.m_vectors[k] = m_vectors[k].Permute(perm);
    }
    return result;
  }
#else
  // Dead code (compiled out by #if 1 above). NOTE(review): this branch does
  // not compile as written — it references `k`, which is undeclared in the
  // first loop, and `m_values`, which is not a member of this class.
  DCRTPolyType AutomorphismTransform(const usint &i) const {
    DCRTPolyType result(*this);
    // TODO add table
    usint m = this->m_params->GetCyclotomicOrder();
    usint n = this->m_params->GetRingDimension();
    usint logm = log2(m);
    usint logn = log2(n);
    for (usint j = 1; j < m; j += 2) {
      usint idx = (j * k) - (((j * k) >> logm) << logm);
      usint jrev = ReverseBits(j >> 1, logn);
      usint idxrev = ReverseBits(idx >> 1, logn);
      result.m_values->operator[](jrev) = GetValues().operator[](idxrev);
    }
    // Simple permutation
    for (usint k = 0; k < m_vectors.size(); k++) {
      result.m_vectors[k] = m_vectors[k].AutomorphismTransform(i);
    }
    return result;
  }
#endif

  /**
   * @brief Transpose the ring element using the automorphism operation
   *
   * @return is the result of the transposition.
*/ DCRTPolyType Transpose() const { if (m_format == COEFFICIENT) { PALISADE_THROW(not_implemented_error, "DCRTPolyImpl element transposition is currently " "implemented only in the Evaluation representation."); } else { usint m = m_params->GetCyclotomicOrder(); return AutomorphismTransform(m - 1); } } /** * @brief Performs an addition operation and returns the result. * * @param &element is the element to add with. * @return is the result of the addition. */ DCRTPolyType Plus(const DCRTPolyType &element) const; /** * @brief Performs a multiplication operation and returns the result. * * @param &element is the element to multiply with. * @return is the result of the multiplication. */ DCRTPolyType Times(const DCRTPolyType &element) const; /** * @brief Performs a subtraction operation and returns the result. * * @param &element is the element to subtract from. * @return is the result of the subtraction. */ DCRTPolyType Minus(const DCRTPolyType &element) const; // SCALAR OPERATIONS /** * @brief Scalar addition - add an element to the first index of each tower. * * @param &element is the element to add entry-wise. * @return is the result of the addition operation. */ DCRTPolyType Plus(const Integer &element) const; /** * @brief Scalar addition for elements in CRT format. * CRT elements are represented as vector of integer elements which * correspond to the represented number modulo the primes in the * tower chain (in same order). * * @param &element is the element to add entry-wise. * @return is the result of the addition operation. */ DCRTPolyType Plus(const vector<Integer> &element) const; /** * @brief Scalar subtraction - subtract an element to all entries. * * @param &element is the element to subtract entry-wise. * @return is the return value of the minus operation. */ DCRTPolyType Minus(const Integer &element) const; /** * @brief Scalar subtraction for elements in CRT format. 
* CRT elements are represented as vector of integer elements which * correspond to the represented number modulo the primes in the * tower chain (in same order). * * @param &element is the element to subtract entry-wise. * @return is the result of the subtraction operation. */ DCRTPolyType Minus(const vector<Integer> &element) const; /** * @brief Scalar multiplication - multiply all entries. * * @param &element is the element to multiply entry-wise. * @return is the return value of the times operation. */ DCRTPolyType Times(const Integer &element) const; /** * @brief Scalar multiplication - mulltiply by a signed integer * * @param &element is the element to multiply entry-wise. * @return is the return value of the times operation. */ DCRTPolyType Times(int64_t element) const; /** * @brief Scalar multiplication by an integer represented in CRT Basis. * * @param &element is the element to multiply entry-wise. * @return is the return value of the times operation. */ DCRTPolyType Times(const std::vector<NativeInteger> &element) const; /** * @brief Scalar modular multiplication by an integer represented in CRT * Basis. * * @param &element is the element to multiply entry-wise. * @return is the return value of the times operation. */ DCRTPolyType Times(const std::vector<Integer> &element) const; /** * @brief Scalar multiplication followed by division and rounding operation - * operation on all entries. * * @param &p is the element to multiply entry-wise. * @param &q is the element to divide entry-wise. * @return is the return value of the multiply, divide and followed by * rounding operation. */ DCRTPolyType MultiplyAndRound(const Integer &p, const Integer &q) const; /** * @brief Scalar division followed by rounding operation - operation on all * entries. * * @param &q is the element to divide entry-wise. * @return is the return value of the divide, followed by rounding operation. 
*/ DCRTPolyType DivideAndRound(const Integer &q) const; /** * @brief Performs a negation operation and returns the result. * * @return is the result of the negation. */ DCRTPolyType Negate() const; const DCRTPolyType &operator+=(const Integer &element) { for (usint i = 0; i < this->GetNumOfElements(); i++) { this->m_vectors[i] += (element.Mod(this->m_vectors[i].GetModulus())).ConvertToInt(); } return *this; } /** * @brief Performs a subtraction operation and returns the result. * * @param &element is the element to subtract from. * @return is the result of the subtraction. */ const DCRTPolyType &operator-=(const Integer &element) { for (usint i = 0; i < this->GetNumOfElements(); i++) { this->m_vectors[i] -= (element.Mod(this->m_vectors[i].GetModulus())).ConvertToInt(); } return *this; } /** * @brief Performs a multiplication operation and returns the result. * * @param &element is the element to multiply by. * @return is the result of the subtraction. */ const DCRTPolyType &operator*=(const Integer &element); /** * @brief Performs an multiplication operation and returns the result. * * @param &element is the element to multiply with. * @return is the result of the multiplication. */ const DCRTPolyType &operator*=(const DCRTPolyType &element); /** * @brief Get value of element at index i. * * @return value at index i. */ PolyType &ElementAtIndex(usint i); // multiplicative inverse operation /** * @brief Performs a multiplicative inverse operation and returns the result. * * @return is the result of the multiplicative inverse. */ DCRTPolyType MultiplicativeInverse() const; /** * @brief Perform a modulus by 2 operation. Returns the least significant * bit. * * @return is the resulting value. */ DCRTPolyType ModByTwo() const; /** * @brief Modulus - perform a modulus operation. Does proper mapping of * [-modulus/2, modulus/2) to [0, modulus) * * @param modulus is the modulus to use. * @return is the return value of the modulus. 
*/ DCRTPolyType Mod(const Integer &modulus) const { PALISADE_THROW(not_implemented_error, "Mod of an Integer not implemented on DCRTPoly"); } // OTHER FUNCTIONS AND UTILITIES /** * @brief Get method that should not be used * * @return will throw an error. */ const VecType &GetValues() const { PALISADE_THROW(not_implemented_error, "GetValues not implemented on DCRTPoly"); } /** * @brief Set method that should not be used, will throw an error. * * @param &values * @param format */ void SetValues(const VecType &values, Format format) { PALISADE_THROW(not_implemented_error, "SetValues not implemented on DCRTPoly"); } /** * @brief Sets element at index * * @param index where the element should be set */ void SetElementAtIndex(usint index, const PolyType &element) { m_vectors[index] = element; } /** * @brief Sets all values of element to zero. */ void SetValuesToZero(); /** * @brief Adds "1" to every entry in every tower. */ void AddILElementOne(); /** * @brief Add uniformly random values to all components except for the first * one */ DCRTPolyType AddRandomNoise(const Integer &modulus) const { PALISADE_THROW(not_implemented_error, "AddRandomNoise is not currently implemented for DCRTPoly"); } /** * @brief Make DCRTPoly Sparse. Sets every index of each tower not equal to * zero mod the wFactor to zero. * * @param &wFactor ratio between the sparse and none-sparse values. */ void MakeSparse(const uint32_t &wFactor); /** * @brief Returns true if ALL the tower(s) are empty. * @return true if all towers are empty */ bool IsEmpty() const; /** * @brief Drops the last element in the double-CRT representation. The * resulting DCRTPoly element will have one less tower. */ void DropLastElement(); /** * @brief Drops the last i elements in the double-CRT representation. */ void DropLastElements(size_t i); /** * @brief Drops the last element in the double-CRT representation and scales * down by the last CRT modulus. The resulting DCRTPoly element will have one * less tower. 
*/ void DropLastElementAndScale( const std::vector<typename PolyType::Integer> &omega); /** * @brief ModReduces reduces the DCRTPoly element's composite modulus by * dropping the last modulus from the chain of moduli as well as dropping the * last tower. * * @param plaintextModulus is the plaintextModulus used for the DCRTPoly */ void ModReduce(const Integer &plaintextModulus); /** * @brief Interpolates the DCRTPoly to an Poly based on the Chinese Remainder * Transform Interpolation. and then returns a Poly with that single element * * @return the interpolated ring element as a Poly object. */ PolyLargeType CRTInterpolate() const; PolyType DecryptionCRTInterpolate(PlaintextModulus ptm) const; NativePoly ToNativePoly() const; /** * @brief Interpolates the DCRTPoly to an Poly based on the Chinese Remainder * Transform Interpolation, only at element index i, all other elements are * zero. and then returns a Poly with that single element * * @return the interpolated ring element as a Poly object. 
*/ PolyLargeType CRTInterpolateIndex(usint i) const; /** * @brief Computes Round(p/q*x) mod p as [\sum_i x_i*alpha_i + Round(\sum_i * x_i*beta_i)] mod p for fast rounding in RNS; used in the decryption of * BFVrns * * @param &p 64-bit integer (often corresponds to the plaintext modulus) * @param &alpha a vector of precomputed integer factors mod p - for each q_i * @param &beta a vector of precomputed floating-point factors between 0 and 1 * - for each q_i - used when CRT moduli are <= 44 bits * @param &alphaPrecon an NTL-specific vector of precomputed integer factors * mod p - for each q_i * @param &quadBeta a vector of precomputed quad-precision floating-point * factors between 0 and 1 - for each q_i - used when CRT moduli are 58..60 * bits long * @param &extBeta a vector of precomputed extended-double-precision * floating-point factors between 0 and 1 - for each q_i - used when CRT * moduli are 45..57 bits long * @return the result of computation as a polynomial with native 64-bit * coefficients */ PolyType ScaleAndRound(const NativeInteger &p, const std::vector<NativeInteger> &alpha, const std::vector<double> &beta, const std::vector<NativeInteger> &alphaPrecon, #ifndef NO_QUADMATH const std::vector<QuadFloat> &quadBeta, #endif const std::vector<long double> &extBeta) const; /** * @brief Computes and returns the product of primes in the current moduli * chain. Compared to GetModulus, which always returns the product of all * primes in the crypto parameters, this method will return a different * modulus, based on the towers/moduli that are currently in the chain (some * towers are dropped along the way). * * @return the product of moduli in the current towers. */ BigInteger GetWorkingModulus() const; /** * @brief Returns the element parameters for DCRTPoly elements in an extended * CRT basis, which is the concatenation of the towers currently in "this" * DCRTPoly, and the moduli in ParamsP. * * @return element parameters of the extended basis. 
   */
  shared_ptr<Params> GetExtendedCRTBasis(shared_ptr<Params> paramsP) const;

  /**
   * @brief Performs approximate CRT basis switching. Based on the Fast Basis
   * Conversion algorithm presented in Section 2.3 of "A full RNS variant of
   * approximate homomorphic encryption" by Cheon, et. al.
   *
   * Suppose we have two CRT bases: C={q_0, ..., q_{L-1}} with Q=q_0*...*q_{L-1}
   * and B={p_0, ..., p_{K-1}} with P=p_0*...*p_{K-1}. Also, suppose that the
   * input of the algorithm (the DCRTPoly in "this" in our case), is in basis C.
   *
   * The conversion algorithm Conv_{C->B}(this) does not return the
   * representation of this in basis B, but instead, it returns the
   * representation of (this + Q*t) in basis B, for some small t (hence we call
   * it *approximate* CRT basis switch).
   *
   * The method computes the conversion as follows:
   *
   * Conv_{C->B}(this in C basis) =
   *    Sum_{j=0}^{L-1}(this_j * invhatq_j * hatq_j) mod p_i
   *
   * Where:
   *    this_j = this mod q_j
   *    invhatq_j = \hat{q_j}^{-1} = (Q / q_j)^{-1} mod q_j
   *    hatq_j = \hat{q_j} = Q / q_j
   *
   * Values for (invhatq_j mod q_j) and (hatq_j mod p_i) must be pre-computed
   * and supplied as arguments, to ensure the entirety of the conversion happens
   * in RNS.
   *
   * @param &paramsFrom parameters for the CRT basis C
   * @param &paramsTo parameters for the CRT basis B
   * @param &hatInvModFrom precomputed values for (invhatq_j mod q_j)
   * @param &hatInvModFromPrecon ModMul precomputed values for hatInvModFrom
   * @param &hatModTo precomputed values for (hatq_j mod p_i)
   * @param &modBarretPrecon 128-bit Barrett reduction precomputed values
   * @return the representation of (this + Q*t) in basis B.
   */
  DCRTPolyType ApproxSwitchCRTBasis(
      const shared_ptr<Params> paramsFrom, const shared_ptr<Params> paramsTo,
      const vector<NativeInteger> &hatInvModFrom,
      const vector<NativeInteger> &hatInvModFromPrecon,
      const vector<vector<NativeInteger>> &hatModTo,
      const vector<DoubleNativeInt> &modBarretPrecon) const;

  /**
   * @brief Performs approximate modulus raising in RNS.
Based on the algorithm * presented in Section 3.2 of "A full RNS variant of approximate homomorphic * encryption" by Cheon, et. al. Given a DCRTPoly "this" in basis C={q_0, ..., * q_{L-1}} with Q=q_0*...*q_{L-1}, it uses ApproxSwitchCRTBasis internally, * and computes the representation of (this + Q*t) in basis D={q_0, ..., * q_{L-1}, p_L, ..., p_{L+K-1}}, where B={p_0, ..., p_{K-1}} with * P=p_0*...*p_{K-1} is the basis we supply as argument in ParamsP. * * Values for (invhatq_j mod q_j) and (hatq_j mod p_i) must be supplied as * arguments here too, because ApproxSwitchCRTBasis is used internally. * * @param &paramsQ parameters for the CRT basis C * @param &paramsP parameters for the CRT basis B * @param &qHatInvModQj precomputed values for (invhatq_j mod q_j) * @param &qHatInvModQjPrecon ModMul precomputed values for qHatInvModQj * @param &qHatModPi precomputed values for (hatq_j mod p_i) * @param &modBarretPreconP 128-bit Barrett reduction precomputed values for * p_i * @return the representation of (this + Q*t) in the extended basis. */ DCRTPolyType ApproxModUp( const shared_ptr<Params> paramsQ, const shared_ptr<Params> paramsP, const vector<vector<NativeInteger>> &qHatInvModQj, const vector<vector<NativeInteger>> &qHatInvModQjPrecon, const vector<vector<vector<NativeInteger>>> &qHatModPi, const vector<DoubleNativeInt> &modBarretPreconP) const; /** * @brief Performs approximate modulus reduction in RNS. Based on the * algorithm presented in Section 3.2 of "A full RNS variant of approximate * homomorphic encryption" by Cheon, et. al. Given a DCRTPoly "this" in basis * D={q_0, ..., q_{L-1}, p_L, ..., p_{L+K-1}}, it computes the representation * of (P^-1 * this) is basis C={q_0, ..., q_{L-1}}. The reduction is * approximate, so the result is not exactly (P^-1 * this), but a reasonable * approximation to it. * * Values for (invhatq_j mod q_j) and (hatq_j mod p_i) must be supplied as * arguments here too, because ApproxSwitchCRTBasis is used internally. 
* Moreover, precomputed values for (P^{-1} mod q_j) must also be supplied. * * @param &paramsQ parameters for the CRT basis C. * @param &paramsP parameters for the CRT basis B. * @param &pInvModQj precomputed values for (P^{-1} mod q_j). * @param &pInvModQjPrecon ModMul precomputed values for pInvModQj * @param &pHatInvModPi precomputed values for (invhatq_j mod q_j). * @param &pHatInvModPiPrecon ModMul precomputed values for pHatInvModPi * @param &pHatModQj precomputed values for (hatq_j mod p_i). * @param &modBarretPreconQ 128-bit Barrett reduction precomputed values for * q_j * @return the representation of (P^-1 * this) in basis C. */ DCRTPolyType ApproxModDown( const shared_ptr<Params> paramsQ, const shared_ptr<Params> paramsP, const vector<NativeInteger> &pInvModQj, const vector<NativeInteger> &pInvModQjPrecon, const vector<NativeInteger> &pHatInvModPi, const vector<NativeInteger> &pHatInvModPiPrecon, const vector<vector<NativeInteger>> &pHatModQj, const vector<DoubleNativeInt> &modBarretPreconQ) const; /** * @brief Switches polynomial from one CRT basis Q = q1*q2*...*qn to another * CRT basis S = s1*s2*...*sn * * @param &params parameters for the CRT basis S * @param &qInvModqi a vector of precomputed integer factors (q/qi)^{-1} mod * qi for all qi * @param &qDivqiModsi a matrix of precomputed integer factors (q/qi)^{-1} mod * si for all si, qi combinations * @param &qModsi a vector of precomputed integer factors q mod si for all si * @param &siModulimu Barrett modulo reduction precomputations for si's * @param &qInvModqiPrecon NTL precomputations for (q/qi)^{-1} mod q * @return the polynomial in the CRT basis S */ DCRTPolyType SwitchCRTBasis( const shared_ptr<Params> params, const std::vector<NativeInteger> &qInvModqi, const std::vector<std::vector<NativeInteger>> &qDivqiModsi, const std::vector<NativeInteger> &qModsi, const std::vector<DoubleNativeInt> &siModulimu, const std::vector<NativeInteger> &qInvModqiPrecon) const; /** * @brief Expands polynomial in 
CRT basis Q = q1*q2*...*qn to a larger CRT
   * basis Q*S, where S = s1*s2*...*sn; uses SwitchCRTBasis as a subroutine; the
   * result is in evaluation representation
   *
   * @param &paramsQS parameters for the expanded CRT basis Q*S
   * @param &params parameters for the CRT basis S
   * @param &qInvModqi a vector of precomputed integer factors (q/qi)^{-1} mod
   * qi for all qi
   * @param &qDivqiModsi a matrix of precomputed integer factors (q/qi)^{-1} mod
   * si for all si, qi combinations
   * @param &qModsi a vector of precomputed integer factors q mod si for all si
   * @param &siModulimu Barrett modulo reduction precomputations for si's
   * @param &qInvModqiPrecon NTL precomputations for (q/qi)^{-1} mod q
   */
  void ExpandCRTBasis(
      const shared_ptr<Params> paramsQS, const shared_ptr<Params> params,
      const std::vector<NativeInteger> &qInvModqi,
      const std::vector<std::vector<NativeInteger>> &qDivqiModsi,
      const std::vector<NativeInteger> &qModsi,
      const std::vector<DoubleNativeInt> &siModulimu,
      const std::vector<NativeInteger> &qInvModqiPrecon);

  /**
   * @brief Computes Round(t/q*x) mod t for fast rounding in RNS
   * @param qModuliTable: basis q = q1 * q2 * ...
* @param gamma: redundant modulus * @param t: plaintext modulus * @param gammaInvModt * @param gammaInvModtPrecon - table for gammaInvModt used in preconditioned * modular reduction * @param negqInvModtgammaTable: -1/q mod {t U gamma} * @param negqInvModtgammaPreconTable - used in preconditioned modular * reduction * @param tgammaqDivqiModqiTable * @param tgammaqDivqiModqiPreconTable * @param qDivqiModtgammaTable * @param qDivqiModtgammaPreconTable * @return */ PolyType ScaleAndRound( const std::vector<NativeInteger> &qModuliTable, const NativeInteger &gamma, const NativeInteger &t, const NativeInteger &gammaInvModt, const NativeInteger &gammaInvModtPrecon, const std::vector<NativeInteger> &negqInvModtgammaTable, const std::vector<NativeInteger> &negqInvModtgammaPreconTable, const std::vector<NativeInteger> &tgammaqDivqiModqiTable, const std::vector<NativeInteger> &tgammaqDivqiModqiPreconTable, const std::vector<std::vector<NativeInteger>> &qDivqiModtgammaTable, const std::vector<std::vector<NativeInteger>> &qDivqiModtgammaPreconTable) const; /** *@ brief Expands polynomial in CRT basis q to a larger CRT basis {Bsk U *mtilde}, mtilde is a redundant modulus used to remove q overflows generated *from fast conversion. * @param paramsBsk: container of Bsk moduli and roots on unity * @param qModuli: basis q = q1 * q2 * ... * @param BskmtildeModuli: basis {Bsk U mtilde} ... 
* @param mtildeqDivqiModqi: mtilde*(q/qi)^-1 (mod qi) * @param mtildeqDivqiModqiPrecon * @param qDivqiModBj: q/qi mod {Bsk U mtilde} * @param qModBski: q mod {Bsk} * @param qModBskiPrecon * @param negqInvModmtilde: -1/q mod mtilde * @param negqInvModmtildePrecon * @param mtildeInvModBskiTable: mtilde^-1 mod {Bsk} * @param mtildeInvModBskiPreconTable */ void FastBaseConvqToBskMontgomery( const shared_ptr<Params> paramsBsk, const std::vector<NativeInteger> &qModuli, const std::vector<NativeInteger> &BskmtildeModuli, const std::vector<DoubleNativeInt> &BskmtildeModulimu, const std::vector<NativeInteger> &mtildeqDivqiModqi, const std::vector<NativeInteger> &mtildeqDivqiModqiPrecon, const std::vector<std::vector<NativeInteger>> &qDivqiModBj, const std::vector<NativeInteger> &qModBski, const std::vector<NativeInteger> &qModBskiPrecon, const NativeInteger &negqInvModmtilde, const NativeInteger &negqInvModmtildePrecon, const std::vector<NativeInteger> &mtildeInvModBskiTable, const std::vector<NativeInteger> &mtildeInvModBskiPreconTable); /** * @brief Scales polynomial in CRT basis {q U Bsk} by scalar t/q. * @param t: plaintext modulus * @param qModuli: basis q = q1 * q2 * ... * @param BskModuli: Bsk basis * @param qDivqiModqi: (q/qi)^-1 mod qi * @param tqDivqiModqiPrecon * @param qDivqiModBj: (q/qi) mod {Bsk} * @param qInvModBi: q^-1 mod {Bsk} * @param qInvModBiPrecon */ void FastRNSFloorq(const NativeInteger &t, const std::vector<NativeInteger> &qModuli, const std::vector<NativeInteger> &BskModuli, const std::vector<DoubleNativeInt> &BskModulimu, const std::vector<NativeInteger> &tqDivqiModqi, const std::vector<NativeInteger> &tqDivqiModqiPrecon, const std::vector<std::vector<NativeInteger>> &qDivqiModBj, const std::vector<NativeInteger> &qInvModBi, const std::vector<NativeInteger> &qInvModBiPrecon); /** * @brief Converts fast polynomial in CRT basis {q U Bsk} to basis {q} using * Shenoy Kumaresan method. * @param qModuli: basis q = q1 * q2 * ... 
* @param BskModuli: Bsk basis * @param BDivBiModBi: (B/Bi)^-1 mod Bi, where B = m1 * m2 * ... (without * msk). Note in the source paper, B is referred to by M. * @param BDivBiModBiPrecon * @param BDivBiModmsk: B/Bi mod msk * @param BInvModmsk: B^-1 mod msk * @param BInvModmskPrecon * @param BDivBiModqj: B/Bi mod {q} * @param BModqi: B mod {q} * @param BModqiPrecon */ void FastBaseConvSK( const std::vector<NativeInteger> &qModuli, const std::vector<DoubleNativeInt> &qModulimu, const std::vector<NativeInteger> &BskModuli, const std::vector<DoubleNativeInt> &BskModulimu, const std::vector<NativeInteger> &BDivBiModBi, const std::vector<NativeInteger> &BDivBiModBiPrecon, const std::vector<NativeInteger> &BDivBiModmsk, const NativeInteger &BInvModmsk, const NativeInteger &BInvModmskPrecon, const std::vector<std::vector<NativeInteger>> &BDivBiModqj, const std::vector<NativeInteger> &BModqi, const std::vector<NativeInteger> &BModqiPrecon); /** * @brief Computes Round(p/Q*x), where x is in the CRT basis Q*S, * as [\sum_{i=1}^n alpha_i*x_i + Round(\sum_{i=1}^n beta_i*x_i)]_si, * with the result in the Q CRT basis; used in homomorphic multiplication of * BFVrns * * @param &params parameters for the CRT basis Q * @param &alpha a matrix of precomputed integer factors = * {Floor[p*S*[(Q*S/vi)^{-1}]_{vi}/vi]}_si; for all combinations of vi, si; * where vi is a prime modulus in Q*S * @param &beta a vector of precomputed floating-point factors between 0 and 1 * = [p*S*(Q*S/vi)^{-1}]_{vi}/vi; - for each vi * @param &siModulimu Barrett modulo reduction precomputations for si's * @return the result of computation as a polynomial in the CRT basis Q */ DCRTPolyType ScaleAndRound( const shared_ptr<Params> params, const std::vector<std::vector<NativeInteger>> &alpha, const std::vector<long double> &beta, const std::vector<DoubleNativeInt> &siModulimu) const; /** * @brief Convert from Coefficient to CRT or vice versa; calls FFT and inverse * FFT. 
*/ void SwitchFormat(); /** * @brief Switch modulus and adjust the values * * @param &modulus is the modulus to be set * @param &rootOfUnity is the corresponding root of unity for the modulus * @param &modulusArb is the modulus used for arbitrary cyclotomics CRT * @param &rootOfUnityArb is the corresponding root of unity for the modulus * ASSUMPTION: This method assumes that the caller provides the correct * rootOfUnity for the modulus */ void SwitchModulus(const Integer &modulus, const Integer &rootOfUnity, const Integer &modulusArb = Integer(0), const Integer &rootOfUnityArb = Integer(0)) { PALISADE_THROW(not_implemented_error, "SwitchModulus not implemented on DCRTPoly"); } /** * @brief Switch modulus at tower i and adjust the values * * @param index is the index for the tower * @param &modulus is the modulus to be set * @param &rootOfUnity is the corresponding root of unity for the modulus * ASSUMPTION: This method assumes that the caller provides the correct * rootOfUnity for the modulus */ void SwitchModulusAtIndex(usint index, const Integer &modulus, const Integer &rootOfUnity); /** * @brief Determines if inverse exists * * @return is the Boolean representation of the existence of multiplicative * inverse. */ bool InverseExists() const; /** * @brief Returns the infinity norm, basically the largest value in the ring * element. * * @return is the largest value in the ring element. */ double Norm() const; /** * @brief ostream operator * @param os the input preceding output stream * @param vec the element to add to the output stream. * @return a resulting concatenated output stream */ friend inline std::ostream &operator<<(std::ostream &os, const DCRTPolyType &vec) { // os << (vec.m_format == EVALUATION ? "EVAL: " : "COEF: "); for (usint i = 0; i < vec.GetAllElements().size(); i++) { if (i != 0) os << std::endl; os << i << ": "; os << vec.GetAllElements()[i]; } return os; } /** * @brief Element-element addition operator. * @param a first element to add. 
* @param b second element to add. * @return the result of the addition operation. */ friend inline DCRTPolyType operator+(const DCRTPolyType &a, const DCRTPolyType &b) { return a.Plus(b); } /** * @brief Element-integer addition operator. * @param a first element to add. * @param b integer to add. * @return the result of the addition operation. */ friend inline DCRTPolyType operator+(const DCRTPolyType &a, const Integer &b) { return a.Plus(b); } /** * @brief Integer-element addition operator. * @param a integer to add. * @param b element to add. * @return the result of the addition operation. */ friend inline DCRTPolyType operator+(const Integer &a, const DCRTPolyType &b) { return b.Plus(a); } /** * @brief Element-integer addition operator with CRT integer. * @param a first element to add. * @param b integer to add. * @return the result of the addition operation. */ friend inline DCRTPolyType operator+(const DCRTPolyType &a, const vector<Integer> &b) { return a.Plus(b); } /** * @brief Integer-element addition operator with CRT integer. * @param a integer to add. * @param b element to add. * @return the result of the addition operation. */ friend inline DCRTPolyType operator+(const vector<Integer> &a, const DCRTPolyType &b) { return b.Plus(a); } /** * @brief Element-element subtraction operator. * @param a element to subtract from. * @param b element to subtract. * @return the result of the subtraction operation. */ friend inline DCRTPolyType operator-(const DCRTPolyType &a, const DCRTPolyType &b) { return a.Minus(b); } /** * @brief Element-integer subtraction operator with CRT integer. * @param a first element to subtract. * @param b integer to subtract. * @return the result of the subtraction operation. */ friend inline DCRTPolyType operator-(const DCRTPolyType &a, const vector<Integer> &b) { return a.Minus(b); } /** * @brief Integer-element subtraction operator with CRT integer. * @param a integer to subtract. * @param b element to subtract. 
* @return the result of the subtraction operation. */ friend inline DCRTPolyType operator-(const vector<Integer> &a, const DCRTPolyType &b) { return b.Minus(a); } /** * @brief Element-integer subtraction operator. * @param a element to subtract from. * @param b integer to subtract. * @return the result of the subtraction operation. */ friend inline DCRTPolyType operator-(const DCRTPolyType &a, const Integer &b) { return a.Minus(b); } /** * @brief Element-element multiplication operator. * @param a element to multiply. * @param b element to multiply. * @return the result of the multiplication operation. */ friend inline DCRTPolyType operator*(const DCRTPolyType &a, const DCRTPolyType &b) { return a.Times(b); } /** * @brief Element-integer multiplication operator. * @param a element to multiply. * @param b integer to multiply. * @return the result of the multiplication operation. */ friend inline DCRTPolyType operator*(const DCRTPolyType &a, const Integer &b) { return a.Times(b); } /** * @brief Element-CRT number multiplication operator. * @param a element to multiply. * @param b integer to multiply, in CRT format. * @return the result of the multiplication operation. */ friend inline DCRTPolyType operator*(const DCRTPolyType &a, const vector<Integer> &b) { return a.Times(b); } /** * @brief Integer-element multiplication operator. * @param a integer to multiply. * @param b element to multiply. * @return the result of the multiplication operation. */ friend inline DCRTPolyType operator*(const Integer &a, const DCRTPolyType &b) { return b.Times(a); } /** * @brief Element-signed-integer multiplication operator. * @param a element to multiply. * @param b integer to multiply. * @return the result of the multiplication operation. */ friend inline DCRTPolyType operator*(const DCRTPolyType &a, int64_t b) { return a.Times(b); } /** * @brief signed-Integer-element multiplication operator. * @param a integer to multiply. * @param b element to multiply. 
* @return the result of the multiplication operation. */ friend inline DCRTPolyType operator*(int64_t a, const DCRTPolyType &b) { return b.Times(a); } template <class Archive> void save(Archive &ar, std::uint32_t const version) const { ar(::cereal::make_nvp("v", m_vectors)); ar(::cereal::make_nvp("f", m_format)); ar(::cereal::make_nvp("p", m_params)); } template <class Archive> void load(Archive &ar, std::uint32_t const version) { if (version > SerializedVersion()) { PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library"); } ar(::cereal::make_nvp("v", m_vectors)); ar(::cereal::make_nvp("f", m_format)); ar(::cereal::make_nvp("p", m_params)); } std::string SerializedObjectName() const { return "DCRTPoly"; } static uint32_t SerializedVersion() { return 1; } private: shared_ptr<Params> m_params; // array of vectors used for double-CRT presentation std::vector<PolyType> m_vectors; // Either Format::EVALUATION (0) or Format::COEFFICIENT (1) Format m_format; }; } // namespace lbcrypto namespace lbcrypto { typedef DCRTPolyImpl<BigVector> DCRTPoly; } #endif
sspr_fmt_plug.c
/*
 * Format for cracking NetIQ SSPR hashes.
 *
 * Ciphertext layout: $sspr$<kdf>$<iterations>$<salt>$<hex-digest>
 * where <kdf> selects the primitive: 0 = MD5 and 1 = SHA-1 (both
 * unsalted, the salt field is the literal "NONE"), 2 = SHA-1(salt.pw),
 * 3 = SHA-256(salt.pw), 4 = SHA-512(salt.pw); the digest is then
 * re-hashed <iterations>-1 more times (see crypt_all below).
 *
 * This software is Copyright (c) 2017, Dhiru Kholia <dhiru [at] openwall.com>,
 * and it is hereby released to the general public under the following terms:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted.
 *
 * Special thanks goes to https://github.com/crypticgeek for documenting the
 * "SHA1_SALT" hashing scheme.
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_sspr;
#elif FMT_REGISTERS_H
john_register_one(&fmt_sspr);
#else

#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 4 /* keys-per-crypt multiplier per OpenMP thread */
#endif
#endif

#include "formats.h"
#include "md5.h"
#include "sha.h"
#include "sha2.h"
#include "misc.h"
#include "common.h"
#include "params.h"
#include "options.h"
#include "memdbg.h"

#define FORMAT_LABEL            "sspr"
#define FORMAT_NAME             "NetIQ SSPR"
#define FORMAT_TAG              "$sspr$"
#define TAG_LENGTH              (sizeof(FORMAT_TAG) - 1)
#define ALGORITHM_NAME          "MD5/SHA1/SHA256/SHA512 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT       ""
#define BENCHMARK_LENGTH        0
#define PLAINTEXT_LENGTH        125
/* BINARY_SIZE is sized for the largest digest (SHA-512); only the first
 * BINARY_SIZE_MIN bytes are decoded and compared (see get_binary/cmp_one). */
#define BINARY_SIZE             64
#define BINARY_SIZE_MIN         16
#define BINARY_ALIGN            sizeof(uint32_t)
#define SALT_SIZE               sizeof(struct custom_salt)
#define SALT_ALIGN              sizeof(uint32_t)
#define MIN_KEYS_PER_CRYPT      1
#define MAX_KEYS_PER_CRYPT      1
#define MAX_SALT_LEN            1500

/* Self-test vectors: one or more entries per supported KDF type. */
static struct fmt_tests tests[] = {
	{"$sspr$2$100000$tMR6sNepv6M6nOqOy3SWnAUWo22p0GI7$f0ae3140ce2cf46c13d0b6c4bd4fab65b45b27c0", "openwall@123"},
	{"$sspr$2$100000$BrWV47lSy3Mwpp8pb60ZlJS85YS242bo$1f71c58c8dfc16c9037d3cd1cf21d1139cad4fa4", "password@123"},
	{"$sspr$1$100000$NONE$64840051a425cbc0b4e2d3750d9e0de3e800de18", "password@12345"},
	{"$sspr$1$100000$NONE$5cd2aeb3adf2baeca485672f01486775a208a40e", "openwall@12345"},
	{"$sspr$0$100000$NONE$1e6172e71e6af1c15f4c5ca658815835", "abc@12345"},
	{"$sspr$0$100000$NONE$1117af8ec9f70e8eed192c6c01776b6b", "abc@123"},
	{"$sspr$3$100000$blwmhFBUiq67iEX9WFc8EG8mCxWL4tCR$c0706a057dfdb5d31d6dd40f060c8982e1e134fdf1e7eb0d299009c2f56c1936", "hello@12345"},
	{"$sspr$3$100000$lNInqvnmbv9x65N2ltQeCialILG8Fr47$6bd508dcc2a5626c9d7ab3296bcce0538ca0ba24bf43cd2aebe2f58705814a00", "abc@123"},
	{"$sspr$4$100000$ZP3ftUBQwrovglISxt9ujUtwslsSMCjj$a2a89e0e185f2a32f18512415e4dfc379629f0222ead58f0207e9c9f7424c36fe9c7a615be6035849c11da1293da78e50e725a664b7f5fe123ede7871f13ae7f", "hello@123"},
	{"$sspr$4$100000$ZzhxK3gHP8HVkcELqIeybuRWvZjDirtg$ca5608befc50075bc4a1441de23beb4a034197d70df670addabc62a4a4d26b2e78ee38c50e9d18ce55d31b00fbb9916af12e80bf3e395ff38e58f8a958427602", "hello@12345"},
	{"$sspr$2$100000$4YtbuUHaTSHBuE1licTV16KjSZuMMMCn$23b3cf4e1a951b2ed9d5df43632f77092fa93128", "\xe4""bc@123"}, // original password was "äbc@123", application uses a code page
	{NULL}
};

/* Per-candidate plaintexts and the resulting (truncated) digests. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];

/* Parsed salt record; fmt is the KDF selector (0..4). */
static struct custom_salt {
	uint32_t iterations;
	uint32_t saltlen;
	uint32_t fmt;
	char salt[MAX_SALT_LEN];
} *cur_salt;

/* Allocate the per-key buffers, scaled up for OpenMP when enabled. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(sizeof(*saved_key),
			self->params.max_keys_per_crypt);
	crypt_out = mem_calloc(sizeof(*crypt_out),
			self->params.max_keys_per_crypt);
}

static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(crypt_out);
}

/* Sanity-check a ciphertext: tag, KDF type 0..4, decimal iteration
 * count, salt length bound, and a hex digest of plausible size. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr, *p;
	int value, extra;

	if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0)
		return 0;

	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += TAG_LENGTH;
	if ((p = strtokm(ctcopy, "$")) == NULL) // type
		goto err;
	if (!isdec(p))
		goto err;
	value = atoi(p);
	if (value != 0 && value != 1 && value != 2 && value != 3 && value != 4)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) // iterations
		goto err;
	if (!isdec(p))
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) // salt
		goto err;
	if (strlen(p) > MAX_SALT_LEN)
		goto err;
	if ((p = strtokm(NULL, "$")) == NULL) // binary
		goto err;
	/* hex digest must cover at least the compared prefix (16 bytes)
	 * and at most a full SHA-512 digest, with no trailing junk */
	value = hexlenl(p, &extra);
	if (value < BINARY_SIZE_MIN * 2 || value > BINARY_SIZE * 2 || extra)
		goto err;

	MEM_FREE(keeptr);
	return 1;

err:
	MEM_FREE(keeptr);
	return 0;
}

/* Parse the salt fields out of a (pre-validated) ciphertext.
 * Returns a pointer to a static record, per JtR format convention. */
static void *get_salt(char *ciphertext)
{
	static struct custom_salt cs;
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;

	memset(&cs, 0, SALT_SIZE);
	ctcopy += TAG_LENGTH;
	p = strtokm(ctcopy, "$");
	cs.fmt = atoi(p);
	p = strtokm(NULL, "$");
	cs.iterations = atoi(p);
	p = strtokm(NULL, "$");
	cs.saltlen = strlen(p);
	/* salt is used as (pointer, saltlen), never as a C string */
	strncpy(cs.salt, p, MAX_SALT_LEN);

	MEM_FREE(keeptr);
	return &cs;
}

/* Decode only the first BINARY_SIZE_MIN (16) bytes of the hex digest;
 * that prefix is all that gets compared against crypt_out. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		uint32_t dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	memset(buf.c, 0, BINARY_SIZE);
	p = strrchr(ciphertext, '$') + 1;
	for (i = 0; i < BINARY_SIZE_MIN; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

/* Hash all queued candidates against cur_salt.  For each KDF type the
 * scheme is hash(salt||password) (salt omitted for types 0 and 1),
 * then re-hash the raw digest; the loop starts at c = 1 because the
 * initial hash counts as the first iteration. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		uint32_t c;
		SHA_CTX ctx;
		SHA256_CTX sctx;
		SHA512_CTX sctx2;
		MD5_CTX mctx;
		unsigned char buf[64]; /* big enough for SHA-512 output */

		if (cur_salt->fmt == 0) { /* iterated MD5, unsalted */
			MD5_Init(&mctx);
			MD5_Update(&mctx, (const unsigned char*)saved_key[index], strlen(saved_key[index]));
			MD5_Final(buf, &mctx);
			for (c = 1; c < cur_salt->iterations; c++) {
				MD5_Init(&mctx);
				MD5_Update(&mctx, buf, 16);
				MD5_Final(buf, &mctx);
			}
		} else if (cur_salt->fmt == 1) { /* iterated SHA-1, unsalted */
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, (const unsigned char*)saved_key[index], strlen(saved_key[index]));
			SHA1_Final(buf, &ctx);
			for (c = 1; c < cur_salt->iterations; c++) {
				SHA1_Init(&ctx);
				SHA1_Update(&ctx, buf, 20);
				SHA1_Final(buf, &ctx);
			}
		} else if (cur_salt->fmt == 2) { /* iterated SHA-1(salt.pw) */
			SHA1_Init(&ctx);
			SHA1_Update(&ctx, cur_salt->salt, cur_salt->saltlen);
			SHA1_Update(&ctx, (const unsigned char*)saved_key[index], strlen(saved_key[index]));
			SHA1_Final(buf, &ctx);
			for (c = 1; c < cur_salt->iterations; c++) {
				SHA1_Init(&ctx);
				SHA1_Update(&ctx, buf, 20);
				SHA1_Final(buf, &ctx);
			}
		} else if (cur_salt->fmt == 3) { /* iterated SHA-256(salt.pw) */
			SHA256_Init(&sctx);
			SHA256_Update(&sctx, cur_salt->salt, cur_salt->saltlen);
			SHA256_Update(&sctx, (const unsigned char*)saved_key[index], strlen(saved_key[index]));
			SHA256_Final(buf, &sctx);
			for (c = 1; c < cur_salt->iterations; c++) {
				SHA256_Init(&sctx);
				SHA256_Update(&sctx, buf, 32);
				SHA256_Final(buf, &sctx);
			}
		} else if (cur_salt->fmt == 4) { /* iterated SHA-512(salt.pw) */
			SHA512_Init(&sctx2);
			SHA512_Update(&sctx2, cur_salt->salt, cur_salt->saltlen);
			SHA512_Update(&sctx2, (const unsigned char*)saved_key[index], strlen(saved_key[index]));
			SHA512_Final(buf, &sctx2);
			for (c = 1; c < cur_salt->iterations; c++) {
				SHA512_Init(&sctx2);
				SHA512_Update(&sctx2, buf, 64);
				SHA512_Final(buf, &sctx2);
			}
		}
		/* only the first 16 digest bytes are kept for comparison */
		memcpy(crypt_out[index], buf, BINARY_SIZE_MIN);
	}

	return count;
}

/* Quick scan: does any candidate match on the first 32 bits? */
static int cmp_all(void *binary, int count)
{
	int index = 0;

	for (; index < count; index++)
		if (((uint32_t*)binary)[0] == crypt_out[index][0])
			return 1;
	return 0;
}

/* Compare the stored 16-byte digest prefix for one candidate. */
static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE_MIN);
}

/* Only 16 bytes of digest are ever kept, so cmp_one is already the
 * strongest check available here. */
static int cmp_exact(char *source, int index)
{
	return 1;
}

static void sspr_set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Tunable-cost reporting hook: exposes the KDF selector. */
static unsigned int get_kdf_type(void *salt)
{
	return ((struct custom_salt *)salt)->fmt;
}

struct fmt_main fmt_sspr = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{
			"KDF [0:MD5 1:SHA1 2:SHA1_SALT 3:SHA256_SALT 4:SHA512_SALT]",
		},
		{ FORMAT_TAG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		get_binary,
		get_salt,
		{
			get_kdf_type,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		sspr_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
GB_binop__pow_int16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__pow_int16) // A.*B function (eWiseMult): GB (_AemultB_08__pow_int16) // A.*B function (eWiseMult): GB (_AemultB_02__pow_int16) // A.*B function (eWiseMult): GB (_AemultB_04__pow_int16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_int16) // A*D function (colscale): GB ((none)) // D*A function (rowscale): GB ((none)) // C+=B function (dense accum): GB (_Cdense_accumB__pow_int16) // C+=b function (dense accum): GB (_Cdense_accumb__pow_int16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_int16) // C=scalar+B GB (_bind1st__pow_int16) // C=scalar+B' GB (_bind1st_tran__pow_int16) // C=A+scalar GB (_bind2nd__pow_int16) // C=A'+scalar GB (_bind2nd_tran__pow_int16) // C type: int16_t // A type: int16_t // B,b type: int16_t // BinaryOp: cij = GB_pow_int16 (aij, bij) #define GB_ATYPE \ int16_t #define GB_BTYPE \ int16_t #define GB_CTYPE \ int16_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define 
GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int16_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int16_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int16_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_pow_int16 (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 1 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_POW || GxB_NO_INT16 || GxB_NO_POW_INT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__pow_int16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__pow_int16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__pow_int16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int16_t int16_t bwork = (*((int16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *restrict Cx = (int16_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__pow_int16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__pow_int16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, 
const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__pow_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__pow_int16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__pow_int16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__pow_int16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t *Cx = (int16_t *) Cx_output ; int16_t x = (*((int16_t *) x_input)) ; int16_t *Bx = (int16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) 
for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int16_t bij = GBX (Bx, p, false) ; Cx [p] = GB_pow_int16 (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__pow_int16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int16_t *Cx = (int16_t *) Cx_output ; int16_t *Ax = (int16_t *) Ax_input ; int16_t y = (*((int16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int16_t aij = GBX (Ax, p, false) ; Cx [p] = GB_pow_int16 (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_int16 (x, aij) ; \ } GrB_Info GB (_bind1st_tran__pow_int16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t x = (*((const int16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_pow_int16 (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__pow_int16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int16_t y = (*((const int16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
kernel_cpu.c
#ifdef __cplusplus extern "C" { #endif //========================================================================================================================================================================================================200 // DEFINE/INCLUDE //========================================================================================================================================================================================================200 //======================================================================================================================================================150 // LIBRARIES //======================================================================================================================================================150 //#include <omp.h> // (in path known to compiler) needed by openmp #include <stdlib.h> // (in path known to compiler) needed by malloc #include <stdio.h> // (in path known to compiler) needed by printf #include <math.h> // (in path known to compiler) needed by exp //======================================================================================================================================================150 // MAIN FUNCTION HEADER //======================================================================================================================================================150 #include "./../main.h" // (in the main program folder) needed to recognized input variables //======================================================================================================================================================150 // UTILITIES //======================================================================================================================================================150 #include "./../util/timer/timer.h" // (in library path specified to compiler) needed by timer 
//======================================================================================================================================================150 // KERNEL_CPU FUNCTION HEADER //======================================================================================================================================================150 #include "kernel_cpu.h" // (in the current directory) //========================================================================================================================================================================================================200 // PLASMAKERNEL_GPU //========================================================================================================================================================================================================200 void kernel_cpu( par_str par, dim_str dim, box_str* box, FOUR_VECTOR* rv, fp* qv, FOUR_VECTOR* fv) { //======================================================================================================================================================150 // Variables //======================================================================================================================================================150 // parameters fp alpha; fp a2; // counters int i, j, k, l; // home box long first_i; FOUR_VECTOR* rA; FOUR_VECTOR* fA; // neighbor box int pointer; long first_j; FOUR_VECTOR* rB; fp* qB; // common fp r2; fp u2; fp fs; fp vij; fp fxij,fyij,fzij; THREE_VECTOR d; //======================================================================================================================================================150 // MCPU SETUP //======================================================================================================================================================150 //omp_set_num_threads(dim.cores_arg); 
//======================================================================================================================================================150 // INPUTS //======================================================================================================================================================150 alpha = par.alpha; a2 = 2.0*alpha*alpha; //======================================================================================================================================================150 // PROCESS INTERACTIONS //======================================================================================================================================================150 /* #pragma omp parallel for \ private(i, j, k) \ private(first_i, rA, fA) \ private(pointer, first_j, rB, qB) \ private(r2, u2, fs, vij, fxij, fyij, fzij, d) */ for(l=0; l<dim.number_boxes; l=l+1){ //------------------------------------------------------------------------------------------100 // home box - box parameters //------------------------------------------------------------------------------------------100 first_i = box[l].offset; // offset to common arrays //------------------------------------------------------------------------------------------100 // home box - distance, force, charge and type parameters from common arrays //------------------------------------------------------------------------------------------100 rA = &rv[first_i]; fA = &fv[first_i]; //------------------------------------------------------------------------------------------100 // Do for the # of (home+neighbor) boxes //------------------------------------------------------------------------------------------100 for (k=0; k<(1+box[l].nn); k++) { //----------------------------------------50 // neighbor box - get pointer to the right box //----------------------------------------50 if(k==0){ pointer = l; // set first box to be processed to home box } else{ pointer = box[l].nei[k-1].number; // 
remaining boxes are neighbor boxes } //----------------------------------------50 // neighbor box - box parameters //----------------------------------------50 first_j = box[pointer].offset; //----------------------------------------50 // neighbor box - distance, force, charge and type parameters //----------------------------------------50 rB = &rv[first_j]; qB = &qv[first_j]; //----------------------------------------50 // Do for the # of particles in home box //----------------------------------------50 for (i=0; i<NUMBER_PAR_PER_BOX; i=i+1){ // do for the # of particles in current (home or neighbor) box for (j=0; j<NUMBER_PAR_PER_BOX; j=j+1){ // // coefficients r2 = rA[i].v + rB[j].v - DOT(rA[i],rB[j]); u2 = a2*r2; vij= exp(-u2); fs = 2.*vij; d.x = rA[i].x - rB[j].x; d.y = rA[i].y - rB[j].y; d.z = rA[i].z - rB[j].z; fxij=fs*d.x; fyij=fs*d.y; fzij=fs*d.z; // forces fA[i].v += qB[j]*vij; fA[i].x += qB[j]*fxij; fA[i].y += qB[j]*fyij; fA[i].z += qB[j]*fzij; } // for j } // for i } // for k } // for l /* for (i = 0; i < NUMBER_PAR_PER_BOX; ++i) { fprintf(stderr, "%f %f %f %f\n", fA[i].v, fA[i].x, fA[i].y, fA[i].z); } */ } // main #ifdef __cplusplus } #endif
avx512vnni_gemm.h
#pragma once

#include "intgemm_config.h"

#ifdef INTGEMM_COMPILER_SUPPORTS_AVX512VNNI

#include "avx512_gemm.h"
#include "types.h"

namespace intgemm {

// Fused multiply-add c += a * b over unsigned(a) x signed(b) 8-bit lanes,
// accumulating into 32-bit lanes (the AVX512VNNI vpdpbusds instruction).
// Workaround extra vmovdqa64 https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94663
INTGEMM_AVX512VNNI static inline void VNNI8(__m512i &c, __m512i a, __m512i b) {
#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
  // GCC-only inline asm: the "mx" constraint on b avoids the spurious
  // vmovdqa64 the builtin emits (see the bug report above).
  asm ("vpdpbusds %2, %1, %0" : "+x"(c) : "x"(a), "mx"(b));
#else
  c = _mm512_dpbusds_epi32(c, a, b);
#endif
}

// 8-bit integer GEMM backend using AVX512VNNI.  Quantization/preparation
// is inherited from AVX512_8bit; only the multiply kernels differ.
struct AVX512VNNI_8bit : public AVX512_8bit {

  // C = A * B for signed 8-bit A (A_rows x width, row major) and signed
  // 8-bit B, pre-rearranged so 8 columns are interleaved register by
  // register.  vpdpbusds needs its first operand unsigned, so each a is
  // replaced by |a| and the sign is pushed onto the b operands via a
  // masked negate.  callback receives each finished register of sums.
  template <typename Callback> INTGEMM_AVX512VNNI static void Multiply(const int8_t *A, const int8_t *B, Index A_rows, Index width, Index B_cols, Callback callback) {
    typedef __m512i Register;
    // Inner dimension must fill whole registers; pointers must be aligned.
    assert(width % sizeof(Register) == 0);
    assert(B_cols % 8 == 0);
    assert(reinterpret_cast<uintptr_t>(A) % sizeof(Register) == 0);
    assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0);
    auto callback_impl = callbacks::CallbackImpl<CPUType::AVX2, Callback>(callback);
    const int simd_width = width / sizeof(Register);
    Register zeros = setzero_si<Register>();
    // Go over 8 columns of B at a time.
#pragma omp for
    for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) {
      const Register *B0_col = reinterpret_cast<const Register*>(B) + B0_colidx * simd_width;
      // Process one row of A at a time.  Doesn't seem to be faster to do multiple rows of A at once.
      for (Index A_rowidx = 0; A_rowidx < A_rows; ++A_rowidx) {
        // Iterate over shared (inner) dimension.
        const Register *A_live = reinterpret_cast<const Register *>(A + A_rowidx * width);
        const Register *A_end = A_live + simd_width;
        const Register *B_live = B0_col;
        // TODO: separate first step.
        Register sum0 = zeros, sum1 = zeros, sum2 = zeros, sum3 = zeros, sum4 = zeros, sum5 = zeros, sum6 = zeros, sum7 = zeros;
        for (; A_live != A_end; ++A_live, B_live += 8) {
          Register a = *A_live;
          // Retrieve the conveniently consecutive values of B.
          Register b0 = *B_live;
          Register b1 = *(B_live + 1);
          Register b2 = *(B_live + 2);
          Register b3 = *(B_live + 3);
          Register b4 = *(B_live + 4);
          Register b5 = *(B_live + 5);
          Register b6 = *(B_live + 6);
          Register b7 = *(B_live + 7);
          // Get a mask where a is negative.
          __mmask64 neg_mask = _mm512_test_epi8_mask(a, _mm512_set1_epi8(-128));
          Register a_positive = _mm512_abs_epi8(a);
          // Negate by subtracting from zero with a mask.
          b0 = _mm512_mask_sub_epi8(b0, neg_mask, zeros, b0);
          b1 = _mm512_mask_sub_epi8(b1, neg_mask, zeros, b1);
          b2 = _mm512_mask_sub_epi8(b2, neg_mask, zeros, b2);
          b3 = _mm512_mask_sub_epi8(b3, neg_mask, zeros, b3);
          b4 = _mm512_mask_sub_epi8(b4, neg_mask, zeros, b4);
          b5 = _mm512_mask_sub_epi8(b5, neg_mask, zeros, b5);
          b6 = _mm512_mask_sub_epi8(b6, neg_mask, zeros, b6);
          b7 = _mm512_mask_sub_epi8(b7, neg_mask, zeros, b7);
          VNNI8(sum0, a_positive, b0);
          VNNI8(sum1, a_positive, b1);
          VNNI8(sum2, a_positive, b2);
          VNNI8(sum3, a_positive, b3);
          VNNI8(sum4, a_positive, b4);
          VNNI8(sum5, a_positive, b5);
          VNNI8(sum6, a_positive, b6);
          VNNI8(sum7, a_positive, b7);
        }
        // Reduce the 8 per-column accumulators and hand one register of
        // 32-bit totals to the callback.
        Register pack0123 = Pack0123(sum0, sum1, sum2, sum3);
        Register pack4567 = Pack0123(sum4, sum5, sum6, sum7);
        auto total = PermuteSummer(pack0123, pack4567);
        callback_impl(total, callbacks::OutputBufferInfo(A_rowidx, B0_colidx, A_rows, B_cols));
      }
    }
  }

  // Same multiply but A is already unsigned (shifted into uint8 range by
  // the caller), so vpdpbusds can be used directly with no sign fixup.
  template <typename Callback> INTGEMM_AVX512VNNI static void Multiply8Shift(const uint8_t *A, const int8_t *B, Index A_rows, Index width, Index B_cols, Callback callback) {
    typedef __m512i Register;
    assert(width % sizeof(Register) == 0);
    assert(B_cols % 8 == 0);
    assert(reinterpret_cast<uintptr_t>(A) % sizeof(Register) == 0);
    assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0);
    auto callback_impl = callbacks::CallbackImpl<CPUType::AVX2, Callback>(callback);
    const int simd_width = width / sizeof(Register);
    Register zeros = setzero_si<Register>();
    // Go over 8 columns of B at a time.
#pragma omp for
    for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) {
      const Register *B0_col = reinterpret_cast<const Register*>(B) + B0_colidx * simd_width;
      // Process one row of A at a time.  Doesn't seem to be faster to do multiple rows of A at once.
      for (Index A_rowidx = 0; A_rowidx < A_rows; ++A_rowidx) {
        // Iterate over shared (inner) dimension.
        const Register *A_live = reinterpret_cast<const Register *>(A + A_rowidx * width);
        const Register *A_end = A_live + simd_width;
        const Register *B_live = B0_col;
        // TODO: separate first step.
        Register sum0 = zeros, sum1 = zeros, sum2 = zeros, sum3 = zeros, sum4 = zeros, sum5 = zeros, sum6 = zeros, sum7 = zeros;
        for (; A_live != A_end; ++A_live, B_live += 8) {
          Register a = *A_live;
          //MultiplyAdd
          VNNI8(sum0, a, *B_live);
          VNNI8(sum1, a, *(B_live + 1));
          VNNI8(sum2, a, *(B_live + 2));
          VNNI8(sum3, a, *(B_live + 3));
          VNNI8(sum4, a, *(B_live + 4));
          VNNI8(sum5, a, *(B_live + 5));
          VNNI8(sum6, a, *(B_live + 6));
          VNNI8(sum7, a, *(B_live + 7));
        }
        Register pack0123 = Pack0123(sum0, sum1, sum2, sum3);
        Register pack4567 = Pack0123(sum4, sum5, sum6, sum7);
        auto total = PermuteSummer(pack0123, pack4567);
        callback_impl(total, callbacks::OutputBufferInfo(A_rowidx, B0_colidx, A_rows, B_cols));
      }
    }
  }

  // Column sums of B (a ones-row times B), used to prepare the bias term
  // that compensates the +127 shift of A in Multiply8Shift.  The result
  // is reported to the callback as a single row (row index 0).
  template <typename Callback> INTGEMM_AVX512VNNI static void PrepareBias(const int8_t *B, Index width, Index B_cols, Callback callback) {
    typedef __m512i Register;
    assert(width % sizeof(Register) == 0);
    assert(B_cols % 8 == 0);
    assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0);
    auto callback_impl = callbacks::CallbackImpl<CPUType::AVX2, Callback>(callback);
    const int simd_width = width / sizeof(Register);
    Register zeros = setzero_si<Register>();
    // Virtual A operand: every 8-bit lane is 1, so the dot product sums B.
    const Register a = set1_epi8<Register>(1);
    // Go over 8 columns of B at a time.
#pragma omp for
    for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) {
      const Register *B0_col = reinterpret_cast<const Register*>(B) + B0_colidx * simd_width;
      const Register *B_live = B0_col; //In order to make the code look as much as possible as the above function
      const Register *B_end = B_live + simd_width*8;
      // TODO: separate first step.
      Register sum0 = zeros, sum1 = zeros, sum2 = zeros, sum3 = zeros, sum4 = zeros, sum5 = zeros, sum6 = zeros, sum7 = zeros;
      for (; B_live != B_end; B_live += 8) {
        // Retrieve the conveniently consecutive values of B.
        VNNI8(sum0, a, *B_live);
        VNNI8(sum1, a, *(B_live + 1));
        VNNI8(sum2, a, *(B_live + 2));
        VNNI8(sum3, a, *(B_live + 3));
        VNNI8(sum4, a, *(B_live + 4));
        VNNI8(sum5, a, *(B_live + 5));
        VNNI8(sum6, a, *(B_live + 6));
        VNNI8(sum7, a, *(B_live + 7));
      }
      Register pack0123 = Pack0123(sum0, sum1, sum2, sum3);
      Register pack4567 = Pack0123(sum4, sum5, sum6, sum7);
      auto total = PermuteSummer(pack0123, pack4567);
      callback_impl(total, callbacks::OutputBufferInfo(0, B0_colidx, 1, B_cols));
    }
  }

  constexpr static const char *const kName = "8-bit AVX512VNNI";

  static const CPUType kUses = CPUType::AVX512VNNI;
};

} // namespace intgemm

#endif
seidel.base.pluto.par.c
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>

/* Work array for the Seidel stencil. N is expected as a compile-time
   definition (e.g. -DN=...). The +13 column padding is presumably there to
   avoid cache-set conflicts between consecutive rows -- TODO confirm. */
double A[N][N+13];

/* Fill A with a deterministic pattern so repeated runs are reproducible. */
void init_arrays()
{
    int i, j;

    for (i=0; i<N; i++)
        for (j=0; j<N; j++)
            A[i][j] = i*i+j*j;
}

/* Wall-clock time in seconds (microsecond resolution).
   NOTE(review): `stat` is unused and the gettimeofday() return value is not
   checked; typical benchmark boilerplate, but worth cleaning up. */
double rtclock()
{
    struct timezone tzp;
    struct timeval tp;
    int stat;
    gettimeofday (&tp, &tzp);
    return (tp.tv_sec + tp.tv_usec*1.0e-6);
}

/* Driver: times REPS repetitions of the PLuTo-tiled/parallelized Seidel
   stencil (pasted inline below) and reports the mean seconds per repetition.
   REPS, like N, is expected as a compile-time definition. */
int main()
{
    init_arrays();

    double annot_t_start=0, annot_t_end=0, annot_t_total=0;
    int annot_i;

    for (annot_i=0; annot_i<REPS; annot_i++)
    {
        annot_t_start = rtclock();

/* Everything from here to the matching "end" marker is code generated by
   PLuTo/CLooG and post-processed by an unroll-and-jam transformation. */
#include <math.h>
#include <assert.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))

/* One 9-point Gauss-Seidel update of A[i][j]; the zT* tile coordinates are
   not used by the statement body itself. */
#define S1(zT0,zT1,zT2,t,i,j) {A[i][j]=(A[1+i][1+j]+A[1+i][j]+A[1+i][j-1]+A[i][1+j]+A[i][j]+A[i][j-1]+A[i-1][1+j]+A[i-1][j]+A[i-1][j-1])/9;}

        int c1, c2, c3, c4, c5, c6;
        register int lb, ub, lb1, ub1, lb2, ub2;
        register int lbv, ubv;

/* Generated from PLuTo-produced CLooG file by CLooG v0.14.1 64 bits in 0.02s.
*/ for (c1=-1;c1<=floord(2*T+N-4,32);c1++) { lb1=max(max(0,ceild(16*c1-15,32)),ceild(32*c1-T+1,32)); ub1=min(min(floord(T+N-3,32),floord(32*c1+31,32)),floord(32*c1+N+29,64)); #pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6) for (c2=lb1; c2<=ub1; c2++) { for (c3=max(max(max(max(ceild(64*c2-N-28,32),0),ceild(16*c2-15,16)),ceild(16*c1-15,16)),ceild(64*c1-64*c2-29,32));c3<=min(min(min(min(floord(32*c1-32*c2+N+29,16),floord(T+N-3,16)),floord(32*c2+T+N+28,32)),floord(64*c2+N+59,32)),floord(32*c1+N+60,32));c3++) { for (c4=max(max(max(max(-32*c2+32*c3-N-29,16*c3-N+2),32*c2-N+2),0),32*c1-32*c2);c4<=min(min(min(min(32*c1-32*c2+31,T-1),floord(32*c3+29,2)),32*c2+30),-32*c2+32*c3+30);c4++) { /*@ begin Loop( transform UnrollJam(ufactor=8) for (c5=max(max(32*c2,32*c3-c4-N+2),c4+1);c5<=min(min(c4+N-2,32*c2+31),32*c3-c4+30);c5++) transform Unroll(ufactor=8) for (c6=max(c4+c5+1,32*c3);c6<=min(c4+c5+N-2,32*c3+31);c6++) { S1(c1-c2,-c1+2*c2,-c1+c3,c4,-c4+c5,-c4-c5+c6) ; } ) @*/{ for (c5 = max(max(32 * c2, 32 * c3 - c4 - N + 2), c4 + 1); c5 <= min(min(c4 + N - 2, 32 * c2 + 31), 32 * c3 - c4 + 30) - 7; c5 = c5 + 8) { for (c6 = max(c4 + c5 + 1, 32 * c3); c6 <= min(c4 + c5 + N - 2, 32 * c3 + 31) - 7; c6 = c6 + 8) { S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + c6); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 1)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 2)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 3)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 4)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 5)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 6)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 7)); } for (; c6 <= min(c4 + c5 + N - 2, 32 * c3 + 31); c6 = c6 + 1) S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + c6); for (c6 = max(c4 + (c5 + 1) + 1, 32 * c3); c6 <= min(c4 + (c5 + 1) + N 
- 2, 32 * c3 + 31) - 7; c6 = c6 + 8) { S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 1), -c4 - (c5 + 1) + c6); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 1), -c4 - (c5 + 1) + (c6 + 1)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 1), -c4 - (c5 + 1) + (c6 + 2)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 1), -c4 - (c5 + 1) + (c6 + 3)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 1), -c4 - (c5 + 1) + (c6 + 4)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 1), -c4 - (c5 + 1) + (c6 + 5)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 1), -c4 - (c5 + 1) + (c6 + 6)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 1), -c4 - (c5 + 1) + (c6 + 7)); } for (; c6 <= min(c4 + (c5 + 1) + N - 2, 32 * c3 + 31); c6 = c6 + 1) S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 1), -c4 - (c5 + 1) + c6); for (c6 = max(c4 + (c5 + 2) + 1, 32 * c3); c6 <= min(c4 + (c5 + 2) + N - 2, 32 * c3 + 31) - 7; c6 = c6 + 8) { S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 2), -c4 - (c5 + 2) + c6); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 2), -c4 - (c5 + 2) + (c6 + 1)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 2), -c4 - (c5 + 2) + (c6 + 2)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 2), -c4 - (c5 + 2) + (c6 + 3)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 2), -c4 - (c5 + 2) + (c6 + 4)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 2), -c4 - (c5 + 2) + (c6 + 5)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 2), -c4 - (c5 + 2) + (c6 + 6)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 2), -c4 - (c5 + 2) + (c6 + 7)); } for (; c6 <= min(c4 + (c5 + 2) + N - 2, 32 * c3 + 31); c6 = c6 + 1) S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 2), -c4 - (c5 + 2) + c6); for (c6 = max(c4 + (c5 + 3) + 1, 32 * c3); c6 <= min(c4 + (c5 + 3) + N - 2, 32 * c3 + 31) - 7; c6 = c6 + 8) { S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 3), -c4 - (c5 + 3) + c6); S1(c1 - c2, -c1 + 2 * c2, -c1 + 
c3, c4, -c4 + (c5 + 3), -c4 - (c5 + 3) + (c6 + 1)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 3), -c4 - (c5 + 3) + (c6 + 2)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 3), -c4 - (c5 + 3) + (c6 + 3)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 3), -c4 - (c5 + 3) + (c6 + 4)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 3), -c4 - (c5 + 3) + (c6 + 5)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 3), -c4 - (c5 + 3) + (c6 + 6)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 3), -c4 - (c5 + 3) + (c6 + 7)); } for (; c6 <= min(c4 + (c5 + 3) + N - 2, 32 * c3 + 31); c6 = c6 + 1) S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 3), -c4 - (c5 + 3) + c6); for (c6 = max(c4 + (c5 + 4) + 1, 32 * c3); c6 <= min(c4 + (c5 + 4) + N - 2, 32 * c3 + 31) - 7; c6 = c6 + 8) { S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 4), -c4 - (c5 + 4) + c6); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 4), -c4 - (c5 + 4) + (c6 + 1)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 4), -c4 - (c5 + 4) + (c6 + 2)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 4), -c4 - (c5 + 4) + (c6 + 3)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 4), -c4 - (c5 + 4) + (c6 + 4)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 4), -c4 - (c5 + 4) + (c6 + 5)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 4), -c4 - (c5 + 4) + (c6 + 6)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 4), -c4 - (c5 + 4) + (c6 + 7)); } for (; c6 <= min(c4 + (c5 + 4) + N - 2, 32 * c3 + 31); c6 = c6 + 1) S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 4), -c4 - (c5 + 4) + c6); for (c6 = max(c4 + (c5 + 5) + 1, 32 * c3); c6 <= min(c4 + (c5 + 5) + N - 2, 32 * c3 + 31) - 7; c6 = c6 + 8) { S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 5), -c4 - (c5 + 5) + c6); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 5), -c4 - (c5 + 5) + (c6 + 1)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 5), -c4 - (c5 + 5) + (c6 + 2)); S1(c1 - c2, -c1 
+ 2 * c2, -c1 + c3, c4, -c4 + (c5 + 5), -c4 - (c5 + 5) + (c6 + 3)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 5), -c4 - (c5 + 5) + (c6 + 4)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 5), -c4 - (c5 + 5) + (c6 + 5)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 5), -c4 - (c5 + 5) + (c6 + 6)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 5), -c4 - (c5 + 5) + (c6 + 7)); } for (; c6 <= min(c4 + (c5 + 5) + N - 2, 32 * c3 + 31); c6 = c6 + 1) S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 5), -c4 - (c5 + 5) + c6); for (c6 = max(c4 + (c5 + 6) + 1, 32 * c3); c6 <= min(c4 + (c5 + 6) + N - 2, 32 * c3 + 31) - 7; c6 = c6 + 8) { S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 6), -c4 - (c5 + 6) + c6); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 6), -c4 - (c5 + 6) + (c6 + 1)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 6), -c4 - (c5 + 6) + (c6 + 2)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 6), -c4 - (c5 + 6) + (c6 + 3)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 6), -c4 - (c5 + 6) + (c6 + 4)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 6), -c4 - (c5 + 6) + (c6 + 5)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 6), -c4 - (c5 + 6) + (c6 + 6)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 6), -c4 - (c5 + 6) + (c6 + 7)); } for (; c6 <= min(c4 + (c5 + 6) + N - 2, 32 * c3 + 31); c6 = c6 + 1) S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 6), -c4 - (c5 + 6) + c6); for (c6 = max(c4 + (c5 + 7) + 1, 32 * c3); c6 <= min(c4 + (c5 + 7) + N - 2, 32 * c3 + 31) - 7; c6 = c6 + 8) { S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 7), -c4 - (c5 + 7) + c6); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 7), -c4 - (c5 + 7) + (c6 + 1)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 7), -c4 - (c5 + 7) + (c6 + 2)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 7), -c4 - (c5 + 7) + (c6 + 3)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 7), -c4 - (c5 + 7) + (c6 + 4)); 
S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 7), -c4 - (c5 + 7) + (c6 + 5)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 7), -c4 - (c5 + 7) + (c6 + 6)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 7), -c4 - (c5 + 7) + (c6 + 7)); } for (; c6 <= min(c4 + (c5 + 7) + N - 2, 32 * c3 + 31); c6 = c6 + 1) S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + (c5 + 7), -c4 - (c5 + 7) + c6); } for (; c5 <= min(min(c4 + N - 2, 32 * c2 + 31), 32 * c3 - c4 + 30); c5 = c5 + 1) { for (c6 = max(c4 + c5 + 1, 32 * c3); c6 <= min(c4 + c5 + N - 2, 32 * c3 + 31) - 7; c6 = c6 + 8) { S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + c6); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 1)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 2)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 3)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 4)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 5)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 6)); S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + (c6 + 7)); } for (; c6 <= min(c4 + c5 + N - 2, 32 * c3 + 31); c6 = c6 + 1) S1(c1 - c2, -c1 + 2 * c2, -c1 + c3, c4, -c4 + c5, -c4 - c5 + c6); } } /*@ end @*/ } } } } /* End of CLooG code */ annot_t_end = rtclock(); annot_t_total += annot_t_end - annot_t_start; } annot_t_total = annot_t_total / REPS; #ifndef TEST printf("%f\n", annot_t_total); #else { int i, j; for (i=0; i<N; i++) { for (j=0; j<N; j++) { if (j%100==0) printf("\n"); printf("%f ",A[i][j]); } printf("\n"); } } #endif return ((int) A[0][0]); }
GB_unaryop__lnot_int64_int64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_int64_int64
// op(A') function:  GB_tran__lnot_int64_int64

// C type:   int64_t
// A type:   int64_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = !(aij != 0)

// type of the A matrix entries
#define GB_ATYPE \
    int64_t

// type of the C matrix entries
#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int64_t aij = Ax [pA]

// address of the p-th entry of C
#define GB_CX(p) Cx [p]

// unary operator (logical NOT on an integer: result is 0 or 1)
#define GB_OP(z, x) \
    z = !(x != 0) ;

// casting (identity cast here: int64 -> int64)
#define GB_CASTING(z, aij) \
    int64_t z = (int64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_INT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise kernel: Cx [p] = !(Ax [p] != 0) for all anz entries, in
// parallel over nthreads OpenMP threads.  Cx and Ax may alias (in-place).
GrB_Info GB_unop__lnot_int64_int64
(
    int64_t *Cx,       // Cx and Ax may be aliased
    int64_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body lives in GB_unaryop_transpose.c, specialized via
// the macros above; this wrapper selects phase 2 of the two-phase transpose.
GrB_Info GB_tran__lnot_int64_int64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
PoissonSolverMixed.h
//
//  Cubism3D
//  Copyright (c) 2018 CSE-Lab, ETH Zurich, Switzerland.
//  Distributed under the terms of the MIT license.
//
//  Created by Guido Novati (novatig@ethz.ch).
//
#ifndef CubismUP_3D_PoissonSolverMixed_h
#define CubismUP_3D_PoissonSolverMixed_h

#include "PoissonSolver.h"

CubismUP_3D_NAMESPACE_BEGIN

// Poisson solver with mixed boundary conditions: each direction is handled
// with a DFT (periodic BC) or a cosine-type transform (non-periodic BC).
class PoissonSolverMixed : public PoissonSolver
{
  // Opaque forward/backward transform plans (set up in the .cpp).
  void * fwd, * bwd;
  // Slab decomposition bookkeeping (FFTW-MPI style): local extents and
  // offsets along dims 0 and 1 -- TODO confirm against the .cpp setup.
  ptrdiff_t alloc_local=0,local_n0=0,local_0_start=0,local_n1=0,local_1_start=0;
  const double h = sim.uniformH();

  // True when the corresponding direction is periodic (plain DFT is used).
  inline bool DFT_X() const { return sim.BCx_flag == periodic; }
  inline bool DFT_Y() const { return sim.BCy_flag == periodic; }
  inline bool DFT_Z() const { return sim.BCz_flag == periodic; }

protected:

  // Divide the transformed RHS by (a blend of) the Laplacian symbol and
  // normalize, in place. One template instantiation per BC combination.
  template<bool DFTX, bool DFTY, bool DFTZ> void _solve()
  {
    // if BC flag == 1 fourier, else cosine transform
    const Real normX = (DFTX ? 1.0 : 0.5) / gsize[0];
    const Real normY = (DFTY ? 1.0 : 0.5) / gsize[1];
    const Real normZ = (DFTZ ? 1.0 : 0.5) / gsize[2];
    const Real waveFacX = (DFTX ? 2 : 1) * M_PI / gsize[0];
    const Real waveFacY = (DFTY ? 2 : 1) * M_PI / gsize[1];
    const Real waveFacZ = (DFTZ ? 2 : 1) * M_PI / gsize[2];
    // factor 1/h here is becz input to this solver is h^3 * RHS:
    // (other h^2 goes away from FD coef or wavenumeber coef)
    const Real norm_factor = (normX / h) * normY * normZ;
    Real *const in_out = data;
    const long nKx = static_cast<long>(gsize[0]);
    const long nKy = static_cast<long>(gsize[1]);
    const long nKz = static_cast<long>(gsize[2]);
    const long shifty = static_cast<long>(local_1_start);
    // BALANCE TWO PROBLEMS:
    // - if only grid consistent odd DOF and even DOF do not 'talk' to each others
    // - if only spectral then nont really div free
    // COMPROMISE: define a tolerance that balances two effects
    // Each den* below blends the finite-difference symbol (cos term) with the
    // spectral symbol (-k^2 term), weighted (1-tol) : tol.
    static const Real tol = 0.01;
    #pragma omp parallel for schedule(static)
    for(long lj = 0; lj<static_cast<long>(local_n1); ++lj)
    {
      const long j = shifty + lj; //memory index plus shift due to decomp
      // Fold the DFT index to the symmetric wavenumber; cosine modes use the
      // index directly (with a half-integer shift below).
      const long ky = DFTY ? ((j <= nKy/2) ? j : nKy-j) : j;
      const Real rky2 = std::pow( (ky + (DFTY? 0 : (Real)0.5)) * waveFacY, 2);
      const Real denY = (1-tol) * (std::cos(2*waveFacY*j)-1)/2 - tol*rky2;
      for(long i = 0; i<static_cast<long>(gsize[0]); ++ i)
      {
        const long kx = DFTX ? ((i <= nKx/2) ? i : nKx-i) : i;
        const Real rkx2 = std::pow( (kx + (DFTX? 0 : (Real)0.5)) * waveFacX, 2);
        const Real denX = (1-tol) * (std::cos(2*waveFacX*i)-1)/2 - tol*rkx2;
        for(long k = 0; k<static_cast<long>(gsize[2]); ++ k)
        {
          const size_t linidx = (lj*gsize[0] +i)*gsize[2] + k;
          const long kz = DFTZ ? ((k <= nKz/2) ? k : nKz-k) : k;
          const Real rkz2 = std::pow( (kz + (DFTZ? 0 : (Real)0.5)) * waveFacZ, 2);
          const Real denZ = (1-tol) * (std::cos(2*waveFacZ*k)-1)/2 - tol*rkz2;
          in_out[linidx] *= norm_factor/(denX + denY + denZ);
        }
      }
    }
    // Zero the mean mode (the symbol vanishes there); only the rank owning
    // the first slab (shifty==0) holds it.
    //if (shifty==0 && DFTX && DFTY && DFTZ) in_out[0] = 0;
    if (shifty==0) in_out[0] = 0;
  }

public:

  PoissonSolverMixed(SimulationData & s);

  void solve();

  ~PoissonSolverMixed();
};

CubismUP_3D_NAMESPACE_END
#endif // CubismUP_3D_PoissonSolverMixed_h
ft_ao.c
/*
 * Fourier transformed AO pair
 * \int e^{-i Gv \cdot r} i(r) * j(r) dr^3
 *
 * eval_gz, b, gxyz, gs:
 * - when eval_gz is    GTO_Gv_uniform_orth
 *   > b (reciprocal vectors) is diagonal 3x3 matrix
 *   > Gv k-space grids = dot(b.T,gxyz)
 *   > gxyz[3,nGv] = (kx[:nGv], ky[:nGv], kz[:nGv])
 *   > gs[3]: The number of *positive* G-vectors along each direction.
 * - when eval_gz is    GTO_Gv_uniform_nonorth
 *   > b is 3x3 matrix = 2\pi * scipy.linalg.inv(cell.lattice_vectors).T
 *   > Gv k-space grids = dot(b.T,gxyz)
 *   > gxyz[3,nGv] = (kx[:nGv], ky[:nGv], kz[:nGv])
 *   > gs[3]: The number of *positive* G-vectors along each direction.
 * - when eval_gz is    GTO_Gv_general
 *   only Gv is needed
 * - when eval_gz is    GTO_Gv_nonuniform_orth
 *   > b is the basic G value for each cartesian component
 *     Gx = b[:gs[0]]
 *     Gy = b[gs[0]:gs[0]+gs[1]]
 *     Gz = b[gs[0]+gs[1]:]
 *   > gs[3]: Number of basic G values along each direction.
 *   > gxyz[3,nGv] are used to index the basic G value
 *   > Gv is not used
 */

/*
 *
 */

#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <complex.h>
#include "config.h"
#include "cint.h"

#define SQRTPI          1.7724538509055160272981674833411451
#define EXPCUTOFF       100
#define NCTRMAX         72

/* Mirror of libcint's integral-evaluation environment.  Only the fields used
 * by this file are meaningful here; see the note at the end of the struct. */
typedef struct {
        int *atm;
        int *bas;
        double *env;
        int *shls;
        int natm;
        int nbas;
        int i_l;
        int j_l;
        int k_l;
        int l_l;
        int nfi;  // number of cartesion components
        int nfj;
        int nfk;
        int nfl;
        int nf;  // = nfi*nfj*nfk*nfl;
        int _padding;
        int x_ctr[4];

        int gbits;
        int ncomp_e1; // = 1 if spin free, = 4 when spin included, it
        int ncomp_e2; // corresponds to POSX,POSY,POSZ,POS1, see cint_const.h
        int ncomp_tensor; // e.g. = 3 for gradients

        /* values may diff based on the g0_2d4d algorithm */
        int li_ceil; // power of x, == i_l if nabla is involved, otherwise == i_l
        int lj_ceil;
        int lk_ceil;
        int ll_ceil;
        int g_stride_i; // nrys_roots * shift of (i++,k,l,j)
        int g_stride_k; // nrys_roots * shift of (i,k++,l,j)
        int g_stride_l; // nrys_roots * shift of (i,k,l++,j)
        int g_stride_j; // nrys_roots * shift of (i,k,l,j++)
        int nrys_roots;
        int g_size;  // ref to cint2e.c g = malloc(sizeof(double)*g_size)

        int g2d_ijmax;
        int g2d_klmax;
        double common_factor;
        double _padding1;
        double rirj[3]; // diff by sign in different g0_2d4d algorithm
        double rkrl[3];
        double *rx_in_rijrx;
        double *rx_in_rklrx;

        double *ri;
        double *rj;
        double *rk;
        double *rl;

        // Other definitions in CINTEnvVars are different in libcint and qcint.
        // They should not used in this function.
} CINTEnvVars;

void CINTg1e_index_xyz(int *idx, const CINTEnvVars *envs);
double CINTsquare_dist(const double *r1, const double *r2);
double CINTcommon_fac_sp(int l);
int CINTinit_int1e_EnvVars(CINTEnvVars *envs, const int *ng, const int *shls,
                           const int *atm, const int natm,
                           const int *bas, const int nbas, const double *env);

/* Initialize envs for a 1-electron AO pair and set the strides/size of the
 * intermediate buffer g.  The smaller of the two angular momenta determines
 * which index gets the short stride (dli/dlj asymmetry below). */
static void init1e_envs(CINTEnvVars *envs, const int *shls,
                        const int *atm, const int natm,
                        const int *bas, const int nbas, const double *env)
{
        int ng[] = {0, 0, 0, 0, 0, 0, 0, 0};
        CINTinit_int1e_EnvVars(envs, ng, shls, atm, natm, bas, nbas, env);

        int dli, dlj;
        if (envs->li_ceil < envs->lj_ceil) {
                dli = envs->li_ceil + 1;
                dlj = envs->li_ceil + envs->lj_ceil + 1;
        } else {
                dli = envs->li_ceil + envs->lj_ceil + 1;
                dlj = envs->lj_ceil + 1;
        }
        envs->g_stride_i = 1;
        envs->g_stride_j = dli;
        envs->g_size = dli * dlj;
}

/* Number of Cartesian components for angular momentum l: (l+1)(l+2)/2. */
static const int _LEN_CART[] = {
        1, 3, 6, 10, 15, 21, 28, 36, 45, 55, 66, 78, 91, 105, 120, 136
};
/* Cumulative sums of _LEN_CART, used to index the flattened tables below. */
static const int _CUM_LEN_CART[] = {
        1, 4, 10, 20, 35, 56, 84, 120, 165, 220, 286, 364, 455, 560, 680, 816,
};

/*
 * WHEREX_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if x > 0]
 * WHEREY_IF_L_INC1 = 
[xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if y > 0] * WHEREZ_IF_L_INC1 = [xyz2addr(x,y,z) for x,y,z in loopcart(L_MAX) if z > 0] */ static const int _UPIDY[] = { 1, 3, 4, 6, 7, 8, 10, 11, 12, 13, 15, 16, 17, 18, 19, 21, 22, 23, 24, 25, 26, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48, 49, 50, 51, 52, 53, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 91, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103, 105,106,107,108,109,110,111,112,113,114,115,116,117,118, 120,121,122,123,124,125,126,127,128,129,130,131,132,133,134, }; static const int _UPIDZ[] = { 2, 4, 5, 7, 8, 9, 11, 12, 13, 14, 16, 17, 18, 19, 20, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33, 34, 35, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93, 94, 95, 96, 97, 98, 99,100,101,102,103,104, 106,107,108,109,110,111,112,113,114,115,116,117,118,119, 121,122,123,124,125,126,127,128,129,130,131,132,133,134,135, }; /* * _DOWN_XYZ, _DOWN_XYZ_ORDER, _DOWN1, _DOWN2 labels the index in the 1D * recursive relation f_{i+1} = i/2a * f_{i-1} + X * f_{i} * _DOWN_XYZ_ORDER i in i/2a * _DOWN2 index of f_{i-1} * _DOWN_XYZ index of X * _DOWN1 index of f_{i} */ static const int _DOWN1[] = { -1, 0, 0, 0, 0, 1, 2, 1, 2, 2, 0, 0, 0, 3, 4, 5, 3, 3, 5, 5, 0, 0, 0, 3, 2, 5, 6, 7, 8, 9, 6, 6, 8, 9, 9, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 11, 12, 13, 14, 10, 10, 12, 13, 14, 14, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 16, 17, 18, 19, 20, 15, 15, 17, 18, 19, 20, 20, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 22, 23, 24, 25, 26, 27, 21, 21, 23, 24, 25, 26, 27, 27, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 29, 30, 31, 32, 33, 34, 35, 28, 28, 30, 31, 32, 33, 34, 35, 
35, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 36, 36, 38, 39, 40, 41, 42, 43, 44, 44, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 45, 45, 47, 48, 49, 50, 51, 52, 53, 54, 54, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 55, 55, 57, 58, 59, 60, 61, 62, 63, 64, 65, 65, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 66, 66, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 77, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 78, 78, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 90, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 66, 80, 81, 82, 83, 84, 85, 86, 87, 88, 77, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 91, 91, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 
104, 0, 0, 0, 3, 2, 5, 6, 3, 5, 9, 10, 6, 12, 9, 14, 15, 10, 17, 18, 14, 20, 21, 15, 23, 24, 25, 20, 27, 28, 21, 30, 31, 32, 33, 27, 35, 36, 28, 38, 39, 40, 41, 42, 35, 44, 45, 36, 47, 48, 49, 50, 51, 52, 44, 54, 55, 45, 57, 58, 59, 60, 61, 62, 63, 54, 65, 66, 55, 68, 69, 70, 71, 72, 73, 74, 75, 65, 77, 78, 66, 80, 81, 82, 83, 84, 85, 86, 87, 88, 77, 90, 91, 78, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 90, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 105, 105, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 119, }; static const int _DOWN2[] = { -1, -1, -1, -1, 0, -1, -1, 0, -1, 0, 0, -1, -1, -1, -1, -1, 1, -1, -1, 2, 0, -1, -1, 3, -1, 5, -1, -1, -1, -1, 3, -1, 5, -1, 5, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, -1, -1, -1, -1, -1, 6, -1, 8, 9, -1, 9, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, -1, -1, -1, -1, -1, -1, 10, -1, 12, 13, 14, -1, 14, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, -1, -1, -1, -1, -1, -1, -1, 15, -1, 17, 18, 19, 20, -1, 20, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, -1, -1, -1, -1, -1, -1, -1, -1, 21, -1, 23, 24, 25, 26, 27, -1, 27, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, -1, -1, -1, -1, -1, -1, -1, -1, -1, 28, -1, 30, 31, 32, 33, 34, 35, -1, 35, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 36, -1, 38, 39, 40, 41, 42, 43, 44, -1, 44, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 45, -1, 47, 48, 49, 50, 51, 52, 53, 
54, -1, 54, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 55, -1, 57, 58, 59, 60, 61, 62, 63, 64, 65, -1, 65, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, -1, 77, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, -1, 90, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, -1, 90, 0, -1, -1, 3, -1, 5, 6, -1, -1, 9, 10, -1, 12, -1, 14, 15, -1, 17, 18, -1, 20, 21, -1, 23, 24, 25, -1, 27, 28, -1, 30, 31, 32, 33, -1, 35, 36, -1, 38, 39, 40, 41, 42, -1, 44, 45, -1, 47, 48, 49, 50, 51, 52, -1, 54, 55, -1, 57, 58, 59, 60, 61, 62, 63, -1, 65, 66, -1, 68, 69, 70, 71, 72, 73, 74, 75, -1, 77, 78, -1, 80, 81, 82, 83, 84, 85, 86, 87, 88, -1, 90, 91, -1, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, -1, 104, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 91, -1, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, -1, 104, }; static const int _DOWN_XYZ[] = { 2, 0, 1, 2, 0, 0, 0, 1, 1, 2, 0, 1, 2, 0, 0, 0, 1, 2, 1, 2, 0, 1, 2, 0, 1, 0, 0, 0, 0, 0, 1, 2, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 1, 
2, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 0, 1, 2, 0, 1, 0, 0, 2, 1, 0, 0, 2, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 2, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 1, 0, 0, 
2, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, }; static const int _DOWN_XYZ_ORDER[] = { 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 2, 0, 0, 0, 0, 0, 2, 0, 0, 2, 3, 0, 0, 1, 0, 1, 0, 0, 0, 0, 3, 0, 1, 0, 3, 4, 0, 0, 2, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 4, 0, 2, 1, 0, 4, 5, 0, 0, 3, 0, 3, 2, 0, 0, 2, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 5, 0, 3, 2, 1, 0, 5, 6, 0, 0, 4, 0, 4, 3, 0, 0, 3, 2, 0, 2, 0, 2, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 6, 0, 4, 3, 2, 1, 0, 6, 7, 0, 0, 5, 0, 5, 4, 0, 0, 4, 3, 0, 3, 0, 3, 2, 0, 2, 2, 0, 2, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 5, 4, 3, 2, 1, 0, 7, 8, 0, 0, 6, 0, 6, 5, 0, 0, 5, 4, 0, 4, 0, 4, 3, 0, 3, 3, 0, 3, 2, 0, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 6, 5, 4, 3, 2, 1, 0, 8, 9, 0, 0, 7, 0, 7, 6, 0, 0, 6, 5, 0, 5, 0, 5, 4, 0, 4, 4, 0, 4, 3, 0, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 7, 6, 5, 4, 3, 2, 1, 0, 9, 10, 0, 0, 8, 0, 8, 7, 0, 0, 7, 6, 0, 6, 0, 6, 5, 0, 5, 5, 0, 5, 4, 0, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 8, 7, 6, 5, 4, 3, 2, 1, 0, 10, 11, 0, 0, 9, 0, 9, 8, 0, 0, 8, 7, 0, 7, 0, 7, 6, 0, 6, 6, 0, 6, 5, 0, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11, 0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 11, 12, 0, 0, 10, 0, 10, 9, 0, 0, 9, 8, 0, 8, 0, 8, 7, 0, 7, 7, 0, 7, 6, 0, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 10, 9, 
8, 7, 6, 5, 4, 3, 2, 1, 0, 12, 13, 0, 0, 11, 0, 11, 10, 0, 0, 10, 9, 0, 9, 0, 9, 8, 0, 8, 8, 0, 8, 7, 0, 7, 7, 7, 0, 7, 6, 0, 6, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13, 0, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 13, 14, 0, 0, 12, 0, 12, 11, 0, 0, 11, 10, 0, 10, 0, 10, 9, 0, 9, 9, 0, 9, 8, 0, 8, 8, 8, 0, 8, 7, 0, 7, 7, 7, 7, 0, 7, 6, 0, 6, 6, 6, 6, 6, 0, 6, 5, 0, 5, 5, 5, 5, 5, 5, 0, 5, 4, 0, 4, 4, 4, 4, 4, 4, 4, 0, 4, 3, 0, 3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 0, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 14, }; #define WHEREX_IF_L_INC1(i) i #define WHEREY_IF_L_INC1(i) _UPIDY[i] #define WHEREZ_IF_L_INC1(i) _UPIDZ[i] #define STARTX_IF_L_DEC1(i) 0 #define STARTY_IF_L_DEC1(i) ((i<2)?0:_LEN_CART[i-2]) #define STARTZ_IF_L_DEC1(i) (_LEN_CART[i-1]-1) #define ADDR_IF_L_DEC1(l,m) _DOWN1[_CUM_LEN_CART[l-1]+m] #define ADDR_IF_L_DEC2(l,m) _DOWN2[_CUM_LEN_CART[l-1]+m] #define DEC1_XYZ(l,m) _DOWN_XYZ[_CUM_LEN_CART[l-1]+m] #define DEC1_XYZ_ORDER(l,m) _DOWN_XYZ_ORDER[_CUM_LEN_CART[l-1]+m] static int vrr1d(double complex *g, double *rijri, double aij, double *Gv, int topl, int nGv) { int cumxyz = 1; if (topl == 0) { return cumxyz; } double *kx = Gv; double *ky = kx + nGv; double *kz = ky + nGv; int i, n, m, l; double a2; double complex *p0, *p1, *p2, *dec1, *dec2; double *ka2 = malloc(sizeof(double) * nGv*3); double *kxa2 = ka2; double *kya2 = kxa2 + nGv; double *kza2 = kya2 + nGv; a2 = .5 / aij; for (n = 0; n < nGv; n++) { kxa2[n] = kx[n] * a2; kya2[n] = ky[n] * a2; kza2[n] = kz[n] * a2; } p0 = g + nGv; for (n = 0; n < nGv; n++) { p0[ n] = (rijri[0] - kxa2[n]*_Complex_I) * g[n]; p0[nGv +n] = (rijri[1] - kya2[n]*_Complex_I) * g[n]; p0[nGv*2+n] = (rijri[2] - kza2[n]*_Complex_I) * 
g[n]; } cumxyz += 3; for (l = 1; l < topl; l++) { p0 = g + cumxyz * nGv; dec1 = p0 - _LEN_CART[l ] * nGv; dec2 = dec1 - _LEN_CART[l-1] * nGv; for (i = 0; i < _LEN_CART[l+1]; i++) { m = DEC1_XYZ(l+1,i); kxa2 = ka2 + m * nGv; a2 = .5/aij * DEC1_XYZ_ORDER(l+1,i); p1 = dec1 + ADDR_IF_L_DEC1(l+1,i) * nGv; p2 = dec2 + ADDR_IF_L_DEC2(l+1,i) * nGv; if (ADDR_IF_L_DEC2(l+1,i) < 0) { for (n = 0; n < nGv; n++) { p0[n] = (rijri[m]-kxa2[n]*_Complex_I)*p1[n]; } } else { for (n = 0; n < nGv; n++) { p0[n] = a2*p2[n] + (rijri[m]-kxa2[n]*_Complex_I)*p1[n]; } } p0 += nGv; } cumxyz += _LEN_CART[l+1]; } free(ka2); return cumxyz; } /* * if li = 3, lj = 1 * (10 + X*00 -> 01): * gs + X*fs -> fp */ static void vrr2d_ket_inc1(double complex *out, const double complex *g, double *rirj, int li, int lj, int nGv) { if (lj == 0) { memcpy(out, g, sizeof(double complex)*_LEN_CART[li]*nGv); return; } const int row_10 = _LEN_CART[li+1]; const int row_00 = _LEN_CART[li ]; const int col_00 = _LEN_CART[lj-1]; const double complex *g00 = g; const double complex *g10 = g + row_00*col_00*nGv; int i, j, n; const double complex *p00, *p10; double complex *p01 = out; for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * nGv; p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)) * nGv; for (n = 0; n < nGv; n++) { p01[n] = p10[n] + rirj[0] * p00[n]; } p01 += nGv; } } for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * nGv; p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)) * nGv; for (n = 0; n < nGv; n++) { p01[n] = p10[n] + rirj[1] * p00[n]; } p01 += nGv; } } j = STARTZ_IF_L_DEC1(lj); if (j < _LEN_CART[lj-1]) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * nGv; p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)) * nGv; for (n = 0; n < nGv; n++) { p01[n] = p10[n] + rirj[2] * p00[n]; } p01 += nGv; } } } /* * transpose i, j when store in out */ static void vrr2d_inc1_swapij(double complex *out, const 
double complex *g, double *rirj, int li, int lj, int nGv) { if (lj == 0) { memcpy(out, g, sizeof(double complex)*_LEN_CART[li]*nGv); return; } const int row_01 = _LEN_CART[lj]; const int row_10 = _LEN_CART[li+1]; const int row_00 = _LEN_CART[li ]; const int col_00 = _LEN_CART[lj-1]; const double complex *g00 = g; const double complex *g10 = g + row_00*col_00*nGv; int i, j, n; const double complex *p00, *p10; double complex *p01 = out; for (j = STARTX_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * nGv; p10 = g10 + (j*row_10+WHEREX_IF_L_INC1(i)) * nGv; p01 = out + i*row_01 * nGv; for (n = 0; n < nGv; n++) { p01[n] = p10[n] + rirj[0] * p00[n]; } } out += nGv; } for (j = STARTY_IF_L_DEC1(lj); j < _LEN_CART[lj-1]; j++) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * nGv; p10 = g10 + (j*row_10+WHEREY_IF_L_INC1(i)) * nGv; p01 = out + i*row_01 * nGv; for (n = 0; n < nGv; n++) { p01[n] = p10[n] + rirj[1] * p00[n]; } } out += nGv; } j = STARTZ_IF_L_DEC1(lj); if (j < _LEN_CART[lj-1]) { for (i = 0; i < row_00; i++) { p00 = g00 + (j*row_00+i) * nGv; p10 = g10 + (j*row_10+WHEREZ_IF_L_INC1(i)) * nGv; p01 = out + i*row_01 * nGv; for (n = 0; n < nGv; n++) { p01[n] = p10[n] + rirj[2] * p00[n]; } } } } /* (li+lj,0) => (li,lj) */ static void vrr2d(double complex *out, double complex *g, double complex *gbuf2, CINTEnvVars *envs, int nGv) { const int li = envs->li_ceil; const int lj = envs->lj_ceil; const int nmax = li + lj; const double *ri = envs->ri; const double *rj = envs->rj; double complex *g00, *g01, *gswap, *pg00, *pg01; int row_01, col_01, row_00, col_00; int i, j; double rirj[3]; rirj[0] = ri[0] - rj[0]; rirj[1] = ri[1] - rj[1]; rirj[2] = ri[2] - rj[2]; g00 = gbuf2; g01 = g; for (j = 1; j < lj; j++) { gswap = g00; g00 = g01; g01 = gswap; pg00 = g00; pg01 = g01; for (i = li; i <= nmax-j; i++) { vrr2d_ket_inc1(pg01, pg00, rirj, i, j, nGv); row_01 = _LEN_CART[i]; col_01 = _LEN_CART[j]; row_00 = _LEN_CART[i ]; 
col_00 = _LEN_CART[j-1]; pg00 += row_00*col_00 * nGv; pg01 += row_01*col_01 * nGv; } } vrr2d_ket_inc1(out, g01, rirj, li, lj, nGv); } /* (0,li+lj) => (li,lj) */ static void hrr2d(double complex *out, double complex *g, double complex *gbuf2, CINTEnvVars *envs, int nGv) { const int li = envs->li_ceil; const int lj = envs->lj_ceil; const int nmax = li + lj; const double *ri = envs->ri; const double *rj = envs->rj; double complex *g00, *g01, *gswap, *pg00, *pg01; int row_01, col_01, row_00, col_00; int i, j; double rjri[3]; rjri[0] = rj[0] - ri[0]; rjri[1] = rj[1] - ri[1]; rjri[2] = rj[2] - ri[2]; g00 = gbuf2; g01 = g; for (i = 1; i < li; i++) { gswap = g00; g00 = g01; g01 = gswap; pg00 = g00; pg01 = g01; for (j = lj; j <= nmax-i; j++) { vrr2d_ket_inc1(pg01, pg00, rjri, j, i, nGv); row_01 = _LEN_CART[j]; col_01 = _LEN_CART[i]; row_00 = _LEN_CART[j ]; col_00 = _LEN_CART[i-1]; pg00 += row_00*col_00 * nGv; pg01 += row_01*col_01 * nGv; } } vrr2d_inc1_swapij(out, g01, rjri, lj, li, nGv); } /* * Recursive relation */ static void aopair_rr_igtj_early(double complex *g, double ai, double aj, CINTEnvVars *envs, void (*eval_gz)(), double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv) { const int topl = envs->li_ceil + envs->lj_ceil; const double aij = ai + aj; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rijri[3]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijri[0] = rij[0] - ri[0]; rijri[1] = rij[1] - ri[1]; rijri[2] = rij[2] - ri[2]; (*eval_gz)(g, aij, rij, fac, Gv, b, gxyz, gs, nGv); vrr1d(g, rijri, aij, Gv, topl, nGv); } static void aopair_rr_iltj_early(double complex *g, double ai, double aj, CINTEnvVars *envs, void (*eval_gz)(), double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv) { const int topl = envs->li_ceil + envs->lj_ceil; const double aij = ai + aj; const double *ri = envs->ri; const double *rj = envs->rj; double 
rij[3], rijrj[3]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijrj[0] = rij[0] - rj[0]; rijrj[1] = rij[1] - rj[1]; rijrj[2] = rij[2] - rj[2]; (*eval_gz)(g, aij, rij, fac, Gv, b, gxyz, gs, nGv); vrr1d(g, rijrj, aij, Gv, topl, nGv); } static void aopair_rr_igtj_lazy(double complex *g, double ai, double aj, CINTEnvVars *envs, void (*eval_gz)(), double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv) { const int nmax = envs->li_ceil + envs->lj_ceil; const int lj = envs->lj_ceil; const int dj = envs->g_stride_j; const size_t NGv = nGv; const double aij = ai + aj; const double a2 = .5 / aij; const double *ri = envs->ri; const double *rj = envs->rj; double rij[3], rirj[3], rijri[3]; double complex *gx = g; double complex *gy = gx + envs->g_size * NGv; double complex *gz = gy + envs->g_size * NGv; double *kx = Gv; double *ky = kx + nGv; double *kz = ky + nGv; size_t off0, off1, off2; int i, j, n, ptr; double ia2; rirj[0] = ri[0] - rj[0]; rirj[1] = ri[1] - rj[1]; rirj[2] = ri[2] - rj[2]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijri[0] = rij[0] - ri[0]; rijri[1] = rij[1] - ri[1]; rijri[2] = rij[2] - ri[2]; for (n = 0; n < nGv; n++) { gx[n] = 1; gy[n] = 1; } (*eval_gz)(gz, aij, rij, fac, Gv, b, gxyz, gs, nGv); if (nmax > 0) { for (n = 0; n < nGv; n++) { if (gz[n] != 0) { gx[nGv+n] = (rijri[0] - kx[n]*a2*_Complex_I) * gx[n]; gy[nGv+n] = (rijri[1] - ky[n]*a2*_Complex_I) * gy[n]; gz[nGv+n] = (rijri[2] - kz[n]*a2*_Complex_I) * gz[n]; } } } for (i = 1; i < nmax; i++) { off0 = (i-1) * nGv; off1 = i * nGv; off2 = (i+1) * nGv; ia2 = i * a2; for (n = 0; n < nGv; n++) { if (gz[n] != 0) { gx[off2+n] = ia2 * gx[off0+n] + (rijri[0] - kx[n]*a2*_Complex_I) * gx[off1+n]; gy[off2+n] = ia2 * gy[off0+n] + (rijri[1] - ky[n]*a2*_Complex_I) * gy[off1+n]; gz[off2+n] = ia2 * gz[off0+n] + (rijri[2] - 
kz[n]*a2*_Complex_I) * gz[off1+n]; } } } for (j = 1; j <= lj; j++) { ptr = dj * j; for (i = ptr; i <= ptr + nmax - j; i++) { off0 = i * NGv - dj * NGv; // [i, j-1] off1 = (i+1) * NGv - dj * NGv; // [i+1,j-1] off2 = i * NGv; // [i, j ] for (n = 0; n < nGv; n++) { if (gz[n] != 0) { gx[off2+n] = gx[off1+n] + rirj[0] * gx[off0+n]; gy[off2+n] = gy[off1+n] + rirj[1] * gy[off0+n]; gz[off2+n] = gz[off1+n] + rirj[2] * gz[off0+n]; } } } } } static void aopair_rr_iltj_lazy(double complex *g, double ai, double aj, CINTEnvVars *envs, void (*eval_gz)(), double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv) { const int nmax = envs->li_ceil + envs->lj_ceil; const int li = envs->li_ceil; const int dj = envs->g_stride_j; const double aij = ai + aj; const double a2 = .5 / aij; const double *ri = envs->ri; const double *rj = envs->rj; const size_t NGv = nGv; double rij[3], rirj[3], rijrj[3]; double complex *gx = g; double complex *gy = gx + envs->g_size * NGv; double complex *gz = gy + envs->g_size * NGv; double *kx = Gv; double *ky = kx + nGv; double *kz = ky + nGv; size_t off0, off1, off2; int i, j, n; double ia2; rirj[0] = rj[0] - ri[0]; rirj[1] = rj[1] - ri[1]; rirj[2] = rj[2] - ri[2]; rij[0] = (ai * ri[0] + aj * rj[0]) / aij; rij[1] = (ai * ri[1] + aj * rj[1]) / aij; rij[2] = (ai * ri[2] + aj * rj[2]) / aij; rijrj[0] = rij[0] - rj[0]; rijrj[1] = rij[1] - rj[1]; rijrj[2] = rij[2] - rj[2]; for (n = 0; n < nGv; n++) { gx[n] = 1; gy[n] = 1; } (*eval_gz)(gz, aij, rij, fac, Gv, b, gxyz, gs, nGv); if (nmax > 0) { off0 = dj * nGv; for (n = 0; n < nGv; n++) { if (gz[n] != 0) { gx[off0+n] = (rijrj[0] - kx[n]*a2*_Complex_I) * gx[n]; gy[off0+n] = (rijrj[1] - ky[n]*a2*_Complex_I) * gy[n]; gz[off0+n] = (rijrj[2] - kz[n]*a2*_Complex_I) * gz[n]; } } } for (i = 1; i < nmax; i++) { off0 = (i-1) * dj * NGv; off1 = i * dj * NGv; off2 = (i+1) * dj * NGv; ia2 = i * a2; for (n = 0; n < nGv; n++) { if (gz[n] != 0) { gx[off2+n] = ia2 * gx[off0+n] + (rijrj[0] - kx[n]*a2*_Complex_I) * 
gx[off1+n]; gy[off2+n] = ia2 * gy[off0+n] + (rijrj[1] - ky[n]*a2*_Complex_I) * gy[off1+n]; gz[off2+n] = ia2 * gz[off0+n] + (rijrj[2] - kz[n]*a2*_Complex_I) * gz[off1+n]; } } } for (i = 1; i <= li; i++) { for (j = 0; j <= nmax - i; j++) { off0 = (i-1) * NGv + j * dj * NGv; // [i-1,j ] off1 = (i-1) * NGv + (j+1) * dj * NGv; // [i-1,j+1] off2 = i * NGv + j * dj * NGv; // [i ,j ] for (n = 0; n < nGv; n++) { if (gz[n] != 0) { gx[off2+n] = gx[off1+n] + rirj[0] * gx[off0+n]; gy[off2+n] = gy[off1+n] + rirj[1] * gy[off0+n]; gz[off2+n] = gz[off1+n] + rirj[2] * gz[off0+n]; } } } } } static void inner_prod(double complex *g, double complex *gout, const int *idx, const CINTEnvVars *envs, int nGv, int empty) { const size_t NGv = nGv; int ix, iy, iz, n, k; double complex *gz = g + envs->g_size * NGv * 2; if (empty) { for (n = 0; n < envs->nf; n++) { ix = idx[n*3+0]; iy = idx[n*3+1]; iz = idx[n*3+2]; for (k = 0; k < NGv; k++) { if (gz[k] != 0) { gout[n*NGv+k] = g[ix*NGv+k] * g[iy*NGv+k] * g[iz*NGv+k]; } else { gout[n*NGv+k] = 0; } } } } else { for (n = 0; n < envs->nf; n++) { ix = idx[n*3+0]; iy = idx[n*3+1]; iz = idx[n*3+2]; for (k = 0; k < NGv; k++) { if (gz[k] != 0) { gout[n*NGv+k] += g[ix*NGv+k] * g[iy*NGv+k] * g[iz*NGv+k]; } } } } } static void prim_to_ctr(double complex *gc, const size_t nf, const double complex *gp, const int nprim, const int nctr, const double *coeff, int empty) { double complex *pgc = gc; size_t n, i; double c; if (empty) { for (n = 0; n < nctr; n++) { c = coeff[nprim*n]; for (i = 0; i < nf; i++) { pgc[i] = gp[i] * c; } pgc += nf; } } else { for (n = 0; n < nctr; n++) { c = coeff[nprim*n]; if (c != 0) { for (i = 0; i < nf; i++) { pgc[i] += gp[i] * c; } } pgc += nf; } } } static const int _GBUFSIZE[] = { 1, 4, 10, 10, 20, 48, 20, 35, 75, 150, 35, 56, 108, 216, 384, 56, 84, 147, 294, 510, 850, 84, 120, 192, 384, 654, 1090, 1640, 120, 165, 243, 486, 816, 1360, 2040, 3030 }; #define bufsize(i,j) _GBUFSIZE[((i>=j) ? 
(i*(i+1)/2+j) : (j*(j+1)/2+i))] int GTO_aopair_early_contract(double complex *out, CINTEnvVars *envs, void (*eval_gz)(), double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv) { const int *shls = envs->shls; const int *bas = envs->bas; const double *env = envs->env; const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_l = envs->i_l; const int j_l = envs->j_l; const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); const int nf = envs->nf; const double *ri = envs->ri; const double *rj = envs->rj; const double *ai = env + bas(PTR_EXP, i_sh); const double *aj = env + bas(PTR_EXP, j_sh); const double *ci = env + bas(PTR_COEFF, i_sh); const double *cj = env + bas(PTR_COEFF, j_sh); double fac1i, fac1j; double aij, dij, eij; int ip, jp, n; int empty[2] = {1, 1}; int *jempty = empty + 0; int *iempty = empty + 1; const size_t NGv = nGv; const size_t len1 = bufsize(i_l,j_l) * NGv; const size_t leni = len1 * i_ctr; const size_t lenj = len1 * i_ctr * j_ctr; double complex *gctrj = malloc(sizeof(double complex)*(lenj+leni+len1)); double complex *g = gctrj + lenj; double complex *gctri, *g1d; if (j_ctr == 1) { gctri = gctrj; iempty = jempty; } else { gctri = g; g += leni; } g1d = g; void (*aopair_rr)(); int offset_g1d; if (i_l >= j_l) { aopair_rr = aopair_rr_igtj_early; offset_g1d = _CUM_LEN_CART[i_l] - _LEN_CART[i_l]; } else { aopair_rr = aopair_rr_iltj_early; offset_g1d = _CUM_LEN_CART[j_l] - _LEN_CART[j_l]; } int len_g1d = _CUM_LEN_CART[i_l+j_l] - offset_g1d; double rrij = CINTsquare_dist(ri, rj); double fac1 = SQRTPI * M_PI * CINTcommon_fac_sp(i_l) * CINTcommon_fac_sp(j_l); *jempty = 1; for (jp = 0; jp < j_prim; jp++) { if (j_ctr == 1) { fac1j = fac1 * cj[jp]; } else { fac1j = fac1; *iempty = 1; } for (ip = 0; ip < i_prim; ip++) { aij = ai[ip] + aj[jp]; eij = (ai[ip] * aj[jp] / aij) * rrij; if (eij > EXPCUTOFF) { continue; } dij = exp(-eij) / (aij * 
sqrt(aij)); fac1i = fac1j * dij; (*aopair_rr)(g, ai[ip], aj[jp], envs, eval_gz, fac*fac1i, Gv, b, gxyz, gs, nGv); prim_to_ctr(gctri, len_g1d*NGv, g1d+offset_g1d*NGv, i_prim, i_ctr, ci+ip, *iempty); *iempty = 0; } if (!*iempty) { if (j_ctr > 1) { prim_to_ctr(gctrj, i_ctr*len_g1d*NGv, gctri, j_prim,j_ctr, cj+jp, *jempty); } *jempty = 0; } } if (!*jempty) { g1d = gctrj; for (n = 0; n < i_ctr*j_ctr; n++) { if (i_l >= j_l) { vrr2d(out+n*nf*NGv, g1d, gctrj+lenj, envs, nGv); } else { hrr2d(out+n*nf*NGv, g1d, gctrj+lenj, envs, nGv); } g1d += len_g1d * NGv; } } free(gctrj); return !*jempty; } int GTO_aopair_lazy_contract(double complex *gctr, CINTEnvVars *envs, void (*eval_gz)(), double complex fac, double *Gv, double *b, int *gxyz, int *gs,int nGv) { const int *shls = envs->shls; const int *bas = envs->bas; const double *env = envs->env; const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_l = envs->i_l; const int j_l = envs->j_l; const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); const int nf = envs->nf; const double *ri = envs->ri; const double *rj = envs->rj; const double *ai = env + bas(PTR_EXP, i_sh); const double *aj = env + bas(PTR_EXP, j_sh); const double *ci = env + bas(PTR_COEFF, i_sh); const double *cj = env + bas(PTR_COEFF, j_sh); double fac1i, fac1j; double aij, dij, eij; int ip, jp; int empty[3] = {1, 1, 1}; int *jempty = empty + 0; int *iempty = empty + 1; int *gempty = empty + 2; const size_t NGv = nGv; const int len1 = envs->g_size * 3 * NGv; const int leng = nf * NGv; const int leni = nf * i_ctr * NGv; double complex *g = malloc(sizeof(double complex) * (len1+leng+leni)); double complex *g1 = g + len1; double complex *gout, *gctri; if (j_ctr == 1) { gctri = gctr; iempty = jempty; } else { gctri = g1; g1 += leni; } if (i_ctr == 1) { gout = gctri; gempty = iempty; } else { gout = g1; } void (*aopair_rr)(); if (i_l >= j_l) { aopair_rr = 
aopair_rr_igtj_lazy; } else { aopair_rr = aopair_rr_iltj_lazy; } int *idx = malloc(sizeof(int) * nf * 3); CINTg1e_index_xyz(idx, envs); double rrij = CINTsquare_dist(ri, rj); double fac1 = SQRTPI * M_PI * CINTcommon_fac_sp(i_l) * CINTcommon_fac_sp(j_l); *jempty = 1; for (jp = 0; jp < j_prim; jp++) { if (j_ctr == 1) { fac1j = fac1 * cj[jp]; } else { fac1j = fac1; *iempty = 1; } for (ip = 0; ip < i_prim; ip++) { aij = ai[ip] + aj[jp]; eij = (ai[ip] * aj[jp] / aij) * rrij; if (eij > EXPCUTOFF) { continue; } dij = exp(-eij) / (aij * sqrt(aij)); if (i_ctr == 1) { fac1i = fac1j * dij * ci[ip]; } else { fac1i = fac1j * dij; } (*aopair_rr)(g, ai[ip], aj[jp], envs, eval_gz, fac*fac1i, Gv, b, gxyz, gs, nGv); inner_prod(g, gout, idx, envs, nGv, *gempty); if (i_ctr > 1) { prim_to_ctr(gctri, nf*NGv, gout, i_prim, i_ctr, ci+ip, *iempty); } *iempty = 0; } if (!*iempty) { if (j_ctr > 1) { prim_to_ctr(gctr, i_ctr*nf*NGv, gctri, j_prim, j_ctr, cj+jp, *jempty); } *jempty = 0; } } free(g); free(idx); return !*jempty; } void GTO_Gv_general(double complex *out, double aij, double *rij, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv) { double *kx = Gv; double *ky = kx + nGv; double *kz = ky + nGv; const double cutoff = EXPCUTOFF * aij * 4; int n; double kR, kk; for (n = 0; n < nGv; n++) { kk = kx[n] * kx[n] + ky[n] * ky[n] + kz[n] * kz[n]; if (kk < cutoff) { kR = kx[n] * rij[0] + ky[n] * rij[1] + kz[n] * rij[2]; out[n] = exp(-.25*kk/aij) * fac * (cos(kR) - sin(kR)*_Complex_I); } else { out[n] = 0; } } } /* * Gv = dot(b.T,gxyz) + kpt * kk = dot(Gv, Gv) * kr = dot(rij, Gv) = dot(rij,b.T, gxyz) + dot(rij,kpt) = dot(br, gxyz) + dot(rij,kpt) * out = fac * exp(-.25 * kk / aij) * (cos(kr) - sin(kr) * _Complex_I); * * b: the first 9 elements are 2\pi*inv(a^T), then 3 elements for k_{ij}, * followed by 3*nGv floats for Gbase */ void GTO_Gv_orth(double complex *out, double aij, double *rij, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv) { const int 
nx = gs[0]; const int ny = gs[1]; const int nz = gs[2]; double br[3]; // dot(rij, b) br[0] = rij[0] * b[0]; br[1] = rij[1] * b[4]; br[2] = rij[2] * b[8]; double *kpt = b + 9; double kr[3]; kr[0] = rij[0] * kpt[0]; kr[1] = rij[1] * kpt[1]; kr[2] = rij[2] * kpt[2]; double *Gxbase = b + 12; double *Gybase = Gxbase + nx; double *Gzbase = Gybase + ny; double *kx = Gv; double *ky = kx + nGv; double *kz = ky + nGv; double complex zbuf[nx+ny+nz]; double complex *csx = zbuf; double complex *csy = csx + nx; double complex *csz = csy + ny; double kkpool[nx+ny+nz]; double *kkx = kkpool; double *kky = kkx + nx; double *kkz = kky + ny; int *gx = gxyz; int *gy = gx + nGv; int *gz = gy + nGv; const double cutoff = EXPCUTOFF * aij * 4; int n, ix, iy, iz; double Gr, kk; for (n = 0; n < nx+ny+nz; n++) { kkpool[n] = -1; } for (n = 0; n < nGv; n++) { ix = gx[n]; iy = gy[n]; iz = gz[n]; if (kkx[ix] < 0) { Gr = Gxbase[ix] * br[0] + kr[0]; kkx[ix] = .25 * kx[n]*kx[n] / aij; csx[ix] = exp(-kkx[ix]) * (cos(Gr)-sin(Gr)*_Complex_I); } if (kky[iy] < 0) { Gr = Gybase[iy] * br[1] + kr[1]; kky[iy] = .25 * ky[n]*ky[n] / aij; csy[iy] = exp(-kky[iy]) * (cos(Gr)-sin(Gr)*_Complex_I); } if (kkz[iz] < 0) { Gr = Gzbase[iz] * br[2] + kr[2]; kkz[iz] = .25 * kz[n]*kz[n] / aij; csz[iz] = fac * exp(-kkz[iz]) * (cos(Gr)-sin(Gr)*_Complex_I); } if (kkx[ix] + kky[iy] + kkz[iz] < cutoff) { out[n] = csx[ix] * csy[iy] * csz[iz]; } else { out[n] = 0; } } } void GTO_Gv_nonorth(double complex *out, double aij, double *rij, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv) { const int nx = gs[0]; const int ny = gs[1]; const int nz = gs[2]; double br[3]; // dot(rij, b) br[0] = rij[0] * b[0]; br[0] += rij[1] * b[1]; br[0] += rij[2] * b[2]; br[1] = rij[0] * b[3]; br[1] += rij[1] * b[4]; br[1] += rij[2] * b[5]; br[2] = rij[0] * b[6]; br[2] += rij[1] * b[7]; br[2] += rij[2] * b[8]; double *kpt = b + 9; double kr[3]; kr[0] = rij[0] * kpt[0]; kr[1] = rij[1] * kpt[1]; kr[2] = rij[2] * kpt[2]; double 
*Gxbase = b + 12; double *Gybase = Gxbase + nx; double *Gzbase = Gybase + ny; double *kx = Gv; double *ky = kx + nGv; double *kz = ky + nGv; double complex zbuf[nx+ny+nz]; double complex *csx = zbuf; double complex *csy = csx + nx; double complex *csz = csy + ny; char empty[nx+ny+nz]; char *xempty = empty; char *yempty = xempty + nx; char *zempty = yempty + ny; memset(empty, 1, sizeof(char)*(nx+ny+nz)); int *gx = gxyz; int *gy = gx + nGv; int *gz = gy + nGv; const double cutoff = EXPCUTOFF * aij * 4; int n, ix, iy, iz; double Gr, kk; for (n = 0; n < nGv; n++) { ix = gx[n]; iy = gy[n]; iz = gz[n]; kk = kx[n] * kx[n] + ky[n] * ky[n] + kz[n] * kz[n]; if (kk < cutoff) { ix = gx[n]; iy = gy[n]; iz = gz[n]; if (xempty[ix]) { Gr = Gxbase[ix] * br[0] + kr[0]; csx[ix] = cos(Gr)-sin(Gr)*_Complex_I; xempty[ix] = 0; } if (yempty[iy]) { Gr = Gybase[iy] * br[1] + kr[1]; csy[iy] = cos(Gr)-sin(Gr)*_Complex_I; yempty[iy] = 0; } if (zempty[iz]) { Gr = Gzbase[iz] * br[2] + kr[2]; csz[iz] = fac * (cos(Gr)-sin(Gr)*_Complex_I); zempty[iz] = 0; } out[n] = exp(-.25*kk/aij) * csx[ix]*csy[iy]*csz[iz]; } else { out[n] = 0; } } } static void zcopy_ij(double complex *out, const double complex *gctr, const int mi, const int mj, const int ni, const int nGv) { const size_t NGv = nGv; int i, j, k; for (j = 0; j < mj; j++) { for (i = 0; i < mi; i++) { for (k = 0; k < NGv; k++) { out[i*NGv+k] = gctr[i*NGv+k]; } } out += ni * NGv; gctr += mi * NGv; } } static void aopair_c2s_cart(double complex *out, double complex *gctr, CINTEnvVars *envs, int *dims, int nGv) { const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int nfi = envs->nfi; const int nfj = envs->nfj; const int ni = nfi*i_ctr; const int nj = nfj*j_ctr; const int nf = envs->nf; const size_t NGv = nGv; int ic, jc; double complex *pout; for (jc = 0; jc < nj; jc += nfj) { for (ic = 0; ic < ni; ic += nfi) { pout = out + (dims[0] * jc + ic) * NGv; zcopy_ij(pout, gctr, nfi, nfj, dims[0], nGv); gctr += nf * NGv; } } } #define 
C2S(sph, nket, cart, l) \ (double complex *)CINTc2s_ket_sph((double *)(sph), nket, (double *)(cart), l) #define OF_CMPLX 2 static void aopair_c2s_sph(double complex *out, double complex *gctr, CINTEnvVars *envs, int *dims, int nGv) { const int i_l = envs->i_l; const int j_l = envs->j_l; const int i_ctr = envs->x_ctr[0]; const int j_ctr = envs->x_ctr[1]; const int di = i_l * 2 + 1; const int dj = j_l * 2 + 1; const int ni = di*i_ctr; const int nj = dj*j_ctr; const int nfi = envs->nfi; const int nf = envs->nf; int ic, jc, k; const size_t NGv = nGv; const int buflen = nfi*dj; double complex *buf1 = malloc(sizeof(double complex) * buflen*2 * NGv); double complex *buf2 = buf1 + buflen * NGv; double complex *pout, *pij, *buf; for (jc = 0; jc < nj; jc += dj) { for (ic = 0; ic < ni; ic += di) { buf = C2S(buf1, nfi*nGv*OF_CMPLX, gctr, j_l); pij = C2S(buf2, nGv*OF_CMPLX, buf, i_l); for (k = nGv; k < dj*nGv; k+=nGv) { pout = C2S(buf2+k*di, nGv*OF_CMPLX, buf+k*nfi, i_l); } pout = out + (dims[0] * jc + ic) * nGv; zcopy_ij(pout, pij, di, dj, dims[0], nGv); gctr += nf * NGv; } } free(buf1); } /************************************************* * * eval_aopair is one of GTO_aopair_early_contract, * GTO_aopair_lazy_contract * * eval_gz is one of GTO_Gv_general, GTO_Gv_uniform_orth, * GTO_Gv_uniform_nonorth, GTO_Gv_nonuniform_orth * *************************************************/ int GTO_ft_ovlp_cart(double complex *out, int *shls, int *dims, int (*eval_aopair)(), void (*eval_gz)(), double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { CINTEnvVars envs; init1e_envs(&envs, shls, atm, natm, bas, nbas, env); const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_ctr = bas(NCTR_OF, i_sh); const int j_ctr = bas(NCTR_OF, j_sh); const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); size_t ntot = envs.nf * i_ctr * j_ctr * (size_t)nGv; double complex *gctr = malloc(sizeof(double 
complex) * ntot); if (eval_gz == NULL) { eval_gz = GTO_Gv_general; } if (eval_gz != GTO_Gv_general) { assert(gxyz != NULL); } if (eval_aopair == NULL) { if (i_prim*j_prim < i_ctr*j_ctr*3) { eval_aopair = GTO_aopair_lazy_contract; } else { eval_aopair = GTO_aopair_early_contract; } } int has_value = (*eval_aopair)(gctr, &envs, eval_gz, fac, Gv, b, gxyz, gs, nGv); if (has_value) { aopair_c2s_cart(out, gctr, &envs, dims, nGv); } free(gctr); return has_value; } int GTO_ft_ovlp_sph(double complex *out, int *shls, int *dims, int (*eval_aopair)(), void (*eval_gz)(), double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { CINTEnvVars envs; init1e_envs(&envs, shls, atm, natm, bas, nbas, env); const int i_sh = shls[0]; const int j_sh = shls[1]; const int i_ctr = bas(NCTR_OF, i_sh); const int j_ctr = bas(NCTR_OF, j_sh); const int i_prim = bas(NPRIM_OF, i_sh); const int j_prim = bas(NPRIM_OF, j_sh); size_t ntot = envs.nf * i_ctr * j_ctr * (size_t)nGv; double complex *gctr = malloc(sizeof(double complex) * ntot); if (eval_gz == NULL) { eval_gz = GTO_Gv_general; } if (eval_gz != GTO_Gv_general) { assert(gxyz != NULL); } if (eval_aopair == NULL) { if (i_prim*j_prim < i_ctr*j_ctr*3) { eval_aopair = GTO_aopair_lazy_contract; } else { eval_aopair = GTO_aopair_early_contract; } } int has_value = (*eval_aopair)(gctr, &envs, eval_gz, fac, Gv, b, gxyz, gs, nGv); if (has_value) { aopair_c2s_sph(out, gctr, &envs, dims, nGv); } free(gctr); return has_value; } /************************************************* * *************************************************/ static void zcopy_s2_igtj(double complex *out, double complex *in, int nGv, int ip, int di, int dj) { const size_t ip1 = ip + 1; const size_t NGv = nGv; int i, j, n; double complex *pin; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pin = in + NGv * (j*di+i); for (n = 0; n < NGv; n++) { out[j*NGv+n] = pin[n]; } } out += (ip1 + i) * NGv; } } static void 
zcopy_s2_ieqj(double complex *out, double complex *in, int nGv, int ip, int di, int dj) { const size_t ip1 = ip + 1; const size_t NGv = nGv; int i, j, n; double complex *pin; for (i = 0; i < di; i++) { for (j = 0; j <= i; j++) { pin = in + NGv * (j*di+i); for (n = 0; n < NGv; n++) { out[j*NGv+n] = pin[n]; } } out += (ip1 + i) * NGv; } } void GTO_ft_fill_s1(int (*intor)(), void (*eval_gz)(), double complex *mat, int ish, int jsh, double *buf, int *shls_slice, int *ao_loc, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; ish += ish0; jsh += jsh0; const int nrow = ao_loc[ish1] - ao_loc[ish0]; const int ncol = ao_loc[jsh1] - ao_loc[jsh0]; const size_t off = ao_loc[ish] - ao_loc[ish0] + (ao_loc[jsh] - ao_loc[jsh0]) * nrow; int shls[2] = {ish, jsh}; int dims[2] = {nrow, ncol}; (*intor)(mat+off*nGv, shls, dims, NULL, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); } void GTO_ft_fill_s1hermi(int (*intor)(), void (*eval_gz)(), double complex *mat, int ish, int jsh, double complex *buf, int *shls_slice, int *ao_loc, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int jsh0 = shls_slice[2]; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; if (ip < jp) { return; } const int ish1 = shls_slice[1]; const int jsh1 = shls_slice[3]; const int nrow = ao_loc[ish1] - ao_loc[ish0]; const int ncol = ao_loc[jsh1] - ao_loc[jsh0]; const size_t off = ao_loc[ish] - ao_loc[ish0] + (ao_loc[jsh] - ao_loc[jsh0]) * nrow; const size_t NGv = nGv; int shls[2] = {ish, jsh}; int dims[2] = {nrow, ncol}; (*intor)(mat+off*NGv, shls, dims, NULL, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); if 
(ip != jp && ish0 == jsh0 && ish1 == jsh1) { const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; double complex *in = mat + off * NGv; double complex *out = mat + (ao_loc[jsh] - ao_loc[jsh0] + (ao_loc[ish] - ao_loc[ish0]) * nrow) * NGv; int i, j, n; double complex *pout, *pin; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pin = in + NGv * (j*nrow+i); pout = out + NGv * (i*nrow+j); for (n = 0; n < nGv; n++) { pout[n] = pin[n]; } } } } } void GTO_ft_fill_s2(int (*intor)(), void (*eval_gz)(), double complex *mat, int ish, int jsh, double complex *buf, int *shls_slice, int *ao_loc, double complex fac, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int jsh0 = shls_slice[2]; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; if (ip < jp) { return; } const int di = ao_loc[ish+1] - ao_loc[ish]; const int dj = ao_loc[jsh+1] - ao_loc[jsh]; const int i0 = ao_loc[ish0]; const size_t off = ip * (ip + 1) / 2 - i0 * (i0 + 1) / 2 + jp; int shls[2] = {ish, jsh}; int dims[2] = {di, dj}; (*intor)(buf, shls, dims, NULL, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); if (ip != jp) { zcopy_s2_igtj(mat+off*nGv, buf, nGv, ip, di, dj); } else { zcopy_s2_ieqj(mat+off*nGv, buf, nGv, ip, di, dj); } } /* * Fourier transform AO pairs and add to mat (inplace) */ void GTO_ft_ovlp_mat(int (*intor)(), void (*eval_gz)(), void (*fill)(), double complex *mat, int *shls_slice, int *ao_loc, double phase, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int nish = ish1 - ish0; const int njsh = jsh1 - jsh0; const double complex fac = cos(phase) + sin(phase)*_Complex_I; #pragma omp parallel default(none) \ 
shared(intor, eval_gz, fill, mat, shls_slice, ao_loc, \ Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env) { int i, j, ij; double complex *buf = malloc(sizeof(double complex) * NCTRMAX*NCTRMAX*(size_t)nGv); #pragma omp for schedule(dynamic) for (ij = 0; ij < nish*njsh; ij++) { i = ij / njsh; j = ij % njsh; (*fill)(intor, eval_gz, mat, i, j, buf, shls_slice, ao_loc, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); } free(buf); } } /* * Given npair of shls in shls_lst, FT their AO pair value and add to * out (inplace) */ void GTO_ft_ovlp_shls(int (*intor)(), void (*eval_gz)(), double complex *out, int npair, int *shls_lst, int *ao_loc, double phase, double *Gv, double *b, int *gxyz, int *gs, int nGv, int *atm, int natm, int *bas, int nbas, double *env) { int n, di, dj, ish, jsh; int *ijloc = malloc(sizeof(int) * npair); ijloc[0] = 0; for (n = 1; n < npair; n++) { ish = shls_lst[n*2-2]; jsh = shls_lst[n*2-1]; di = ao_loc[ish+1] - ao_loc[ish]; dj = ao_loc[jsh+1] - ao_loc[jsh]; ijloc[n] = ijloc[n-1] + di*dj; } const double complex fac = cos(phase) + sin(phase)*_Complex_I; const size_t NGv = nGv; #pragma omp parallel default(none) \ shared(intor, out, Gv, b, gxyz, gs, nGv, npair, shls_lst, ao_loc, \ eval_gz, atm, natm, bas, nbas, env, ijloc) \ private(n) { int ish, jsh; int dims[2]; #pragma omp for schedule(dynamic) for (n = 0; n < npair; n++) { ish = shls_lst[n*2 ]; jsh = shls_lst[n*2+1]; dims[0] = ao_loc[ish+1] - ao_loc[ish]; dims[1] = ao_loc[jsh+1] - ao_loc[jsh]; (*intor)(out+ijloc[n]*NGv, shls_lst+n*2, dims, NULL, eval_gz, fac, Gv, b, gxyz, gs, nGv, atm, natm, bas, nbas, env); } } free(ijloc); }
core_zhessq.c
/**
 *
 * @file
 *
 * PLASMA is a software package provided by:
 * University of Tennessee, US,
 * University of Manchester, UK.
 *
 * @precisions normal z -> c
 *
 **/

#include "core_blas.h"
#include "plasma_types.h"
#include "core_lapack.h"

#include <math.h>

/******************************************************************************/
// Accumulate the sum of squares of the entries of an n-by-n Hermitian tile A
// into the scaled representation (scale, sumsq), i.e. on exit
//     scale_out^2 * sumsq_out = scale_in^2 * sumsq_in + sum_{i,j} |A(i,j)|^2
// Only the triangle selected by uplo is read; the opposite triangle is
// accounted for by symmetry (the off-diagonal contribution is doubled) and
// the diagonal — real by the Hermitian assumption — is added separately.
//
// @param uplo  PlasmaUpper or PlasmaLower: which triangle of A is stored.
// @param n     order of the tile.
// @param A     pointer to the tile, column-major.
// @param lda   leading dimension of A.
// @param scale in/out scaling factor of the running sum (as in LAPACK xLASSQ).
// @param sumsq in/out scaled sum of squares.
void core_zhessq(plasma_enum_t uplo,
                 int n,
                 const plasma_complex64_t *A, int lda,
                 double *scale, double *sumsq)
{
    int ione = 1;
    if (uplo == PlasmaUpper) {
        // Strictly upper triangle: column j holds j entries above the diagonal.
        for (int j = 1; j < n; j++)
            // TODO: Inline this operation.
            LAPACK_zlassq(&j, &A[lda*j], &ione, scale, sumsq);
    }
    else { // PlasmaLower
        // Strictly lower triangle: n-j-1 entries below the diagonal of column j.
        for (int j = 0; j < n-1; j++) {
            int len = n-j-1;
            // TODO: Inline this operation.
            LAPACK_zlassq(&len, &A[lda*j+j+1], &ione, scale, sumsq);
        }
    }
    // Each off-diagonal entry appears twice in the full Hermitian matrix but
    // was summed once above; doubling sumsq doubles scale^2*sumsq as required.
    *sumsq *= 2.0;
    for (int i = 0; i < n; i++) {
        // diagonal is real, ignore imaginary part
        if (creal(A[lda*i+i]) != 0.0) { // != propagates nan
            double absa = fabs(creal(A[lda*i+i]));
            // Standard xLASSQ update preserving scale^2*sumsq without
            // overflow/underflow: rescale when a larger magnitude appears.
            if (*scale < absa) {
                *sumsq = 1.0 + *sumsq*((*scale/absa)*(*scale/absa));
                *scale = absa;
            }
            else {
                *sumsq = *sumsq + ((absa/(*scale))*(absa/(*scale)));
            }
        }
    }
}

/******************************************************************************/
// OpenMP-task wrapper around core_zhessq: resets (scale, sumsq) to the
// xLASSQ identity (0, 1) and computes the tile's sum of squares, provided the
// sequence has not already failed.
//
// NOTE(review): the depend clauses declare scale[0:n] and sumsq[0:n] although
// both point at single doubles — presumably they address arrays of per-tile
// results in the caller; confirm against the calling driver.
void core_omp_zhessq(plasma_enum_t uplo,
                     int n,
                     const plasma_complex64_t *A, int lda,
                     double *scale, double *sumsq,
                     plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(in:A[0:lda*n]) \
                     depend(out:scale[0:n]) \
                     depend(out:sumsq[0:n])
    {
        // Skip the work (leaving outputs untouched) if an earlier task in
        // this sequence already reported an error.
        if (sequence->status == PlasmaSuccess) {
            *scale = 0.0;
            *sumsq = 1.0;
            core_zhessq(uplo, n, A, lda, scale, sumsq);
        }
    }
}
activations.c
#include "activations.h"

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <float.h>

// Return the canonical config-file name for an activation.
// Activations without a string form here (SWISH, MISH, NORM_CHAN*, ...)
// fall through to the default and report "relu".
char *get_activation_string(ACTIVATION a)
{
    switch(a){
        case LOGISTIC:
            return "logistic";
        case LOGGY:
            return "loggy";
        case RELU:
            return "relu";
        case ELU:
            return "elu";
        case SELU:
            return "selu";
        case GELU:
            return "gelu";
        case RELIE:
            return "relie";
        case RAMP:
            return "ramp";
        case LINEAR:
            return "linear";
        case TANH:
            return "tanh";
        case PLSE:
            return "plse";
        case LEAKY:
            return "leaky";
        case STAIR:
            return "stair";
        case HARDTAN:
            return "hardtan";
        case LHTAN:
            return "lhtan";
        default:
            break;
    }
    return "relu";
}

// Parse an activation name (from a network cfg file) into the ACTIVATION
// enum.  Unknown names print a warning on stderr and default to RELU.
ACTIVATION get_activation(char *s)
{
    if (strcmp(s, "logistic")==0) return LOGISTIC;
    if (strcmp(s, "swish") == 0) return SWISH;
    if (strcmp(s, "mish") == 0) return MISH;
    if (strcmp(s, "hard_mish") == 0) return HARD_MISH;
    if (strcmp(s, "normalize_channels") == 0) return NORM_CHAN;
    if (strcmp(s, "normalize_channels_softmax") == 0) return NORM_CHAN_SOFTMAX;
    if (strcmp(s, "normalize_channels_softmax_maxval") == 0) return NORM_CHAN_SOFTMAX_MAXVAL;
    if (strcmp(s, "loggy")==0) return LOGGY;
    if (strcmp(s, "relu")==0) return RELU;
    if (strcmp(s, "relu6") == 0) return RELU6;
    if (strcmp(s, "elu")==0) return ELU;
    if (strcmp(s, "selu") == 0) return SELU;
    if (strcmp(s, "gelu") == 0) return GELU;
    if (strcmp(s, "relie")==0) return RELIE;
    if (strcmp(s, "plse")==0) return PLSE;
    if (strcmp(s, "hardtan")==0) return HARDTAN;
    if (strcmp(s, "lhtan")==0) return LHTAN;
    if (strcmp(s, "linear")==0) return LINEAR;
    if (strcmp(s, "ramp")==0) return RAMP;
    if (strcmp(s, "revleaky") == 0) return REVLEAKY;
    if (strcmp(s, "leaky")==0) return LEAKY;
    if (strcmp(s, "tanh")==0) return TANH;
    if (strcmp(s, "stair")==0) return STAIR;
    fprintf(stderr, "Couldn't find activation function %s, going with ReLU\n", s);
    return RELU;
}

// Apply one scalar activation.  SWISH/MISH/HARD_MISH and the NORM_CHAN*
// activations have no scalar form (they are handled by the dedicated
// activate_array_* routines below); for those enum values control falls
// through the switch and 0 is returned.
float activate(float x, ACTIVATION a)
{
    switch(a){
        case LINEAR:
            return linear_activate(x);
        case LOGISTIC:
            return logistic_activate(x);
        case LOGGY:
            return loggy_activate(x);
        case RELU:
            return relu_activate(x);
        case ELU:
            return elu_activate(x);
        case SELU:
            return selu_activate(x);
        case GELU:
            return gelu_activate(x);
        case RELIE:
            return relie_activate(x);
        case RAMP:
            return ramp_activate(x);
        case REVLEAKY:
        case LEAKY:
            return leaky_activate(x);
        case TANH:
            return tanh_activate(x);
        case PLSE:
            return plse_activate(x);
        case STAIR:
            return stair_activate(x);
        case HARDTAN:
            return hardtan_activate(x);
        case LHTAN:
            return lhtan_activate(x);
    }
    return 0;
}

// Activate n values of x in place.  LINEAR is a no-op; the two most common
// activations (LEAKY, LOGISTIC) get dedicated OpenMP loops so the
// per-element switch in activate() is avoided on the hot path.
void activate_array(float *x, const int n, const ACTIVATION a)
{
    int i;
    if (a == LINEAR) {}
    else if (a == LEAKY) {
        #pragma omp parallel for
        for (i = 0; i < n; ++i) {
            x[i] = leaky_activate(x[i]);
        }
    }
    else if (a == LOGISTIC) {
        #pragma omp parallel for
        for (i = 0; i < n; ++i) {
            x[i] = logistic_activate(x[i]);
        }
    }
    else {
        for (i = 0; i < n; ++i) {
            x[i] = activate(x[i], a);
        }
    }
}

// Swish forward pass: output = x * sigmoid(x).
// The sigmoid values are cached in output_sigmoid for reuse by
// gradient_array_swish during the backward pass.
void activate_array_swish(float *x, const int n, float * output_sigmoid, float * output)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; ++i) {
        float x_val = x[i];
        float sigmoid = logistic_activate(x_val);
        output_sigmoid[i] = sigmoid;
        output[i] = x_val * sigmoid;
    }
}

// Mish forward pass: output = x * tanh(softplus(x)).
// The raw input is cached in activation_input for the backward pass.
// https://github.com/digantamisra98/Mish
void activate_array_mish(float *x, const int n, float * activation_input, float * output)
{
    const float MISH_THRESHOLD = 20; // softplus clamp, avoids exp overflow
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; ++i) {
        float x_val = x[i];
        activation_input[i] = x_val; // store value before activation
        output[i] = x_val * tanh_activate( softplus_activate(x_val, MISH_THRESHOLD) );
    }
}

// Piecewise-polynomial approximation of mish (Yashas Samaga's variant):
// identity for x > 0, x^2/2 + x on (-2, 0], and 0 below -2.
static float hard_mish_yashas(float x)
{
    if (x > 0)
        return x;
    if (x > -2)
        return x * x / 2 + x;
    return 0;
}

// Hard-mish forward pass; caches the raw input for the backward pass.
void activate_array_hard_mish(float *x, const int n, float * activation_input, float * output)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; ++i) {
        float x_val = x[i];
        activation_input[i] = x_val; // store value before activation
        output[i] = hard_mish_yashas(x_val);
    }
}

// Channel-wise L1-style normalization: at each (batch, spatial) position,
// every positive channel value is divided by the sum of the positive
// channel values there; non-positive values are set to 0.
// n is the total tensor size, so size = n / channels positions.
void activate_array_normalize_channels(float *x, const int n, int batch, int channels, int wh_step, float *output)
{
    int size = n / channels;

    int i;
    #pragma omp parallel for
    for (i = 0; i < size; ++i) {
        int wh_i = i % wh_step;
        int b = i / wh_step;

        const float eps = 0.0001; // keeps the divisor strictly positive
        if (i < size) { // always true given the loop bound; kept defensively
            float sum = eps;
            int k;
            for (k = 0; k < channels; ++k) {
                float val = x[wh_i + k * wh_step + b*wh_step*channels];
                if (val > 0) sum += val;
            }
            for (k = 0; k < channels; ++k) {
                float val = x[wh_i + k * wh_step + b*wh_step*channels];
                if (val > 0) val = val / sum;
                else val = 0;
                output[wh_i + k * wh_step + b*wh_step*channels] = val;
            }
        }
    }
}

// Channel-wise softmax at each (batch, spatial) position.  When
// use_max_val is set, the per-position channel maximum is subtracted
// before exponentiation for numerical stability; otherwise 0 is used.
void activate_array_normalize_channels_softmax(float *x, const int n, int batch, int channels, int wh_step, float *output, int use_max_val)
{
    int size = n / channels;

    int i;
    #pragma omp parallel for
    for (i = 0; i < size; ++i) {
        int wh_i = i % wh_step;
        int b = i / wh_step;

        const float eps = 0.0001; // keeps the divisor strictly positive
        if (i < size) { // always true given the loop bound; kept defensively
            float sum = eps;
            float max_val = -FLT_MAX;
            int k;
            if (use_max_val) {
                for (k = 0; k < channels; ++k) {
                    float val = x[wh_i + k * wh_step + b*wh_step*channels];
                    if (val > max_val || k == 0) max_val = val;
                }
            }
            else
                max_val = 0;

            for (k = 0; k < channels; ++k) {
                float val = x[wh_i + k * wh_step + b*wh_step*channels];
                sum += expf(val - max_val);
            }

            for (k = 0; k < channels; ++k) {
                float val = x[wh_i + k * wh_step + b*wh_step*channels];
                val = expf(val - max_val) / sum;
                output[wh_i + k * wh_step + b*wh_step*channels] = val;
            }
        }
    }
}

// Backward pass paired with activate_array_normalize_channels_softmax:
// each channel's delta is scaled by grad = sum_k(out_k * delta_k) at its
// position (x holds the forward softmax outputs).
// NOTE(review): this scales delta by the inner product rather than
// applying the full softmax Jacobian out_k*(delta_k - grad) -- presumably
// intentional; confirm against the paired forward/GPU implementation.
void gradient_array_normalize_channels_softmax(float *x, const int n, int batch, int channels, int wh_step, float *delta)
{
    int size = n / channels;

    int i;
    #pragma omp parallel for
    for (i = 0; i < size; ++i) {
        int wh_i = i % wh_step;
        int b = i / wh_step;

        if (i < size) { // always true given the loop bound; kept defensively
            float grad = 0;
            int k;
            for (k = 0; k < channels; ++k) {
                const int index = wh_i + k * wh_step + b*wh_step*channels;
                float out = x[index];
                float d = delta[index];
                grad += out*d;
            }
            for (k = 0; k < channels; ++k) {
                const int index = wh_i + k * wh_step + b*wh_step*channels;
                float d = delta[index];
                d = d * grad;
                delta[index] = d;
            }
        }
    }
}

// Backward pass paired with activate_array_normalize_channels: deltas are
// scaled by grad = sum_k(out_k * delta_k), but only for channels whose
// value in x is positive; other channels keep their incoming delta
// unchanged.
void gradient_array_normalize_channels(float *x, const int n, int batch, int channels, int wh_step, float *delta)
{
    int size = n / channels;

    int i;
    #pragma omp parallel for
    for (i = 0; i < size; ++i) {
        int wh_i = i % wh_step;
        int b = i / wh_step;

        if (i < size) { // always true given the loop bound; kept defensively
            float grad = 0;
            int k;
            for (k = 0; k < channels; ++k) {
                const int index = wh_i + k * wh_step + b*wh_step*channels;
                float out = x[index];
                float d = delta[index];
                grad += out*d;
            }
            for (k = 0; k < channels; ++k) {
                const int index = wh_i + k * wh_step + b*wh_step*channels;
                if (x[index] > 0) {
                    float d = delta[index];
                    d = d * grad;
                    delta[index] = d;
                }
            }
        }
    }
}

// Scalar activation derivative dispatch.  The NORM_CHAN* activations must
// use their dedicated gradient_array_* routines, so hitting them here
// aborts the process.
// NOTE(review): the error path calls exit(0); a nonzero exit status would
// be more conventional for a fatal error.
float gradient(float x, ACTIVATION a)
{
    switch(a){
        case LINEAR:
            return linear_gradient(x);
        case LOGISTIC:
            return logistic_gradient(x);
        case LOGGY:
            return loggy_gradient(x);
        case RELU:
            return relu_gradient(x);
        case RELU6:
            return relu6_gradient(x);
        case NORM_CHAN:
            //return relu_gradient(x);
        case NORM_CHAN_SOFTMAX_MAXVAL:
            //...
        case NORM_CHAN_SOFTMAX:
            printf(" Error: should be used custom NORM_CHAN or NORM_CHAN_SOFTMAX-function for gradient \n");
            exit(0);
            return 0;
        case ELU:
            return elu_gradient(x);
        case SELU:
            return selu_gradient(x);
        case GELU:
            return gelu_gradient(x);
        case RELIE:
            return relie_gradient(x);
        case RAMP:
            return ramp_gradient(x);
        case REVLEAKY:
        case LEAKY:
            return leaky_gradient(x);
        case TANH:
            return tanh_gradient(x);
        case PLSE:
            return plse_gradient(x);
        case STAIR:
            return stair_gradient(x);
        case HARDTAN:
            return hardtan_gradient(x);
        case LHTAN:
            return lhtan_gradient(x);
    }
    return 0;
}

// Multiply each incoming delta by the activation derivative evaluated at
// the corresponding value of x.
void gradient_array(const float *x, const int n, const ACTIVATION a, float *delta)
{
    int i;
    #pragma omp parallel for
    for(i = 0; i < n; ++i){
        delta[i] *= gradient(x[i], a);
    }
}

// Swish backward pass: d/dx = swish + sigmoid * (1 - swish), where
// x[i] is assumed to hold the cached forward swish *output* and
// sigmoid[i] the sigmoid cached by activate_array_swish -- TODO confirm
// against the caller.
// https://github.com/BVLC/caffe/blob/04ab089db018a292ae48d51732dd6c66766b36b6/src/caffe/layers/swish_layer.cpp#L54-L56
void gradient_array_swish(const float *x, const int n, const float * sigmoid, float * delta)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; ++i) {
        float swish = x[i];
        delta[i] *= swish + sigmoid[i]*(1 - swish);
    }
}

// Mish backward pass, recomputed from the cached raw input.
// https://github.com/digantamisra98/Mish
void gradient_array_mish(const int n, const float * activation_input, float * delta)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; ++i) {
        const float MISH_THRESHOLD = 20.0f;

        // implementation from TensorFlow: https://github.com/tensorflow/addons/commit/093cdfa85d334cbe19a37624c33198f3140109ed
        // implementation from Pytorch: https://github.com/thomasbrandon/mish-cuda/blob/master/csrc/mish.h#L26-L31
        float inp = activation_input[i];
        const float sp = softplus_activate(inp, MISH_THRESHOLD);
        const float grad_sp = 1 - exp(-sp);
        const float tsp = tanh(sp);
        const float grad_tsp = (1 - tsp*tsp) * grad_sp;
        const float grad = inp * grad_tsp + tsp;
        delta[i] *= grad;

        //float x = activation_input[i];
        //float d = 2 * expf(x) + expf(2 * x) + 2;
        //float w = 4 * (x + 1) + 4 * expf(2 * x) + expf(3 * x) + expf(x)*(4 * x + 6);
        //float derivative = expf(x) * w / (d * d);
        //delta[i] *= derivative;
    }
}

// Derivative of hard_mish_yashas: 1 for x > 0, x + 1 on (-2, 0], else 0.
static float hard_mish_yashas_grad(float x)
{
    if (x > 0)
        return 1;
    if (x > -2)
        return x + 1;
    return 0;
}

// Hard-mish backward pass using the cached raw input.
void gradient_array_hard_mish(const int n, const float * activation_input, float * delta)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; ++i) {
        float inp = activation_input[i];
        delta[i] *= hard_mish_yashas_grad(inp);
    }
}
threshold.c
/* Copyright 2014. The Regents of the University of California.
 * Copyright 2015-2017. Martin Uecker.
 * All rights reserved. Use of this source code is governed by
 * a BSD-style license which can be found in the LICENSE file.
 *
 * Authors:
 * 2013-2017 Martin Uecker <martin.uecker@med.uni-goettingen.de>
 * 2015-2016 Jon Tamir <jtamir@eecs.berkeley.edu>
 * 2015 Frank Ong <frankong@berkeley.edu>
 */

#include <stdbool.h>
#include <complex.h>

#include "num/flpmath.h"
#include "num/multind.h"
#include "num/init.h"
#include "num/ops_p.h"

#include "iter/prox.h"
#include "iter/thresh.h"

#include "misc/mmio.h"
#include "misc/misc.h"
#include "misc/debug.h"
#include "misc/opts.h"

#include "lowrank/lrthresh.h"

#include "linops/waveop.h"

#include "dfwavelet/prox_dfwavelet.h"

// FIXME: lowrank interface should not be coupled to mri.h -- it should take D as an input

#ifndef DIMS
#define DIMS 16
#endif


// Joint soft-thresholding in the wavelet domain: soft-thresholds the
// wavelet coefficients of 'in' with parameter lambda (jointly over the
// dimensions selected by 'flags') and writes the result to 'out'.
// FIXME: consider moving this to a more accessible location?
static void wthresh(unsigned int D, const long dims[D], float lambda, unsigned int flags, complex float* out, const complex float* in)
{
	long minsize[D];
	md_singleton_dims(D, minsize);

	// coarsest wavelet scale is 16 along each of the first three dims
	// ("course_scale" is a historic typo for "coarse_scale")
	long course_scale[3] = MD_INIT_ARRAY(3, 16);
	md_copy_dims(3, minsize, course_scale);

	unsigned int wflags = 7; // FIXME

	// do not transform along dimensions smaller than the coarsest scale
	for (unsigned int i = 0; i < 3; i++)
		if (dims[i] < minsize[i])
			wflags = MD_CLEAR(wflags, i);

	long strs[D];
	md_calc_strides(D, strs, dims, CFL_SIZE);

	const struct linop_s* w = linop_wavelet_create(D, wflags, dims, strs, minsize, false);
	// NOTE(review): 'w' is presumably owned by 'p' after
	// prox_unithresh_create and released by operator_p_free --
	// confirm, otherwise the linop leaks.
	const struct operator_p_s* p = prox_unithresh_create(D, w, lambda, flags);
	operator_p_apply(p, 1., D, dims, out, D, dims, in);
	operator_p_free(p);
}

// Locally-low-rank soft-thresholding with block size llrblk; block
// dimensions are derived over the dimensions NOT selected by 'flags'.
static void lrthresh(unsigned int D, const long dims[D], int llrblk, float lambda, unsigned int flags, complex float* out, const complex float* in)
{
	long blkdims[MAX_LEV][D];
	int levels = llr_blkdims(blkdims, ~flags, dims, llrblk);
	UNUSED(levels);

	const struct operator_p_s* p = lrthresh_create(dims, false, ~flags, (const long (*)[])blkdims, lambda, false, false, false);
	operator_p_apply(p, 1., D, dims, out, D, dims, in);
	operator_p_free(p);
}

// Divergence-free wavelet soft-thresholding; requires exactly 3 flow
// components along TE_DIM (enforced by the assert below).
static void dfthresh(unsigned int D, const long dims[D], float lambda, complex float* out, const complex float* in)
{
	long minsize[3];
	md_singleton_dims(3, minsize);

	long coarse_scale[3] = MD_INIT_ARRAY(3, 16);
	md_min_dims(3, ~0u, minsize, dims, coarse_scale);

	// unit voxel resolution in all three spatial dimensions
	complex float res[3];
	res[0] = 1.;
	res[1] = 1.;
	res[2] = 1.;

	assert(3 == dims[TE_DIM]);

	const struct operator_p_s* p = prox_dfwavelet_create(dims, minsize, res, TE_DIM, lambda, false);
	operator_p_apply(p, 1., D, dims, out, D, dims, in);
	operator_p_free(p);
}

// Hard thresholding applied component-wise: the complex array is viewed
// as 2*N reals, and every component not strictly greater than lambda is
// zeroed.
// NOTE(review): the comparison is on the signed component value, not on
// |value| (or the complex magnitude) -- negative components are always
// zeroed.  Confirm this is the intended "hard thresholding" semantics.
static void hard_thresh(unsigned int D, const long dims[D], float lambda, complex float* out, const complex float* in)
{
	long size = md_calc_size(DIMS, dims) * 2;
	const float* inf = (const float*)in;
	float* outf = (float*)out;

	#pragma omp parallel for
	for (long i = 0; i < size; i++)
		outf[i] = inf[i] > lambda ? inf[i] : 0.;
}


static const char usage_str[] = "lambda <input> <output>";
static const char help_str[] = "Perform (soft) thresholding with parameter lambda.";


// Command-line entry point: selects the thresholding variant from the
// options (default: plain joint soft-thresholding via md_zsoftthresh),
// reads <input>, writes <output>.
int main_threshold(int argc, char* argv[])
{
	unsigned int flags = 0;

	enum th_type { NONE, WAV, LLR, DFW, MPDFW, HARD } th_type = NONE;
	int llrblk = 8;


	const struct opt_s opts[] = {

		OPT_SELECT('H', enum th_type, &th_type, HARD, "hard thresholding"),
		OPT_SELECT('W', enum th_type, &th_type, WAV, "daubechies wavelet soft-thresholding"),
		OPT_SELECT('L', enum th_type, &th_type, LLR, "locally low rank soft-thresholding"),
		OPT_SELECT('D', enum th_type, &th_type, DFW, "divergence-free wavelet soft-thresholding"),
		OPT_UINT('j', &flags, "bitmask", "joint soft-thresholding"),
		OPT_INT('b', &llrblk, "blocksize", "locally low rank block size"),
	};

	cmdline(&argc, argv, 3, 3, usage_str, help_str, ARRAY_SIZE(opts), opts);

	num_init();

	const int N = DIMS;
	long dims[N];
	complex float* idata = load_cfl(argv[2], N, dims);
	complex float* odata = create_cfl(argv[3], N, dims);

	// NOTE(review): atof gives no error detection; cmdline has already
	// validated the argument count, but not that argv[1] is numeric.
	float lambda = atof(argv[1]);

	switch (th_type) {

		case WAV:
			wthresh(N, dims, lambda, flags, odata, idata);
			break;

		case LLR:
			lrthresh(N, dims, llrblk, lambda, flags, odata, idata);
			break;

		case DFW:
			dfthresh(N, dims, lambda, odata, idata);
			break;

		case HARD:
			hard_thresh(N, dims, lambda, odata, idata);
			break;

		default:
			md_zsoftthresh(N, dims, lambda, flags, odata, idata);
	}

	unmap_cfl(N, dims, idata);
	unmap_cfl(N, dims, odata);

	return 0;
}
3d25pt.c
/*
 * Order-2, 3D 25 point stencil
 * Adapted from PLUTO and Pochoir test bench
 *
 * Tareq Malas
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif

#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)

#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif

/* Subtract the `struct timeval' values X and Y,
 * storing the result in RESULT.
 *
 * Return 1 if the difference is negative, otherwise 0.
 * Note: modifies Y in place while performing the carry. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Perform the carry for the later subtraction by updating y. */
    if (x->tv_usec < y->tv_usec) {
        int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;

        y->tv_usec -= 1000000 * nsec;
        y->tv_sec += nsec;
    }

    if (x->tv_usec - y->tv_usec > 1000000) {
        int nsec = (x->tv_usec - y->tv_usec) / 1000000;

        y->tv_usec += 1000000 * nsec;
        y->tv_sec -= nsec;
    }

    /* Compute the time remaining to wait.
     * tv_usec is certainly positive. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;

    /* Return 1 if result is negative. */
    return x->tv_sec < y->tv_sec;
}

// Benchmark driver: allocates two time planes A[0]/A[1] plus coefficients
// roc2, runs the 25-point order-2 wave stencil Nt times per test, and
// reports the best wall-clock time over TESTS runs.
// Usage: ./3d25pt Nx Ny Nz Nt   (each spatial size is padded by 8 for the
// 4-deep halo on each side).
int main(int argc, char *argv[])
{
    int t, i, j, k, test;
    int Nx, Ny, Nz, Nt;
    // NOTE(review): Nx/Ny/Nz (and Nt) stay uninitialized when too few
    // command-line arguments are given; the allocations below then use
    // indeterminate sizes.
    if (argc > 3) {
        Nx = atoi(argv[1])+8;
        Ny = atoi(argv[2])+8;
        Nz = atoi(argv[3])+8;
    }
    if (argc > 4)
        Nt = atoi(argv[4]);

    double ****A = (double ****) malloc(sizeof(double***)*2);
    // NOTE(review): this first one-element allocation is immediately
    // overwritten by the Nz-sized allocation below and therefore leaks.
    double ***roc2 = (double ***) malloc(sizeof(double**));

    A[0] = (double ***) malloc(sizeof(double**)*Nz);
    A[1] = (double ***) malloc(sizeof(double**)*Nz);
    roc2 = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
        A[0][i] = (double**) malloc(sizeof(double*)*Ny);
        A[1][i] = (double**) malloc(sizeof(double*)*Ny);
        roc2[i] = (double**) malloc(sizeof(double*)*Ny);
        for(j=0;j<Ny;j++){
            A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
            A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
            roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
        }
    }

    // tile size information, including extra element to decide the list length
    int *tile_size = (int*) malloc(sizeof(int));
    tile_size[0] = -1;
    // The list is modified here before source-to-source transformations
    tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
    tile_size[0] = 4;
    tile_size[1] = 4;
    tile_size[2] = 4;
    tile_size[3] = 64;
    tile_size[4] = -1;

    // for timekeeping
    int ts_return = -1;
    struct timeval start, end, result;
    double tdiff = 0.0, min_tdiff=1.e100;

    const int BASE = 1024;

    // initialize variables
    // NOTE(review): these loops start at index 1, and A[1] is never
    // initialized at all; the stencil below reads A[...][0] (i-4 with
    // i==4) and A[(t+1)%2] on the right-hand side, so the first sweep
    // reads uninitialized memory.  Harmless for timing, not for values.
    // srand(42);
    for (i = 1; i < Nz; i++) {
        for (j = 1; j < Ny; j++) {
            for (k = 1; k < Nx; k++) {
                A[0][i][j][k] = 1.0 * (rand() % BASE);
                roc2[i][j][k] = 2.0 * (rand() % BASE);
            }
        }
    }

#ifdef LIKWID_PERFMON
    LIKWID_MARKER_INIT;
#pragma omp parallel
    {
        LIKWID_MARKER_THREADINIT;
#pragma omp barrier
        LIKWID_MARKER_START("calc");
    }
#endif

    // NOTE(review): omp_get_max_threads is used without including omp.h;
    // presumably PRINT_RESULTS consumes num_threads -- confirm.
    int num_threads = 1;
#if defined(_OPENMP)
    num_threads = omp_get_max_threads();
#endif

    // 4th-order-in-space finite-difference coefficients (center + 4 rings)
    const double coef0 = -0.28472;
    const double coef1 = 0.16000;
    const double coef2 = -0.02000;
    const double coef3 = 0.00254;
    const double coef4 = -0.00018;

    for(test=0; test<TESTS; test++){
        gettimeofday(&start, 0);
        // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
        for (t = 0; t < Nt; t++) {
            for (i = 4; i < Nz-4; i++) {
                for (j = 4; j < Ny-4; j++) {
                    for (k = 4; k < Nx-4; k++) {
                        // leapfrog wave-equation update: two time planes
                        // alternate via t%2 / (t+1)%2
                        A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k]
                            + roc2[i][j][k]*(
                                  coef0* A[t%2][i ][j ][k ]
                                + coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ]
                                       + A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ]
                                       + A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1])
                                + coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ]
                                       + A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ]
                                       + A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2])
                                + coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ]
                                       + A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ]
                                       + A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3])
                                + coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ]
                                       + A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ]
                                       + A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
                    }
                }
            }
        }
#pragma endscop
        gettimeofday(&end, 0);
        ts_return = timeval_subtract(&result, &end, &start);
        tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
        min_tdiff = MIN(min_tdiff, tdiff);
        printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
    }

    PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
#pragma omp parallel
    {
        LIKWID_MARKER_STOP("calc");
    }
    LIKWID_MARKER_CLOSE;
#endif

    // Free allocated arrays
    for(i=0; i<Nz; i++){
        for(j=0;j<Ny;j++){
            free(A[0][i][j]);
            free(A[1][i][j]);
            free(roc2[i][j]);
        }
        free(A[0][i]);
        free(A[1][i]);
        free(roc2[i]);
    }
    free(A[0]);
    free(A[1]);
    free(roc2);

    return 0;
}
chat.c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/epoll.h>
#include <fcntl.h>
#include <omp.h>
#include "myqueue.h"

#define IP "127.0.0.1"
#define PORT 3000
#define MAX_CLIENT 2
#define MAX_DATA 1024
#define MAX_EVENTS 10
#define BUF_SIZE 200
#define FILE_SIZE 10485760 // 10M

int launch_chat(int client_num);
int launch_server(void);
int get_server_status(void);
/* Reads one line from file_buf into buf; advances *cursor. */
int read_line(char* buf, char* file_buf, int* cursor);
void exit_server(int* client_fd, int serverSock);
void exit_client(int clientSock);
int setnonblocking(int fd);

// shared buffer holding the whole input file of this client
char file_buf[FILE_SIZE];

// Entry point: "s" launches the server, "c <client_num>" a client.
int main(int argc, char *argv[])
{
    int ret = -1;
    int num_client; // NOTE(review): unused in main
    int client_num;

    if ((argc != 2) && (argc != 3)) {
usage:
        fprintf(stderr, "usage: %s s|c client_num\n", argv[0]);
        goto leave;
    }
    if ((strlen(argv[1]) != 1))
        goto usage;

    switch (argv[1][0]) {
    case 's':
        // Launch Server
        ret = launch_server();
        break;
    case 'c':
        // Launch client
        client_num = strtol(argv[2], NULL, 10);
        ret = launch_chat(client_num);
        break;
    default:
        goto usage;
    }

leave:
    return ret;
}

// Client: connects to the server, then runs two OpenMP sections in
// parallel -- one sends the input file line by line once the server's
// '&' go-ahead arrives (terminated by '@'), the other select()s on the
// socket and appends everything received to the output file until the
// server's '%' end marker.
int launch_chat(int client_num)
{
    int clientSock;
    struct sockaddr_in serverAddr;
    fd_set rfds, wfds, efds;
    int ret = -1;
    char rdata[MAX_DATA];
    int i, send_isready = 0;
    // NOTE(review): tm is passed to select() below but never initialized;
    // the timeout is therefore indeterminate.
    struct timeval tm;
    char buf[BUF_SIZE];
    int connect_isover = 0;
    int read_fd, write_fd, cursor = 0;
    char open_path[BUF_SIZE];
    char write_path[BUF_SIZE];
    char line_buf[BUF_SIZE]; // NOTE(review): unused

    sprintf(open_path, "./test/input%d.txt", client_num);
    sprintf(write_path, "./test/output%d.txt", client_num);

    /* open file */
    if((read_fd = open(open_path, O_RDONLY)) == -1){
        perror("open error. put read_path");
        goto leave;
    }
    if((write_fd = open(write_path, O_WRONLY | O_CREAT, 0644)) == -1){
        perror("open error. put write_path");
        goto leave;
    }

    /* read file */
    if(read(read_fd, file_buf, FILE_SIZE) == -1){
        perror("read error");
        goto leave;
    }

    /* set socket */
    if ((ret = clientSock = socket(PF_INET, SOCK_STREAM, 0)) == -1) {
        perror("socket");
        goto leave;
    }

    serverAddr.sin_family = AF_INET;
    serverAddr.sin_addr.s_addr = inet_addr(IP);
    serverAddr.sin_port = htons(PORT);

    if ((ret = connect(clientSock, (struct sockaddr*)&serverAddr, sizeof(serverAddr)))) {
        perror("connect");
        goto leave1;
    }

    printf("[CLIENT] Connected to %s\n", inet_ntoa(*(struct in_addr *)&serverAddr.sin_addr));

    // start select version of chatting ...
    // switch stdin and the socket to non-blocking mode
    i = 1;
    ioctl(0, FIONBIO, (unsigned long *)&i);
    if ((ret = ioctl(clientSock, FIONBIO, (unsigned long *)&i))) {
        perror("ioctlsocket");
        goto leave1;
    }

    #pragma omp parallel sections
    {
        // sender section: waits for send_isready, then streams the file
        #pragma omp section
        {
            while(!connect_isover){
                // sleep(1);
                if(send_isready == 1){
                    if((ret = read_line(buf, file_buf, &cursor)) == -1){
                        // end of file: tell the server with '@'
                        printf("meet endline\n");
                        if ((ret = send(clientSock, "@", 1, MSG_DONTWAIT)) < 0){
                            perror("send error");
                            exit_client(clientSock);
                        }
                        printf("send @\n");
                        break;
                    }else{
                        printf("send line: ");
                        for(i=0; i<ret; i++){
                            printf("%c", buf[i]);
                        }
                        if ((ret = send(clientSock, buf, ret, MSG_DONTWAIT)) < 0){
                            perror("send error");
                            exit_client(clientSock);
                        }
                    }
                }
            }
        }
        // receiver section: keeps waiting for input
        #pragma omp section
        {
            while (!connect_isover) {
                // sleep(1);
                FD_ZERO(&rfds);
                FD_ZERO(&wfds);
                FD_ZERO(&efds);
                FD_SET(clientSock, &rfds);
                FD_SET(clientSock, &efds);
                FD_SET(0, &rfds); // 0 is stdin

                if ((ret = select(clientSock + 1, &rfds, &wfds, &efds, &tm)) < 0) {
                    perror("select");
                    exit_client(clientSock);
                }
                else if (!ret)
                    continue;

                if (FD_ISSET(clientSock, &efds)) {
                    perror("Connection closed");
                    exit_client(clientSock);
                }

                if (FD_ISSET(clientSock, &rfds)) {
                    if ((ret = recv(clientSock, rdata, MAX_DATA, 0)) < 0) {
                        perror("Connection closed by remote host");
                        exit_client(clientSock);
                    }

                    if(rdata[0] == '&'){
                        // '&' = server go-ahead: start sending the file
                        printf("received &. send file line by line.\n");
                        send_isready = 1;
                    }else{
                        if(rdata[0] == '%' || rdata[ret-1] == '%'){
                            ret--;
                            // NOTE(review): the lone '%' in this format
                            // string is undefined behavior -- should be
                            // "%%" (and "diconnect" is a typo).
                            printf("received %. diconnect\n");
                            close(clientSock);
                            connect_isover = 1;
                        }
                        // write each line received from the server
                        // straight to the output file; once '%' arrives,
                        // disconnect from the server and close the fd
                        if(ret>0){
                            printf("received: ");
                            printf("received length: %d\n", ret);
                            for(i=0; i< ret; i++){
                                printf("%c", rdata[i]);
                            }
                            write(write_fd, rdata, ret);
                        }
                    }
                    fflush(stdout);
                }
            }
        }
    }
    fflush(stdout);

leave1:
    close(clientSock);
leave:
    return -1;
}

// Server: accepts MAX_CLIENT connections via epoll, broadcasts every
// received line to all clients through a queue (one OpenMP section
// drains the queue, the other handles epoll events), signals clients
// with '&' (start) and '%' (stop), '@' marks a finished client.
int launch_server(void)
{
    struct epoll_event ev, events[MAX_EVENTS];
    int conn_sock, nfds, epollfd;
    int serverSock;
    struct sockaddr_in Addr;
    socklen_t AddrSize = sizeof(Addr);
    char data[MAX_DATA], *p;
    int ret, count, i;
    int num_client = 0;
    Queue queue;
    int flag; // NOTE(review): unused
    int n;
    char* buf; // line received from a client
    int client_fd[MAX_CLIENT]; // stores each client's file descriptor
    char* temp_buf;
    int num_fin = 0, num_closed = 0; // terminate once these reach MAX_CLIENT
    int send_isover = 0;

    InitQueue(&queue);

    if ((ret = serverSock = socket(PF_INET, SOCK_STREAM, 0)) < 0) {
        perror("socket");
        goto leave;
    }

    /* set socket options */
    // NOTE(review): 'i' is uninitialized here, so SO_REUSEADDR is set to
    // an indeterminate value -- should be an int explicitly set to 1.
    setsockopt(serverSock, SOL_SOCKET, SO_REUSEADDR, (void *)&i, sizeof(i));

    /* assign port number, IP address, etc. */
    Addr.sin_family = AF_INET;
    Addr.sin_addr.s_addr = INADDR_ANY;
    Addr.sin_port = htons(PORT);

    /* bind the address to the socket */
    if ((ret = bind(serverSock, (struct sockaddr *)&Addr,sizeof(Addr)))) {
        perror("bind");
        goto error;
    }

    /* listen for connections */
    if ((ret = listen(serverSock, 5))) {
        perror("listen");
        goto error;
    }

    /* prepare epoll resources with epoll_create */
    epollfd = epoll_create(10);
    if(epollfd == -1){
        perror("epoll_create");
        exit(EXIT_FAILURE);
    }

    /* non-blocking */
    setnonblocking(serverSock);
    ev.events = EPOLLIN;
    ev.data.fd = serverSock;
    /* register each channel to watch, one by one, with epoll_ctl */
    if(epoll_ctl(epollfd, EPOLL_CTL_ADD, serverSock, &ev) == -1){
        perror("epoll_ctl: serverSock");
    }

    #pragma omp parallel sections
    {
        // broadcaster section: drains the queue to all clients
        #pragma omp section
        {
            // run while some client has not yet sent '@', or lines remain queued
            while(num_fin < MAX_CLIENT || !IsEmpty(&queue)){
                // sleep(1);
                if(!IsEmpty(&queue)){
                    temp_buf = Dequeue(&queue);
                    // NOTE(review): strlen returns size_t; "%d" should be "%zu"
                    printf("send length: %d\n", strlen(temp_buf));
                    printf("send buf: ");
                    for(i=0; i<strlen(temp_buf); i++)
                        printf("%c", temp_buf[i]);
                    for(i=0; i<MAX_CLIENT; i++){
                        if((ret = send(client_fd[i], temp_buf, strlen(temp_buf), 0)) < 0){
                            perror("send error");
                            exit_server(client_fd, serverSock);
                        }
                    }
                    free(temp_buf);
                }
            }
            send_isover = 1;
        }
        // epoll section: accepts clients and receives their lines
        #pragma omp section
        {
            for(;;){
                // sleep(1);
                // with a timeout of -1 this would block indefinitely
                if((nfds = epoll_wait(epollfd, events, MAX_EVENTS, 10)) == -1){
                    perror("epoll_pwait");
                    exit_server(client_fd, serverSock);
                }

                for(n=0;n<nfds;++n){
                    if(events[n].data.fd == serverSock){
                        // the listening socket is ready to accept
                        conn_sock = accept(serverSock, (struct sockaddr *) &Addr, &AddrSize);
                        if(conn_sock == -1){
                            perror("accept error");
                            exit_server(client_fd, serverSock);
                        }
                        printf("connected\n");

                        /* set non-blocking */
                        setnonblocking(conn_sock);
                        ev.events = EPOLLIN | EPOLLET; // readable, edge trigger
                        // EPOLLIN: data is available to read
                        // EPOLLET: edge-triggered; the default is level-triggered
                        ev.data.fd = conn_sock;
                        if(epoll_ctl(epollfd, EPOLL_CTL_ADD, conn_sock, &ev) == -1){
                            perror("epoll_ctl: conn_sock");
                            exit(EXIT_FAILURE);
                        }
                        client_fd[num_client] = conn_sock;
                        num_client++;
                        // once everyone is connected, send the '&' go-ahead
                        if(num_client >= MAX_CLIENT){
                            for(i=0; i<MAX_CLIENT; i++)
                                if ((ret = send(client_fd[i], "&", 1, 0)) < 0) {
                                    perror("sends");
                                    exit_server(client_fd, serverSock);
                                }
                            printf("successfully send & to clients\n");
                        }
                        printf("num_client: %d\n", num_client);
                    }else{
                        /* receive buffer */
                        if (!(ret = count = recv(events[n].data.fd, data, MAX_DATA, 0))) {
                            // recv() == 0: orderly shutdown by this client
                            fprintf(stderr, "Connect Closed by Client\n");
                            num_closed++;
                            if(num_closed >= MAX_CLIENT){
                                printf("all connection closed");
                                for(i=0; i<MAX_CLIENT; i++)
                                    close(client_fd[i]);
                            }
                            break;
                        }
                        if (ret < 0) {
                            perror("recv");
                            exit_server(client_fd, serverSock);
                        }

                        /* print received buffer */
                        printf("received: ");
                        for(i=0; i< ret; i++){
                            printf("%c", data[i]);
                        }

                        /* if server get @ */
                        if(data[count-1] == '@' || data[0] == '@'){
                            count--;
                            num_fin++;
                            // all clients done: wait for the broadcaster
                            // to drain, then send the '%' stop marker
                            if(num_fin >= MAX_CLIENT){
                                while(!send_isover){} // busy-wait on the other section
                                for(i=0; i<MAX_CLIENT; i++){
                                    if((ret = send(client_fd[i], "%", 1, 0)) < 0){
                                        perror("send error");
                                        exit_server(client_fd, serverSock);
                                    }
                                    // NOTE(review): the trailing lone '%'
                                    // in this format string is undefined
                                    // behavior -- should be "%%".
                                    printf("send c%d: %\n", i);
                                }
                            }
                        }
                        /* if server get string */
                        if(count>0){
                            // allocate a fresh buf and copy data into it
                            buf = (char*)malloc(sizeof(char) * BUF_SIZE);
                            printf("received length: %d\n", count);
                            for(i=0; i<MAX_CLIENT; i++){
                                if(client_fd[i] == events[n].data.fd){
                                    snprintf(buf, count+5 ,"c%d: %s\n", i+1, data);
                                    break;
                                }
                            }
                            // push the pointer to the freshly allocated
                            // buf onto the queue (freed by the broadcaster)
                            Enqueue(&queue, buf);
                        }
                    }
                }
            }
        }
    }

error:
    for(i=0; i<MAX_CLIENT; i++)
        close(client_fd[i]);
    close(serverSock);
leave:
    return ret;
}

// Copy one line (including its terminator) from file_buf at *cursor into
// buf; returns the number of chars copied, or -1 at end of input
// (an empty line or NUL at the cursor).
// NOTE(review): the terminating '\n'/'\0' is included in count, and no
// bounds check is done against buf's size (BUF_SIZE at the callers).
int read_line(char* buf, char* file_buf, int* cursor)
{
    int ch, count = 0;
    if(file_buf[*cursor] == '\n' || file_buf[*cursor] == '\0')
        return -1;
    do{
        ch = file_buf[*cursor+count];
        buf[count] = ch;
        count++;
    }while(ch != '\n' && ch!='\0');
    *cursor += count;
    buf[count] = '\0';
    return count;
}

// Stub: not implemented.
int launch_clients(int num_client)
{
    return 0;
}

// Stub: not implemented.
int get_server_status(void)
{
    return 0;
}

// Close the client socket (does not terminate the process).
void exit_client(int clientSock){
    close(clientSock);
}

// Close all client sockets plus the listening socket and abort.
void exit_server(int* client_fd, int serverSock){
    int i;
    for(i=0; i<MAX_CLIENT; i++)
        close(client_fd[i]);
    close(serverSock);
    exit(EXIT_FAILURE);
}

// Put fd into non-blocking mode, via fcntl where O_NONBLOCK exists,
// falling back to ioctl.
// NOTE(review): the fallback uses FIOBIO, which looks like a typo for
// FIONBIO -- confirm on the target platform.
int setnonblocking(int fd)
{
    int flags;
#if defined(O_NONBLOCK)
    if (-1 == (flags = fcntl(fd, F_GETFL, 0)))
        flags = 0;
    return fcntl(fd, F_SETFL, flags | O_NONBLOCK);
#else
    flags = 1;
    return ioctl(fd, FIOBIO, &flags);
#endif
}
simple_particle_filter.h
// -*- mode: c++ -*-
/*********************************************************************
 * Software License Agreement (BSD License)
 *
 *  Copyright (c) 2015, JSK Lab
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *   * Neither the name of the JSK Lab nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 *  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************/

#ifndef JSK_PCL_ROS_ROS_COLLABORATIVE_PARTICLE_FILTER_H_
#define JSK_PCL_ROS_ROS_COLLABORATIVE_PARTICLE_FILTER_H_

#include <pcl/tracking/particle_filter.h>
#include <pcl/tracking/impl/particle_filter.hpp>

namespace pcl
{
  namespace tracking
  {
    // Particle filter class which is friendly collaborative with ROS programs.
    // Default behavior is same to the ParticleFilterTracker but you can customize
    // the behavior from outer of the class: the particle set, the sampling
    // (noise) function and the likelihood function are all injected from
    // outside via setParticles / setCustomSampleFunc / setLikelihoodFunc.
    template <typename PointInT, typename StateT>
    class ROSCollaborativeParticleFilterTracker: public ParticleFilterTracker<PointInT, StateT>
    {
    public:
      using Tracker<PointInT, StateT>::input_;
      using ParticleFilterTracker<PointInT, StateT>::particles_;
      using ParticleFilterTracker<PointInT, StateT>::changed_;
      typedef boost::shared_ptr<ROSCollaborativeParticleFilterTracker> Ptr;
      typedef typename Tracker<PointInT, StateT>::PointCloudIn PointCloudIn;
      typedef typename PointCloudIn::Ptr PointCloudInPtr;
      typedef typename PointCloudIn::ConstPtr PointCloudInConstPtr;
      typedef typename Tracker<PointInT, StateT>::PointCloudState PointCloudState;
      typedef typename PointCloudState::Ptr PointCloudStatePtr;
      typedef typename PointCloudState::ConstPtr PointCloudStateConstPtr;
      typedef PointCoherence<PointInT> Coherence;
      typedef boost::shared_ptr< Coherence > CoherencePtr;
      typedef boost::shared_ptr< const Coherence > CoherenceConstPtr;
      typedef PointCloudCoherence<PointInT> CloudCoherence;
      typedef boost::shared_ptr< CloudCoherence > CloudCoherencePtr;
      typedef boost::shared_ptr< const CloudCoherence > CloudCoherenceConstPtr;
      // adds sampling noise to a particle state
      typedef boost::function<StateT (const StateT&)> CustomSampleFunc;
      // writes a likelihood/weight into the particle state, given the input cloud
      typedef boost::function<void (PointCloudInConstPtr, StateT&)> CustomLikelihoodFunc;

      // Default-constructs the base tracker; disables motion blending
      // (motion_ratio_ = 0) and marks the tracker as changed.
      ROSCollaborativeParticleFilterTracker():
        ParticleFilterTracker<PointInT, StateT>()
      {
        motion_ratio_ = 0.0;
        changed_ = true;
      }

      // Replace the particle set (e.g. with particles received over ROS).
      void setParticles(PointCloudStatePtr particles)
      {
        particles_ = particles;
      }

      // Inject the per-particle sampling (noise) function used by resample().
      void setCustomSampleFunc(CustomSampleFunc f)
      {
        custom_sample_func_ = f;
      }

      // Inject the per-particle likelihood function used by weight().
      void setLikelihoodFunc(CustomLikelihoodFunc f)
      {
        custom_likelihood_func_ = f;
      }

    protected:
      // Skip the base-class precondition checks; this tracker is driven
      // entirely from outside.
      bool initCompute()
      {
        // Do nothing
        return true;
      }

      // Evaluate the injected likelihood for every particle (in parallel
      // when OpenMP is available), then normalize the weights.
      void weight()
      {
        if (!particles_) {
          std::cerr << "no particles" << std::endl;
        }
        if (!input_) {
          std::cerr << "no input pointcloud" << std::endl;
        }
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (size_t i = 0; i < particles_->points.size (); i++) {
          custom_likelihood_func_ (input_, particles_->points[i]);
        }
        normalizeWeight();
      }

      // Draw a new particle set from the current one using the alias
      // method, applying the injected noise function to every draw
      // (optionally adding the motion delta for a fraction of particles).
      void resample()
      {
        std::vector<int> a (particles_->points.size ());
        std::vector<double> q (particles_->points.size ());
        this->genAliasTable (a, q, particles_);
        // memoize the original list of particles
        PointCloudStatePtr origparticles = particles_;
        particles_.reset(new PointCloudState());
        particles_->points.reserve(origparticles->points.size() + 1); // particle_num_??
        // the first particle, it is a just copy of the maximum result
        // StateT p = representative_state_;
        // particles_->points.push_back (p);
        // with motion
        // NOTE(review): particles_ was just reset, so points.size() is 0
        // here and motion_num is always 0; this was presumably meant to
        // use origparticles->points.size() (or particle_num_).  Also
        // static_cast<int>(motion_ratio_) truncates any fractional ratio
        // to 0 -- the product should be computed in floating point.  And
        // the loop below starts at i = 1 although the "first particle"
        // copy above is commented out, dropping one draw.
        int motion_num = static_cast<int> (particles_->points.size ()) * static_cast<int> (motion_ratio_);
        for ( int i = 1; i < motion_num; i++ ) {
          int target_particle_index = sampleWithReplacement (a, q);
          StateT p = origparticles->points[target_particle_index];
          p = custom_sample_func_(p);
          p = p + motion_;
          particles_->points.push_back (p);
        }
        // no motion
        for ( int i = motion_num; i < particle_num_; i++ ) {
          int target_particle_index = sampleWithReplacement (a, q);
          StateT p = origparticles->points[target_particle_index];
          // add noise using gaussian
          p = custom_sample_func_(p);
          particles_->points.push_back (p);
        }
      }

      // Particles are supplied externally via setParticles(), so the
      // base-class initialization is a no-op.
      bool initParticles(bool)
      {
        // Do nothing
        return true;
      }

      // One tracking step: iterate resample -> weight -> update.
      void computeTracking()
      {
        // r->w->u ?
        // w->u->r ?
        for (int i = 0; i < iteration_num_; i++) {
          resample();
          weight();
          update();
        }
      }

      // Normalize particle weights to sum to 1; if all weights are zero,
      // fall back to a uniform distribution.
      void normalizeWeight()
      {
        double n = 0.0;
        for (size_t i = 0; i < particles_->points.size(); i++) {
          n = particles_->points[i].weight + n;
        }
        if (n != 0.0) {
          for (size_t i = 0; i < particles_->points.size(); i++) {
            particles_->points[i].weight = particles_->points[i].weight / n;
          }
        }
        else {
          for (size_t i = 0; i < particles_->points.size(); i++) {
            particles_->points[i].weight = 1.0 / particles_->points.size();
          }
        }
      }

      using ParticleFilterTracker<PointInT, StateT>::iteration_num_;
      using ParticleFilterTracker<PointInT, StateT>::update;
      using ParticleFilterTracker<PointInT, StateT>::representative_state_;
      using ParticleFilterTracker<PointInT, StateT>::motion_ratio_;
      using ParticleFilterTracker<PointInT, StateT>::particle_num_;
      using ParticleFilterTracker<PointInT, StateT>::motion_;
      using ParticleFilterTracker<PointInT, StateT>::sampleWithReplacement;
      CustomSampleFunc custom_sample_func_;
      CustomLikelihoodFunc custom_likelihood_func_;
    private:
    };
  }
}

#endif
mandelcpu.c
#include "mandelmain.h"

/* Map an escape iteration count to an RGBA8888 color (alpha in the low byte).
 * Escaped pixels fade from black toward white with the iteration count. */
static Uint32 iterationColor(int i, int iterations) {
    Uint32 colorbias = MIN(255, i * 510.0 / iterations);
    return (0x000000FF | (colorbias << 24) | (colorbias << 16) | colorbias << 8);
}

/* Scalar reference renderer: plain double-precision math, one OpenMP task per
 * scan line.  Writes one RGBA8888 value per pixel into rs.outputBuffer. */
void mandelbrotCPU(struct RenderSettings rs) {
    double x1 = rs.xoffset - 2.0 / rs.zoom * rs.width / rs.height;
    double x2 = rs.xoffset + 2.0 / rs.zoom * rs.width / rs.height;
    double y1 = rs.yoffset + 2.0 / rs.zoom;
    double pixel_pitch = (x2 - x1) / rs.width;

#pragma omp parallel for schedule(dynamic)
    for (int y = 0; y < rs.height; y++) {
        for (int x = 0; x < rs.width; x++) {
            // map screen coords to (0,0) -> (-2,2) through (WW,WH) -> (2, -2)
            double cReal = x1 + pixel_pitch * x;
            double cImag = y1 - pixel_pitch * y;
            double zReal = cReal;
            double zImag = cImag;
            /* BUGFIX: default was 0x00000000 (alpha 0) while the AVX renderer
             * uses 0x000000FF — non-escaping points are now opaque black in
             * all three renderers. */
            Uint32 color = 0x000000FF;
            // Mandelbrot calc for current (x,y) pixel
            for (int i = 0; i < rs.iterations; i++) {
                double z2Real = zReal * zReal;
                double z2Imag = zImag * zImag;
                double zrzi = zReal * zImag;
                zReal = cReal + z2Real - z2Imag;
                zImag = zrzi + zrzi + cImag;
                if (z2Real + z2Imag > 4.0) {
                    color = iterationColor(i, rs.iterations);
                    break;
                }
            }
            rs.outputBuffer[x + y * rs.width] = color;
        }
    }
}

/* AVX renderer: 4 pixels per step in 256-bit double vectors; colors match the
 * scalar renderer.  Trailing pixels of each line are finished in scalar code. */
void mandelbrotAVX(struct RenderSettings rs) {
    double x1 = rs.xoffset - 2.0 / rs.zoom * rs.width / rs.height;
    double x2 = rs.xoffset + 2.0 / rs.zoom * rs.width / rs.height;
    double y1 = rs.yoffset + 2.0 / rs.zoom;
    double pixel_pitch = (x2 - x1) / rs.width;

    __m256d vxpitch = _mm256_set1_pd(pixel_pitch);
    __m256d vx1 = _mm256_set1_pd(x1);
    __m256d vOne = _mm256_set1_pd(1);
    __m256d vFour = _mm256_set1_pd(4);

    /* widest multiple of 4 covered by the vector loop */
    int vecWidth = rs.width - (rs.width % 4);

#pragma omp parallel for schedule(dynamic)
    for (int y = 0; y < rs.height; y++) {
        double cImag = y1 - pixel_pitch * y;
        __m256d vcImag = _mm256_set1_pd(cImag);

        for (int x = 0; x < vecWidth; x += 4) {
            // map screen coords to (0,0) -> (-2,2) through (WW,WH) -> (2, -2)
            __m256d mx = _mm256_set_pd(x + 3, x + 2, x + 1, x);
            __m256d vcReal = _mm256_add_pd(_mm256_mul_pd(mx, vxpitch), vx1);
            __m256d vzReal = vcReal;
            __m256d vzImag = vcImag;
            __m256d vz2Real, vz2Imag, vzrzi;
            __m256d vIter = _mm256_set1_pd(0);

            // Mandelbrot calc for the current 4-pixel group
            for (int i = 0; i < rs.iterations; i++) {
                vz2Real = _mm256_mul_pd(vzReal, vzReal);
                vz2Imag = _mm256_mul_pd(vzImag, vzImag);
                vzrzi = _mm256_mul_pd(vzReal, vzImag);
                vzReal = _mm256_add_pd(_mm256_sub_pd(vz2Real, vz2Imag), vcReal);
                vzImag = _mm256_add_pd(_mm256_add_pd(vzrzi, vzrzi), vcImag);
                __m256d mag2 = _mm256_add_pd(vz2Real, vz2Imag);
                /* lanes still inside |z|^2 < 4 keep counting iterations */
                __m256d mask = _mm256_cmp_pd(mag2, vFour, _CMP_LT_OQ);
                vIter = _mm256_add_pd(_mm256_and_pd(mask, vOne), vIter);
                if (_mm256_testz_pd(mask, _mm256_set1_pd(-1))) {
                    break; /* all four lanes escaped */
                }
            }

            // convert 4x double vector (256) to 4x int32 (128) and copy to ram as uint32[4]
            Uint32 iters[4];
            _mm_store_si128((__m128i *)iters, _mm256_cvtpd_epi32(vIter));

            // calculate color for the 4 dumped pixels
            for (int ii = 0; ii < 4; ii++) {
                Uint32 color;
                if (iters[ii] == rs.iterations) {
                    color = 0x000000FF; /* never escaped: opaque black */
                } else {
                    color = iterationColor(iters[ii], rs.iterations);
                }
                rs.outputBuffer[x + y * rs.width + ii] = color;
            }
        }

        /* BUGFIX: the vector loop stops at the last multiple of 4, so when
         * rs.width % 4 != 0 the trailing pixels of every scan line were left
         * unwritten (uninitialized garbage on screen).  Finish them here. */
        for (int x = vecWidth; x < rs.width; x++) {
            double cReal = x1 + pixel_pitch * x;
            double zReal = cReal;
            double zImag = cImag;
            Uint32 color = 0x000000FF;
            for (int i = 0; i < rs.iterations; i++) {
                double z2Real = zReal * zReal;
                double z2Imag = zImag * zImag;
                double zrzi = zReal * zImag;
                zReal = cReal + z2Real - z2Imag;
                zImag = zrzi + zrzi + cImag;
                if (z2Real + z2Imag > 4.0) {
                    color = iterationColor(i, rs.iterations);
                    break;
                }
            }
            rs.outputBuffer[x + y * rs.width] = color;
        }
    }
}

/* Arbitrary-precision renderer (96-bit GMP floats) for zoom depths beyond
 * double precision.  Colors match the scalar renderer. */
void mandelbrotGMP(struct RenderSettings rs) {
    mpf_set_default_prec(96);

    mpf_t gx1, gx2, gy1, gpixel_pitch, gTmp;
    mpf_inits(gx1, gx2, gy1, gpixel_pitch, gTmp, NULL);

    /* gx1/gx2 = xoffset -/+ (2*aspect)/zoom; gy1 = yoffset + 2/zoom
     * (2.0, not 2.0f: keep the seed constants in double precision) */
    mpf_set_d(gx1, 2.0 * rs.width / rs.height);
    mpf_set_d(gTmp, rs.zoom);
    mpf_div(gx1, gx1, gTmp);
    mpf_set_d(gy1, 2.0);
    mpf_div(gy1, gy1, gTmp);
    mpf_set_d(gTmp, rs.xoffset);
    mpf_add(gx2, gTmp, gx1);
    mpf_sub(gx1, gTmp, gx1);
    mpf_set_d(gTmp, rs.yoffset);
    mpf_add(gy1, gTmp, gy1);
    mpf_set_d(gTmp, rs.width);
    mpf_sub(gpixel_pitch, gx2, gx1);
    mpf_div(gpixel_pitch, gpixel_pitch, gTmp);

#pragma omp parallel for schedule(dynamic)
    for (int y = 0; y < rs.height; y++) {
        /* per-thread GMP temporaries (declared inside the parallel loop) */
        mpf_t gcReal, gcImag, gzReal, gzImag, gz2Real, gz2Imag, gzrzi, gzTmp;
        mpf_inits(gcReal, gcImag, gzReal, gzImag, gz2Real, gz2Imag, gzrzi, gzTmp, NULL);

        for (int x = 0; x < rs.width; x++) {
            // map screen coords to (0,0) -> (-2,2) through (WW,WH) -> (2, -2)
            mpf_mul_ui(gcReal, gpixel_pitch, x);
            mpf_add(gcReal, gx1, gcReal);
            mpf_mul_ui(gcImag, gpixel_pitch, y);
            mpf_sub(gcImag, gy1, gcImag);
            mpf_set(gzReal, gcReal);
            mpf_set(gzImag, gcImag);
            /* BUGFIX: default was 0 (alpha 0); opaque black for consistency
             * with the other renderers. */
            Uint32 color = 0x000000FF;
            // Mandelbrot calc for current (x,y) pixel
            for (int i = 0; i < rs.iterations; i++) {
                mpf_mul(gz2Real, gzReal, gzReal);
                mpf_mul(gz2Imag, gzImag, gzImag);
                mpf_add(gzTmp, gz2Real, gz2Imag);
                if (mpf_cmp_ui(gzTmp, 4) > 0) {
                    color = iterationColor(i, rs.iterations);
                    break;
                }
                mpf_mul(gzrzi, gzReal, gzImag);
                mpf_add(gzReal, gcReal, gz2Real);
                mpf_sub(gzReal, gzReal, gz2Imag);
                mpf_add(gzImag, gzrzi, gzrzi);
                mpf_add(gzImag, gzImag, gcImag);
            }
            rs.outputBuffer[x + y * rs.width] = color;
        }
        mpf_clears(gcReal, gcImag, gzReal, gzImag, gz2Real, gz2Imag, gzrzi, gzTmp, NULL);
    }

    /* BUGFIX: the outer coordinate temporaries were never freed — this leaked
     * five mpf_t allocations on every rendered frame. */
    mpf_clears(gx1, gx2, gy1, gpixel_pitch, gTmp, NULL);
}